-/* $OpenBSD: uvm_fault.c,v 1.116 2021/02/23 10:41:59 mpi Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.117 2021/03/01 09:09:35 mpi Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
return 0;
}
+/*
+ * uvm_fault_lower_lookup: look up on-memory uobj pages.
+ *
+ * 1. get on-memory pages.
+ * 2. if that fails, give up (only the center page will be fetched later).
+ * 3. if it succeeds, enter h/w mappings of the neighboring pages.
+ */
+
+struct vm_page *
+uvm_fault_lower_lookup(
+ struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
+ struct vm_page **pages)
+{
+ struct uvm_object *uobj = ufi->entry->object.uvm_obj;
+ struct vm_page *uobjpage = NULL;
+ int lcv, gotpages;
+ vaddr_t currva;
+
+ counters_inc(uvmexp_counters, flt_lget);
+ gotpages = flt->npages;
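+	/*
+	 * gotpages is an in/out argument: it goes in as the size of
+	 * the pages[] window and comes back as the number of resident
+	 * pages the pager could grab without sleeping (PGO_LOCKED).
+	 */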
+ (void) uobj->pgops->pgo_get(uobj,
+ ufi->entry->offset + (flt->startva - ufi->entry->start),
+ pages, &gotpages, flt->centeridx,
+ flt->access_type & MASK(ufi->entry), ufi->entry->advice,
+ PGO_LOCKED);
+
+ /*
+ * check for pages to map, if we got any
+ */
+ if (gotpages == 0) {
+ return NULL;
+ }
+
+ currva = flt->startva;
+ for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
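+		/*
+		 * skip slots the pager could not fill: NULL means the
+		 * page is not resident, PGO_DONTCARE marks slots that
+		 * are already covered by an anon in the amap.
+		 */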
+ if (pages[lcv] == NULL ||
+ pages[lcv] == PGO_DONTCARE)
+ continue;
+
+ KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0);
+
+		/*
+		 * if the center page is resident and was not already
+		 * PG_BUSY, then pgo_get marked it PG_BUSY for us and
+		 * gave us a handle to it.  remember this page as
+		 * "uobjpage" for later use.
+		 */
+ if (lcv == flt->centeridx) {
+ uobjpage = pages[lcv];
+ continue;
+ }
+
+ /*
+ * note: calling pgo_get with locked data
+ * structures returns us pages which are
+ * neither busy nor released, so we don't
+ * need to check for this. we can just
+ * directly enter the page (after moving it
+ * to the head of the active queue [useful?]).
+ */
+
+ uvm_lock_pageq();
+ uvm_pageactivate(pages[lcv]); /* reactivate */
+ uvm_unlock_pageq();
+ counters_inc(uvmexp_counters, flt_nomap);
+
+ /*
+ * Since this page isn't the page that's
+ * actually faulting, ignore pmap_enter()
+ * failures; it's not critical that we
+ * enter these right now.
+ */
+ (void) pmap_enter(ufi->orig_map->pmap, currva,
+ VM_PAGE_TO_PHYS(pages[lcv]) | flt->pa_flags,
+ flt->enter_prot & MASK(ufi->entry),
+ PMAP_CANFAIL |
+ (flt->wired ? PMAP_WIRED : 0));
+
+ /*
+ * NOTE: page can't be PG_WANTED because
+ * we've held the lock the whole time
+ * we've had the handle.
+ */
+ atomic_clearbits_int(&pages[lcv]->pg_flags, PG_BUSY);
+ UVM_PAGE_OWN(pages[lcv], NULL);
+ }
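+
+	/* process any operations the pmap layer deferred during pmap_enter */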
+ pmap_update(ufi->orig_map->pmap);
+
+ return uobjpage;
+}
+
/*
* uvm_fault_lower: handle lower fault.
*
struct vm_amap *amap = ufi->entry->aref.ar_amap;
struct uvm_object *uobj = ufi->entry->object.uvm_obj;
boolean_t promote, locked;
- int result, lcv, gotpages;
+ int result;
struct vm_page *uobjpage, *pg = NULL;
struct vm_anon *anon = NULL;
- vaddr_t currva;
voff_t uoff;
/*
* we ask (with pgo_get) the object for resident pages that we care
* about and attempt to map them in. we do not let pgo_get block
* (PGO_LOCKED).
- *
- * ("get" has the option of doing a pmap_enter for us)
*/
- if (uobj != NULL) {
- counters_inc(uvmexp_counters, flt_lget);
- gotpages = flt->npages;
- (void) uobj->pgops->pgo_get(uobj, ufi->entry->offset +
- (flt->startva - ufi->entry->start),
- pages, &gotpages, flt->centeridx,
- flt->access_type & MASK(ufi->entry),
- ufi->entry->advice, PGO_LOCKED);
-
- /* check for pages to map, if we got any */
+ if (uobj == NULL) {
uobjpage = NULL;
- if (gotpages) {
- currva = flt->startva;
- for (lcv = 0 ; lcv < flt->npages ;
- lcv++, currva += PAGE_SIZE) {
- if (pages[lcv] == NULL ||
- pages[lcv] == PGO_DONTCARE)
- continue;
-
- KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0);
-
- /*
- * if center page is resident and not
- * PG_BUSY, then pgo_get made it PG_BUSY
- * for us and gave us a handle to it.
- * remember this page as "uobjpage."
- * (for later use).
- */
- if (lcv == flt->centeridx) {
- uobjpage = pages[lcv];
- continue;
- }
-
- /*
- * note: calling pgo_get with locked data
- * structures returns us pages which are
- * neither busy nor released, so we don't
- * need to check for this. we can just
- * directly enter the page (after moving it
- * to the head of the active queue [useful?]).
- */
-
- uvm_lock_pageq();
- uvm_pageactivate(pages[lcv]); /* reactivate */
- uvm_unlock_pageq();
- counters_inc(uvmexp_counters, flt_nomap);
-
- /*
- * Since this page isn't the page that's
- * actually faulting, ignore pmap_enter()
- * failures; it's not critical that we
- * enter these right now.
- */
- (void) pmap_enter(ufi->orig_map->pmap, currva,
- VM_PAGE_TO_PHYS(pages[lcv]) | flt->pa_flags,
- flt->enter_prot & MASK(ufi->entry),
- PMAP_CANFAIL |
- (flt->wired ? PMAP_WIRED : 0));
-
- /*
- * NOTE: page can't be PG_WANTED because
- * we've held the lock the whole time
- * we've had the handle.
- */
- atomic_clearbits_int(&pages[lcv]->pg_flags,
- PG_BUSY);
- UVM_PAGE_OWN(pages[lcv], NULL);
- } /* for "lcv" loop */
- pmap_update(ufi->orig_map->pmap);
- } /* "gotpages" != 0 */
- /* note: object still _locked_ */
} else {
- uobjpage = NULL;
+ uobjpage = uvm_fault_lower_lookup(ufi, flt, pages);
}
/*
/* update rusage counters */
curproc->p_ru.ru_minflt++;
} else {
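+		/* gotpages is reused by the blocking pgo_get of the center page below */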
+ int gotpages;
+
/* update rusage counters */
curproc->p_ru.ru_majflt++;