-/* $OpenBSD: uvm_pdaemon.c,v 1.100 2022/06/28 19:23:08 mpi Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.101 2022/06/28 19:31:30 mpi Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
* local prototypes
*/
-void uvmpd_scan(void);
-boolean_t uvmpd_scan_inactive(struct pglist *);
+void uvmpd_scan(struct uvm_pmalloc *);
+boolean_t uvmpd_scan_inactive(struct uvm_pmalloc *, struct pglist *);
void uvmpd_tune(void);
void uvmpd_drop(struct pglist *);
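/*
 * For reference: the uvm_pmalloc fields this diff relies on.  A trimmed
 * sketch of the definitions in uvm/uvm_extern.h and uvm/uvm_pmemrange.h,
 * not the authoritative declarations.
 */
struct uvm_constraint_range {
	paddr_t	ucr_low;	/* lower bound of the physical range */
	paddr_t	ucr_high;	/* upper bound of the physical range */
};

struct uvm_pmalloc {
	TAILQ_ENTRY(uvm_pmalloc)	pmq;		/* pending requests */
	struct uvm_constraint_range	pm_constraint;	/* range to satisfy */
	psize_t				pm_size;	/* pages requested */
	int				pm_flags;	/* UVM_PMA_* flags */
};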
if (pma != NULL ||
((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
- uvmpd_scan();
+ uvmpd_scan(pma);
}
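/*
 * Sketch of where pma comes from: uvm_pageout() picks the oldest pending
 * constrained request, if any, before deciding whether to scan.  Names
 * follow uvm_pmemrange.h (uvm_wait_pla() queues requests on
 * uvm.pmr_control.allocs); this is an illustration, not part of the diff.
 */
struct uvm_pmalloc *pma;

uvm_lock_fpageq();
pma = TAILQ_FIRST(&uvm.pmr_control.allocs);
if (pma != NULL)
	pma->pm_flags |= UVM_PMA_BUSY;	/* claim it so it is serviced once */
uvm_unlock_fpageq();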
/*
*/
boolean_t
-uvmpd_scan_inactive(struct pglist *pglst)
+uvmpd_scan_inactive(struct uvm_pmalloc *pma, struct pglist *pglst)
{
boolean_t retval = FALSE; /* assume we haven't hit target */
int free, result;
swnpages = swcpages = 0;
free = 0;
dirtyreacts = 0;
+ p = NULL;
- for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
+ /* Start with the first page on the list that fits within pma's range */
+ if (pma != NULL) {
+ paddr_t paddr;
+
+ TAILQ_FOREACH(p, pglst, pageq) {
+ paddr = atop(VM_PAGE_TO_PHYS(p));
+ if (paddr >= pma->pm_constraint.ucr_low &&
+ paddr < pma->pm_constraint.ucr_high)
+ break;
+ }
+ }
+
+ if (p == NULL) {
+ p = TAILQ_FIRST(pglst);
+ pma = NULL;
+ }
+
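/*
 * The range test above could be factored into a small predicate.  A
 * hypothetical helper, not part of this diff, using the same comparison
 * the loop performs:
 */
static inline int
uvmpd_page_in_range(struct vm_page *pg, struct uvm_constraint_range *ucr)
{
	paddr_t paddr = atop(VM_PAGE_TO_PHYS(pg));

	return (paddr >= ucr->ucr_low && paddr < ucr->ucr_high);
}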
+ for (; p != NULL || swslot != 0; p = nextpg) {
/*
* note that p can be NULL iff we have traversed the whole
* list and need to do one final swap-backed clustered pageout.
* our target
*/
free = uvmexp.free - BUFPAGES_DEFICIT;
-
- if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
+ if (((pma == NULL || (pma->pm_flags & UVM_PMA_FREED)) &&
+ (free + uvmexp.paging >= uvmexp.freetarg << 2)) ||
dirtyreacts == UVMPD_NUMDIRTYREACTS) {
retval = TRUE;
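/*
 * Note on the pma gate above: UVM_PMA_FREED is set by the free path
 * (uvm_wakeup_pla() in uvm_pmemrange.c) once pages inside the request's
 * range have been released.  Until then, a constrained scan must keep
 * going even if the global free target is already met, because the free
 * pages may all lie outside the requested range.
 */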
* this page is dirty, skip it if we'll have met our
* free target when all the current pageouts complete.
*/
- if (free + uvmexp.paging > uvmexp.freetarg << 2) {
+ if ((pma == NULL || (pma->pm_flags & UVM_PMA_FREED)) &&
+ (free + uvmexp.paging > uvmexp.freetarg << 2)) {
if (anon) {
rw_exit(anon->an_lock);
} else {
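/*
 * The same gate applies to the dirty-page shortcut: pageouts already in
 * flight (uvmexp.paging) only help a constrained request if they free
 * pages in its range, so dirty pages are not skipped until UVM_PMA_FREED
 * has been set on the request.
 */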
*/
void
-uvmpd_scan(void)
+uvmpd_scan(struct uvm_pmalloc *pma)
{
int free, inactive_shortage, swap_shortage, pages_freed;
struct vm_page *p, *nextpg;
* low bit of uvmexp.pdrevs (which we bump by one each call).
*/
pages_freed = uvmexp.pdfreed;
- (void) uvmpd_scan_inactive(&uvm.page_inactive);
+ (void) uvmpd_scan_inactive(pma, &uvm.page_inactive);
pages_freed = uvmexp.pdfreed - pages_freed;
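/*
 * Sketch of the waiter this scan unblocks: uvm_wait_pla() in
 * uvm_pmemrange.c queues a request and sleeps until the daemon frees
 * matching pages.  Loose illustration assuming caller-supplied low, high
 * and size; the real code's flag handling is more involved.
 */
struct uvm_pmalloc pma;

pma.pm_constraint.ucr_low = low;	/* range the caller needs */
pma.pm_constraint.ucr_high = high;
pma.pm_size = size;
pma.pm_flags = UVM_PMA_LINKED;
TAILQ_INSERT_TAIL(&uvm.pmr_control.allocs, &pma, pmq);
wakeup(&uvm.pagedaemon);		/* kick the page daemon */
while (pma.pm_flags & (UVM_PMA_LINKED | UVM_PMA_BUSY))
	msleep_nsec(&pma, &uvm.fpageqlock, PVM, "pmrwait", INFSLP);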
/*