No functional change.
ok kettenis@
-/* $OpenBSD: uvm_addr.c,v 1.29 2020/09/22 14:31:08 mpi Exp $ */
+/* $OpenBSD: uvm_addr.c,v 1.30 2021/03/20 10:24:21 mpi Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
#endif
};
-/* Definition of a pivot in pivot selector. */
+/*
+ * Definition of a pivot in pivot selector.
+ */
struct uaddr_pivot {
vaddr_t addr; /* End of prev. allocation. */
int expire;/* Best before date. */
extern const struct uvm_addr_functions uaddr_kernel_functions;
struct uvm_addr_state uaddr_kbootstrap;
-/* Support functions. */
+
+/*
+ * Support functions.
+ */
+
#ifndef SMALL_KERNEL
struct vm_map_entry *uvm_addr_entrybyspace(struct uaddr_free_rbtree*,
vsize_t);
if (fspace - before_gap - after_gap < sz)
return ENOMEM;
- /* Calculate lowest address. */
+ /*
+ * Calculate lowest address.
+ */
low_addr += before_gap;
low_addr = uvm_addr_align_forward(tmp = low_addr, align, offset);
if (low_addr < tmp) /* Overflow during alignment. */
if (high_addr - after_gap - sz < low_addr)
return ENOMEM;
- /* Calculate highest address. */
+ /*
+ * Calculate highest address.
+ */
high_addr -= after_gap + sz;
high_addr = uvm_addr_align_backward(tmp = high_addr, align, offset);
if (high_addr > tmp) /* Overflow during alignment. */
(before_gap & PAGE_MASK) == 0 && (after_gap & PAGE_MASK) == 0);
KASSERT(high + sz > high); /* Check for overflow. */
- /* Hint magic. */
+ /*
+ * Hint magic.
+ */
if (hint == 0)
hint = (direction == 1 ? low : high);
else if (hint > high) {
* If hint is set, search will start at the hint position.
* Only searches forward.
*/
+
const struct uvm_addr_functions uaddr_lin_functions = {
.uaddr_select = &uaddr_lin_select,
.uaddr_destroy = &uaddr_destroy,
{
vaddr_t guard_sz;
- /* Deal with guardpages: search for space with one extra page. */
+ /*
+ * Deal with guardpages: search for space with one extra page.
+ */
guard_sz = ((map->flags & VM_MAP_GUARDPAGES) == 0 ? 0 : PAGE_SIZE);
if (uaddr->uaddr_maxaddr - uaddr->uaddr_minaddr - guard_sz < sz)
/*
* Kernel allocation bootstrap logic.
*/
+
const struct uvm_addr_functions uaddr_kernel_functions = {
.uaddr_select = &uaddr_kbootstrap_select,
.uaddr_destroy = &uaddr_kbootstrap_destroy,
if (entry == NULL)
return ENOMEM;
- /* Walk the tree until we find an entry that fits. */
+ /*
+ * Walk the tree until we find an entry that fits.
+ */
while (uvm_addr_fitspace(&min, &max,
VMMAP_FREE_START(entry), VMMAP_FREE_END(entry),
sz, align, offset, 0, guardsz) != 0) {
return ENOMEM;
}
- /* Return the address that generates the least fragmentation. */
+ /*
+ * Return the address that generates the least fragmentation.
+ */
*entry_out = entry;
*addr_out = (min - VMMAP_FREE_START(entry) <=
VMMAP_FREE_END(entry) - guardsz - sz - max ?
if (pivot->addr == 0 || pivot->entry == NULL || pivot->expire == 0)
goto expired; /* Pivot is invalid (null or expired). */
- /* Attempt to use the pivot to map the entry. */
+ /*
+ * Attempt to use the pivot to map the entry.
+ */
entry = pivot->entry;
if (pivot->dir > 0) {
if (uvm_addr_fitspace(&min, &max,
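
The "Calculate lowest address" / "Calculate highest address" hunks above align a candidate address so that (addr & (align - 1)) == offset and treat a wrap-around as ENOMEM. A minimal userland sketch of that step, assuming align is a non-zero power of two and offset is smaller than align; the helper names are illustrative, not the kernel's:

/*
 * Forward/backward alignment with wrap-around detection, assuming
 * "align" is a non-zero power of two and "offset" < "align".
 */
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

static vaddr_t
align_forward(vaddr_t addr, vaddr_t align, vaddr_t offset)
{
	vaddr_t res = (addr & ~(align - 1)) | offset;

	if (res < addr)
		res += align;	/* may wrap past the top of the address space */
	return res;
}

static vaddr_t
align_backward(vaddr_t addr, vaddr_t align, vaddr_t offset)
{
	vaddr_t res = (addr & ~(align - 1)) | offset;

	if (res > addr)
		res -= align;	/* may wrap below zero */
	return res;
}

int
main(void)
{
	vaddr_t tmp, low = 0x12345, high = 0x7ffff;

	low = align_forward(tmp = low, 0x1000, 0x10);
	if (low < tmp)		/* overflow during alignment */
		return 1;	/* the code above returns ENOMEM here */
	high = align_backward(tmp = high, 0x1000, 0x10);
	if (high > tmp)		/* overflow during alignment */
		return 1;
	printf("low %#lx high %#lx\n", (unsigned long)low, (unsigned long)high);
	return 0;
}
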
-/* $OpenBSD: uvm_amap.c,v 1.87 2021/01/19 13:21:36 mpi Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.88 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
* when enabled, an array of ints is allocated for the pprefs. this
* array is allocated only when a partial reference is added to the
* map (either by unmapping part of the amap, or gaining a reference
- * to only a part of an amap). if the malloc of the array fails
+ * to only a part of an amap). if the allocation of the array fails
* (M_NOWAIT), then we set the array pointer to PPREF_NONE to indicate
* that we tried to do ppref's but couldn't alloc the array so just
* give up (after all, this is an optional feature!).
* chunk. note that the "plus one" part is needed because a reference
* count of zero is neither positive or negative (need a way to tell
* if we've got one zero or a bunch of them).
- *
+ *
* here are some in-line functions to help us.
*/
/*
* pp_getreflen: get the reference and length for a specific offset
+ *
+ * => ppref's amap must be locked
*/
static inline void
pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
/*
* pp_setreflen: set the reference and length for a specific offset
+ *
+ * => ppref's amap must be locked
*/
static inline void
pp_setreflen(int *ppref, int offset, int ref, int len)
ppref[offset+1] = len;
}
}
-#endif
+#endif /* UVM_AMAP_PPREF */
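
The ppref comments above describe the packing: a run of slots sharing the same reference count stores "count plus one" in its first cell, negated when the run is longer than one slot, with the run length in the following cell. A stand-alone sketch of that encoding, mirroring the pp_getreflen()/pp_setreflen() shape shown above (illustrative only, not the kernel code):

/* Illustrative encoding/decoding of a ppref run, as described above. */
#include <assert.h>
#include <stdio.h>

static void
run_set(int *ppref, int offset, int ref, int len)
{
	if (len == 1) {
		ppref[offset] = ref + 1;		/* positive: run of one */
	} else {
		ppref[offset] = -(ref + 1);		/* negative: longer run */
		ppref[offset + 1] = len;		/* length in the next cell */
	}
}

static void
run_get(int *ppref, int offset, int *refp, int *lenp)
{
	if (ppref[offset] > 0) {
		*refp = ppref[offset] - 1;
		*lenp = 1;
	} else {
		*refp = -ppref[offset] - 1;
		*lenp = ppref[offset + 1];
	}
}

int
main(void)
{
	int ppref[4] = { 0 }, ref, len;

	run_set(ppref, 0, 2, 3);	/* slots 0..2 have two references */
	run_set(ppref, 3, 0, 1);	/* slot 3 has zero references */
	run_get(ppref, 0, &ref, &len);
	assert(ref == 2 && len == 3);
	run_get(ppref, 3, &ref, &len);
	assert(ref == 0 && len == 1);	/* zero is representable thanks to "+1" */
	printf("ok\n");
	return 0;
}
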
/*
* amap_init: called at boot time to init global amap data structures
}
/*
- * amap_alloc1: internal function that allocates an amap, but does not
- * init the overlay.
+ * amap_alloc1: allocate an amap, but do not initialise the overlay.
+ *
+ * => Note: lock is not set.
*/
static inline struct vm_amap *
amap_alloc1(int slots, int waitf, int lazyalloc)
*
* => caller should ensure sz is a multiple of PAGE_SIZE
* => reference count to new amap is set to one
+ * => new amap is returned unlocked
*/
struct vm_amap *
amap_alloc(vaddr_t sz, int waitf, int lazyalloc)
/*
* amap_free: free an amap
*
+ * => the amap must be unlocked
* => the amap should have a zero reference count and be empty
*/
void
/*
* amap_wipeout: wipeout all anon's in an amap; then free the amap!
*
- * => called from amap_unref when the final reference to an amap is
- * discarded (i.e. when reference count == 1)
+ * => Called from amap_unref(), when reference count drops to zero.
* => amap must be locked.
*/
-
void
amap_wipeout(struct vm_amap *amap)
{
KASSERT(amap->am_ref == 0);
if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
- /* amap_swap_off will call us again. */
+ /*
+ * Note: amap_swap_off() will call us again.
+ */
amap_unlock(amap);
return;
}
panic("amap_wipeout: corrupt amap");
KASSERT(anon->an_lock == amap->am_lock);
+ /*
+ * Drop the reference.
+ */
refs = --anon->an_ref;
if (refs == 0) {
- /*
- * we had the last reference to a vm_anon.
- * free it.
- */
uvm_anfree_list(anon, &pgl);
}
}
/* free the pages */
uvm_pglistfree(&pgl);
- /* now we free the map */
+ /*
+ * Finally, destroy the amap.
+ */
amap->am_ref = 0; /* ... was one */
amap->am_nused = 0;
amap_unlock(amap);
/*
* amap_copy: ensure that a map entry's "needs_copy" flag is false
* by copying the amap if necessary.
- *
+ *
* => an entry with a null amap pointer will get a new (blank) one.
- * => the map that the map entry blocks to must be locked by caller.
- * => the amap (if any) currently attached to the entry must be unlocked.
+ * => the map that the map entry belongs to must be locked by caller.
+ * => the amap currently attached to "entry" (if any) must be unlocked.
* => if canchunk is true, then we may clip the entry into a chunk
* => "startva" and "endva" are used only if canchunk is true. they are
* used to limit chunking (e.g. if you have a large space that you
KASSERT(map != kernel_map); /* we use sleeping locks */
- /* is there a map to copy? if not, create one from scratch. */
+ /*
+ * Is there an amap to copy? If not, create one.
+ */
if (entry->aref.ar_amap == NULL) {
/*
- * check to see if we have a large amap that we can
- * chunk. we align startva/endva to chunk-sized
+ * Check to see if we have a large amap that we can
+ * chunk. We align startva/endva to chunk-sized
* boundaries and then clip to them.
*
- * if we cannot chunk the amap, allocate it in a way
+ * If we cannot chunk the amap, allocate it in a way
* that makes it grow or shrink dynamically with
* the number of slots.
*/
}
/*
- * first check and see if we are the only map entry
- * referencing the amap we currently have. if so, then we can
- * just take it over rather than copying it. the value can only
- * be one if we have the only reference to the amap
+ * First check and see if we are the only map entry referencing
+ * the amap we currently have. If so, then just take it over instead
+ * of copying it. Note that we are reading am_ref without lock held
+ * as the value can only be one if we have the only reference
+ * to the amap (via our locked map). If the value is greater than
+ * one, then allocate amap and re-check the value.
*/
if (entry->aref.ar_amap->am_ref == 1) {
entry->etype &= ~UVM_ET_NEEDSCOPY;
return;
}
- /* looks like we need to copy the map. */
+ /*
+ * Allocate a new amap (note: not initialised, etc).
+ */
AMAP_B2SLOT(slots, entry->end - entry->start);
if (!UVM_AMAP_SMALL(entry->aref.ar_amap) &&
entry->aref.ar_amap->am_hashshift != 0)
amap_lock(srcamap);
/*
- * need to double check reference count now. the reference count
- * could have changed while we were in malloc. if the reference count
- * dropped down to one we take over the old map rather than
- * copying the amap.
+ * Re-check the reference count with the lock held. If it has
+ * dropped to one - we can take over the existing map.
*/
- if (srcamap->am_ref == 1) { /* take it over? */
+ if (srcamap->am_ref == 1) {
+ /* Just take over the existing amap. */
entry->etype &= ~UVM_ET_NEEDSCOPY;
amap_unlock(srcamap);
- amap->am_ref--; /* drop final reference to map */
- amap_free(amap); /* dispose of new (unused) amap */
+ /* Destroy the new (unused) amap. */
+ amap->am_ref--;
+ amap_free(amap);
return;
}
- /* we must copy it now. */
+ /*
+ * Copy the slots.
+ */
for (lcv = 0; lcv < slots; lcv += n) {
srcslot = entry->aref.ar_pageoff + lcv;
i = UVM_AMAP_SLOTIDX(lcv);
}
/*
- * drop our reference to the old amap (srcamap).
- * we know that the reference count on srcamap is greater than
- * one (we checked above), so there is no way we could drop
- * the count to zero. [and no need to worry about freeing it]
+ * Drop our reference to the old amap (srcamap) and unlock.
+ * Since the reference count on srcamap is greater than one (we
+ * checked above), it cannot drop to zero while it is locked.
*/
srcamap->am_ref--;
KASSERT(srcamap->am_ref > 0);
if (amap->am_lock == NULL)
amap_lock_alloc(amap);
- /* install new amap. */
+ /*
+ * Install new amap.
+ */
entry->aref.ar_pageoff = 0;
entry->aref.ar_amap = amap;
entry->etype &= ~UVM_ET_NEEDSCOPY;
struct vm_amap_chunk *chunk;
/*
- * note that if we wait, we must ReStart the "lcv" for loop because
- * some other process could reorder the anon's in the
- * am_anon[] array on us.
+ * note that if we unlock the amap then we must ReStart the "lcv" for
+ * loop because some other process could reorder the anon's in the
+ * am_anon[] array on us while the lock is dropped.
*/
ReStart:
amap_lock(amap);
pg = anon->an_page;
KASSERT(anon->an_lock == amap->am_lock);
- /* page must be resident since parent is wired */
+ /*
+ * The old page must be resident since the parent is
+ * wired.
+ */
KASSERT(pg != NULL);
/*
continue;
/*
- * if the page is busy then we have to wait for
+ * If the page is busy, then we have to unlock, wait for
* it and then restart.
*/
if (pg->pg_flags & PG_BUSY) {
goto ReStart;
}
- /* ok, time to do a copy-on-write to a new anon */
+ /*
+ * Perform a copy-on-write.
+ * First - get a new anon and a page.
+ */
nanon = uvm_analloc();
if (nanon != NULL) {
/* the new anon will share the amap's lock */
}
/*
- * got it... now we can copy the data and replace anon
- * with our new one...
+ * Copy the data and replace anon with the new one.
+ * Also, setup its lock (shared with the amap's lock).
*/
- uvm_pagecopy(pg, npg); /* old -> new */
- anon->an_ref--; /* can't drop to zero */
+ uvm_pagecopy(pg, npg);
+ anon->an_ref--;
KASSERT(anon->an_ref > 0);
- chunk->ac_anon[slot] = nanon; /* replace */
+ chunk->ac_anon[slot] = nanon;
/*
- * drop PG_BUSY on new page ... since we have had its
- * owner locked the whole time it can't be
- * PG_RELEASED | PG_WANTED.
+ * Drop PG_BUSY on new page. Since its owner was write
+ * locked all this time - it cannot be PG_RELEASED or
+ * PG_WANTED.
*/
atomic_clearbits_int(&npg->pg_flags, PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(npg, NULL);
* amap_splitref: split a single reference into two separate references
*
* => called from uvm_map's clip routines
+ * => origref's map should be locked
+ * => origref->ar_amap should be unlocked (we will lock)
*/
void
amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
amap_lock(amap);
- /* now: we have a valid am_mapped array. */
if (amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
panic("amap_splitref: map size check failed");
#ifdef UVM_AMAP_PPREF
- /* Establish ppref before we add a duplicate reference to the amap. */
+ /* Establish ppref before we add a duplicate reference to the amap. */
if (amap->am_ppref == NULL)
amap_pp_establish(amap);
#endif
#ifdef UVM_AMAP_PPREF
/*
- * amap_pp_establish: add a ppref array to an amap, if possible
+ * amap_pp_establish: add a ppref array to an amap, if possible.
+ *
+ * => amap should be locked by caller.
*/
void
amap_pp_establish(struct vm_amap *amap)
amap->am_ppref = mallocarray(amap->am_nslot, sizeof(int),
M_UVMAMAP, M_NOWAIT|M_ZERO);
- /* if we fail then we just won't use ppref for this amap */
if (amap->am_ppref == NULL) {
- amap->am_ppref = PPREF_NONE; /* not using it */
+ /* Failure - just do not use ppref. */
+ amap->am_ppref = PPREF_NONE;
return;
}
- /* init ppref */
pp_setreflen(amap->am_ppref, 0, amap->am_ref, amap->am_nslot);
}
* amap_pp_adjref: adjust reference count to a part of an amap using the
* per-page reference count array.
*
- * => caller must check that ppref != PPREF_NONE before calling
+ * => caller must check that ppref != PPREF_NONE before calling.
+ * => map and amap must be locked.
*/
void
amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
prevlcv = 0;
/*
- * first advance to the correct place in the ppref array,
- * fragment if needed.
+ * Advance to the correct place in the array, fragment if needed.
*/
for (lcv = 0 ; lcv < curslot ; lcv += len) {
pp_getreflen(ppref, lcv, &ref, &len);
if (lcv != 0)
pp_getreflen(ppref, prevlcv, &prevref, &prevlen);
else {
- /* Ensure that the "prevref == ref" test below always
- * fails, since we're starting from the beginning of
- * the ppref array; that is, there is no previous
- * chunk.
+ /*
+ * Ensure that the "prevref == ref" test below always
+ * fails, since we are starting from the beginning of
+ * the ppref array; that is, there is no previous chunk.
*/
prevref = -1;
prevlen = 0;
}
/*
- * now adjust reference counts in range. merge the first
+ * Now adjust reference counts in range. Merge the first
* changed entry with the last unchanged entry if possible.
*/
if (lcv != curslot)
if (refs == 0) {
uvm_anfree(anon);
}
- }
+
+ /*
+ * done with this anon, next ...!
+ */
+
+ } /* end of 'for' loop */
}
/*
- * amap_wiperange: wipe out a range of an amap
- * [different from amap_wipeout because the amap is kept intact]
+ * amap_wiperange: wipe out a range of an amap.
+ * Note: different from amap_wipeout because the amap is kept intact.
+ *
+ * => Both map and amap must be locked by caller.
*/
void
amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
endbucket = UVM_AMAP_BUCKET(amap, slotoff + slots - 1);
/*
- * we can either traverse the amap by am_chunks or by am_buckets
- * depending on which is cheaper. decide now.
+ * We can either traverse the amap by am_chunks or by am_buckets.
+ * Determine which way is less expensive.
*/
if (UVM_AMAP_SMALL(amap))
amap_wiperange_chunk(amap, &amap->am_small, slotoff, slots);
}
/*
- * amap_lookup: look up a page in an amap
+ * amap_lookup: look up a page in an amap.
+ *
+ * => amap should be locked by caller.
*/
struct vm_anon *
amap_lookup(struct vm_aref *aref, vaddr_t offset)
}
/*
- * amap_lookups: look up a range of pages in an amap
+ * amap_lookups: look up a range of pages in an amap.
*
+ * => amap should be locked by caller.
* => XXXCDC: this interface is biased toward array-based amaps. fix.
*/
void
}
/*
- * amap_add: add (or replace) a page to an amap
+ * amap_add: add (or replace) a page to an amap.
*
- * => returns 0 if adding the page was successful or 1 when not.
+ * => amap should be locked by caller.
+ * => anon must have the lock associated with this amap.
*/
int
amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
}
/*
- * amap_unadd: remove a page from an amap
+ * amap_unadd: remove a page from an amap.
+ *
+ * => amap should be locked by caller.
*/
void
amap_unadd(struct vm_aref *aref, vaddr_t offset)
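
The amap_copy() comments above describe an optimistic scheme: peek at the reference count without the lock, take the amap over if it is one, otherwise allocate a fresh amap with the lock dropped, re-check under the lock, and discard the fresh amap if the count dropped to one in the meantime. A stand-alone sketch of that control flow, using pthreads and an invented "struct thing"; only the pattern mirrors the code above:

/*
 * Check / allocate / re-check-under-lock, as in amap_copy() above.
 * "struct thing" and the helpers are invented for the example.
 */
#include <pthread.h>
#include <stdlib.h>

struct thing {
	pthread_mutex_t	lock;
	int		refs;
};

static struct thing *
thing_alloc(void)			/* NULL check omitted in this sketch */
{
	struct thing *t = calloc(1, sizeof(*t));

	pthread_mutex_init(&t->lock, NULL);
	t->refs = 1;
	return t;
}

static void
thing_free(struct thing *t)
{
	pthread_mutex_destroy(&t->lock);
	free(t);
}

static struct thing *
thing_private_copy(struct thing *src)
{
	struct thing *new;

	/* Unlocked peek: if we are the only user, just take it over. */
	if (src->refs == 1)
		return src;

	new = thing_alloc();		/* done without holding the lock */

	pthread_mutex_lock(&src->lock);
	if (src->refs == 1) {
		/* The count dropped while the lock was not held: discard the copy. */
		pthread_mutex_unlock(&src->lock);
		thing_free(new);
		return src;
	}
	/* ... copy the contents from src to new here ... */
	src->refs--;			/* cannot reach zero: it was > 1 */
	pthread_mutex_unlock(&src->lock);
	return new;
}

int
main(void)
{
	struct thing *a = thing_alloc(), *b;

	a->refs = 2;			/* pretend another map entry references it */
	b = thing_private_copy(a);	/* forces the copy path */
	thing_free(b);
	thing_free(a);
	return 0;
}
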
-/* $OpenBSD: uvm_anon.c,v 1.52 2021/03/04 09:00:03 mpi Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.53 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
struct pool uvm_anon_pool;
-/*
- * allocate anons
- */
void
uvm_anon_init(void)
{
}
/*
- * allocate an anon
+ * uvm_analloc: allocate a new anon.
+ *
+ * => anon will have no lock associated.
*/
struct vm_anon *
uvm_analloc(void)
KASSERT(anon->an_lock != NULL);
/*
- * if page is busy then we just mark it as released (who ever
- * has it busy must check for this when they wake up). if the
- * page is not busy then we can free it now.
+ * If the page is busy, mark it as PG_RELEASED, so
+ * that uvm_anon_release(9) will release it later.
*/
if ((pg->pg_flags & PG_BUSY) != 0) {
- /* tell them to dump it when done */
atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
rw_obj_hold(anon->an_lock);
return;
}
anon->an_lock = NULL;
- /* free any swap resources. */
- uvm_anon_dropswap(anon);
-
/*
- * now that we've stripped the data areas from the anon, free the anon
- * itself!
+ * Free any swap resources, leave a page replacement hint.
*/
+ uvm_anon_dropswap(anon);
+
KASSERT(anon->an_page == NULL);
KASSERT(anon->an_swslot == 0);
}
/*
- * fetch an anon's page.
+ * uvm_anon_pagein: fetch an anon's page.
*
- * => returns TRUE if pagein was aborted due to lack of memory.
+ * => anon must be locked, and is unlocked upon return.
+ * => returns true if pagein was aborted due to lack of memory.
*/
boolean_t
KASSERT(rw_write_held(anon->an_lock));
KASSERT(anon->an_lock == amap->am_lock);
+ /*
+ * Get the page of the anon.
+ */
rv = uvmfault_anonget(NULL, amap, anon);
switch (rv) {
case VM_PAGER_OK:
KASSERT(rw_write_held(anon->an_lock));
break;
+
case VM_PAGER_ERROR:
case VM_PAGER_REFAULT:
+
/*
- * nothing more to do on errors.
- * VM_PAGER_REFAULT can only mean that the anon was freed,
- * so again there's nothing to do.
+ * Nothing more to do on errors.
+ * VM_PAGER_REFAULT means that the anon was freed.
*/
+
return FALSE;
+
default:
#ifdef DIAGNOSTIC
panic("anon_pagein: uvmfault_anonget -> %d", rv);
}
/*
- * ok, we've got the page now.
- * mark it as dirty, clear its swslot and un-busy it.
+ * Mark the page as dirty and clear its swslot.
*/
pg = anon->an_page;
if (anon->an_swslot > 0) {
anon->an_swslot = 0;
atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
- /* deactivate the page (to put it on a page queue) */
+ /*
+ * Deactivate the page (to put it on a page queue).
+ */
pmap_clear_reference(pg);
pmap_page_protect(pg, PROT_NONE);
uvm_lock_pageq();
}
/*
- * uvm_anon_dropswap: release any swap resources from this anon.
+ * uvm_anon_dropswap: release any swap resources from this anon.
*
* => anon must be locked or have a reference count of 0.
*/
-/* $OpenBSD: uvm_aobj.c,v 1.91 2021/03/04 09:00:03 mpi Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.92 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
#include <uvm/uvm.h>
/*
- * an aobj manages anonymous-memory backed uvm_objects. in addition
- * to keeping the list of resident pages, it also keeps a list of
- * allocated swap blocks. depending on the size of the aobj this list
- * of allocated swap blocks is either stored in an array (small objects)
- * or in a hash table (large objects).
+ * An anonymous UVM object (aobj) manages anonymous memory. In addition to
+ * keeping the list of resident pages, it may also keep a list of allocated
+ * swap blocks. Depending on the size of the object, this list is either
+ * stored in an array (small objects) or in a hash table (large objects).
*/
/*
- * local structures
- */
-
-/*
- * for hash tables, we break the address space of the aobj into blocks
- * of UAO_SWHASH_CLUSTER_SIZE pages. we require the cluster size to
- * be a power of two.
+ * Note: for hash tables, we break the address space of the aobj into blocks
+ * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
*/
#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
-/* get the "tag" for this page index */
+/* Get the "tag" for this page index. */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
-/* given an ELT and a page index, find the swap slot */
+/* Given an ELT and a page index, find the swap slot. */
#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])
-/* given an ELT, return its pageidx base */
+/* Given an ELT, return its pageidx base. */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
-/*
- * the swhash hash function
- */
+/* The hash function. */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
& (AOBJ)->u_swhashmask)])
/*
- * the swhash threshold determines if we will use an array or a
+ * The threshold which determines whether we will use an array or a
* hash table to store the list of allocated swap blocks.
*/
-
#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
-/*
- * the number of buckets in a swhash, with an upper bound
- */
+/* The number of buckets in a hash, with an upper bound. */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(pages) \
(min((pages) >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
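
The clustering macros above split a page index into a cluster tag (the high bits), a slot inside elt->slots[] (the low UAO_SWHASH_CLUSTER_SHIFT bits) and a bucket (the tag masked with the power-of-two bucket count). A tiny stand-alone illustration of that arithmetic; the 64-bucket mask is an invented example, the kernel takes it from the hash table it allocated:

/* Page-index -> (tag, slot, bucket) arithmetic mirroring the macros above. */
#include <stdio.h>

#define CLUSTER_SHIFT	4
#define CLUSTER_SIZE	(1 << CLUSTER_SHIFT)

int
main(void)
{
	unsigned long pageidx = 0x123;
	unsigned long hashmask = 64 - 1;	/* example bucket count, power of two */

	unsigned long tag    = pageidx >> CLUSTER_SHIFT;	/* which cluster (elt->tag) */
	unsigned long slot   = pageidx & (CLUSTER_SIZE - 1);	/* index into elt->slots[] */
	unsigned long bucket = (pageidx >> CLUSTER_SHIFT) & hashmask;

	printf("pageidx %#lx: tag %#lx, slot %lu, bucket %lu\n",
	    pageidx, tag, slot, bucket);
	return 0;
}
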
LIST_ENTRY(uvm_aobj) u_list; /* global list of aobjs */
};
-/*
- * uvm_aobj_pool: pool of uvm_aobj structures
- */
struct pool uvm_aobj_pool;
-/*
- * local functions
- */
static struct uao_swhash_elt *uao_find_swhash_elt(struct uvm_aobj *, int,
boolean_t);
static int uao_find_swslot(struct uvm_aobj *, int);
swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
page_tag = UAO_SWHASH_ELT_TAG(pageidx); /* tag to search for */
- /* now search the bucket for the requested tag */
+ /*
+ * now search the bucket for the requested tag
+ */
LIST_FOREACH(elt, swhash, list) {
if (elt->tag == page_tag)
return(elt);
}
- /* fail now if we are not allowed to create a new entry in the bucket */
if (!create)
return NULL;
- /* allocate a new entry for the bucket and init/insert it in */
+ /*
+ * allocate a new entry for the bucket and init/insert it in
+ */
elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT | PR_ZERO);
/*
* XXX We cannot sleep here as the hash table might disappear
uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
{
- /* if noswap flag is set, then we never return a slot */
+ /*
+ * if noswap flag is set, then we never return a slot
+ */
if (aobj->u_flags & UAO_FLAG_NOSWAP)
return(0);
- /* if hashing, look in hash table. */
+ /*
+ * if hashing, look in hash table.
+ */
if (aobj->u_pages > UAO_SWHASH_THRESHOLD) {
struct uao_swhash_elt *elt =
uao_find_swhash_elt(aobj, pageidx, FALSE);
return(0);
}
- /* otherwise, look in the array */
+ /*
+ * otherwise, look in the array
+ */
return(aobj->u_swslots[pageidx]);
}
* uao_set_swslot: set the swap slot for a page in an aobj.
*
* => setting a slot to zero frees the slot
+ * => we return the old slot number, or -1 if we failed to allocate
+ * memory to record the new slot number
*/
int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
KERNEL_ASSERT_LOCKED();
- /* if noswap flag is set, then we can't set a slot */
+ /*
+ * if noswap flag is set, then we can't set a slot
+ */
if (aobj->u_flags & UAO_FLAG_NOSWAP) {
if (slot == 0)
return(0); /* a clear is ok */
/* but a set is not */
printf("uao_set_swslot: uobj = %p\n", uobj);
- panic("uao_set_swslot: attempt to set a slot"
- " on a NOSWAP object");
+ panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
}
- /* are we using a hash table? if so, add it in the hash. */
+ /*
+ * are we using a hash table? if so, add it in the hash.
+ */
if (aobj->u_pages > UAO_SWHASH_THRESHOLD) {
/*
* Avoid allocating an entry just to free it again if
* now adjust the elt's reference counter and free it if we've
* dropped it to zero.
*/
- /* an allocation? */
if (slot) {
if (oldslot == 0)
elt->count++;
- } else { /* freeing slot ... */
- if (oldslot) /* to be safe */
+ } else {
+ if (oldslot)
elt->count--;
if (elt->count == 0) {
pool_put(&uao_swhash_elt_pool, elt);
}
}
- } else {
+ } else {
/* we are using an array */
oldslot = aobj->u_swslots[pageidx];
aobj->u_swslots[pageidx] = slot;
} else {
int i;
- /* free the array */
+ /*
+ * free the array
+ */
for (i = 0; i < aobj->u_pages; i++) {
int slot = aobj->u_swslots[i];
if (slot) {
uvm_swap_free(slot, 1);
+
/* this page is no longer only in swap. */
atomic_dec_int(&uvmexp.swpgonly);
}
free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
}
- /* finally free the aobj itself */
+ /*
+ * finally free the aobj itself
+ */
pool_put(&uvm_aobj_pool, aobj);
}
struct uvm_object *
uao_create(vsize_t size, int flags)
{
- static struct uvm_aobj kernel_object_store; /* home of kernel_object */
- static int kobj_alloced = 0; /* not allocated yet */
+ static struct uvm_aobj kernel_object_store;
+ static int kobj_alloced = 0;
int pages = round_page(size) >> PAGE_SHIFT;
int refs = UVM_OBJ_KERN;
int mflags;
struct uvm_aobj *aobj;
- /* malloc a new aobj unless we are asked for the kernel object */
- if (flags & UAO_FLAG_KERNOBJ) { /* want kernel object? */
+ /*
+ * Allocate a new aobj, unless kernel object is requested.
+ */
+ if (flags & UAO_FLAG_KERNOBJ) {
if (kobj_alloced)
panic("uao_create: kernel object already allocated");
aobj = &kernel_object_store;
aobj->u_pages = pages;
- aobj->u_flags = UAO_FLAG_NOSWAP; /* no swap to start */
- /* we are special, we never die */
+ aobj->u_flags = UAO_FLAG_NOSWAP;
kobj_alloced = UAO_FLAG_KERNOBJ;
} else if (flags & UAO_FLAG_KERNSWAP) {
aobj = &kernel_object_store;
if (kobj_alloced != UAO_FLAG_KERNOBJ)
panic("uao_create: asked to enable swap on kernel object");
kobj_alloced = UAO_FLAG_KERNSWAP;
- } else { /* normal object */
+ } else {
aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
aobj->u_pages = pages;
- aobj->u_flags = 0; /* normal object */
- refs = 1; /* normal object so 1 ref */
+ aobj->u_flags = 0;
+ refs = 1;
}
- /* allocate hash/array if necessary */
+ /*
+ * allocate hash/array if necessary
+ */
if (flags == 0 || (flags & (UAO_FLAG_KERNSWAP | UAO_FLAG_CANFAIL))) {
if (flags)
mflags = M_NOWAIT;
}
}
+ /*
+ * Initialise UVM object.
+ */
uvm_objinit(&aobj->u_obj, &aobj_pager, refs);
- /* now that aobj is ready, add it to the global list */
+ /*
+ * now that aobj is ready, add it to the global list
+ */
mtx_enter(&uao_list_lock);
LIST_INSERT_HEAD(&uao_list, aobj, u_list);
mtx_leave(&uao_list_lock);
}
/*
- * uao_reference: add a ref to an aobj
+ * uao_reference: hold a reference to an anonymous UVM object.
*/
void
uao_reference(struct uvm_object *uobj)
uao_reference_locked(uobj);
}
-/*
- * uao_reference_locked: add a ref to an aobj
- */
void
uao_reference_locked(struct uvm_object *uobj)
{
- /* kernel_object already has plenty of references, leave it alone. */
+ /* Kernel object is persistent. */
if (UVM_OBJ_IS_KERN_OBJECT(uobj))
return;
- uobj->uo_refs++; /* bump! */
+ uobj->uo_refs++;
}
/*
- * uao_detach: drop a reference to an aobj
+ * uao_detach: drop a reference to an anonymous UVM object.
*/
void
uao_detach(struct uvm_object *uobj)
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
struct vm_page *pg;
- /* detaching from kernel_object is a noop. */
+ /*
+ * Detaching from kernel_object is a NOP.
+ */
if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
return;
}
- uobj->uo_refs--; /* drop ref! */
- if (uobj->uo_refs) { /* still more refs? */
+ /*
+ * Drop the reference. If it was the last one, destroy the object.
+ */
+ uobj->uo_refs--;
+ if (uobj->uo_refs) {
return;
}
- /* remove the aobj from the global list. */
+ /*
+ * Remove the aobj from the global list.
+ */
mtx_enter(&uao_list_lock);
LIST_REMOVE(aobj, u_list);
mtx_leave(&uao_list_lock);
/*
- * Free all pages left in the object. If they're busy, wait
- * for them to become available before we kill it.
- * Release swap resources then free the page.
- */
+ * Free all the pages left in the aobj. For each page, when the
+ * page is no longer busy (and thus after any disk I/O that it is
+ * involved in is complete), release any swap resources and free
+ * the page itself.
+ */
uvm_lock_pageq();
while((pg = RBT_ROOT(uvm_objtree, &uobj->memt)) != NULL) {
if (pg->pg_flags & PG_BUSY) {
}
uvm_unlock_pageq();
- /* finally, free the rest. */
+ /*
+ * Finally, free the anonymous UVM object itself.
+ */
uao_free(aobj);
}
/*
- * uao_flush: "flush" pages out of a uvm object
+ * uao_flush: flush pages out of a uvm object
*
* => if PGO_CLEANIT is not set, then we will not block.
* => if PGO_ALLPAGE is set, then all pages in the object are valid targets
/* FALLTHROUGH */
case PGO_DEACTIVATE:
deactivate_it:
- /* skip the page if it's wired */
if (pp->wire_count != 0)
continue;
uvm_lock_pageq();
- /* zap all mappings for the page. */
pmap_page_protect(pp, PROT_NONE);
-
- /* ...and deactivate the page. */
uvm_pagedeactivate(pp);
uvm_unlock_pageq();
if (pp->wire_count != 0)
continue;
- /* zap all mappings for the page. */
+ /*
+ * free the swap slot and the page.
+ */
pmap_page_protect(pp, PROT_NONE);
+ /*
+ * freeing swapslot here is not strictly necessary.
+ * however, leaving it here doesn't save much
+ * because we need to update swap accounting anyway.
+ */
uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
uvm_lock_pageq();
uvm_pagefree(pp);
KERNEL_ASSERT_LOCKED();
- /* get number of pages */
+ /*
+ * get number of pages
+ */
maxpages = *npagesp;
- /* step 1: handled the case where fault data structures are locked. */
if (flags & PGO_LOCKED) {
- /* step 1a: get pages that are already resident. */
+ /*
+ * step 1a: get pages that are already resident. only do
+ * this if the data structures are locked (i.e. the first
+ * time through).
+ */
done = TRUE; /* be optimistic */
gotpages = 0; /* # of pages we got so far */
}
}
- /* to be useful must get a non-busy page */
+ /*
+ * to be useful must get a non-busy page
+ */
if (ptmp == NULL ||
(ptmp->pg_flags & PG_BUSY) != 0) {
if (lcv == centeridx ||
}
/*
- * useful page: busy it and plug it in our
- * result array
+ * useful page: plug it in our result array
*/
- /* caller must un-busy this page */
atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
UVM_PAGE_OWN(ptmp, "uao_get1");
pps[lcv] = ptmp;
/* out of RAM? */
if (ptmp == NULL) {
uvm_wait("uao_getpage");
- /* goto top of pps while loop */
- continue;
+ continue;
}
/*
tsleep_nsec(ptmp, PVM, "uao_get", INFSLP);
continue; /* goto top of pps while loop */
}
-
- /*
- * if we get here then the page has become resident and
- * unbusy between steps 1 and 2. we busy it now (so we
- * own it) and set pps[lcv] (so that we exit the while
- * loop).
+
+ /*
+ * if we get here then the page is resident and
+ * unbusy. we busy it now (so we own it).
*/
/* we own it, caller must un-busy */
atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
/* page hasn't existed before, just zero it. */
uvm_pagezero(ptmp);
} else {
- /* page in the swapped-out page. */
+ /*
+ * page in the swapped-out page.
+ */
rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
- /* I/O done. check for errors. */
+ /*
+ * I/O done. check for errors.
+ */
if (rv != VM_PAGER_OK) {
/*
* remove the swap slot from the aobj
}
}
- /*
+ /*
* we got the page! clear the fake flag (indicates valid
* data now in page) and plug into our result array. note
- * that page is still busy.
+ * that page is still busy.
*
* it is the callers job to:
* => check if the page is released
* => unbusy the page
* => activate the page
*/
-
- /* data is valid ... */
atomic_clearbits_int(&ptmp->pg_flags, PG_FAKE);
pmap_clear_modify(ptmp); /* ... and clean */
pps[lcv] = ptmp;
{
struct uvm_aobj *aobj, *nextaobj, *prevaobj = NULL;
- /* walk the list of all aobjs. */
+ /*
+ * Walk the list of all anonymous UVM objects.
+ */
mtx_enter(&uao_list_lock);
for (aobj = LIST_FIRST(&uao_list);
prevaobj = aobj;
}
- /* done with traversal, unlock the list */
+ /*
+ * done with traversal, unlock the list
+ */
mtx_leave(&uao_list_lock);
if (prevaobj) {
uao_detach_locked(&prevaobj->u_obj);
for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
int slot = elt->slots[i];
- /* if slot isn't in range, skip it. */
- if (slot < startslot ||
+ /*
+ * if the slot isn't in range, skip it.
+ */
+ if (slot < startslot ||
slot >= endslot) {
continue;
}
for (i = 0; i < aobj->u_pages; i++) {
int slot = aobj->u_swslots[i];
- /* if the slot isn't in range, skip it */
+ /*
+ * if the slot isn't in range, skip it
+ */
if (slot < startslot || slot >= endslot) {
continue;
}
- /* process the page. */
+ /*
+ * process the page.
+ */
rv = uao_pagein_page(aobj, i);
if (rv) {
return rv;
}
/*
- * page in a page from an aobj. used for swap_off.
- * returns TRUE if pagein was aborted due to lack of memory.
+ * uao_pagein_page: page in a single page from an anonymous UVM object.
+ *
+ * => Returns TRUE if pagein was aborted due to lack of memory.
*/
static boolean_t
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
- /* deactivate the page (to put it on a page queue). */
+ /*
+ * deactivate the page (to put it on a page queue).
+ */
pmap_clear_reference(pg);
uvm_lock_pageq();
uvm_pagedeactivate(pg);
-/* $OpenBSD: uvm_device.c,v 1.60 2020/11/06 11:52:39 mpi Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.61 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
.pgo_flush = udv_flush,
};
+/*
+ * the ops!
+ */
+
+
/*
* udv_attach
*
struct uvm_object *obj;
#endif
- /* before we do anything, ensure this device supports mmap */
+ /*
+ * before we do anything, ensure this device supports mmap
+ */
mapfn = cdevsw[major(device)].d_mmap;
if (mapfn == NULL ||
mapfn == (paddr_t (*)(dev_t, off_t, int)) enodev ||
mapfn == (paddr_t (*)(dev_t, off_t, int)) nullop)
return(NULL);
- /* Negative offsets on the object are not allowed. */
+ /*
+ * Negative offsets on the object are not allowed.
+ */
if (off < 0)
return(NULL);
off += PAGE_SIZE; size -= PAGE_SIZE;
}
- /* keep looping until we get it */
+ /*
+ * keep looping until we get it
+ */
for (;;) {
- /* first, attempt to find it on the main list */
+ /*
+ * first, attempt to find it on the main list
+ */
mtx_enter(&udv_lock);
LIST_FOREACH(lcv, &udv_list, u_list) {
if (device == lcv->u_device)
break;
}
- /* got it on main list. put a hold on it and unlock udv_lock. */
+ /*
+ * got it on main list. put a hold on it and unlock udv_lock.
+ */
if (lcv) {
/*
* if someone else has a hold on it, sleep and start
lcv->u_flags |= UVM_DEVICE_HOLD;
mtx_leave(&udv_lock);
- /* bump reference count, unhold, return. */
+ /*
+ * bump reference count, unhold, return.
+ */
lcv->u_obj.uo_refs++;
mtx_enter(&udv_lock);
return(&lcv->u_obj);
}
- /* did not find it on main list. need to malloc a new one. */
+ /*
+ * Did not find it on main list. Need to allocate a new one.
+ */
mtx_leave(&udv_lock);
/* NOTE: we could sleep in the following malloc() */
udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
KERNEL_ASSERT_LOCKED();
- /* loop until done */
+ /*
+ * loop until done
+ */
again:
if (uobj->uo_refs > 1) {
uobj->uo_refs--;
}
KASSERT(uobj->uo_npages == 0 && RBT_EMPTY(uvm_objtree, &uobj->memt));
- /* is it being held? if so, wait until others are done. */
+ /*
+ * is it being held? if so, wait until others are done.
+ */
mtx_enter(&udv_lock);
if (udv->u_flags & UVM_DEVICE_HOLD) {
udv->u_flags |= UVM_DEVICE_WANTED;
goto again;
}
- /* got it! nuke it now. */
+ /*
+ * got it! nuke it now.
+ */
LIST_REMOVE(udv, u_list);
if (udv->u_flags & UVM_DEVICE_WANTED)
wakeup(udv);
return(VM_PAGER_ERROR);
}
- /* get device map function. */
+ /*
+ * get device map function.
+ */
device = udv->u_device;
mapfn = cdevsw[major(device)].d_mmap;
/* pmap va = vaddr (virtual address of pps[0]) */
curr_va = vaddr;
- /* loop over the page range entering in as needed */
+ /*
+ * loop over the page range entering in as needed
+ */
retval = VM_PAGER_OK;
for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
curr_va += PAGE_SIZE) {
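
The udv_attach() hunks above spell out a find-or-allocate loop: search the list under udv_lock, and if the object is not there, drop the lock (the allocation may sleep), allocate, and go around again so the lookup is redone before anything is inserted. A stand-alone sketch of that loop with invented types; the HOLD/WANTED handshake of the real code is omitted:

/*
 * Find-or-allocate with the lock dropped across the allocation,
 * as in udv_attach() above.  Types and names are invented.
 */
#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct dev_obj {
	LIST_ENTRY(dev_obj)	d_list;
	int			d_unit;
	int			d_refs;
};

static LIST_HEAD(, dev_obj) dev_list = LIST_HEAD_INITIALIZER(dev_list);
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static struct dev_obj *
dev_attach(int unit)
{
	struct dev_obj *d, *fresh = NULL;

	for (;;) {
		pthread_mutex_lock(&dev_lock);
		LIST_FOREACH(d, &dev_list, d_list) {
			if (d->d_unit == unit)
				break;
		}
		if (d != NULL) {
			d->d_refs++;		/* found: take a reference */
			pthread_mutex_unlock(&dev_lock);
			free(fresh);		/* drop the unused pre-allocation, if any */
			return d;
		}
		if (fresh != NULL) {
			/* Not found, and we already allocated one: insert it. */
			fresh->d_unit = unit;
			fresh->d_refs = 1;
			LIST_INSERT_HEAD(&dev_list, fresh, d_list);
			pthread_mutex_unlock(&dev_lock);
			return fresh;
		}
		/* The allocation may sleep: drop the lock, allocate, retry. */
		pthread_mutex_unlock(&dev_lock);
		fresh = calloc(1, sizeof(*fresh));	/* NULL check omitted in this sketch */
	}
}

int
main(void)
{
	struct dev_obj *a = dev_attach(0), *b = dev_attach(0);

	return (a == b && b->d_refs == 2) ? 0 : 1;
}
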
-/* $OpenBSD: uvm_fault.c,v 1.118 2021/03/12 14:15:49 jsg Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.119 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{
for (;;) {
- /* no mapping? give up. */
+ /*
+ * no mapping? give up.
+ */
if (uvmfault_lookup(ufi, TRUE) == FALSE)
return;
- /* copy if needed. */
+ /*
+ * copy if needed.
+ */
if (UVM_ET_ISNEEDSCOPY(ufi->entry))
amap_copy(ufi->map, ufi->entry, M_NOWAIT,
UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE,
ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
- /* didn't work? must be out of RAM. sleep. */
+ /*
+ * didn't work? must be out of RAM. unlock and sleep.
+ */
if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
uvmfault_unlockmaps(ufi, TRUE);
uvm_wait("fltamapcopy");
continue;
}
- /* got it! */
+ /*
+ * got it! unlock and return.
+ */
uvmfault_unlockmaps(ufi, TRUE);
return;
}
uvmfault_unlockall(ufi, amap, NULL);
/*
- * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
- * page into the uvm_swap_get function with
- * all data structures unlocked. note that
- * it is ok to read an_swslot here because
- * we hold PG_BUSY on the page.
+ * Pass a PG_BUSY+PG_FAKE+PG_CLEAN page into
+ * the uvm_swap_get() function with all data
+ * structures unlocked. Note that it is OK
+ * to read an_swslot here, because we hold
+ * PG_BUSY on the page.
*/
counters_inc(uvmexp_counters, pageins);
error = uvm_swap_get(pg, anon->an_swslot,
if (pg->pg_flags & PG_RELEASED) {
pmap_page_protect(pg, PROT_NONE);
KASSERT(anon->an_ref == 0);
+ /*
+ * Released while we had unlocked amap.
+ */
if (locked)
uvmfault_unlockall(ufi, NULL, NULL);
uvm_anon_release(anon); /* frees page for us */
/*
* Note: page was never !PG_BUSY, so it
* cannot be mapped and thus no need to
- * pmap_page_protect it...
+ * pmap_page_protect() it.
*/
uvm_lock_pageq();
uvm_pagefree(pg);
}
/*
- * must be OK, clear modify (already PG_CLEAN)
- * and activate
+ * We have successfully read the page, activate it.
*/
pmap_clear_modify(pg);
uvm_lock_pageq();
if (amap)
uvmfault_anonflush(*ranons, nback);
- /* flush object? */
+ /*
+ * flush object?
+ */
if (uobj) {
voff_t uoff;
}
/*
- * unmapped or center page. check if any anon at this level.
+ * unmapped or center page. check if any anon at this level.
*/
if (amap == NULL || anons[lcv] == NULL) {
pages[lcv] = NULL;
struct vm_page *pg = NULL;
int error, ret;
+ /* locked: maps(read), amap, anon */
KASSERT(rw_write_held(amap->am_lock));
KASSERT(anon->an_lock == amap->am_lock);
return ERESTART;
}
- /* ... update the page queues. */
+ /*
+ * ... update the page queues.
+ */
uvm_lock_pageq();
if (fault_type == VM_FAULT_WIRE) {
* (PGO_LOCKED).
*/
if (uobj == NULL) {
+ /* zero fill; don't care neighbor pages */
uobjpage = NULL;
} else {
uobjpage = uvm_fault_lower_lookup(ufi, flt, pages);
0, flt->access_type & MASK(ufi->entry), ufi->entry->advice,
PGO_SYNCIO);
- /* recover from I/O */
+ /*
+ * recover from I/O
+ */
if (result != VM_PAGER_OK) {
KASSERT(result != VM_PAGER_PEND);
* out of memory resources?
*/
if (anon == NULL || pg == NULL) {
- /* arg! must unbusy our page and fail or sleep. */
+ /*
+ * arg! must unbusy our page and fail or sleep.
+ */
if (uobjpage != PGO_DONTCARE) {
uvm_lock_pageq();
uvm_pageactivate(uobjpage);
return ERESTART;
}
- /* fill in the data */
+ /*
+ * fill in the data
+ */
if (uobjpage != PGO_DONTCARE) {
counters_inc(uvmexp_counters, flt_prcopy);
/* copy page [pg now dirty] */
* the PAs from the pmap. we also lock out the page daemon so that
* we can call uvm_pageunwire.
*/
+
uvm_lock_pageq();
- /* find the beginning map entry for the region. */
+ /*
+ * find the beginning map entry for the region.
+ */
KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
panic("uvm_fault_unwire_locked: address not in map");
if (pmap_extract(pmap, va, &pa) == FALSE)
continue;
- /* find the map entry for the current address. */
+ /*
+ * find the map entry for the current address.
+ */
KASSERT(va >= entry->start);
while (va >= entry->end) {
next = RBT_NEXT(uvm_map_addr, entry);
entry = next;
}
- /* if the entry is no longer wired, tell the pmap. */
+ /*
+ * if the entry is no longer wired, tell the pmap.
+ */
if (VM_MAPENT_ISWIRED(entry) == 0)
pmap_unwire(pmap, va);
{
vm_map_t tmpmap;
- /* init ufi values for lookup. */
+ /*
+ * init ufi values for lookup.
+ */
ufi->map = ufi->orig_map;
ufi->size = ufi->orig_size;
continue;
}
- /* got it! */
+ /*
+ * got it!
+ */
ufi->mapv = ufi->map->timestamp;
return(TRUE);
- }
+ } /* while loop */
+
/*NOTREACHED*/
}
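
uvm_fault_unwire_locked() above walks the map entries in step with the advancing address instead of looking each page up from scratch: once the current entry no longer covers the address, it moves to the next one. A small stand-alone illustration over an invented array of [start, end) ranges:

/*
 * Walking sorted [start, end) ranges in step with an advancing address,
 * as uvm_fault_unwire_locked() does with map entries above.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

int
main(void)
{
	struct range map[] = {
		{ 0x1000, 0x3000 },
		{ 0x3000, 0x8000 },
		{ 0x8000, 0xa000 },
	};
	const unsigned long pagesz = 0x1000;
	int i = 0;

	for (unsigned long va = 0x1000; va < 0xa000; va += pagesz) {
		/* Advance to the entry that contains va (no fresh lookup). */
		while (va >= map[i].end)
			i++;
		printf("va %#lx -> entry [%#lx, %#lx)\n",
		    va, map[i].start, map[i].end);
	}
	return 0;
}
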
-/* $OpenBSD: uvm_init.c,v 1.41 2020/12/28 14:01:23 mpi Exp $ */
+/* $OpenBSD: uvm_init.c,v 1.42 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_init.c,v 1.14 2000/06/27 17:29:23 mrg Exp $ */
/*
/*
* uvm_init: init the VM system. called from kern/init_main.c.
*/
+
void
uvm_init(void)
{
vaddr_t kvm_start, kvm_end;
- /* step 0: ensure that the hardware set the page size */
+ /*
+ * Ensure that the hardware set the page size.
+ */
if (uvmexp.pagesize == 0) {
panic("uvm_init: page size not set");
}
-
- /* step 1: set up stats. */
averunnable.fscale = FSCALE;
/*
- * step 2: init the page sub-system. this includes allocating the
- * vm_page structures, and setting up all the page queues (and
- * locks). available memory will be put in the "free" queue.
- * kvm_start and kvm_end will be set to the area of kernel virtual
- * memory which is available for general use.
+ * Init the page sub-system. This includes allocating the vm_page
+ * structures, and setting up all the page queues (and locks).
+ * Available memory will be put in the "free" queue, kvm_start and
+ * kvm_end will be set to the area of kernel virtual memory which
+ * is available for general use.
*/
uvm_page_init(&kvm_start, &kvm_end);
/*
- * step 3: init the map sub-system. allocates the static pool of
- * vm_map_entry structures that are used for "special" kernel maps
- * (e.g. kernel_map, kmem_map, etc...).
+ * Init the map sub-system.
+ *
+ * Allocates the static pool of vm_map_entry structures that are
+ * used for "special" kernel maps (e.g. kernel_map, kmem_map, etc...).
*/
uvm_map_init();
/*
- * step 4: setup the kernel's virtual memory data structures. this
- * includes setting up the kernel_map/kernel_object and the kmem_map/
- * kmem_object.
+ * Setup the kernel's virtual memory data structures. This includes
+ * setting up the kernel_map/kernel_object.
*/
-
uvm_km_init(vm_min_kernel_address, kvm_start, kvm_end);
/*
uvmfault_init();
/*
- * step 5: init the pmap module. the pmap module is free to allocate
+ * Init the pmap module. The pmap module is free to allocate
* memory for its private use (e.g. pvlists).
*/
pmap_init();
uvm_km_page_init();
/*
- * step 7: init the kernel memory allocator. after this call the
- * kernel memory allocator (malloc) can be used.
+ * Make kernel memory allocators ready for use.
+ * After this call the malloc memory allocator can be used.
*/
kmeminit();
dma_alloc_init();
/*
- * step 8: init all pagers and the pager_map.
+ * Init all pagers and the pager_map.
*/
uvm_pager_init();
panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF1);
#endif
/*
- * init anonymous memory systems
+ * Init anonymous memory systems.
*/
uvm_anon_init();
-/* $OpenBSD: uvm_io.c,v 1.26 2016/01/09 11:34:27 kettenis Exp $ */
+/* $OpenBSD: uvm_io.c,v 1.27 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_io.c,v 1.12 2000/06/27 17:29:23 mrg Exp $ */
/*
if (flags & UVM_IO_FIXPROT)
extractflags |= UVM_EXTRACT_FIXPROT;
- /* step 1: main loop... while we've got data to move */
+ /*
+ * step 1: main loop... while we've got data to move
+ */
for (/*null*/; togo > 0 ; pageoffset = 0) {
- /* step 2: extract mappings from the map into kernel_map */
+
+ /*
+ * step 2: extract mappings from the map into kernel_map
+ */
error = uvm_map_extract(map, baseva, chunksz, &kva,
extractflags);
if (error) {
break;
}
- /* step 3: move a chunk of data */
+ /*
+ * step 3: move a chunk of data
+ */
sz = chunksz - pageoffset;
if (sz > togo)
sz = togo;
togo -= sz;
baseva += chunksz;
- /* step 4: unmap the area of kernel memory */
+
+ /*
+ * step 4: unmap the area of kernel memory
+ */
vm_map_lock(kernel_map);
TAILQ_INIT(&dead_entries);
uvm_unmap_remove(kernel_map, kva, kva+chunksz,
vm_map_unlock(kernel_map);
uvm_unmap_detach(&dead_entries, AMAP_REFALL);
- /*
- * We defer checking the error return from uiomove until
- * here so that we won't leak memory.
- */
if (error)
break;
}
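
uvm_io() above moves the data in chunks: extract a piece of the source map into kernel_map (step 2), move it (step 3), and unmap it again (step 4) before the next round. A trivial userland sketch of the same loop shape, with memcpy standing in for the extract/uiomove/unmap steps:

/* Chunked copy loop mirroring the step 1-4 structure above. */
#include <stdio.h>
#include <string.h>

#define CHUNKSZ	16			/* stand-in for the kernel's chunk size */

static void
chunked_copy(char *dst, const char *src, size_t togo)
{
	size_t off = 0;

	while (togo > 0) {
		size_t sz = togo < CHUNKSZ ? togo : CHUNKSZ;

		/* steps 2+3: "map" the chunk and move the data */
		memcpy(dst + off, src + off, sz);

		/*
		 * step 4: nothing to tear down here; the kernel unmaps the
		 * temporary kernel_map entry before the next iteration.
		 */
		off += sz;
		togo -= sz;
	}
}

int
main(void)
{
	char src[40] = "the quick brown fox jumps over the dog!";
	char dst[40];

	chunked_copy(dst, src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}
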
-/* $OpenBSD: uvm_km.c,v 1.141 2021/03/12 14:15:49 jsg Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.142 2021/03/20 10:24:21 mpi Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
uvm_map_setup(submap, vm_map_pmap(map), *min, *max, flags);
}
- /* now let uvm_map_submap plug in it... */
+ /*
+ * now let uvm_map_submap plug in it...
+ */
if (uvm_map_submap(map, *min, *max, submap) != 0)
panic("uvm_km_suballoc: submap allocation failed");
size = round_page(size);
kva = vm_map_min(map); /* hint */
- /* allocate some virtual space, demand filled by kernel_object. */
-
+ /*
+ * allocate some virtual space. will be demand filled by kernel_object.
+ */
if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
UVM_UNKNOWN_OFFSET, align,
UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,