Extend use of vm_object_prefer() to vm_allocate_with_pager().
Make vm_object_prefer() call MD aligner for "pageless" objects too,
so we can have more control over the virtual address to be used.
The implementation could be simpler if we bypassed the object to be mapped,
but we'd lose the ability to adapt alignment to objects that were previously
mmap'ed with MAP_FIXED on.
Only expect vm_fork() to return if __FORK_BRAINDAMAGE is defined.
Eliminate unused third arg to vm_fork().
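
The patch relies on a machine-dependent PMAP_PREFER() hook but never shows
one.  The sketch below is purely illustrative of the kind of cache-color
alignment such an aligner might perform on a machine with a virtually
indexed cache; the function name, the 64KB cache size, and the convention
of returning (vm_offset_t)-1 for "no preference" are assumptions made for
this example and are not part of the change.

#include <stdio.h>

typedef unsigned long vm_offset_t;

#define CACHE_SIZE	(64 * 1024)		/* assumed VAC size */
#define NOPREF		((vm_offset_t)-1)

/*
 * Given an object offset and a candidate virtual address, return a
 * virtual address at or above the candidate whose cache color matches
 * the offset, so that all mappings of the same data share a cache line.
 * With no offset information, just round the address up to a cache
 * boundary.
 */
static vm_offset_t
pmap_prefer_sketch(vm_offset_t foff, vm_offset_t va)
{
	vm_offset_t delta;

	if (foff == NOPREF)
		return ((va + CACHE_SIZE - 1) & ~(CACHE_SIZE - 1));

	delta = (foff - va) & (CACHE_SIZE - 1);
	return (va + delta);
}

int
main(void)
{
	/* Pageless object: alignment only. */
	printf("%#lx\n", pmap_prefer_sketch(NOPREF, 0x20001234));
	/* Match the cache color of file offset 0x8000. */
	printf("%#lx\n", pmap_prefer_sketch(0x8000, 0x20001000));
	return (0);
}
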
-/* $NetBSD: vm_extern.h,v 1.14 1995/09/27 20:30:17 thorpej Exp $ */
+/* $NetBSD: vm_extern.h,v 1.15 1995/12/09 04:28:16 mycroft Exp $ */
/*-
* Copyright (c) 1992, 1993
vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
-int vm_fork __P((struct proc *, struct proc *, int));
+#ifdef __FORK_BRAINDAMAGE
+int vm_fork __P((struct proc *, struct proc *));
+#else
+void vm_fork __P((struct proc *, struct proc *));
+#endif
int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
void vm_init_limits __P((struct proc *));
-/* $NetBSD: vm_glue.c,v 1.46 1995/05/05 03:35:39 cgd Exp $ */
+/* $NetBSD: vm_glue.c,v 1.48 1995/12/09 04:28:19 mycroft Exp $ */
/*
* Copyright (c) 1991, 1993
* after cpu_fork returns in the child process. We do nothing here
* after cpu_fork returns.
*/
+#ifdef __FORK_BRAINDAMAGE
int
-vm_fork(p1, p2, isvfork)
+#else
+void
+#endif
+vm_fork(p1, p2)
register struct proc *p1, *p2;
- int isvfork;
{
register struct user *up;
vm_offset_t addr;
#ifdef SYSVSHM
if (p1->p_vmspace->vm_shm)
- shmfork(p1, p2, isvfork);
+ shmfork(p1, p2);
#endif
#if !defined(i386) && !defined(pc532)
/*
* Allocate a wired-down (for now) pcb and kernel stack for the process
*/
-#ifdef pica
- addr = kmem_alloc_upage(kernel_map, USPACE);
-#else
addr = kmem_alloc_pageable(kernel_map, USPACE);
-#endif
if (addr == 0)
panic("vm_fork: no more kernel virtual memory");
vm_map_pageable(kernel_map, addr, addr + USPACE, FALSE);
(void)vm_map_inherit(vp, addr, VM_MAX_ADDRESS, VM_INHERIT_NONE);
}
#endif
+
+#ifdef __FORK_BRAINDAMAGE
/*
* cpu_fork will copy and update the kernel stack and pcb,
* and make the child ready to run. It marks the child
* once in the child.
*/
return (cpu_fork(p1, p2));
+#else
+ /*
+ * cpu_fork will copy and update the kernel stack and pcb,
+ * and make the child ready to run. The child will exit
+ * directly to user mode on its first time slice, and will
+ * not return here.
+ */
+ cpu_fork(p1, p2);
+#endif
}
/*
cpu_swapin(p);
s = splstatclock();
if (p->p_stat == SRUN)
- setrunqueue(p);
+ setrunqueue(p);
p->p_flag |= P_INMEM;
splx(s);
p->p_swtime = 0;
printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
p->p_pid, p->p_comm, p->p_addr,
ppri, cnt.v_free_count);
-#endif
-#ifdef pica
- vm_map_pageable(kernel_map, (vm_offset_t)p->p_addr,
- (vm_offset_t)p->p_addr + atop(USPACE), FALSE);
#endif
swapin(p);
goto loop;
-/* $NetBSD: vm_mmap.c,v 1.42 1995/10/10 01:27:11 mycroft Exp $ */
+/* $NetBSD: vm_mmap.c,v 1.43 1995/12/05 22:54:42 pk Exp $ */
/*
* Copyright (c) 1988 University of Utah.
vm_offset_t off;
/* locate and allocate the target address space */
+ vm_map_lock(map);
if (fitit) {
/*
- * We cannot call vm_map_find() because
- * a proposed address may be vetoed by
- * the pmap module.
- * So we look for space ourselves, validate
- * it and insert it into the map.
+ * Find space in the map at a location
+ * that is compatible with the object/offset
+ * we're going to attach there.
*/
- vm_map_lock(map);
again:
if (vm_map_findspace(map, *addr, size,
addr) == 1) {
} else {
vm_object_prefer(object, foff, addr);
rv = vm_map_insert(map, NULL,
- (vm_offset_t)0,
- *addr, *addr+size);
+ (vm_offset_t)0,
+ *addr, *addr+size);
+ /*
+ * vm_map_insert() may fail if
+ * vm_object_prefer() has altered
+ * the initial address.
+ * If so, we start again.
+ */
if (rv == KERN_NO_SPACE)
- /*
- * Modified address didn't fit
- * after all, the gap must
- * have been to small.
- */
goto again;
}
- vm_map_unlock(map);
} else {
- rv = vm_map_find(map, NULL, (vm_offset_t)0,
- addr, size, 0);
+ rv = vm_map_insert(map, NULL, (vm_offset_t)0,
+ *addr, *addr + size);
+#ifdef DEBUG
/*
* Check against PMAP preferred address. If
* there's a mismatch, these pages should not
* be shared with others. <howto?>
*/
- if (rv == KERN_SUCCESS) {
+ if (rv == KERN_SUCCESS &&
+ (mmapdebug & MDB_MAPIT)) {
vm_offset_t paddr = *addr;
vm_object_prefer(object, foff, &paddr);
if (paddr != *addr)
- printf("vm_mmap: pmap botch!\n");
+ printf(
+ "vm_mmap: pmap botch! "
+ "[foff %x, addr %x, paddr %x]\n",
+ foff, *addr, paddr);
}
+#endif
}
+ vm_map_unlock(map);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
VM_MIN_ADDRESS+size, TRUE);
off = VM_MIN_ADDRESS;
rv = vm_allocate_with_pager(tmap, &off, size,
- TRUE, pager,
+ FALSE, pager,
foff, FALSE);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
-/* $NetBSD: vm_object.c,v 1.29 1995/07/13 12:35:29 pk Exp $ */
+/* $NetBSD: vm_object.c,v 1.31 1995/12/06 00:38:11 pk Exp $ */
/*
* Copyright (c) 1991, 1993
register vm_page_t p;
register vm_offset_t paddr;
+#ifdef PMAP_PREFER
if (object == NULL)
- return;
+ goto first_map;
-#ifdef PMAP_PREFER
- vm_object_lock(object);
/*
* Look for the first page that the pmap layer has something
* to say about. Since an object maps a contiguous range of
 * virtual addresses, this will determine the preferred origin
* of the proposed mapping.
*/
+ vm_object_lock(object);
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
if (p->flags & (PG_FAKE | PG_FICTITIOUS))
continue;
if (paddr == (vm_offset_t)-1)
continue;
*addr = paddr - (p->offset - offset);
- break;
+ vm_object_unlock(object);
+ return;
}
vm_object_unlock(object);
+
+first_map:
+ /*
+ * No physical page attached; ask for a preferred address based
+ * only on the given virtual address.
+ */
+ paddr = PMAP_PREFER((vm_offset_t)-1, *addr);
+ if (paddr != (vm_offset_t)-1)
+ *addr = paddr;
+
#endif
}
/*
-/* $NetBSD: vm_user.c,v 1.11 1994/10/20 04:27:34 cgd Exp $ */
+/* $NetBSD: vm_user.c,v 1.12 1995/12/05 22:54:39 pk Exp $ */
/*
* Copyright (c) 1991, 1993
{
register vm_object_t object;
register int result;
+ vm_offset_t start;
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
cnt.v_nzfod -= atop(size);
}
- result = vm_map_find(map, object, poffset, addr, size, anywhere);
+ start = *addr;
+ vm_map_lock(map);
+ if (anywhere) {
+ again:
+ if (vm_map_findspace(map, start, size, addr))
+ result = KERN_NO_SPACE;
+ else {
+ vm_object_prefer(object, poffset, addr);
+ start = *addr;
+ result = vm_map_insert(map, object, poffset,
+ start, start + size);
+ if (result == KERN_NO_SPACE)
+ goto again;
+ }
+ } else
+ result = vm_map_insert(map, object, poffset,
+ start, start + size);
+ vm_map_unlock(map);
+
if (result != KERN_SUCCESS)
vm_object_deallocate(object);
else if (pager != NULL)