Compare commits

...

2 Commits

SHA1        Message        Date
8fcc6b0ea4  lab3.1: locks  2020-05-28 15:27:02 +02:00
1b82f81d8c  mem stuff      2020-05-28 15:25:12 +02:00
6 changed files with 1035 additions and 70 deletions

View File

@@ -55,6 +55,10 @@
* it's cutting (there are many) and why, and more importantly, how.
*/
+/* under dumbvm, always have 72k of user stack */
+/* (this must be > 64K so argument blocks of size ARG_MAX will fit) */
+#define DUMBVM_STACKPAGES 18
/*
* Bitmap utils
@@ -63,11 +67,6 @@
#define resetBit(A,k) ( A[(k/32)] &= ~(1 << (k%32)) )
#define checkBit(A,k) ( A[(k/32)] & (1 << (k%32)) )
-/* under dumbvm, always have 72k of user stack */
-/* (this must be > 64K so argument blocks of size ARG_MAX will fit) */
-#define DUMBVM_STACKPAGES 18
/*
* Wrap ram_stealmem in a spinlock.
*/
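These word-packed macros are the allocator's whole bookkeeping scheme: one bit per physical frame, 32 frames per `uint32_t` word. A minimal userspace sketch of how they behave (`setBit` is reconstructed by analogy with the two macros shown; `NFRAMES` is an illustrative value):

```c
#include <stdint.h>
#include <stdio.h>

#define setBit(A,k)   ( A[(k/32)] |=  (1 << (k%32)) )  /* reconstructed by analogy */
#define resetBit(A,k) ( A[(k/32)] &= ~(1 << (k%32)) )
#define checkBit(A,k) ( A[(k/32)] & (1 << (k%32)) )

int main(void) {
	enum { NFRAMES = 100 };                  /* illustrative frame count */
	uint32_t freeMap[NFRAMES/32 + 1] = {0};  /* same sizing as vm_bootstrap below */

	setBit(freeMap, 37);                         /* frame 37 becomes free */
	printf("%d\n", checkBit(freeMap, 37) != 0);  /* prints 1 */
	resetBit(freeMap, 37);                       /* frame 37 becomes allocated */
	printf("%d\n", checkBit(freeMap, 37) != 0);  /* prints 0 */
	return 0;
}
```

A set bit means the frame is free: allocation clears bits and freeing sets them, as `getfreeppages` and `freeppages` below show.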
@@ -77,26 +76,35 @@ static struct spinlock freemem_lock = SPINLOCK_INITIALIZER;
static uint32_t *freeRamFrames = NULL;
static unsigned long *allocSize = NULL;
static int nRamFrames = 0;
static int allocTableActive = 0;
+static int isTableActive () {
+int active;
+spinlock_acquire(&freemem_lock);
+active = allocTableActive;
+spinlock_release(&freemem_lock);
+return active;
+}
void
vm_bootstrap(void)
{
int i;
nRamFrames = ((int)ram_getsize())/PAGE_SIZE;
/* alloc freeRamFrame and allocSize */
freeRamFrames = kmalloc(sizeof(uint32_t)*(nRamFrames/32 + 1));
if (freeRamFrames==NULL) return;
allocSize = kmalloc(sizeof(unsigned long)*nRamFrames);
if (allocSize==NULL) {
/* reset to disable this vm management */
freeRamFrames = NULL; return;
}
for (i=0; i<nRamFrames; i++) {
if (i<(nRamFrames/32 + 1)) {
freeRamFrames[i] = (uint32_t)0;
}
allocSize[i] = 0;
}
spinlock_acquire(&freemem_lock);
allocTableActive = 1;
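The bit map needs `nRamFrames/32 + 1` words, and the guard `i<(nRamFrames/32 + 1)` keeps the single initialization loop (which runs to `nRamFrames` for `allocSize`) from writing past the shorter `freeRamFrames` array. A worked sketch of the sizing, with an assumed RAM size:

```c
#include <stdio.h>

#define RAM_BYTES (4*1024*1024)   /* illustrative: 4 MB of RAM */
#define PAGE_SIZE 4096

int main(void) {
	int nRamFrames = RAM_BYTES / PAGE_SIZE;   /* 1024 frames       */
	int mapWords   = nRamFrames/32 + 1;       /* 33 uint32_t words */
	/* 32 words already hold 1024 bits; the +1 only matters when
	 * nRamFrames is not a multiple of 32, at the cost of one spare
	 * word when it is. The same expression bounds the init loop. */
	printf("%d frames -> %d map words\n", nRamFrames, mapWords);
	return 0;
}
```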
@@ -110,8 +118,7 @@ vm_bootstrap(void)
* avoid the situation where syscall-layer code that works ok with
* dumbvm starts blowing up during the VM assignment.
*/
-static
-void
+static void
dumbvm_can_sleep(void)
{
if (CURCPU_EXISTS()) {
@@ -123,16 +130,13 @@ dumbvm_can_sleep(void)
}
}
static paddr_t
getfreeppages(unsigned long npages) {
paddr_t addr;
long i, first, found, np = (long)npages;
+if (!isTableActive()) return 0;
spinlock_acquire(&freemem_lock);
-if (!allocTableActive) {
-spinlock_release(&freemem_lock);
-return 0;
-}
for (i=0,first=found=-1; i<nRamFrames; i++) {
if (checkBit(freeRamFrames, i)) {
if (i==0 || !checkBit(freeRamFrames, i-1))
@@ -143,7 +147,7 @@ getfreeppages(unsigned long npages) {
}
}
}
if (found>=0) {
for (i=found; i<found+np; i++) {
resetBit(freeRamFrames, i);
@@ -160,28 +164,6 @@ getfreeppages(unsigned long npages) {
return addr;
}
-static int
-freeppages(paddr_t addr, unsigned long npages){
-long i, first, np=(long)npages;
-spinlock_acquire(&freemem_lock);
-if (!allocTableActive) {
-spinlock_release(&freemem_lock);
-return 0;
-}
-first = addr/PAGE_SIZE;
-KASSERT(allocSize!=NULL);
-KASSERT(nRamFrames>first);
-for (i=first; i<first+np; i++) {
-setBit(freeRamFrames, i);
-}
-spinlock_release(&freemem_lock);
-return 1;
-}
static paddr_t
getppages(unsigned long npages)
{
@@ -195,17 +177,31 @@ getppages(unsigned long npages)
addr = ram_stealmem(npages);
spinlock_release(&stealmem_lock);
}
-if (addr!=0) {
+if (addr!=0 && isTableActive()) {
spinlock_acquire(&freemem_lock);
-if (!allocTableActive) {
-spinlock_release(&freemem_lock);
-return addr;
-}
allocSize[addr/PAGE_SIZE] = npages;
spinlock_release(&freemem_lock);
}
return addr;
}
+static int
+freeppages(paddr_t addr, unsigned long npages){
+long i, first, np=(long)npages;
+if (!isTableActive()) return 0;
+first = addr/PAGE_SIZE;
+KASSERT(allocSize!=NULL);
+KASSERT(nRamFrames>first);
+spinlock_acquire(&freemem_lock);
+for (i=first; i<first+np; i++) {
+setBit(freeRamFrames, i);
+}
+spinlock_release(&freemem_lock);
+return 1;
+}
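`getfreeppages` is a first-fit scan: `first` remembers where the current run of free bits began, and the first run of length `np` wins. The same logic as a standalone, runnable sketch (kernel types and the spinlock stripped out; `firstfit_alloc` is a hypothetical name):

```c
#include <stdint.h>
#include <stdio.h>

#define setBit(A,k)   ( A[(k/32)] |=  (1 << (k%32)) )
#define resetBit(A,k) ( A[(k/32)] &= ~(1 << (k%32)) )
#define checkBit(A,k) ( A[(k/32)] & (1 << (k%32)) )

/* First-fit over the bit map: returns the first frame of a run of np
 * consecutive free frames, clearing its bits to mark them allocated;
 * -1 if no such run exists. */
static long firstfit_alloc(uint32_t *map, long nframes, long np) {
	long i, first, found;
	for (i = 0, first = found = -1; i < nframes; i++) {
		if (checkBit(map, i)) {
			if (i == 0 || !checkBit(map, i-1))
				first = i;               /* a run of free frames starts here */
			if (i - first + 1 >= np) {
				found = first;
				break;
			}
		}
	}
	if (found >= 0)
		for (i = found; i < found + np; i++)
			resetBit(map, i);            /* mark the run allocated */
	return found;
}

int main(void) {
	uint32_t map[4] = {0};                         /* 128 illustrative frames */
	long i;
	for (i = 10; i < 20; i++) setBit(map, i);      /* frames 10..19 are free  */
	printf("%ld\n", firstfit_alloc(map, 128, 4));  /* 10 */
	printf("%ld\n", firstfit_alloc(map, 128, 4));  /* 14 */
	printf("%ld\n", firstfit_alloc(map, 128, 4));  /* -1: only 2 frames left  */
	return 0;
}
```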
/* Allocate/free some kernel-space virtual pages */
@@ -222,20 +218,15 @@ alloc_kpages(unsigned npages)
return PADDR_TO_KVADDR(pa);
}
-void
-free_kpages(vaddr_t addr)
-{
-spinlock_acquire(&freemem_lock);
-if (!allocTableActive) {
-spinlock_release(&freemem_lock);
-return;
-}
-spinlock_release(&freemem_lock);
-paddr_t paddr = addr - MIPS_KSEG0;
-long first = paddr/PAGE_SIZE;
-KASSERT(allocSize!=NULL);
-KASSERT(nRamFrames>first);
-freeppages(paddr, allocSize[first]);
-}
+void
+free_kpages(vaddr_t addr){
+if (isTableActive()) {
+paddr_t paddr = addr - MIPS_KSEG0;
+long first = paddr/PAGE_SIZE;
+KASSERT(allocSize!=NULL);
+KASSERT(nRamFrames>first);
+freeppages(paddr, allocSize[first]);
+}
+}
void
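`free_kpages` recovers the physical address by subtracting `MIPS_KSEG0`, inverting `PADDR_TO_KVADDR`: on MIPS, kseg0 is a direct-mapped kernel window, so both translations are plain offset arithmetic. A sketch of the round trip (the constant mirrors the usual OS/161 mips definition; treat it as an assumption here):

```c
#include <stdio.h>

/* MIPS kseg0 direct mapping: kvaddr = paddr + 0x80000000. */
#define MIPS_KSEG0           0x80000000u
#define PADDR_TO_KVADDR(pa)  ((pa) + MIPS_KSEG0)

int main(void) {
	unsigned pa = 0x0001a000u;           /* illustrative frame address */
	unsigned va = PADDR_TO_KVADDR(pa);   /* 0x8001a000 */
	/* free_kpages runs the subtraction to get back to the frame: */
	printf("kvaddr 0x%x -> paddr 0x%x\n", va, va - MIPS_KSEG0);
	return 0;
}
```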

View File

@@ -0,0 +1,901 @@
/*
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
* The President and Fellows of Harvard College.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <spl.h>
#include <cpu.h>
#include <spinlock.h>
#include <proc.h>
#include <current.h>
#include <mips/tlb.h>
#include <addrspace.h>
#include <vm.h>
/*
* Dumb MIPS-only "VM system" that is intended to only be just barely
* enough to struggle off the ground. You should replace all of this
* code while doing the VM assignment. In fact, starting in that
* assignment, this file is not included in your kernel!
*
* NOTE: it's been found over the years that students often begin on
* the VM assignment by copying dumbvm.c and trying to improve it.
* This is not recommended. dumbvm is (more or less intentionally) not
* a good design reference. The first recommendation would be: do not
* look at dumbvm at all. The second recommendation would be: if you
* do, be sure to review it from the perspective of comparing it to
* what a VM system is supposed to do, and understanding what corners
* it's cutting (there are many) and why, and more importantly, how.
*/
/* under dumbvm, always have 72k of user stack */
/* (this must be > 64K so argument blocks of size ARG_MAX will fit) */
#define DUMBVM_STACKPAGES 18
/* G.Cabodi: set DUMBVM_WITH_FREE
* - 0: original dumbvm
* - 1: support for alloc/free
*/
#define DUMBVM_WITH_FREE 1
/*
* Wrap ram_stealmem in a spinlock.
*/
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
#if DUMBVM_WITH_FREE
/* G.Cabodi - support for free/alloc */
static struct spinlock freemem_lock = SPINLOCK_INITIALIZER;
static unsigned char *freeRamFrames = NULL;
static unsigned long *allocSize = NULL;
static int nRamFrames = 0;
static int allocTableActive = 0;
static int isTableActive () {
int active;
spinlock_acquire(&freemem_lock);
active = allocTableActive;
spinlock_release(&freemem_lock);
return active;
}
void
vm_bootstrap(void)
{
int i;
nRamFrames = ((int)ram_getsize())/PAGE_SIZE;
/* alloc freeRamFrame and allocSize */
freeRamFrames = kmalloc(sizeof(unsigned char)*nRamFrames);
if (freeRamFrames==NULL) return;
allocSize = kmalloc(sizeof(unsigned long)*nRamFrames);
if (allocSize==NULL) {
/* reset to disable this vm management */
freeRamFrames = NULL; return;
}
for (i=0; i<nRamFrames; i++) {
freeRamFrames[i] = (unsigned char)0;
allocSize[i] = 0;
}
spinlock_acquire(&freemem_lock);
allocTableActive = 1;
spinlock_release(&freemem_lock);
}
/*
* Check if we're in a context that can sleep. While most of the
* operations in dumbvm don't in fact sleep, in a real VM system many
* of them would. In those, assert that sleeping is ok. This helps
* avoid the situation where syscall-layer code that works ok with
* dumbvm starts blowing up during the VM assignment.
*/
static void
dumbvm_can_sleep(void)
{
if (CURCPU_EXISTS()) {
/* must not hold spinlocks */
KASSERT(curcpu->c_spinlocks == 0);
/* must not be in an interrupt handler */
KASSERT(curthread->t_in_interrupt == 0);
}
}
static paddr_t
getfreeppages(unsigned long npages) {
paddr_t addr;
long i, first, found, np = (long)npages;
if (!isTableActive()) return 0;
spinlock_acquire(&freemem_lock);
for (i=0,first=found=-1; i<nRamFrames; i++) {
if (freeRamFrames[i]) {
if (i==0 || !freeRamFrames[i-1])
first = i; /* set first free in an interval */
if (i-first+1 >= np) {
found = first;
break;
}
}
}
if (found>=0) {
for (i=found; i<found+np; i++) {
freeRamFrames[i] = (unsigned char)0;
}
allocSize[found] = np;
addr = (paddr_t) found*PAGE_SIZE;
}
else {
addr = 0;
}
spinlock_release(&freemem_lock);
return addr;
}
static paddr_t
getppages(unsigned long npages)
{
paddr_t addr;
/* try freed pages first */
addr = getfreeppages(npages);
if (addr == 0) {
/* call stealmem */
spinlock_acquire(&stealmem_lock);
addr = ram_stealmem(npages);
spinlock_release(&stealmem_lock);
}
if (addr!=0 && isTableActive()) {
spinlock_acquire(&freemem_lock);
allocSize[addr/PAGE_SIZE] = npages;
spinlock_release(&freemem_lock);
}
return addr;
}
static int
freeppages(paddr_t addr, unsigned long npages){
long i, first, np=(long)npages;
if (!isTableActive()) return 0;
first = addr/PAGE_SIZE;
KASSERT(allocSize!=NULL);
KASSERT(nRamFrames>first);
spinlock_acquire(&freemem_lock);
for (i=first; i<first+np; i++) {
freeRamFrames[i] = (unsigned char)1;
}
spinlock_release(&freemem_lock);
return 1;
}
/* Allocate/free some kernel-space virtual pages */
vaddr_t
alloc_kpages(unsigned npages)
{
paddr_t pa;
dumbvm_can_sleep();
pa = getppages(npages);
if (pa==0) {
return 0;
}
return PADDR_TO_KVADDR(pa);
}
void
free_kpages(vaddr_t addr){
if (isTableActive()) {
paddr_t paddr = addr - MIPS_KSEG0;
long first = paddr/PAGE_SIZE;
KASSERT(allocSize!=NULL);
KASSERT(nRamFrames>first);
freeppages(paddr, allocSize[first]);
}
}
void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
(void)ts;
panic("dumbvm tried to do tlb shootdown?!\n");
}
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
paddr_t paddr;
int i;
uint32_t ehi, elo;
struct addrspace *as;
int spl;
faultaddress &= PAGE_FRAME;
DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);
switch (faulttype) {
case VM_FAULT_READONLY:
/* We always create pages read-write, so we can't get this */
panic("dumbvm: got VM_FAULT_READONLY\n");
case VM_FAULT_READ:
case VM_FAULT_WRITE:
break;
default:
return EINVAL;
}
if (curproc == NULL) {
/*
* No process. This is probably a kernel fault early
* in boot. Return EFAULT so as to panic instead of
* getting into an infinite faulting loop.
*/
return EFAULT;
}
as = proc_getas();
if (as == NULL) {
/*
* No address space set up. This is probably also a
* kernel fault early in boot.
*/
return EFAULT;
}
/* Assert that the address space has been set up properly. */
KASSERT(as->as_vbase1 != 0);
KASSERT(as->as_pbase1 != 0);
KASSERT(as->as_npages1 != 0);
KASSERT(as->as_vbase2 != 0);
KASSERT(as->as_pbase2 != 0);
KASSERT(as->as_npages2 != 0);
KASSERT(as->as_stackpbase != 0);
KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);
vbase1 = as->as_vbase1;
vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
vbase2 = as->as_vbase2;
vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
stacktop = USERSTACK;
if (faultaddress >= vbase1 && faultaddress < vtop1) {
paddr = (faultaddress - vbase1) + as->as_pbase1;
}
else if (faultaddress >= vbase2 && faultaddress < vtop2) {
paddr = (faultaddress - vbase2) + as->as_pbase2;
}
else if (faultaddress >= stackbase && faultaddress < stacktop) {
paddr = (faultaddress - stackbase) + as->as_stackpbase;
}
else {
return EFAULT;
}
/* make sure it's page-aligned */
KASSERT((paddr & PAGE_FRAME) == paddr);
/* Disable interrupts on this CPU while frobbing the TLB. */
spl = splhigh();
for (i=0; i<NUM_TLB; i++) {
tlb_read(&ehi, &elo, i);
if (elo & TLBLO_VALID) {
continue;
}
ehi = faultaddress;
elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
tlb_write(ehi, elo, i);
splx(spl);
return 0;
}
kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
splx(spl);
return EFAULT;
}
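Each entry written by `vm_fault` pairs the faulting virtual page (`ehi`) with the physical frame plus permission bits (`elo`). A sketch of that composition; the mask and bit values are assumed from the stock OS/161 `mips/tlb.h`, not taken from this diff:

```c
#include <stdint.h>

#define PAGE_FRAME   0xfffff000u  /* mask off the in-page offset      */
#define TLBLO_DIRTY  0x00000400u  /* writeable bit (assumed value)    */
#define TLBLO_VALID  0x00000200u  /* entry-present bit (assumed value) */

/* Build the (ehi, elo) pair the way the fault handler does: virtual
 * page number on one side, frame plus RW/valid bits on the other. */
static void make_tlb_entry(uint32_t faultaddress, uint32_t paddr,
                           uint32_t *ehi, uint32_t *elo) {
	*ehi = faultaddress & PAGE_FRAME;          /* already masked in vm_fault */
	*elo = paddr | TLBLO_DIRTY | TLBLO_VALID;  /* always mapped read-write   */
}
```

Because every entry carries `TLBLO_DIRTY`, all pages are writeable, which is exactly why the handler panics on `VM_FAULT_READONLY`: that fault can never legitimately occur here.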
struct addrspace *
as_create(void)
{
struct addrspace *as = kmalloc(sizeof(struct addrspace));
if (as==NULL) {
return NULL;
}
as->as_vbase1 = 0;
as->as_pbase1 = 0;
as->as_npages1 = 0;
as->as_vbase2 = 0;
as->as_pbase2 = 0;
as->as_npages2 = 0;
as->as_stackpbase = 0;
return as;
}
void as_destroy(struct addrspace *as){
dumbvm_can_sleep();
freeppages(as->as_pbase1, as->as_npages1);
freeppages(as->as_pbase2, as->as_npages2);
freeppages(as->as_stackpbase, DUMBVM_STACKPAGES);
kfree(as);
}
void
as_activate(void)
{
int i, spl;
struct addrspace *as;
as = proc_getas();
if (as == NULL) {
return;
}
/* Disable interrupts on this CPU while frobbing the TLB. */
spl = splhigh();
for (i=0; i<NUM_TLB; i++) {
tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
}
splx(spl);
}
void
as_deactivate(void)
{
/* nothing */
}
int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
int readable, int writeable, int executable)
{
size_t npages;
dumbvm_can_sleep();
/* Align the region. First, the base... */
sz += vaddr & ~(vaddr_t)PAGE_FRAME;
vaddr &= PAGE_FRAME;
/* ...and now the length. */
sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;
npages = sz / PAGE_SIZE;
/* We don't use these - all pages are read-write */
(void)readable;
(void)writeable;
(void)executable;
if (as->as_vbase1 == 0) {
as->as_vbase1 = vaddr;
as->as_npages1 = npages;
return 0;
}
if (as->as_vbase2 == 0) {
as->as_vbase2 = vaddr;
as->as_npages2 = npages;
return 0;
}
/*
* Support for more than two regions is not available.
*/
kprintf("dumbvm: Warning: too many regions\n");
return ENOSYS;
}
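The two-step alignment rounds the base down to a page boundary and grows the length by the bytes cut off, then rounds the length up to whole pages, so the region still covers every byte it originally touched. A runnable example with illustrative numbers:

```c
#include <stdio.h>

#define PAGE_SIZE  4096
#define PAGE_FRAME 0xfffff000u   /* page-number bits of an address */

int main(void) {
	unsigned vaddr = 0x4001f2a0, sz = 0x1200;  /* illustrative region */
	sz += vaddr & ~PAGE_FRAME;                 /* add cut-off head: 0x14a0  */
	vaddr &= PAGE_FRAME;                       /* base down to 0x4001f000   */
	sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;    /* length up to 0x2000       */
	printf("base 0x%x, %u pages\n", vaddr, sz / PAGE_SIZE);  /* 2 pages */
	return 0;
}
```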
static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}
int
as_prepare_load(struct addrspace *as)
{
KASSERT(as->as_pbase1 == 0);
KASSERT(as->as_pbase2 == 0);
KASSERT(as->as_stackpbase == 0);
dumbvm_can_sleep();
as->as_pbase1 = getppages(as->as_npages1);
if (as->as_pbase1 == 0) {
return ENOMEM;
}
as->as_pbase2 = getppages(as->as_npages2);
if (as->as_pbase2 == 0) {
return ENOMEM;
}
as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
if (as->as_stackpbase == 0) {
return ENOMEM;
}
as_zero_region(as->as_pbase1, as->as_npages1);
as_zero_region(as->as_pbase2, as->as_npages2);
as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);
return 0;
}
int
as_complete_load(struct addrspace *as)
{
dumbvm_can_sleep();
(void)as;
return 0;
}
int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
KASSERT(as->as_stackpbase != 0);
*stackptr = USERSTACK;
return 0;
}
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
struct addrspace *new;
dumbvm_can_sleep();
new = as_create();
if (new==NULL) {
return ENOMEM;
}
new->as_vbase1 = old->as_vbase1;
new->as_npages1 = old->as_npages1;
new->as_vbase2 = old->as_vbase2;
new->as_npages2 = old->as_npages2;
/* (Mis)use as_prepare_load to allocate some physical memory. */
if (as_prepare_load(new)) {
as_destroy(new);
return ENOMEM;
}
KASSERT(new->as_pbase1 != 0);
KASSERT(new->as_pbase2 != 0);
KASSERT(new->as_stackpbase != 0);
memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
(const void *)PADDR_TO_KVADDR(old->as_pbase1),
old->as_npages1*PAGE_SIZE);
memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
(const void *)PADDR_TO_KVADDR(old->as_pbase2),
old->as_npages2*PAGE_SIZE);
memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
(const void *)PADDR_TO_KVADDR(old->as_stackpbase),
DUMBVM_STACKPAGES*PAGE_SIZE);
*ret = new;
return 0;
}
#else
/* G.Cabodi - original dumbvm */
void
vm_bootstrap(void)
{
/* Do nothing. */
}
/*
* Check if we're in a context that can sleep. While most of the
* operations in dumbvm don't in fact sleep, in a real VM system many
* of them would. In those, assert that sleeping is ok. This helps
* avoid the situation where syscall-layer code that works ok with
* dumbvm starts blowing up during the VM assignment.
*/
static
void
dumbvm_can_sleep(void)
{
if (CURCPU_EXISTS()) {
/* must not hold spinlocks */
KASSERT(curcpu->c_spinlocks == 0);
/* must not be in an interrupt handler */
KASSERT(curthread->t_in_interrupt == 0);
}
}
static
paddr_t
getppages(unsigned long npages)
{
paddr_t addr;
spinlock_acquire(&stealmem_lock);
addr = ram_stealmem(npages);
spinlock_release(&stealmem_lock);
return addr;
}
/* Allocate/free some kernel-space virtual pages */
vaddr_t
alloc_kpages(unsigned npages)
{
paddr_t pa;
dumbvm_can_sleep();
pa = getppages(npages);
if (pa==0) {
return 0;
}
return PADDR_TO_KVADDR(pa);
}
void
free_kpages(vaddr_t addr)
{
/* nothing - leak the memory. */
(void)addr;
}
void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
(void)ts;
panic("dumbvm tried to do tlb shootdown?!\n");
}
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
paddr_t paddr;
int i;
uint32_t ehi, elo;
struct addrspace *as;
int spl;
faultaddress &= PAGE_FRAME;
DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);
switch (faulttype) {
case VM_FAULT_READONLY:
/* We always create pages read-write, so we can't get this */
panic("dumbvm: got VM_FAULT_READONLY\n");
case VM_FAULT_READ:
case VM_FAULT_WRITE:
break;
default:
return EINVAL;
}
if (curproc == NULL) {
/*
* No process. This is probably a kernel fault early
* in boot. Return EFAULT so as to panic instead of
* getting into an infinite faulting loop.
*/
return EFAULT;
}
as = proc_getas();
if (as == NULL) {
/*
* No address space set up. This is probably also a
* kernel fault early in boot.
*/
return EFAULT;
}
/* Assert that the address space has been set up properly. */
KASSERT(as->as_vbase1 != 0);
KASSERT(as->as_pbase1 != 0);
KASSERT(as->as_npages1 != 0);
KASSERT(as->as_vbase2 != 0);
KASSERT(as->as_pbase2 != 0);
KASSERT(as->as_npages2 != 0);
KASSERT(as->as_stackpbase != 0);
KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);
vbase1 = as->as_vbase1;
vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
vbase2 = as->as_vbase2;
vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
stacktop = USERSTACK;
if (faultaddress >= vbase1 && faultaddress < vtop1) {
paddr = (faultaddress - vbase1) + as->as_pbase1;
}
else if (faultaddress >= vbase2 && faultaddress < vtop2) {
paddr = (faultaddress - vbase2) + as->as_pbase2;
}
else if (faultaddress >= stackbase && faultaddress < stacktop) {
paddr = (faultaddress - stackbase) + as->as_stackpbase;
}
else {
return EFAULT;
}
/* make sure it's page-aligned */
KASSERT((paddr & PAGE_FRAME) == paddr);
/* Disable interrupts on this CPU while frobbing the TLB. */
spl = splhigh();
for (i=0; i<NUM_TLB; i++) {
tlb_read(&ehi, &elo, i);
if (elo & TLBLO_VALID) {
continue;
}
ehi = faultaddress;
elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
tlb_write(ehi, elo, i);
splx(spl);
return 0;
}
kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
splx(spl);
return EFAULT;
}
struct addrspace *
as_create(void)
{
struct addrspace *as = kmalloc(sizeof(struct addrspace));
if (as==NULL) {
return NULL;
}
as->as_vbase1 = 0;
as->as_pbase1 = 0;
as->as_npages1 = 0;
as->as_vbase2 = 0;
as->as_pbase2 = 0;
as->as_npages2 = 0;
as->as_stackpbase = 0;
return as;
}
void
as_destroy(struct addrspace *as)
{
dumbvm_can_sleep();
kfree(as);
}
void
as_activate(void)
{
int i, spl;
struct addrspace *as;
as = proc_getas();
if (as == NULL) {
return;
}
/* Disable interrupts on this CPU while frobbing the TLB. */
spl = splhigh();
for (i=0; i<NUM_TLB; i++) {
tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
}
splx(spl);
}
void
as_deactivate(void)
{
/* nothing */
}
int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
int readable, int writeable, int executable)
{
size_t npages;
dumbvm_can_sleep();
/* Align the region. First, the base... */
sz += vaddr & ~(vaddr_t)PAGE_FRAME;
vaddr &= PAGE_FRAME;
/* ...and now the length. */
sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;
npages = sz / PAGE_SIZE;
/* We don't use these - all pages are read-write */
(void)readable;
(void)writeable;
(void)executable;
if (as->as_vbase1 == 0) {
as->as_vbase1 = vaddr;
as->as_npages1 = npages;
return 0;
}
if (as->as_vbase2 == 0) {
as->as_vbase2 = vaddr;
as->as_npages2 = npages;
return 0;
}
/*
* Support for more than two regions is not available.
*/
kprintf("dumbvm: Warning: too many regions\n");
return ENOSYS;
}
static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}
int
as_prepare_load(struct addrspace *as)
{
KASSERT(as->as_pbase1 == 0);
KASSERT(as->as_pbase2 == 0);
KASSERT(as->as_stackpbase == 0);
dumbvm_can_sleep();
as->as_pbase1 = getppages(as->as_npages1);
if (as->as_pbase1 == 0) {
return ENOMEM;
}
as->as_pbase2 = getppages(as->as_npages2);
if (as->as_pbase2 == 0) {
return ENOMEM;
}
as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
if (as->as_stackpbase == 0) {
return ENOMEM;
}
as_zero_region(as->as_pbase1, as->as_npages1);
as_zero_region(as->as_pbase2, as->as_npages2);
as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);
return 0;
}
int
as_complete_load(struct addrspace *as)
{
dumbvm_can_sleep();
(void)as;
return 0;
}
int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
KASSERT(as->as_stackpbase != 0);
*stackptr = USERSTACK;
return 0;
}
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
struct addrspace *new;
dumbvm_can_sleep();
new = as_create();
if (new==NULL) {
return ENOMEM;
}
new->as_vbase1 = old->as_vbase1;
new->as_npages1 = old->as_npages1;
new->as_vbase2 = old->as_vbase2;
new->as_npages2 = old->as_npages2;
/* (Mis)use as_prepare_load to allocate some physical memory. */
if (as_prepare_load(new)) {
as_destroy(new);
return ENOMEM;
}
KASSERT(new->as_pbase1 != 0);
KASSERT(new->as_pbase2 != 0);
KASSERT(new->as_stackpbase != 0);
memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
(const void *)PADDR_TO_KVADDR(old->as_pbase1),
old->as_npages1*PAGE_SIZE);
memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
(const void *)PADDR_TO_KVADDR(old->as_pbase2),
old->as_npages2*PAGE_SIZE);
memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
(const void *)PADDR_TO_KVADDR(old->as_stackpbase),
DUMBVM_STACKPAGES*PAGE_SIZE);
*ret = new;
return 0;
}
#endif

View File

@@ -30,3 +30,6 @@ options sfs # Always use the file system
#options netfs # You might write this as a project.
options dumbvm # Chewing gum and baling wire.
+#options semlock #locks with semaphores
+options wchanlock #locks with wchans

View File

@@ -444,6 +444,9 @@ optfile net test/nettest.c
defoption hello
optfile hello main/hello.c
+defoption semlock
+defoption wchanlock
# LAB2
file syscall/file_syscalls.c
file syscall/proc_syscalls.c
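For each `defoption` selected in a kernel config (`options wchanlock` above), the OS/161 config script generates an `opt-<name>.h` header defining `OPT_<NAME>` to 1 (0 when the option is off); that is what `synch.h` and `synch.c` test with `#if` below. Roughly, the generated header should look like this sketch (not copied from an actual build):

```c
/* Automatically generated by the kernel config script; do not edit.
 * Sketch of opt-wchanlock.h for a config that selects the option. */
#ifndef _OPT_WCHANLOCK_H_
#define _OPT_WCHANLOCK_H_

#define OPT_WCHANLOCK 1   /* would be 0 if the option were not selected */

#endif /* _OPT_WCHANLOCK_H_ */
```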

View File

@@ -37,6 +37,9 @@
#include <spinlock.h>
+#include "opt-semlock.h"
+#include "opt-wchanlock.h"
/*
* Dijkstra-style semaphore.
*
@@ -72,10 +75,19 @@ void V(struct semaphore *);
* The name field is for easier debugging. A copy of the name is
* (should be) made internally.
*/
struct lock {
char *lk_name;
+volatile struct thread *owner;
+#if OPT_SEMLOCK
+struct semaphore *sem;
+#elif OPT_WCHANLOCK
+struct spinlock slk;
+struct wchan *wchan;
+#else
// add what you need here
// (don't forget to mark things volatile as needed)
+#endif
};
struct lock *lock_create(const char *name);

View File

@@ -156,6 +156,24 @@ lock_create(const char *name)
// add stuff here as needed
+#if OPT_SEMLOCK
+lock->sem = sem_create(name, 1);
+if (lock->sem == NULL) {
+kfree(lock->lk_name);
+kfree(lock);
+return NULL;
+}
+#elif OPT_WCHANLOCK
+lock->wchan = wchan_create(lock->lk_name);
+if (lock->wchan == NULL) {
+kfree(lock->lk_name);
+kfree(lock);
+return NULL;
+}
+spinlock_init(&lock->slk);
+#endif
+lock->owner = NULL;
return lock;
}
@@ -167,6 +185,12 @@ lock_destroy(struct lock *lock)
// add stuff here as needed
kfree(lock->lk_name);
+#if OPT_SEMLOCK
+sem_destroy(lock->sem);
+#elif OPT_WCHANLOCK
+wchan_destroy(lock->wchan);
+spinlock_cleanup(&lock->slk);
+#endif
kfree(lock);
}
@@ -175,7 +199,19 @@ lock_acquire(struct lock *lock)
{
// Write this
-(void)lock; // suppress warning until code gets written
+#if OPT_SEMLOCK
+P(lock->sem);
+lock->owner = curthread;
+#elif OPT_WCHANLOCK
+spinlock_acquire(&lock->slk);
+while (lock->owner != NULL) {
+wchan_sleep(lock->wchan, &lock->slk);
+}
+lock->owner = curthread;
+spinlock_release(&lock->slk);
+#else
+lock->owner = curthread;
+#endif
}
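The wchan branch is the classic sleep-lock: test `owner` under the spinlock, and if the lock is taken, `wchan_sleep` atomically releases the spinlock, blocks, and reacquires it on wakeup, so the `while` re-test closes the race window. The same shape in userspace pthreads, as a hedged analogy (`sleeplock` and its functions are invented names, not the kernel API):

```c
#include <pthread.h>

/* The mutex plays the spinlock, the condition variable plays the wait
 * channel. pthread_t has no NULL, so a separate 'held' flag stands in
 * for the kernel's owner != NULL test. */
struct sleeplock {
	pthread_mutex_t slk;
	pthread_cond_t  wchan;
	pthread_t       owner;   /* meaningful only while held is 1 */
	int             held;
};

void sleeplock_init(struct sleeplock *l) {
	pthread_mutex_init(&l->slk, NULL);
	pthread_cond_init(&l->wchan, NULL);
	l->held = 0;
}

void sleeplock_acquire(struct sleeplock *l) {
	pthread_mutex_lock(&l->slk);
	while (l->held)                        /* re-test after every wakeup */
		pthread_cond_wait(&l->wchan, &l->slk);
	l->held = 1;
	l->owner = pthread_self();
	pthread_mutex_unlock(&l->slk);
}

void sleeplock_release(struct sleeplock *l) {
	pthread_mutex_lock(&l->slk);
	l->held = 0;
	pthread_cond_broadcast(&l->wchan);     /* like wchan_wakeall */
	pthread_mutex_unlock(&l->slk);
}
```

`pthread_cond_wait` gives the same atomic drop-and-block guarantee for the mutex that `wchan_sleep` gives for the spinlock. Waking everyone matches the `wchan_wakeall` used in `lock_release` below; waking a single waiter (`wchan_wakeone` in the kernel, `pthread_cond_signal` here) would also be correct and cheaper under heavy contention.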
void
@@ -183,17 +219,36 @@ lock_release(struct lock *lock)
{
// Write this
-(void)lock; // suppress warning until code gets written
+KASSERT(lock != NULL);
+KASSERT(lock_do_i_hold(lock));
+#if OPT_SEMLOCK
+lock->owner = NULL;
+V(lock->sem);
+#elif OPT_WCHANLOCK
+spinlock_acquire(&lock->slk);
+lock->owner = NULL;
+wchan_wakeall(lock->wchan, &lock->slk);
+spinlock_release(&lock->slk);
+#else
+lock->owner = NULL;
+#endif
}
bool
lock_do_i_hold(struct lock *lock)
{
// Write this
-(void)lock; // suppress warning until code gets written
-return true; // dummy until code gets written
+KASSERT(lock != NULL);
+#if OPT_WCHANLOCK
+bool ret;
+spinlock_acquire(&lock->slk);
+ret = (lock->owner == curthread);
+spinlock_release(&lock->slk);
+return ret;
+#else
+return lock->owner == curthread;
+#endif
}
////////////////////////////////////////////////////////////