trapslab initialized

2025-06-12 10:45:04 +08:00
parent 2a8fad821b
commit 028cf61d61
40 changed files with 366 additions and 3188 deletions

View File

@@ -1,7 +1,3 @@
#ifdef LAB_MMAP
typedef unsigned long size_t;
typedef long int off_t;
#endif
struct buf;
struct context;
struct file;
@@ -121,10 +117,6 @@ void initlock(struct spinlock*, char*);
void release(struct spinlock*);
void push_off(void);
void pop_off(void);
int atomic_read4(int *addr);
#ifdef LAB_LOCK
void freelock(struct spinlock*);
#endif
// sleeplock.c
void acquiresleep(struct sleeplock*);
@@ -181,17 +173,6 @@ uint64 walkaddr(pagetable_t, uint64);
int copyout(pagetable_t, uint64, char *, uint64);
int copyin(pagetable_t, char *, uint64, uint64);
int copyinstr(pagetable_t, char *, uint64, uint64);
#if defined(LAB_PGTBL) || defined(SOL_MMAP)
void vmprint(pagetable_t);
#endif
#ifdef LAB_PGTBL
pte_t* pgpte(pagetable_t, uint64);
void superfree(void *pa);
void* superalloc();
int copyin_new(pagetable_t, char *, uint64, uint64);
int copyinstr_new(pagetable_t, char *, uint64, uint64);
uint64 sys_dirtypages(void);
#endif
// plic.c
void plicinit(void);
@@ -206,31 +187,3 @@ void virtio_disk_intr(void);
// number of elements in fixed-size array
#define NELEM(x) (sizeof(x)/sizeof((x)[0]))
#ifdef LAB_LOCK
// stats.c
void statsinit(void);
void statsinc(void);
// sprintf.c
int snprintf(char*, unsigned long, const char*, ...);
#endif
#ifdef KCSAN
void kcsaninit();
#endif
#ifdef LAB_NET
// pci.c
void pci_init();
// e1000.c
void e1000_init(uint32 *);
void e1000_intr(void);
int e1000_transmit(char *, int);
// net.c
void netinit(void);
void net_rx(char *buf, int len);
#endif

View File

@@ -128,10 +128,6 @@ exec(char *path, char **argv)
p->trapframe->sp = sp; // initial stack pointer
proc_freepagetable(oldpagetable, oldsz);
if (p->pid == 1) {
vmprint(p->pagetable);
}
return argc; // this ends up in a0, the first argument to main(argc, argv)
bad:

View File

@@ -23,46 +23,10 @@ struct {
struct run *freelist;
} kmem;
struct super_run {
struct super_run *next;
};
struct {
struct spinlock lock;
struct super_run *freelist;
} skmem;
void superfree(void *pa) {
struct super_run *r;
if(((uint64)pa % SUPERPGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
panic("superfree");
// Fill with junk to catch dangling refs.
memset(pa, 1, SUPERPGSIZE);
r = (struct super_run *)pa;
acquire(&skmem.lock);
r->next = skmem.freelist;
skmem.freelist = r;
release(&skmem.lock);
}
void* superalloc() {
struct super_run *r;
acquire(&skmem.lock);
r = skmem.freelist;
if(r) skmem.freelist = r->next;
release(&skmem.lock);
if(r) memset((void*)r, 0, SUPERPGSIZE);
return (void*)r;
}
void
kinit()
{
initlock(&kmem.lock, "kmem");
initlock(&skmem.lock, "skmem");
freerange(end, (void*)PHYSTOP);
}
@@ -71,22 +35,9 @@ freerange(void *pa_start, void *pa_end)
{
char *p;
p = (char*)PGROUNDUP((uint64)pa_start);
for(; p + PGSIZE <= (char*)pa_end - 12 * 1024 * 1024; p += PGSIZE) // leave the top 12 MB for superpages (about 5 huge pages)
for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE)
kfree(p);
p = (char*)SUPERPGROUNDUP((uint64)p);
for (; p + SUPERPGSIZE <= (char *)pa_end; p += SUPERPGSIZE) {
superfree(p);
}
}
// void
// freerange(void *pa_start, void *pa_end)
// {
// char *p;
// p = (char*)PGROUNDUP((uint64)pa_start);
// for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE)
// kfree(p);
// }
// Free the page of physical memory pointed at by pa,
// which normally should have been returned by a

View File

@@ -25,10 +25,6 @@
#define VIRTIO0 0x10001000
#define VIRTIO0_IRQ 1
#ifdef LAB_NET
#define E1000_IRQ 33
#endif
// qemu puts platform-level interrupt controller (PLIC) here.
#define PLIC 0x0c000000L
#define PLIC_PRIORITY (PLIC + 0x0)
@@ -49,7 +45,7 @@
// map kernel stacks beneath the trampoline,
// each surrounded by invalid guard pages.
#define KSTACK(p) (TRAMPOLINE - (p)*2*PGSIZE - 3*PGSIZE)
#define KSTACK(p) (TRAMPOLINE - ((p)+1)* 2*PGSIZE)
// User memory layout.
// Address zero first:
@@ -58,14 +54,6 @@
// fixed-size stack
// expandable heap
// ...
// USYSCALL (shared with kernel)
// TRAPFRAME (p->trapframe, used by the trampoline)
// TRAMPOLINE (the same page as in the kernel)
#define TRAPFRAME (TRAMPOLINE - PGSIZE)
#ifdef LAB_PGTBL
#define USYSCALL (TRAPFRAME - PGSIZE)
struct usyscall {
int pid; // Process ID
};
#endif
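The USYSCALL page defined above is mapped read-only into every user address space so a process can read its own pid without a system call. A minimal user-side sketch of that idea, assuming USYSCALL and struct usyscall as defined above and the pgtbl lab's ugetpid() naming (not part of this commit):
#include "kernel/memlayout.h"   // USYSCALL and struct usyscall (needs LAB_PGTBL)
// Sketch only: read the pid straight from the shared page instead of
// trapping into the kernel with getpid(). This works only while
// proc_pagetable() still maps USYSCALL with PTE_R | PTE_U.
int
ugetpid(void)
{
  struct usyscall *u = (struct usyscall *) USYSCALL;
  return u->pid;
}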

View File

@@ -123,20 +123,14 @@ allocproc(void)
found:
p->pid = allocpid();
p->state = USED;
// Allocate a trapframe page.
if((p->trapframe = (struct trapframe *)kalloc()) == 0){
release(&p->lock);
return 0;
}
// Allocate a usyscall page.
if((p->usyscall = (struct usyscall *)kalloc()) == 0){
freeproc(p);
release(&p->lock);
return 0;
}
p->usyscall->pid = p->pid;
// An empty user page table.
p->pagetable = proc_pagetable(p);
@@ -161,9 +155,6 @@ found:
static void
freeproc(struct proc *p)
{
if (p->usyscall) {
kfree((void*)p->usyscall);
}
if(p->trapframe)
kfree((void*)p->trapframe);
p->trapframe = 0;
@@ -211,16 +202,6 @@ proc_pagetable(struct proc *p)
return 0;
}
// map the usyscall page just below TRAPFRAME.
// this page needs PTE_U set so that user mode can read it
if(mappages(pagetable, USYSCALL, PGSIZE,
(uint64)(p->usyscall), PTE_R | PTE_U) < 0){
uvmunmap(pagetable, TRAPFRAME, 1, 0);
uvmunmap(pagetable, TRAMPOLINE, 1, 0);
uvmfree(pagetable, 0);
return 0;
}
return pagetable;
}
@@ -229,7 +210,6 @@ proc_pagetable(struct proc *p)
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
uvmunmap(pagetable, USYSCALL, 1, 0);
uvmunmap(pagetable, TRAMPOLINE, 1, 0);
uvmunmap(pagetable, TRAPFRAME, 1, 0);
uvmfree(pagetable, sz);
@@ -401,20 +381,8 @@ exit(int status)
release(&wait_lock);
// Jump into the scheduler, never to return.
// If we somehow return from sched(), we're in a bad state
sched();
// If we reach here, something is very wrong.
// But instead of panicking immediately, try to become truly unrunnable
acquire(&p->lock);
p->state = UNUSED; // Mark as unused to prevent rescheduling
release(&p->lock);
// Try one more time to schedule
sched();
// If we still reach here after marking as UNUSED, panic
panic("zombie exit: process returned from sched twice");
panic("zombie exit");
}
// Wait for a child process to exit and return its pid.

View File

@@ -94,12 +94,11 @@ struct proc {
// wait_lock must be held when using this:
struct proc *parent; // Parent process
// these are private to the process, so p->lock need not be held.
uint64 kstack; // Virtual address of kernel stack
uint64 sz; // Size of process memory (bytes)
pagetable_t pagetable; // User page table
// add a usyscall field to the proc struct
struct usyscall *usyscall; // data page for usyscall
struct trapframe *trapframe; // data page for trampoline.S
struct context context; // swtch() here to run process
struct file *ofile[NOFILE]; // Open files

View File

@@ -1,45 +0,0 @@
//
// ramdisk that uses the disk image loaded by qemu -initrd fs.img
//
#include "types.h"
#include "riscv.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "spinlock.h"
#include "sleeplock.h"
#include "fs.h"
#include "buf.h"
void
ramdiskinit(void)
{
}
// If B_DIRTY is set, write buf to disk, clear B_DIRTY, set B_VALID.
// Else if B_VALID is not set, read buf from disk, set B_VALID.
void
ramdiskrw(struct buf *b)
{
if(!holdingsleep(&b->lock))
panic("ramdiskrw: buf not locked");
if((b->flags & (B_VALID|B_DIRTY)) == B_VALID)
panic("ramdiskrw: nothing to do");
if(b->blockno >= FSSIZE)
panic("ramdiskrw: blockno too big");
uint64 diskaddr = b->blockno * BSIZE;
char *addr = (char *)RAMDISK + diskaddr;
if(b->flags & B_DIRTY){
// write
memmove(addr, b->data, BSIZE);
b->flags &= ~B_DIRTY;
} else {
// read
memmove(b->data, addr, BSIZE);
b->flags |= B_VALID;
}
}

View File

@@ -204,7 +204,7 @@ r_menvcfg()
static inline void
w_menvcfg(uint64 x)
{
//asm volatile("csrw menvcfg, %0" : : "r" (x));
// asm volatile("csrw menvcfg, %0" : : "r" (x));
asm volatile("csrw 0x30a, %0" : : "r" (x));
}
@@ -314,14 +314,6 @@ r_sp()
return x;
}
static inline uint64
r_fp()
{
uint64 x;
asm volatile("mv %0, s0" : "=r" (x) );
return x;
}
// read and write tp, the thread pointer, which xv6 uses to hold
// this core's hartid (core number), the index into cpus[].
static inline uint64
@@ -362,11 +354,6 @@ typedef uint64 *pagetable_t; // 512 PTEs
#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12 // bits of offset within a page
#ifdef LAB_PGTBL
#define SUPERPGSIZE (2 * (1 << 20)) // bytes per superpage (2 MB)
#define SUPERPGROUNDUP(sz) (((sz)+SUPERPGSIZE-1) & ~(SUPERPGSIZE-1))
#endif
#define PGROUNDUP(sz) (((sz)+PGSIZE-1) & ~(PGSIZE-1))
#define PGROUNDDOWN(a) (((a)) & ~(PGSIZE-1))
@@ -375,14 +362,6 @@ typedef uint64 *pagetable_t; // 512 PTEs
#define PTE_W (1L << 2)
#define PTE_X (1L << 3)
#define PTE_U (1L << 4) // user can access
#define PTE_A (1L << 6) // Accessed bit
#define PTE_D (1L << 7) // Dirty bit
#define PTE_PS (1L << 8) // Page Size bit in PTE (for 2MB superpages)
#if defined(LAB_MMAP) || defined(LAB_PGTBL)
#define PTE_LEAF(pte) (((pte) & PTE_R) | ((pte) & PTE_W) | ((pte) & PTE_X))
#endif
// shift a physical address to the right place for a PTE.
#define PA2PTE(pa) ((((uint64)pa) >> 12) << 10)
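The SUPERPGSIZE and PTE_PS definitions above describe 2 MB superpages, which are leaf PTEs installed at level 1 of the Sv39 page table, so the low 21 bits of the virtual address become the offset within the mapping. A small sketch of that translation arithmetic, assuming these removed macros plus stock xv6's PTE2PA (the helper name is illustrative, not kernel code):
// Sketch: physical address for a leaf PTE, superpage-aware.
// Assumes the SUPERPGSIZE/PTE_PS definitions above; PTE2PA is stock xv6.
static inline uint64
leaf_pa(pte_t pte, uint64 va)
{
  if(pte & PTE_PS)
    return PTE2PA(pte) + (va & (SUPERPGSIZE - 1));  // offset within a 2 MB superpage
  return PTE2PA(pte) + (va & (PGSIZE - 1));         // offset within a 4 KB page
}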

View File

@@ -102,19 +102,6 @@ extern uint64 sys_link(void);
extern uint64 sys_mkdir(void);
extern uint64 sys_close(void);
#ifdef LAB_NET
extern uint64 sys_bind(void);
extern uint64 sys_unbind(void);
extern uint64 sys_send(void);
extern uint64 sys_recv(void);
#endif
#ifdef LAB_PGTBL
extern uint64 sys_pgpte(void);
extern uint64 sys_kpgtbl(void);
extern uint64 sys_pgaccess(void);
extern uint64 sys_dirtypages(void);
#endif
// An array mapping syscall numbers from syscall.h
// to the function that handles the system call.
static uint64 (*syscalls[])(void) = {
@@ -139,22 +126,8 @@ static uint64 (*syscalls[])(void) = {
[SYS_link] sys_link,
[SYS_mkdir] sys_mkdir,
[SYS_close] sys_close,
#ifdef LAB_NET
[SYS_bind] sys_bind,
[SYS_unbind] sys_unbind,
[SYS_send] sys_send,
[SYS_recv] sys_recv,
#endif
#ifdef LAB_PGTBL
[SYS_pgpte] sys_pgpte,
[SYS_kpgtbl] sys_kpgtbl,
[SYS_pgaccess] sys_pgaccess,
[SYS_dirtypages] sys_dirtypages,
#endif
};
void
syscall(void)
{

View File

@@ -20,20 +20,3 @@
#define SYS_link 19
#define SYS_mkdir 20
#define SYS_close 21
// System calls for labs
#define SYS_trace 22
#define SYS_sysinfo 23
#define SYS_sigalarm 24
#define SYS_sigreturn 25
#define SYS_symlink 26
#define SYS_mmap 27
#define SYS_munmap 28
#define SYS_bind 29
#define SYS_unbind 30
#define SYS_send 31
#define SYS_recv 32
#define SYS_pgpte 33
#define SYS_kpgtbl 34
#define SYS_pgaccess 35
#define SYS_dirtypages 36

View File

@@ -1,7 +0,0 @@
#include "kernel/types.h"
struct sysinfo {
uint64 freemem;
uint64 nproc;
uint64 unused_proc_num;
uint64 load_avg;
};

View File

@@ -1,7 +1,7 @@
#include "types.h"
#include "riscv.h"
#include "param.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "spinlock.h"
#include "proc.h"
@@ -54,7 +54,6 @@ sys_sleep(void)
int n;
uint ticks0;
argint(0, &n);
if(n < 0)
n = 0;
@@ -71,37 +70,6 @@ sys_sleep(void)
return 0;
}
#ifdef LAB_PGTBL
int
sys_pgpte(void)
{
uint64 va;
struct proc *p;
p = myproc();
argaddr(0, &va);
pte_t *pte = pgpte(p->pagetable, va);
if(pte != 0) {
return (uint64) *pte;
}
return 0;
}
#endif
#ifdef LAB_PGTBL
int
sys_kpgtbl(void)
{
struct proc *p;
p = myproc();
vmprint(p->pagetable);
return 0;
}
#endif
uint64
sys_kill(void)
{
@@ -123,82 +91,3 @@ sys_uptime(void)
release(&tickslock);
return xticks;
}
uint64
sys_pgaccess(void)
{
// lab pgtbl: your code here.
struct proc *p = myproc();
unsigned int abits=0;
uint64 addr;
argaddr(0, &addr);
int num;
argint(1,&num);
uint64 dest;
argaddr(2, &dest);
for(int i=0;i<num;i++){
uint64 query_addr = addr + i * PGSIZE ;
pte_t * pte=walk(p->pagetable, query_addr, 0);
if(*pte&PTE_A)
{
abits=abits|(1<<i);
*pte=(*pte)&(~PTE_A);
}
}
if(copyout(p->pagetable,dest,(char*)&abits, sizeof(abits)) < 0)
return -1;
return 0;
}
#ifdef LAB_PGTBL
uint64
sys_dirtypages(void)
{
struct proc *p = myproc();
unsigned int dbits = 0;
uint64 addr;
argaddr(0, &addr);
int num;
argint(1, &num);
uint64 dest;
argaddr(2, &dest);
// Check each page in the range
for(int i = 0; i < num; i++){
uint64 query_addr = addr + i * PGSIZE;
pte_t *pte = walk(p->pagetable, query_addr, 0);
if(pte == 0)
continue; // Skip pages that don't exist
if(*pte & PTE_D) {
dbits = dbits | (1 << i);
// Clear the dirty bit after reading it
*pte = (*pte) & (~PTE_D);
}
}
// Copy the result back to user space
if(copyout(p->pagetable, dest, (char*)&dbits, sizeof(dbits)) < 0)
return -1;
return 0;
}
#endif
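Both sys_pgaccess and sys_dirtypages above pack one bit per page into an unsigned int and copyout the result. A minimal user-side sketch of consuming that bitmask, assuming a pgaccess(base, npages, mask) user stub wired to SYS_pgaccess as in the pgtbl lab (the stub and helper names are assumptions, not part of this commit):
#include "kernel/types.h"
#include "kernel/riscv.h"    // PGSIZE
#include "user/user.h"       // assumed pgaccess() stub, malloc, printf
void
check_access(void)
{
  char *buf = malloc(3 * PGSIZE);
  unsigned int mask = 0;
  buf[1 * PGSIZE] = 1;              // touch the second page so its PTE_A gets set
  if(pgaccess(buf, 3, &mask) < 0){  // bit i of mask == 1 if page i was accessed
    printf("pgaccess failed\n");
  } else if(mask & (1 << 1)){
    printf("second page was accessed\n");
  }
  free(buf);
}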

View File

@@ -4,8 +4,6 @@
#include "elf.h"
#include "riscv.h"
#include "defs.h"
#include "spinlock.h"
#include "proc.h"
#include "fs.h"
/*
@@ -17,9 +15,6 @@ extern char etext[]; // kernel.ld sets this to end of kernel code.
extern char trampoline[]; // trampoline.S
// void sub_vmprint(pagetable_t pagetable, int level);
// Make a direct-map page table for the kernel.
pagetable_t
kvmmake(void)
@@ -35,14 +30,6 @@ kvmmake(void)
// virtio mmio disk interface
kvmmap(kpgtbl, VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
#ifdef LAB_NET
// PCI-E ECAM (configuration space), for pci.c
kvmmap(kpgtbl, 0x30000000L, 0x30000000L, 0x10000000, PTE_R | PTE_W);
// pci.c maps the e1000's registers here.
kvmmap(kpgtbl, 0x40000000L, 0x40000000L, 0x20000, PTE_R | PTE_W);
#endif
// PLIC
kvmmap(kpgtbl, PLIC, PLIC, 0x4000000, PTE_R | PTE_W);
@@ -99,17 +86,12 @@ pte_t *
walk(pagetable_t pagetable, uint64 va, int alloc)
{
if(va >= MAXVA)
return 0;
panic("walk");
for(int level = 2; level > 0; level--) {
pte_t *pte = &pagetable[PX(level, va)];
if(*pte & PTE_V) {
pagetable = (pagetable_t)PTE2PA(*pte);
#ifdef LAB_PGTBL
if (*pte & PTE_PS) {
return pte;
}
#endif
} else {
if(!alloc || (pagetable = (pde_t*)kalloc()) == 0)
return 0;
@@ -120,25 +102,6 @@ walk(pagetable_t pagetable, uint64 va, int alloc)
return &pagetable[PX(0, va)];
}
pte_t *
super_walk(pagetable_t pagetable, uint64 va, int alloc)
{
if (va >= MAXVA)
return 0;
pte_t *pte = &(pagetable[PX(2, va)]);
if (*pte & PTE_V) {
pagetable = (pagetable_t)PTE2PA(*pte);
} else {
if (!alloc || (pagetable = (pde_t*)kalloc()) == 0)
return 0;
memset(pagetable, 0, PGSIZE);
*pte = PA2PTE(pagetable) | PTE_V;
}
return &pagetable[PX(1, va)];
}
// Look up a virtual address, return the physical address,
// or 0 if not mapped.
// Can only be used to look up user pages.
@@ -159,17 +122,9 @@ walkaddr(pagetable_t pagetable, uint64 va)
if((*pte & PTE_U) == 0)
return 0;
pa = PTE2PA(*pte);
if(*pte & PTE_PS) {
// For superpages, add the offset within the superpage
pa += va & (SUPERPGSIZE - 1);
} else {
// For regular pages, add the offset within the page
pa += va & (PGSIZE - 1);
}
return pa;
}
// add a mapping to the kernel page table.
// only used when booting.
// does not flush TLB or enable paging.
@@ -201,35 +156,18 @@ mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
panic("mappages: size");
a = va;
if ((perm & PTE_PS) == 0) { /* not using superpages */
last = va + size - PGSIZE;
for(;;){
if((pte = walk(pagetable, a, 1)) == 0)
return -1;
if(*pte & PTE_V)
panic("mappages: remap");
*pte = PA2PTE(pa) | perm | PTE_V;
if(a == last)
break;
a += PGSIZE;
pa += PGSIZE;
}
} else { /* using superpages */
last = va + size - SUPERPGSIZE;
for (;;) {
if ((pte = super_walk(pagetable, a, 1)) == 0)
return -1;
if (*pte & PTE_V)
panic("super mappages: remap");
*pte = PA2PTE(pa) | perm | PTE_V;
if (a == last)
break;
a += SUPERPGSIZE;
pa += SUPERPGSIZE;
}
last = va + size - PGSIZE;
for(;;){
if((pte = walk(pagetable, a, 1)) == 0)
return -1;
if(*pte & PTE_V)
panic("mappages: remap");
*pte = PA2PTE(pa) | perm | PTE_V;
if(a == last)
break;
a += PGSIZE;
pa += PGSIZE;
}
return 0;
}
@@ -241,42 +179,22 @@ uvmunmap(pagetable_t pagetable, uint64 va, uint64 npages, int do_free)
{
uint64 a;
pte_t *pte;
uint64 end_va = va + npages * PGSIZE;
if((va % PGSIZE) != 0)
panic("uvmunmap: not aligned");
for(a = va; a < end_va; ){
if((pte = walk(pagetable, a, 0)) == 0) {
// If we can't find a PTE, skip to next page
a += PGSIZE;
continue;
}
if((*pte & PTE_V) == 0) {
// If page is not valid, skip to next page
a += PGSIZE;
continue;
}
for(a = va; a < va + npages*PGSIZE; a += PGSIZE){
if((pte = walk(pagetable, a, 0)) == 0)
panic("uvmunmap: walk");
if((*pte & PTE_V) == 0)
panic("uvmunmap: not mapped");
if(PTE_FLAGS(*pte) == PTE_V)
panic("uvmunmap: not a leaf");
if ((*pte & PTE_PS)) { /* free a superpage */
if(do_free){
uint64 pa = PTE2PA(*pte);
superfree((void*)pa);
}
*pte = 0;
// Make sure we don't go beyond the requested range
uint64 next_a = a + SUPERPGSIZE;
a = (next_a > end_va) ? end_va : next_a;
} else {
if(do_free){
uint64 pa = PTE2PA(*pte);
kfree((void*)pa);
}
*pte = 0;
a += PGSIZE;
if(do_free){
uint64 pa = PTE2PA(*pte);
kfree((void*)pa);
}
*pte = 0;
}
}
@@ -309,7 +227,6 @@ uvmfirst(pagetable_t pagetable, uchar *src, uint sz)
memmove(mem, src, sz);
}
// Allocate PTEs and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
uint64
@@ -317,85 +234,24 @@ uvmalloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz, int xperm)
{
char *mem;
uint64 a;
if(newsz < oldsz)
return oldsz;
oldsz = PGROUNDUP(oldsz);
// Check if the allocation should use superpages
// We use superpages if we're allocating at least 2MB AND
// the range contains a superpage-aligned 2MB region
if (newsz - oldsz >= SUPERPGSIZE) {
uint64 super_start = SUPERPGROUNDUP(oldsz);
uint64 super_end = newsz & ~(SUPERPGSIZE - 1); // Round down to superpage boundary
// Allocate regular pages before the first superpage boundary
for(a = oldsz; a < super_start; a += PGSIZE){
mem = kalloc();
if(mem == 0){
uvmdealloc(pagetable, a, oldsz);
return 0;
}
#ifndef LAB_SYSCALL
memset(mem, 0, PGSIZE);
#endif
if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_R|PTE_U|xperm) != 0){
kfree(mem);
uvmdealloc(pagetable, a, oldsz);
return 0;
}
for(a = oldsz; a < newsz; a += PGSIZE){
mem = kalloc();
if(mem == 0){
uvmdealloc(pagetable, a, oldsz);
return 0;
}
// Allocate superpages for aligned regions
for (a = super_start; a < super_end; a += SUPERPGSIZE) {
mem = superalloc();
if (mem == 0) {
uvmdealloc(pagetable, super_start, oldsz);
return 0;
}
if (mappages(pagetable, a, SUPERPGSIZE, (uint64)mem, PTE_R | PTE_U | PTE_PS | xperm) != 0) {
superfree(mem);
uvmdealloc(pagetable, super_start, oldsz);
return 0;
}
}
// Allocate regular pages after the last superpage boundary
for(a = super_end; a < newsz; a += PGSIZE){
mem = kalloc();
if(mem == 0){
uvmdealloc(pagetable, a, oldsz);
return 0;
}
#ifndef LAB_SYSCALL
memset(mem, 0, PGSIZE);
#endif
if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_R|PTE_U|xperm) != 0){
kfree(mem);
uvmdealloc(pagetable, a, oldsz);
return 0;
}
}
} else {
// Allocation is smaller than SUPERPGSIZE, use regular pages
for(a = oldsz; a < newsz; a += PGSIZE){
mem = kalloc();
if(mem == 0){
uvmdealloc(pagetable, a, oldsz);
return 0;
}
#ifndef LAB_SYSCALL
memset(mem, 0, PGSIZE);
#endif
if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_R|PTE_U|xperm) != 0){
kfree(mem);
uvmdealloc(pagetable, a, oldsz);
return 0;
}
memset(mem, 0, PGSIZE);
if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_R|PTE_U|xperm) != 0){
kfree(mem);
uvmdealloc(pagetable, a, oldsz);
return 0;
}
}
return newsz;
}
@@ -460,54 +316,26 @@ uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
uint64 pa, i;
uint flags;
char *mem;
int szinc;
for(i = 0; i < sz; i += szinc){
szinc = PGSIZE;
for(i = 0; i < sz; i += PGSIZE){
if((pte = walk(old, i, 0)) == 0)
panic("uvmcopy: pte should exist");
if((*pte & PTE_V) == 0)
panic("uvmcopy: page not present");
pa = PTE2PA(*pte);
flags = PTE_FLAGS(*pte);
if ((flags & PTE_PS) == 0) {
if((mem = kalloc()) == 0)
goto err;
memmove(mem, (char*)pa, PGSIZE);
if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){
kfree(mem);
goto err;
}
} else {
if ((mem = superalloc()) == 0)
goto err;
if (mappages(new, i, SUPERPGSIZE, (uint64)mem, flags) != 0) {
superfree(mem);
goto err;
}
memmove(mem, (char*)pa, SUPERPGSIZE);
szinc = SUPERPGSIZE; /* advance by a full superpage */
if((mem = kalloc()) == 0)
goto err;
memmove(mem, (char*)pa, PGSIZE);
if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){
kfree(mem);
goto err;
}
}
return 0;
err:
// Clean up properly - need to unmap what we've mapped so far
for(uint64 j = 0; j < i; j += PGSIZE) {
pte_t *cleanup_pte = walk(new, j, 0);
if(cleanup_pte && (*cleanup_pte & PTE_V)) {
if(*cleanup_pte & PTE_PS) {
// This is a superpage, skip ahead
superfree((void*)PTE2PA(*cleanup_pte));
*cleanup_pte = 0;
j += SUPERPGSIZE - PGSIZE; // Will be incremented by PGSIZE in loop
} else {
kfree((void*)PTE2PA(*cleanup_pte));
*cleanup_pte = 0;
}
}
}
uvmunmap(new, 0, i / PGSIZE, 1);
return -1;
}
@@ -535,32 +363,21 @@ copyout(pagetable_t pagetable, uint64 dstva, char *src, uint64 len)
while(len > 0){
va0 = PGROUNDDOWN(dstva);
if (va0 >= MAXVA)
if(va0 >= MAXVA)
return -1;
if((pte = walk(pagetable, va0, 0)) == 0) {
// printf("copyout: pte should exist 0x%x %d\n", dstva, len);
pte = walk(pagetable, va0, 0);
if(pte == 0 || (*pte & PTE_V) == 0 || (*pte & PTE_U) == 0 ||
(*pte & PTE_W) == 0)
return -1;
}
// forbid copyout over read-only user text pages.
if((*pte & PTE_W) == 0)
return -1;
pa0 = walkaddr(pagetable, va0);
if(pa0 == 0)
return -1;
// Calculate the correct page size and boundary
uint64 pgsize = (*pte & PTE_PS) ? SUPERPGSIZE : PGSIZE;
uint64 va_base = va0 & ~(pgsize - 1);
n = pgsize - (dstva - va_base);
pa0 = PTE2PA(*pte);
n = PGSIZE - (dstva - va0);
if(n > len)
n = len;
memmove((void *)(pa0 + (dstva - va_base)), src, n);
memmove((void *)(pa0 + (dstva - va0)), src, n);
len -= n;
src += n;
dstva = va_base + pgsize;
dstva = va0 + PGSIZE;
}
return 0;
}
@@ -572,30 +389,20 @@ int
copyin(pagetable_t pagetable, char *dst, uint64 srcva, uint64 len)
{
uint64 n, va0, pa0;
pte_t *pte;
while(len > 0){
va0 = PGROUNDDOWN(srcva);
if (va0 >= MAXVA)
return -1;
if((pte = walk(pagetable, va0, 0)) == 0) {
return -1;
}
pa0 = walkaddr(pagetable, va0);
if(pa0 == 0)
return -1;
// Calculate the correct page size and boundary
uint64 pgsize = (*pte & PTE_PS) ? SUPERPGSIZE : PGSIZE;
uint64 va_base = va0 & ~(pgsize - 1);
n = pgsize - (srcva - va_base);
n = PGSIZE - (srcva - va0);
if(n > len)
n = len;
memmove(dst, (void *)(pa0 + (srcva - va_base)), n);
memmove(dst, (void *)(pa0 + (srcva - va0)), n);
len -= n;
dst += n;
srcva = va_base + pgsize;
srcva = va0 + PGSIZE;
}
return 0;
}
@@ -609,27 +416,17 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
{
uint64 n, va0, pa0;
int got_null = 0;
pte_t *pte;
while(got_null == 0 && max > 0){
va0 = PGROUNDDOWN(srcva);
if (va0 >= MAXVA)
return -1;
if((pte = walk(pagetable, va0, 0)) == 0) {
return -1;
}
pa0 = walkaddr(pagetable, va0);
if(pa0 == 0)
return -1;
// Calculate the correct page size and boundary
uint64 pgsize = (*pte & PTE_PS) ? SUPERPGSIZE : PGSIZE;
uint64 va_base = va0 & ~(pgsize - 1);
n = pgsize - (srcva - va_base);
n = PGSIZE - (srcva - va0);
if(n > max)
n = max;
char *p = (char *) (pa0 + (srcva - va_base));
char *p = (char *) (pa0 + (srcva - va0));
while(n > 0){
if(*p == '\0'){
*dst = '\0';
@@ -644,7 +441,7 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
dst++;
}
srcva = va_base + pgsize;
srcva = va0 + PGSIZE;
}
if(got_null){
return 0;
@@ -652,46 +449,3 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
return -1;
}
}
#ifdef LAB_PGTBL
void vmprint(pagetable_t pagetable);
static void vmprint_recursive(pagetable_t pagetable, int level, uint64 va_base) {
for (int i = 0; i < 512; i++) {
pte_t pte = pagetable[i];
if (pte & PTE_V) {
uint64 pa = PTE2PA(pte);
uint64 va = va_base + ((uint64)i << (12 + 9 * (2 - level)));
for (int j = 0; j < level; j++)
printf(" ..");
if (level > 0)
printf(" ");
if (level == 0) {
printf(" ..%p\n", (void*)va);
} else {
printf("..%p\n", (void*)va);
}
// not a leaf PTE; recurse into the next-level page table
if ((pte & (PTE_R | PTE_W | PTE_X)) == 0) {
vmprint_recursive((pagetable_t)pa, level + 1, va);
}
}
}
}
void vmprint(pagetable_t pagetable) {
printf("page table %p\n", pagetable);
vmprint_recursive(pagetable, 0, 0);
}
#endif
#ifdef LAB_PGTBL
pte_t*
pgpte(pagetable_t pagetable, uint64 va) {
return walk(pagetable, va, 0);
}
#endif
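For reference, the removed vmprint() above prints one line per valid PTE, adding one " .." of indentation per page-table level and recursing into non-leaf entries. Its output for init's page table has roughly this shape (addresses are illustrative only):
page table 0x0000000087f6c000
 ..0x0000000000000000
 .. ..0x0000000000000000
 .. .. ..0x0000000000000000
 .. .. ..0x0000000000001000
 ..0x0000003fc0000000
 .. ..0x0000003fffe00000
 .. .. ..0x0000003fffffe000
 .. .. ..0x0000003ffffff000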