Sleep和Wakeup实现了coordination;Sleep和Wakeup总是同时出现,而且这两个函数的参数都是一个64位的数字,该数字代表对应的sleep channel。sleep函数的第二个参数一般是一个锁,该锁用来确保不会发生lost wakeup.
https://pdos.csail.mit.edu/6.S081/2020/labs/lock.html
该实验的主要目的是提升各个CPU分配物理内存的效率。在修改代码之前,所有CPU在申请物理内存时使用一把锁,锁的粒度比较大。因此此实验的目的是降低锁的粒度,提升效率。主要步骤如下:
具体实现如下:
文件kernel/kalloc.c
// Physical memory allocator, for user processes,
// kernel stacks, page-table pages,
// and pipe buffers. Allocates whole 4096-byte pages.
#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "spinlock.h"
#include "riscv.h"
#include "defs.h"
void freerange(void *pa_start, void *pa_end);
extern char end[]; // first address after kernel.
// defined by kernel.ld.
// A free physical page; the page's own memory holds the list link.
struct run {
struct run *next;
};
// A free-list of physical pages protected by its own spinlock.
struct kmem {
struct spinlock lock;
struct run *freelist;
} ;
// One lock and one free-page list per CPU, so allocations on different
// CPUs no longer contend on a single global allocator lock.
struct kmem cpukmems[NCPU];
// A distinct name for each per-CPU lock ("kmem<i>", filled in by
// initcpulocks()) so lock statistics can be told apart per CPU.
char name[NCPU][10];
// Initialize every per-CPU allocator: give each CPU's lock a unique
// name of the form "kmem<cpuid>" and start with an empty free list.
void
initcpulocks()
{
  int i;
  struct kmem *km;

  for (i = 0; i < NCPU; i++) {
    km = &cpukmems[i];
    snprintf(name[i], sizeof(name[i]), "kmem%d", i);
    initlock(&km->lock, name[i]);
    km->freelist = 0;
  }
}
// Free the page of physical memory pointed at by pa, placing it on
// CPU id's free list. pa normally comes from a prior kalloc()
// (except during allocator initialization; see kinit).
// Panics if pa is not page-aligned or lies outside [end, PHYSTOP).
void
kfreeforcpu(void *pa, int id)
{
  if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
    panic("kfree");

  // Fill with junk to catch dangling references.
  memset(pa, 1, PGSIZE);

  struct kmem *km = &cpukmems[id];
  struct run *page = (struct run*)pa;

  acquire(&km->lock);
  page->next = km->freelist;
  km->freelist = page;
  release(&km->lock);
}
/**
 * Distribute the physical pages in [pa_start, pa_end) across the
 * per-CPU free lists. With P whole pages and NCPU CPUs, each CPU
 * receives P/NCPU pages and the last CPU additionally receives the
 * P%NCPU leftover pages (e.g. 102 pages, 4 CPUs -> 25, 25, 25, 27).
 */
void
kinitforcpu(void *pa_start, void *pa_end) {
  // Round inward so only whole pages fully inside the range are used.
  // Use char* for the cursor: arithmetic on void* is a GNU extension
  // and undefined in ISO C.
  char *p = (char*)PGROUNDUP((uint64)pa_start);
  char *limit = (char*)PGROUNDDOWN((uint64)pa_end);

  int cnt = (limit - p) / PGSIZE; // total number of whole pages
  int length = cnt / NCPU;        // pages per CPU; remainder handled below

  for (int i = 0; i < NCPU; i++) {
    for (int j = 0; j < length; j++) {
      kfreeforcpu(p, i); // give this page to CPU i
      p += PGSIZE;
    }
  }
  // Hand any leftover pages to the last CPU.
  while (p + PGSIZE <= limit) {
    kfreeforcpu(p, NCPU - 1);
    p += PGSIZE;
  }
}
// Called once at boot: set up the per-CPU locks and free lists, then
// distribute all free physical memory (from the end of the kernel
// image up to PHYSTOP) across the CPUs.
void
kinit()
{
initcpulocks();
kinitforcpu(end, (void*)PHYSTOP);
}
// Free the physical page pa, returning it to the free list of the
// CPU this code happens to be running on.
void
kfree(void* pa)
{
  // cpuid() is only meaningful while interrupts are off.
  push_off();
  int cpu = cpuid();
  pop_off();

  kfreeforcpu(pa, cpu);
}
// Steal one page from some other CPU's free list. Called when CPU
// id's own list is empty. Returns a junk-filled page, or 0 if every
// CPU is out of memory.
void*
stealing(int id)
{
  for (int i = 0; i < NCPU; i++) {
    if (i == id)
      continue; // skip ourselves: our list is already known to be empty

    struct kmem *victim = &cpukmems[i];

    acquire(&victim->lock);
    struct run *page = victim->freelist;
    if (page)
      victim->freelist = page->next;
    release(&victim->lock);

    if (page) {
      memset((char *) page, 5, PGSIZE); // fill with junk
      return (void*)page;
    }
  }
  return 0;
}
// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use, or 0 if no CPU has a
// free page. Tries the current CPU's list first, then steals.
void *
kalloc(void)
{
  push_off(); // cpuid() requires interrupts off
  int cpu = cpuid();
  pop_off();

  struct kmem *km = &cpukmems[cpu];

  acquire(&km->lock);
  struct run *page = km->freelist;
  if(page)
    km->freelist = page->next;
  release(&km->lock);

  if(page == 0)
    return stealing(cpu); // our list is empty; take from another CPU

  memset((char *) page, 5, PGSIZE); // fill with junk
  return (void*)page;
}
执行结果1

执行结果2

执行结果3







执行结果4

在修改代码之前,BufferCache是通过双向链表连接在一起的,使用时通过最近最少使用(LRU)的算法进行buffer的分配。因为整个双向链表就一把锁(锁的粒度太大),导致高并发时BufferCache使用效率较低。
该实验的目标,是将BufferCache以HashTable的形式组织在一起,并在桶级别进行加锁,从而提升高并发下的BufferCache使用的效率。
代码如下:
文件param.h,增加桶长度和链表长度的宏定义:
#define NBUCKETS 107 // number of buckets in the buffer-cache hash table (107 is prime)
//#define NSIZE ((NBUF)/(NBUCKETS) + 1)
#define NSIZE 2 // number of buffers stored in each bucket
文件kernel/buf.h,对结构体增加时间戳,用于标识最近最少使用:
struct buf {
int valid; // has data been read from disk?
int disk; // does disk "own" buf?
uint dev;
uint blockno;
struct sleeplock lock;
uint refcnt;
struct buf *prev; // LRU cache list
struct buf *next;
uchar data[BSIZE];
uint timestamp; // tick count of last release; smallest value = least recently used
};
文件kernel/bio.c,主要将双向链表改为HashMap。
// Hash-table bucket of the buffer cache: each bucket carries its own
// spinlock, a name for that lock, and a fixed-size array of buffers.
// This replaces the old single doubly-linked LRU list guarded by one
// global lock, so contention is now per bucket.
struct {
struct spinlock lock;
// Name for this bucket's lock ("bcache<i>", filled in by binit()).
char name[10];
// Buffers owned by this bucket; within a bucket, bget() picks the
// free buffer with the smallest timestamp (least recently used).
struct buf buf[NSIZE];
} bcache[NBUCKETS];
// Map a (dev, blockno) pair to a bucket index in [0, NBUCKETS).
// Note: dev is currently ignored, so equal block numbers on different
// devices share a bucket; correctness is unaffected because bget()
// compares both dev and blockno when searching the bucket.
int hash(uint dev, uint blockno) {
return blockno % NBUCKETS;
}
// Initialize the buffer cache: give each bucket a uniquely named
// spinlock ("bcache<i>"), and initialize every buffer's sleep-lock,
// reference count, and LRU timestamp.
void
binit(void)
{
  for (int i = 0; i < NBUCKETS; i++) {
    snprintf(bcache[i].name, sizeof(bcache[i].name), "bcache%d", i);
    initlock(&bcache[i].lock, bcache[i].name);

    for (int j = 0; j < NSIZE; j++) {
      struct buf *b = &bcache[i].buf[j];
      b->refcnt = 0;
      b->timestamp = ticks;
      initsleeplock(&b->lock, "buffer");
    }
  }
}
// Look through the buffer cache for block (dev, blockno).
// If not cached, recycle the least recently used unused buffer in
// the block's bucket. In either case, return a locked buffer.
// Panics if every buffer in the bucket is in use.
static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b;
  int id = hash(dev, blockno);

  acquire(&bcache[id].lock);

  // Is the block already cached in its bucket?
  for(int i = 0; i < NSIZE; i++){
    b = &bcache[id].buf[i];
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      release(&bcache[id].lock);
      acquiresleep(&b->lock);
      return b;
    }
  }

  // Not cached. Recycle the unused (refcnt == 0) buffer with the
  // smallest timestamp, i.e. the least recently used one.
  // Track the victim with a pointer rather than a sentinel timestamp:
  // the previous `uint min = -1; ... if (min != -1)` scheme compared
  // signed and unsigned and would miss a free buffer whose timestamp
  // happened to equal 0xffffffff.
  struct buf *lru = 0;
  for (int i = 0; i < NSIZE; i++) {
    b = &bcache[id].buf[i];
    if (b->refcnt == 0 && (lru == 0 || b->timestamp < lru->timestamp)) {
      lru = b;
    }
  }
  if (lru) {
    lru->dev = dev;
    lru->blockno = blockno;
    lru->valid = 0; // force bread() to fetch the contents from disk
    lru->refcnt = 1;
    release(&bcache[id].lock);
    acquiresleep(&lru->lock);
    return lru;
  }
  panic("bget: no buffers");
}
// Return a locked buffer holding the contents of the indicated block,
// reading it from disk first if it is not already valid.
struct buf*
bread(uint dev, uint blockno)
{
  struct buf *buffer = bget(dev, blockno);

  if(buffer->valid == 0) {
    virtio_disk_rw(buffer, 0); // 0 = read from disk
    buffer->valid = 1;
  }
  return buffer;
}
// Write b's contents to disk. The caller must hold b's sleep-lock
// (i.e. b must have come from bread()/bget() and not yet been
// released); panics otherwise.
void
bwrite(struct buf *b)
{
if(!holdingsleep(&b->lock))
panic("bwrite");
virtio_disk_rw(b, 1);
}
// Release a locked buffer: drop the sleep-lock and the reference
// count, and when the buffer becomes unused, stamp it with the
// current tick count so bget() can pick it as an LRU victim.
void
brelse(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("brelse");
  releasesleep(&b->lock);

  int bucket = hash(b->dev, b->blockno);

  acquire(&bcache[bucket].lock);
  b->refcnt--;
  if (b->refcnt == 0) {
    // No one is using or waiting for the buffer; record release time.
    b->timestamp = ticks;
  }
  release(&bcache[bucket].lock);
}
// Pin b in the cache: bump its reference count under the bucket lock
// so bget() will not recycle it.
void
bpin(struct buf *b) {
  int bucket = hash(b->dev, b->blockno);

  acquire(&bcache[bucket].lock);
  b->refcnt = b->refcnt + 1;
  release(&bcache[bucket].lock);
}
// Undo a bpin(): drop b's reference count under the bucket lock,
// making it eligible for recycling once the count reaches zero.
void
bunpin(struct buf *b) {
  int bucket = hash(b->dev, b->blockno);

  acquire(&bcache[bucket].lock);
  b->refcnt = b->refcnt - 1;
  release(&bcache[bucket].lock);
}
执行结果:







注:在调用usertests时,将测试案例writebig注释了,因为不注释会报panic的错误,但是将该测试单独执行时,不报panic。

$ make grade

$ git commit -m "lab multithreading"
$ make handin
登录网站https://6828.scripts.mit.edu/2020/handin.py/student,可以看到提交的结果。

https://mit-public-courses-cn-translatio.gitbook.io/mit6-s081
https://pdos.csail.mit.edu/6.S081/2020/schedule.html
源码: https://github.com/aerfalwl/mit-xv6-labs-2020