cynix

x86 UNIX-like OS
git clone git://git.2f30.org/cynix

mm.c (20956B)


      1 /*
      2  *  core/mm.c
      3  *
      4  *  Copyright (C) 2009 stateless
      5  */
      6 
      7 #include <mm.h>
      8 #include <multiboot.h>
      9 #include <common.h>
     10 #include <string.h>
     11 #include <idt.h>
     12 #include <x86.h>
     13 #include <heap.h>
     14 #include <errno.h>
     15 #include <tss.h>
     16 #include <serial.h>
     17 
     18 #define SET_PTE(pte, p, rw_mode, priv, frame_addr) \
     19 	({ \
     20 		pte.present = p; \
     21 		pte.rw = rw_mode; \
     22 		pte.privilege = priv; \
     23 		pte.frame = frame_addr; \
     24 	})
     25 
     26 #define SET_PDE(pde, p, rw_mode, priv, pt) \
     27 	({ \
     28 		pde.present = p; \
     29 		pde.rw = rw_mode; \
     30 		pde.privilege = priv; \
     31 		pde.pt_addr = pt; \
     32 	})
     33 
     34 #define PTABLE_BASE_ADDR 0xffc00000
     35 #define PDIR_BASE_ADDR 0xfffff000
     36 
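        /*
         * Both constants depend on the recursive page-directory trick used
         * below: the last directory slot is pointed back at the directory
         * itself (see init_vm() and create_pdir()), so page table N becomes
         * visible at PTABLE_BASE_ADDR + (N << PAGE_SHIFT) and the directory
         * itself at PDIR_BASE_ADDR.  For example, the page table behind
         * directory slot 0x300 lives at 0xffc00000 + (0x300 << 12) = 0xfff00000.
         */
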
     37 enum page_fl { P_FL = 1 << 0, RW_FL = 1 << 1, US_FL = 1 << 2 };
     38 
     39 extern multiboot_info_t *info_boot;
     40 extern uint32_t initrd_end;
     41 
     42 struct page_directory_t *new_kernel_pdir = NULL;
     43 
     44 static uint32_t mm_phys_addr = 0, mm_phys_addr_end = 0;
     45 static uint8_t *framemap = NULL, *pagemap_pa = NULL;
     46 static struct page_directory_t *kernel_page_dir = NULL;
     47 static struct page_table_t *kernel_page_table = NULL;
     48 static uint32_t curr_kernel_heap_addr = KERNEL_HEAP, total_frames = 0;
     49 
     50 static inline uint32_t
     51 get_frame_block(uint32_t frame)
     52 {
     53 	return frame / 8;
     54 }
     55 
     56 static inline uint32_t
     57 get_frame_offset(uint32_t frame)
     58 {
     59 	return frame % 8;
     60 }
     61 
     62 static inline bool
     63 is_frame_free(uint32_t frame)
     64 {
     65 	return !(framemap[get_frame_block(frame)] & (1 << get_frame_offset(frame)));
     66 }
     67 
     68 static inline uint32_t
     69 get_page_block(uint32_t page)
     70 {
     71 	return page / 8;
     72 }
     73 
     74 static inline uint32_t
     75 get_page_offset(uint32_t page)
     76 {
     77 	return page % 8;
     78 }
     79 
     80 static inline bool
     81 is_page_free(uint32_t page)
     82 {
     83 	return !(pagemap_pa[get_page_block(page)] & (1 << get_page_offset(page)));
     84 }
     85 
     86 static inline void
     87 enable_paging(void)
     88 {
     89 	uint32_t cr0;
     90 
     91 	asm volatile ("movl %%cr0, %0": "=r"(cr0));
     92 	cr0 |= 0x80000000;
     93 	asm volatile ("movl %0, %%cr0":: "r"(cr0));
     94 }
     95 
     96 static inline void
     97 disable_paging(void)
     98 {
     99 	uint32_t cr0;
    100 
    101 	asm volatile ("movl %%cr0, %0": "=r"(cr0));
    102 	cr0 &= 0x7fffffff;
    103 	asm volatile ("movl %0, %%cr0":: "r"(cr0));
    104 }
    105 
    106 static inline uint32_t
    107 get_fault_addr(void)
    108 {
    109 	uint32_t cr2;
    110 
    111 	asm volatile ("movl %%cr2, %0" : "=r"(cr2));
    112 	return cr2;
    113 }
    114 
    115 static uint32_t
    116 get_free_frame(void)
    117 {
    118 	void *page;
    119 
    120 	page = palloc(PAGE_SIZE);
    121 	if (IS_ERR(page)) {
    122 		errno = -PTR_ERR(page);
    123 		return 0;
    124 	}
    125 	return (uint32_t)page;
    126 }
    127 
    128 static inline uint32_t
    129 get_pte_index(uint32_t virt_addr)
    130 {
    131 	return (virt_addr & 0x3ff000) >> 12;
    132 }
    133 
    134 static inline uint32_t
    135 get_pde_index(uint32_t virt_addr)
    136 {
    137 	return (virt_addr & PTABLE_BASE_ADDR) >> 22;
    138 }
    139 
    140 static inline struct page_directory_t *
    141 curr_pdir(void) {
    142 	return (struct page_directory_t *)PDIR_BASE_ADDR;
    143 }
    144 
    145 static inline struct page_table_t *
    146 get_ptable(uint32_t index) {
    147 	return (struct page_table_t *)(PTABLE_BASE_ADDR + index);
    148 }
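
        /*
         * Worked example for the helpers above (the address is arbitrary):
         * for virt_addr = 0xc0001234,
         *   get_pde_index() = (0xc0001234 & 0xffc00000) >> 22 = 0x300
         *   get_pte_index() = (0xc0001234 & 0x3ff000) >> 12 = 0x001
         * and the page table covering it is reachable through the recursive
         * mapping as get_ptable(0x300 << PAGE_SHIFT) = (void *)0xfff00000.
         */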
    149 
    150 void
    151 switch_page_dir(struct page_directory_t *pagedir)
    152 {
    153 	uint32_t phys;
    154 	bool val = false;
    155 
    156 	if (!pagedir)
    157 		panic("invalid pagedir!");
    158 	phys = virt_to_phys((uint32_t)pagedir, &val);
    159 	if (!val)
    160 		panic("virt_to_phys failed!");
    161 	if (phys % PAGE_SIZE)
    162 		panic("address of pagedir [0x%08lx] is not page aligned!",
    163 		      (void *)pagedir);
    164 	asm volatile ("movl %0, %%cr3":: "r"(phys));
    165 	flush_tlb();
    166 }
    167 
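        /*
         * Page fault handler.  The low three bits of the error code follow the
         * usual x86 layout decoded by the switch below: bit 0 (P_FL) set means
         * a protection violation rather than a non-present page, bit 1 (RW_FL)
         * means the faulting access was a write, and bit 2 (US_FL) means it
         * happened in user mode.
         */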
    168 void
    169 pagefault_callback(struct trapframe_t *regs)
    170 {
    171 	uint32_t errcode = regs->err_code, addr, page;
    172 	int ret = -ENOMEM;
    173 
    174 	addr = get_fault_addr();
    175 	serial_dump("mm: faulting address is [0x%08lx]", addr);
    176 	if (curr_proc) serial_dump(", task-id is [%lu]", curr_proc->pid);
    177 	switch (errcode & 0x7) {
    178 	case P_FL:
    179 		panic("supervisory process tried to read a page and caused a protection fault");
    180 		break;
    181 	case RW_FL:
    182 		page = get_free_frame();
    183 		if (!page) kerror(errno);
    184 		if ((ret = mmap(addr, page)) < 0)
    185 			kerror(-ret);
    186 		serial_dump(", found free frame at [0x%08lx]\n", page);
    187 		break;
    188 	case RW_FL | P_FL:
    189 		panic("supervisory process tried to write a page and caused a protection fault");
    190 		break;
    191 	case US_FL:
    192 		panic("user process tried to read a non-present page entry");
    193 		break;
    194 	case US_FL | P_FL:
    195 		panic("user process tried to read a page and caused a protection fault");
    196 		break;
    197 	case US_FL | RW_FL:
    198 		panic("user process tried to write to a non-present page entry");
    199 		break;
    200 	case US_FL | RW_FL | P_FL:
    201 		panic("user process tried to write a page and caused a protection fault");
    202 		break;
    203 	default:
    204 		panic("supervisory process tried to read a non-present page entry");
    205 		break;
    206 	}
    207 }
    208 
    209 /* retrieve the memory map from GRUB and initialize the physical memory allocator */
    210 int
    211 init_mm(void)
    212 {
    213 	int mm_phys_addr_valid = 0, ret = -EFAULT;
    214 	unsigned long base_addr, i, framemap_size; /* framemap_size: bytes, then frames */
    215 	memory_map_t *memmap;
    216 	uint32_t state;
    217 
    218 	save_flags(&state);
    219 	cli();
    220 	if (!info_boot) {
    221 		ret = -EINVAL;
    222 		goto err;
    223 	}
    224 
    225 	if (!(info_boot->flags & (1 << 6))) {
    226 		ret = -ENOTSUP;
    227 		goto err;
    228 	}
    229 
    230 	mm_phys_addr = (initrd_end + PAGE_SIZE) & ~(PAGE_SIZE - 1);
    231 	base_addr = info_boot->mmap_addr;
    232 	do {
    233 		memmap = (memory_map_t *)base_addr;
    234 		if (memmap->type == 1) {
    235 			if (memmap->base_addr_low >= 0x100000) {
    236 				if (!mm_phys_addr_valid && mm_phys_addr < memmap->base_addr_low
    237 						+ memmap->length_low) {
    238 					mm_phys_addr_valid = 1;
    239 					total_frames = (memmap->base_addr_low + memmap->length_low
    240 							- mm_phys_addr) >> PAGE_SHIFT;
    241 					framemap_size = (total_frames >> 3);
    242 					framemap_size = roundup_pagesize(framemap_size);
    243 					framemap_size >>= PAGE_SHIFT;
    244 					total_frames -= framemap_size;
    245 					if ((total_frames >> 3) > memmap->length_low) {
    246 						ret = -ENOMEM;
    247 						goto err;
    248 					}
    249 					break;
    250 				}
    251 			}
    252 		}
    253 		base_addr += memmap->size + sizeof(uint32_t);
    254 	} while (base_addr < info_boot->mmap_addr + info_boot->mmap_length);
    255 
    256 	if (!mm_phys_addr_valid) {
    257 		ret = -EFAULT;
    258 		goto err;
    259 	}
    260 
    261 	framemap = (uint8_t *)mm_phys_addr;
    262 	for (i = 0; i < total_frames >> 3; ++i)
    263 		framemap[i] = 0;
    264 
    265 	/* first bit in bitmap refers to this frame */
    266 	mm_phys_addr = ((uint32_t)framemap + (total_frames >> 3) + PAGE_SIZE)
    267 		       & ~(PAGE_SIZE - 1);
    268 	serial_dump("mm: # of available frames = %lu, total free mem = %luK\n",
    269 		    total_frames,
    270 		    (total_frames * PAGE_SIZE) >> 10);
    271 	/* zero out the start of available memory (total_frames / 8 pages), just in case */
    272 	for (i = 0; i < (total_frames - 1) >> 3; ++i)
    273 		memset((void *)(mm_phys_addr + (i << PAGE_SHIFT)), 0, PAGE_SIZE);
    274 	load_flags(state);
    275 	return 0;
    276 err:
    277 	load_flags(state);
    278 	return ret;
    279 }
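
        /*
         * Sizing example for the frame bitmap above (numbers are illustrative):
         * with 64 MiB of usable RAM past mm_phys_addr, total_frames starts out
         * as 64 MiB / 4 KiB = 16384, the bitmap needs 16384 / 8 = 2048 bytes,
         * which rounds up to a single 4 KiB page, so one frame is subtracted
         * from total_frames and the rest is handed to palloc()/pfree().
         */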
    280 
    281 /* initialize the virtual memory manager */
    282 int
    283 init_vm(void)
    284 {
    285 	int ret = -ENOMEM;
    286 	uint32_t i, state, addr, virt_addr;
    287 	struct page_directory_t *tmp;
    288 	struct page_table_t *ptable;
    289 
    290 	save_flags(&state);
    291 	cli();
    292 	kernel_page_dir = palloc(sizeof(*kernel_page_dir));
    293 	if (IS_ERR(kernel_page_dir)) {
    294 		ret = PTR_ERR(kernel_page_dir);
    295 		goto error;
    296 	}
    297 	memset(kernel_page_dir, 0, sizeof(*kernel_page_dir));
    298 
    299 	kernel_page_table = palloc(sizeof(*kernel_page_table));
    300 	if (IS_ERR(kernel_page_table)) {
    301 		ret = PTR_ERR(kernel_page_table);
    302 		goto error;
    303 	}
    304 	memset(kernel_page_table, 0, sizeof(*kernel_page_table));
    305 
    306 	addr = ((uint32_t)kernel_page_table > (uint32_t)kernel_page_dir) ?
    307 	       (uint32_t)kernel_page_table : (uint32_t)kernel_page_dir;
    308 	mm_phys_addr_end = roundup_pagesize(addr + PAGE_SIZE);
    309 
    310 	for (i = 0; i < 1024; ++i) {
    311 		SET_PTE(kernel_page_table->pages[i],
    312 			(i < (mm_phys_addr_end >> PAGE_SHIFT)) ? 1 : 0, 1, 0, i);
    313 	}
    314 
    315 	SET_PDE(kernel_page_dir->ptables[0], 1, 1, 0,
    316 		(uint32_t)kernel_page_table >> PAGE_SHIFT);
    317 	SET_PDE(kernel_page_dir->ptables[1023], 1, 1, 0,
    318 		(uint32_t)kernel_page_dir->ptables >> PAGE_SHIFT);
    319 	register_isr_handler(14, pagefault_callback);
    320 	asm volatile ("movl %0, %%cr3":: "r"(kernel_page_dir->ptables));
    321 	enable_paging();
    322 
    323 	/* preallocate the page tables for the kernel heap */
    324 	tmp = curr_pdir();
    325 	for (i = 0; i < (KERNEL_HEAP_END - KERNEL_HEAP) >> PAGE_SHIFT; ++i) {
    326 		virt_addr = get_pde_index(KERNEL_HEAP + (i << PAGE_SHIFT));
    327 		ptable = get_ptable(virt_addr << PAGE_SHIFT);
    328 		if (!tmp->ptables[virt_addr].present) {
    329 			addr = (uint32_t)palloc(sizeof(struct page_table_t));
    330 			if (IS_ERR((void *)addr)) {
    331 				ret = PTR_ERR((void *)addr);
    332 				goto error;
    333 			}
    334 			SET_PDE(tmp->ptables[virt_addr], 1, 1, 0, addr >> PAGE_SHIFT);
    335 			memset(ptable, 0, sizeof(*ptable));
    336 		}
    337 	}
    338 
    339 	init_heap();
    340 	if ((ret = init_page_pa_allocator()) < 0)
    341 		goto error;
    342 	new_kernel_pdir = clone_page_dir();
    343 	if (IS_ERR(new_kernel_pdir)) {
    344 		ret = PTR_ERR(new_kernel_pdir);
    345 		goto error;
    346 	}
    347 	switch_page_dir(new_kernel_pdir);
    348 	load_flags(state);
    349 	return 0;
    350 error:
    351 	load_flags(state);
    352 	return ret;
    353 }
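
        /*
         * After init_vm() returns, low physical memory up to mm_phys_addr_end
         * is identity mapped through kernel_page_table, directory slot 1023
         * points back at the directory (the recursive mapping behind
         * curr_pdir() and get_ptable()), and the page tables covering
         * KERNEL_HEAP..KERNEL_HEAP_END already exist.  Presumably the latter
         * is done so that every directory cloned later shares the kernel-heap
         * page tables and growth via sbrk() is visible in all address spaces.
         */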
    354 
    355 void *
    356 sbrk(intptr_t incr)
    357 {
    358 	uint32_t page_num;
    359 	uint32_t kernel_heap_addr_old = curr_kernel_heap_addr, state;
    360 	int ret = -ENOMEM;
    361 
    362 	if (incr < 0)
    363 		panic("negative increment %ld given!", incr);
    364 
    365 	incr = roundup_pagesize(incr);
    366 	page_num = incr / PAGE_SIZE;
    367 	save_flags(&state);
    368 	cli();
    369 	if ((curr_kernel_heap_addr + incr >= KERNEL_HEAP_END)
    370 			|| (ret = mmap_range(curr_kernel_heap_addr,
    371 					     curr_kernel_heap_addr + incr)) < 0)
    372 		goto err;
    373 	curr_kernel_heap_addr += incr;
    374 	load_flags(state);
    375 	return (void *)kernel_heap_addr_old;
    376 err:
    377 	load_flags(state);
    378 	return ERR_PTR(ret);
    379 }
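
        /*
         * Illustrative use of sbrk() (a sketch; presumably the heap code behind
         * init_heap() is the real consumer):
         *
         *	void *chunk = sbrk(2 * PAGE_SIZE);
         *	if (IS_ERR(chunk))
         *		return PTR_ERR(chunk);
         *	... chunk now points at two freshly mapped heap pages ...
         */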
    380 
    381 static struct page_directory_t *
    382 create_pdir(uint32_t *addr_phys) {
    383 	struct page_directory_t *new_pdir;
    384 
    385 	if (addr_phys) {
    386 		new_pdir = alloc_page_pa(addr_phys, ALLOC_PHYS);
    387 		if (IS_ERR(new_pdir))
    388 			return new_pdir;
    389 		memset(new_pdir, 0, PAGE_SIZE);
    390 		SET_PDE(new_pdir->ptables[1023], 1, 1, 0, *addr_phys >> PAGE_SHIFT);
    391 		return new_pdir;
    392 	}
    393 	return ERR_PTR(-EINVAL);
    394 }
    395 
    396 static int
    397 destroy_pdir(struct page_directory_t *pdir, uint32_t *phys, enum page_pa_fl flags)
    398 {
    399 	if (!pdir || !phys)
    400 		return -EINVAL;
    401 	free_page_pa(pdir, phys, flags);
    402 	return 0;
    403 }
    404 
    405 static int
    406 clone_page_table(struct page_table_t *ptable, uint32_t *phys)
    407 {
    408 	uint32_t src_frame_addr_phys, dst_frame_addr_phys, frame, i;
    409 	uint32_t state;
    410 	void *src_frame_addr_virt, *dst_frame_addr_virt;
    411 	struct page_table_t *new_ptable;
    412 	int ret;
    413 
    414 	if (!ptable || !phys)
    415 		return -EINVAL;
    416 
    417 	save_flags(&state);
    418 	cli();
    419 	new_ptable = alloc_page_pa(phys, ALLOC_PHYS);
    420 	if (IS_ERR(new_ptable)) {
    421 		ret = PTR_ERR(new_ptable);
    422 		goto out;
    423 	}
    424 	memset(new_ptable, 0, PAGE_SIZE);
    425 	for (i = 0; i < 1024; ++i) {
    426 		if (!ptable->pages[i].present)
    427 			continue;
    428 		frame = (uint32_t)palloc(PAGE_SIZE);
    429 		if (!IS_ERR((void *)frame)) {
    430 			*(uint32_t *)&new_ptable->pages[i] = *(uint32_t *)&ptable->pages[i];
    431 			new_ptable->pages[i].frame = frame >> PAGE_SHIFT;
    432 			src_frame_addr_phys = ptable->pages[i].frame << PAGE_SHIFT;
    433 			dst_frame_addr_phys = new_ptable->pages[i].frame << PAGE_SHIFT;
    434 			src_frame_addr_virt = alloc_page_pa(&src_frame_addr_phys, DONT_ALLOC_PHYS);
    435 			if (!IS_ERR(src_frame_addr_virt)) {
    436 				dst_frame_addr_virt = alloc_page_pa(&dst_frame_addr_phys, DONT_ALLOC_PHYS);
    437 				if (!IS_ERR(dst_frame_addr_virt)) {
    438 					memcpy(dst_frame_addr_virt, src_frame_addr_virt, PAGE_SIZE);
    439 					free_page_pa(
    440 						src_frame_addr_virt,
    441 						&src_frame_addr_phys,
    442 						DONT_FREE_PHYS
    443 					);
    444 					free_page_pa(
    445 						dst_frame_addr_virt,
    446 						&dst_frame_addr_phys,
    447 						DONT_FREE_PHYS
    448 					);
    449 				} else { ret = PTR_ERR(dst_frame_addr_virt); goto out3; }
    450 			} else { ret = PTR_ERR(src_frame_addr_virt); goto out2; }
    451 		} else { ret = PTR_ERR((void *)frame); goto out1; }
    452 	}
    453 	free_page_pa(new_ptable, phys, DONT_FREE_PHYS);
    454 	load_flags(state);
    455 	return 0;
    456 out3:
    457 	free_page_pa(src_frame_addr_virt, &src_frame_addr_phys, DONT_FREE_PHYS); /* keep the source frame */
    458 out2:
    459 	pfree((void *)frame, PAGE_SIZE);
    460 out1:
    461 	free_page_pa(new_ptable, phys, FREE_PHYS);
    462 out:
    463 	load_flags(state);
    464 	return ret;
    465 }
    466 
    467 struct page_directory_t *
    468 clone_page_dir(void) {
    469 	uint32_t new_pdir_addr_phys, state, i;
    470 	uint32_t new_ptable_addr_phys;
    471 	struct page_directory_t *new_pdir, *pdir;
    472 	int ret;
    473 
    474 	save_flags(&state);
    475 	cli();
    476 	new_pdir = create_pdir(&new_pdir_addr_phys);
    477 	if (IS_ERR(new_pdir)) {
    478 		ret = PTR_ERR(new_pdir);
    479 		goto out;
    480 	}
    481 	pdir = curr_pdir();
    482 	for (i = 0; i < 1023; ++i) { /* exclude recursive mapping */
    483 		if (!pdir->ptables[i].present) continue;
    484 		if (*(uint32_t *)&kernel_page_dir->ptables[i]
    485 				== *(uint32_t *)&pdir->ptables[i]) {
    486 			*(uint32_t *)&new_pdir->ptables[i] = *(uint32_t *)&pdir->ptables[i];
    487 			continue;
    488 		}
    489 		if ((ret = clone_page_table(get_ptable(i << PAGE_SHIFT),
    490 					    &new_ptable_addr_phys)) < 0)
    491 			goto out1;
    492 		SET_PDE(new_pdir->ptables[i], 1, 1, 0,
    493 			new_ptable_addr_phys >> PAGE_SHIFT);
    494 	}
    495 	load_flags(state);
    496 	return new_pdir;
    497 out1:
    498 	destroy_pdir(new_pdir, &new_pdir_addr_phys, FREE_PHYS);
    499 out:
    500 	load_flags(state);
    501 	return ERR_PTR(ret);
    502 }
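
        /*
         * Note on clone_page_dir(): directory slots that are bit-for-bit equal
         * to kernel_page_dir's (the identity mapping and the preallocated
         * kernel-heap tables) are shared with the new directory, while every
         * other present slot gets a deep copy of both the page table and the
         * frames it maps.  init_vm() uses it once to build new_kernel_pdir;
         * presumably process creation relies on it in the same way.
         */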
    503 
    504 void
    505 flush_tlb(void)
    506 {
    507 	uint32_t val, state;
    508 
    509 	save_flags(&state);
    510 	cli();
    511 	asm volatile ("movl %%cr3, %0" : "=r" (val));
    512 	asm volatile ("movl %0, %%cr3" :: "r" (val));
    513 	load_flags(state);
    514 }
    515 
    516 uint32_t
    517 virt_to_phys(uint32_t virt_addr, bool *val)
    518 {
    519 	struct page_directory_t *curr_page_dir;
    520 	struct page_table_t *ptable;
    521 	uint32_t state, ret;
    522 
    523 	if (!val)
    524 		panic("arg 'val' appears to be NULL!");
    525 
    526 	save_flags(&state);
    527 	cli();
    528 	*val = true;
    529 	curr_page_dir = curr_pdir();
    530 	ptable = get_ptable(get_pde_index(virt_addr) << PAGE_SHIFT);
    531 	if (!curr_page_dir->ptables[get_pde_index(virt_addr)].present
    532 	    || !ptable->pages[get_pte_index(virt_addr)].present) {
    533 		*val = false;	/* short-circuit: an absent page table is never dereferenced */
    534 		ret = 0;
    535 	} else
    536 		ret = (ptable->pages[get_pte_index(virt_addr)].frame << PAGE_SHIFT) + (virt_addr & 0xfff);
    537 	load_flags(state);
    538 	return ret;
    539 }
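
        /*
         * Illustrative use of virt_to_phys(), mirroring switch_page_dir() above
         * ('ptr' is just a stand-in name):
         *
         *	bool ok = false;
         *	uint32_t phys = virt_to_phys((uint32_t)ptr, &ok);
         *	if (!ok)
         *		panic("not mapped in the current address space");
         */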
    540 
    541 int
    542 mmap(uint32_t virt_addr, uint32_t phys_addr)
    543 {
    544 	struct page_directory_t *curr_page_dir;
    545 	struct page_table_t *ptable;
    546 	uint32_t pt_addr, state;
    547 	int ret = -ENOMEM;
    548 
    549 	save_flags(&state);
    550 	cli();
    551 	curr_page_dir = curr_pdir();
    552 	ptable = get_ptable(get_pde_index(virt_addr) << PAGE_SHIFT);
    553 	if (!curr_page_dir->ptables[get_pde_index(virt_addr)].present) {
    554 		pt_addr = get_free_frame();
    555 		if (!pt_addr) goto err;
    556 		SET_PDE(curr_page_dir->ptables[get_pde_index(virt_addr)],
    557 			1, 1, 0,
    558 			pt_addr >> PAGE_SHIFT);
    559 		/* the new page table frame is not zeroed by palloc(); clear it */
    560 		memset(ptable, 0, sizeof(*ptable));
    561 	}
    562 	if (ptable->pages[get_pte_index(virt_addr)].present)
    563 		panic("duplicate mapping: 0x%08lx -> 0x%08lx", virt_addr, phys_addr);
    564 	SET_PTE(ptable->pages[get_pte_index(virt_addr)],
    565 		1,
    566 		1,
    567 		0,
    568 		phys_addr >> PAGE_SHIFT);
    569 	flush_tlb();
    570 	load_flags(state);
    571 	return 0;
    572 err:
    573 	load_flags(state);
    574 	return ret;
    575 }
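
        /*
         * Typical call sequence for mmap(), the same one pagefault_callback()
         * uses for its RW_FL case ('virt' stands for a page-aligned virtual
         * address):
         *
         *	uint32_t frame = get_free_frame();
         *	if (!frame || mmap(virt, frame) < 0)
         *		return -ENOMEM;
         */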
    576 
    577 int
    578 unmap(uint32_t virt_addr)
    579 {
    580 	int ret = -EFAULT;
    581 	struct page_directory_t *curr_page_dir;
    582 	struct page_table_t *ptable;
    583 	uint32_t state;
    584 	uint32_t i;
    585 
    586 	save_flags(&state);
    587 	cli();
    588 	curr_page_dir = curr_pdir();
    589 	if (!curr_page_dir->ptables[get_pde_index(virt_addr)].present)
    590 		goto err;
    591 	ptable = get_ptable(get_pde_index(virt_addr) << PAGE_SHIFT);
    592 	if (!ptable->pages[get_pte_index(virt_addr)].present)
    593 		goto err;
    594 	ptable->pages[get_pte_index(virt_addr)].present = 0;
    595 	for (i = 0; i < 1024 && !ptable->pages[i].present; ++i)
    596 		;
    597 	if (i == 1024) {
    598 		curr_page_dir->ptables[get_pde_index(virt_addr)].present = 0;
    599 		pfree((void *)(curr_page_dir->ptables[get_pde_index(virt_addr)].pt_addr
    600 			       << PAGE_SHIFT),
    601 		      PAGE_SIZE);
    602 	}
    603 	flush_tlb();
    604 	load_flags(state);
    605 	return 0;
    606 err:
    607 	load_flags(state);
    608 	return ret;
    609 }
    610 
    611 int
    612 mmap_range(uint32_t virt_addr_start, uint32_t virt_addr_end)
    613 {
    614 	uint32_t state, free_page;
    615 	uint32_t s;
    616 	int ret;
    617 
    618 	save_flags(&state);
    619 	cli();
    620 	s = virt_addr_start;
    621 	virt_addr_start &= ~(PAGE_SIZE - 1);
    622 	virt_addr_end = roundup_pagesize(virt_addr_end);
    623 	for (; virt_addr_start < virt_addr_end; virt_addr_start += PAGE_SIZE) {
    624 		free_page = get_free_frame();
    625 		if (free_page) {
    626 			if ((ret = mmap(virt_addr_start, free_page)) < 0) {
    627 				load_flags(state);
    628 				return ret;
    629 			}
    630 			continue;
    631 		}
    632 		if (s < virt_addr_start) {
    633 			ret = unmap_range(s, virt_addr_start);
    634 			if (ret < 0)
    635 				panic("can't unmap range!");
    636 		}
    637 		load_flags(state);
    638 		return -ENOMEM;
    639 	}
    640 	load_flags(state);
    641 	return 0;
    642 }
    643 
    644 int
    645 unmap_range(uint32_t virt_addr_start, uint32_t virt_addr_end)
    646 {
    647 	uint32_t state;
    648 	int ret;
    649 
    650 	save_flags(&state);
    651 	cli();
    652 	virt_addr_start &= ~(PAGE_SIZE - 1);
    653 	virt_addr_end = roundup_pagesize(virt_addr_end);
    654 	for (; virt_addr_start < virt_addr_end; virt_addr_start += PAGE_SIZE) {
    655 		if ((ret = unmap(virt_addr_start)) < 0) {
    656 			load_flags(state);
    657 			return ret;
    658 		}
    659 	}
    660 	load_flags(state);
    661 	return 0;
    662 }
    663 
    664 void
    665 dump_mappings(void)
    666 {
    667 	uint32_t i, j, nr = 0, state;
    668 	uint32_t addr, paddr;
    669 	struct page_directory_t *pdir;
    670 	struct page_table_t *ptable;
    671 	bool valid_map;
    672 
    673 	save_flags(&state);
    674 	cli();
    675 	serial_dump("Mmap dump for [%s:%lu]\n", curr_proc->name, curr_proc->pid);
    676 	printf("Mmap dump for [%s:%lu]\n", curr_proc->name, curr_proc->pid);
    677 	pdir = curr_pdir();
    678 	for (i = 1; i < 1023; ++i) {
    679 		if (!pdir->ptables[i].present)
    680 			continue;
    681 		ptable = get_ptable(i << PAGE_SHIFT);
    682 		for (j = 0; j < 1024; ++j) {
    683 			if (!ptable->pages[j].present)
    684 				continue;
    685 			addr = (i << 22) | (j << 12);
    686 			paddr = virt_to_phys(addr, &valid_map);
    687 			if (!valid_map)
    688 				continue;
    689 			serial_dump("[%lu] 0x%08lx -> 0x%08lx\n", nr, addr, paddr);
    690 			printf("[%lu] 0x%08lx -> 0x%08lx\n", nr++, addr, paddr);
    691 		}
    692 	}
    693 	load_flags(state);
    694 }
    695 
    696 void
    697 remove_proc_mappings(void)
    698 {
    699 	uint32_t state, i, j;
    700 	uint32_t addr, paddr, tmp;
    701 	struct page_directory_t *pdir;
    702 	struct page_table_t *ptable;
    703 	bool valid_map;
    704 	struct list_head *iter, *tmpreg;
    705 	struct mmap_region *reg;
    706 
    707 	save_flags(&state);
    708 	cli();
    709 	list_for_each_safe(iter, tmpreg, &curr_proc->l_regions) {
    710 		reg = list_entry(iter, struct mmap_region, l_region);
    711 		if (unmap_range(reg->base_addr, reg->base_addr + reg->size) < 0)
    712 			panic("failed to unmap pages!");
    713 		list_del(iter);
    714 		kfree(reg);
    715 	}
    716 	pdir = curr_pdir();
    717 	for (i = 2; i < 1023; ++i) {
    718 		if (pdir->ptables[i].present) {
    719 			if (*(uint32_t *)&curr_proc->parent->page_dir->ptables[i]
    720 					!= *(uint32_t *)&pdir->ptables[i]) {
    721 				ptable = get_ptable(i << PAGE_SHIFT);
    722 				for (j = 0; j < 1024; ++j)
    723 					if (ptable->pages[j].present)
    724 						pfree((void *)(ptable->pages[j].frame << PAGE_SHIFT),
    725 						      PAGE_SIZE);
    726 				paddr = pdir->ptables[i].pt_addr << PAGE_SHIFT;
    727 				pfree((void *)paddr, PAGE_SIZE);
    728 			}
    729 		}
    730 	}
    731 	paddr = read_cr3();
    732 	for (i = 0; i < PAGE_HEAP_SIZE >> PAGE_SHIFT; i++) {
    733 		if (!is_page_free(i)) {
    734 			addr = PAGE_HEAP + (i << PAGE_SHIFT);
    735 			tmp = virt_to_phys(addr, &valid_map);
    736 			if (valid_map && paddr == tmp) {
    737 				free_page_pa((void *)addr, &paddr, FREE_PHYS);
    738 				break;
    739 			}
    740 		}
    741 	}
    742 	load_flags(state);
    743 }
    744 
    745 /* a bitmap based physical memory allocator */
    746 uint32_t
    747 get_num_free_frames(void)
    748 {
    749 	uint32_t i, nfree = 0;
    750 
    751 	for (i = 0; i < total_frames; ++i)
    752 		if (is_frame_free(i))
    753 			++nfree;
    754 	return nfree;
    755 }
    756 
    757 void *
    758 palloc(size_t size)
    759 {
    760 	uint32_t i, j, frames_alloc, state;
    761 
    762 	assert(framemap);
    763 	if (!size)
    764 		return ERR_PTR(-EINVAL);
    765 
    766 	size = roundup_pagesize(size);
    767 	frames_alloc = size >> PAGE_SHIFT;
    768 	if (frames_alloc > total_frames - 1)
    769 		return ERR_PTR(-ENOMEM);
    770 
    771 	save_flags(&state);
    772 	cli();
    773 	for (i = 0; i + frames_alloc <= total_frames; ++i) { /* stay inside the bitmap */
    774 		if (is_frame_free(i)) {
    775 			for (j = 0; j < frames_alloc; ++j)
    776 				if (!is_frame_free(i + j))
    777 					break;
    778 			if (j == frames_alloc) {
    779 				for (j = 0; j < frames_alloc; ++j)
    780 					framemap[get_frame_block(i + j)] |=
    781 						(1 << get_frame_offset(i + j));
    782 				load_flags(state);
    783 				return (void *)(mm_phys_addr + (i << PAGE_SHIFT));
    784 			}
    785 		}
    786 	}
    787 	load_flags(state);
    788 	return ERR_PTR(-ENOMEM);
    789 }
    790 
    791 void
    792 pfree(void *addr, size_t size)
    793 {
    794 	uint32_t framestofree, base_addr, i, state;
    795 
    796 	if (!addr || !size)
    797 		return;
    798 
    799 	size = roundup_pagesize(size);
    800 	framestofree = size >> PAGE_SHIFT;
    801 	base_addr = (uint32_t)addr;
    802 	base_addr -= mm_phys_addr;
    803 	base_addr >>= PAGE_SHIFT;
    804 
    805 	save_flags(&state);
    806 	cli();
    807 	for (i = 0; i < framestofree; ++i)
    808 		framemap[get_frame_block(base_addr + i)] &=
    809 			~(1 << get_frame_offset(base_addr + i));
    810 	load_flags(state);
    811 }
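
        /*
         * Note: palloc()/pfree() deal in physical frames.  The pointer palloc()
         * returns is a physical address, directly usable only inside the
         * identity-mapped low region set up by init_vm(); other callers map it
         * into a virtual address first, as get_free_frame()+mmap() and
         * alloc_page_pa() below do.
         */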
    812 
    813 int
    814 init_page_pa_allocator(void)
    815 {
    816 	size_t len = PAGE_HEAP_SIZE >> PAGE_SHIFT;
    817 
    818 	pagemap_pa = kmalloc(len);
    819 	if (IS_ERR(pagemap_pa))
    820 		return PTR_ERR(pagemap_pa);
    821 	memset(pagemap_pa, 0, len);
    822 	return 0;
    823 }
    824 
    825 void *
    826 alloc_page_pa(uint32_t *frame, enum page_pa_fl flags)
    827 {
    828 	uint32_t i, addr, state;
    829 	int ret;
    830 
    831 	if (!frame)
    832 		return ERR_PTR(-EINVAL);
    833 
    834 	save_flags(&state);
    835 	cli();
    836 	for (i = 0; i < PAGE_HEAP_SIZE >> PAGE_SHIFT; ++i) {
    837 		if (is_page_free(i)) {
    838 			addr = (flags == ALLOC_PHYS) ? get_free_frame() : *frame;
    839 			if (addr) {
    840 				if ((ret = mmap(PAGE_HEAP
    841 						+ (i << PAGE_SHIFT), addr)) < 0) {
    842 					load_flags(state);
    843 					return ERR_PTR(ret);
    844 				}
    845 				pagemap_pa[get_page_block(i)] |= (1 << get_page_offset(i));
    846 				*frame = addr;
    847 				if (flags == ALLOC_PHYS)
    848 					memset((void *)(PAGE_HEAP + (i << PAGE_SHIFT)), 0,
    849 					       PAGE_SIZE);
    850 				load_flags(state);
    851 				return (void *)(PAGE_HEAP + (i << PAGE_SHIFT));
    852 			}
    853 			break;
    854 		}
    855 	}
    856 	load_flags(state);
    857 	return ERR_PTR((flags == ALLOC_PHYS) ? -ENOMEM : -EINVAL);
    858 }
    859 
    860 int
    861 free_page_pa(void *page, uint32_t *frame, enum page_pa_fl flags)
    862 {
    863 	uint32_t base_addr, state;
    864 	int ret;
    865 
    866 	if (!page || !frame)
    867 		return -EINVAL;
    868 
    869 	save_flags(&state);
    870 	cli();
    871 	base_addr = (uint32_t)page;
    872 	if ((ret = unmap(base_addr)) < 0) {
    873 		load_flags(state);
    874 		return ret;
    875 	}
    876 	base_addr -= PAGE_HEAP;
    877 	base_addr >>= PAGE_SHIFT;
    878 	pagemap_pa[get_page_block(base_addr)] &=
    879 		~(1 << get_page_offset(base_addr));
    880 	if (flags == FREE_PHYS)
    881 		pfree((void *)*frame, PAGE_SIZE);
    882 	load_flags(state);
    883 	return 0;
    884 }
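
        /*
         * alloc_page_pa()/free_page_pa() hand out temporary kernel mappings in
         * the PAGE_HEAP window.  The pattern clone_page_table() uses appears to
         * be the intended one (sketch): grab a window plus a fresh frame, fill
         * it, then drop the window but keep the frame alive:
         *
         *	uint32_t phys;
         *	void *va = alloc_page_pa(&phys, ALLOC_PHYS);
         *	if (!IS_ERR(va)) {
         *		... fill the frame through va ...
         *		free_page_pa(va, &phys, DONT_FREE_PHYS);
         *	}
         */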
    885