voron

experimental ARM OS
git clone git://git.2f30.org/voron

alloc.c (4595B)


#include <kernel.h>
#include <list.h>
#include <spinlock.h>
#include <mmu.h>
#include <alloc.h>

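/*
 * Every allocation is described by a struct mem_chunk header that
 * lives directly in front of the memory handed back to the caller.
 * `start' points just past the header, so get_mem_chunk() can step
 * back from a user pointer to its header with plain pointer
 * arithmetic.  Chunks sit on either `freelist' or `alloclist',
 * each guarded by its own spinlock.
 */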
struct mem_chunk {
	union {
		void *start;		/* user-visible address */
		uintptr_t start_a;	/* same address, for arithmetic */
	};
	size_t size;			/* rounded-up usable size */
	struct list_head list;
};

/* recover the chunk header sitting just before a user pointer */
#define get_mem_chunk(addr) (((struct mem_chunk*)(addr)) - 1)

static struct list_head freelist;	/* chunks available for reuse */
static struct list_head alloclist;	/* chunks currently handed out */
static spinlock_t freelist_lock = SPINLOCK_INIT;
static spinlock_t alloclist_lock = SPINLOCK_INIT;
static void *heap_last;			/* first unmapped heap address */

/* heap window boundaries, defined outside this file (linker script) */
extern void *_kernel_heap_start;
extern void *_kernel_heap_end;

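/*
 * Round a request up to the next power of two, with a 16-byte
 * floor.  E.g. 17 -> 32, 32 -> 32.  The CLZ instruction makes this
 * an ARM-only helper: 32 - clz(size) is the index of the highest
 * set bit plus one, so 1 << (32 - clz(size)) is the next power of
 * two above `size', halved again when `size' already is one.
 * Assumes requests stay below 2^31 bytes.
 */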
static size_t
roundup(size_t size)
{
	size_t ret;

	if (size <= 16)
		return 16;

	/* get the number of leading zeros */
	asm volatile("clz %0, %1" : "=r" (ret) : "r" (size));

	/* round up to the next power of two */
	ret = 1 << (32 - ret);

	/* if size already was a power of two, keep it as-is */
	if (size != (ret >> 1))
		return ret;
	else
		return ret >> 1;
}

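/*
 * kmalloc() first scans the free list for a chunk of exactly the
 * rounded-up size.  On a miss it maps fresh pages at the top of
 * the heap, places the new chunk at the start of that mapping, and
 * carves whatever is left of the mapped pages into further
 * same-sized chunks that go straight onto the free list.  Returns
 * NULL when the heap window is exhausted or the mapping fails.
 */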
void *
kmalloc(size_t size)
{
	int ret;
	uint_t npages;
	uintptr_t heap_last_a, tmp_addr;
	struct mem_chunk *memc, *tmp_mc;
	struct list_head *pos;

	size = roundup(size);
	heap_last_a = (uintptr_t)heap_last;

	/* reuse a free chunk of exactly this size, if one exists */
	spinlock_lock(&freelist_lock);
	list_for_each(pos, &freelist) {
		memc = list_entry(pos, struct mem_chunk, list);
		if (memc->size == size) {
			list_del(&memc->list);
			spinlock_unlock(&freelist_lock);
			spinlock_lock(&alloclist_lock);
			list_add(&memc->list, &alloclist);
			spinlock_unlock(&alloclist_lock);
			return memc->start;
		}
	}
	spinlock_unlock(&freelist_lock);

	/* no fit: grow the heap by whole pages */
	npages = PAGE_ALIGN(sizeof(struct mem_chunk) + size) >> PAGE_SHIFT;

	if (heap_last_a + npages * PAGE_SIZE > (uintptr_t)&_kernel_heap_end)
		return NULL;

	ret = kmmap(heap_last, npages, MMU_AP_RW_NONE);
	if (ret < 0)
		return NULL;

	/* the new chunk header sits at the start of the fresh mapping */
	memc = heap_last;
	memc->start_a = sizeof(struct mem_chunk) + heap_last_a;
	memc->size = size;

	spinlock_lock(&alloclist_lock);
	list_add(&memc->list, &alloclist);
	spinlock_unlock(&alloclist_lock);

	heap_last_a += npages * PAGE_SIZE;
	heap_last = (void*)heap_last_a;

	/* carve the tail of the mapping into same-sized free chunks */
	tmp_addr = memc->start_a + memc->size;
	while (tmp_addr + sizeof(struct mem_chunk) + memc->size < heap_last_a) {
		tmp_mc = (void*)tmp_addr;
		tmp_mc->start_a = sizeof(struct mem_chunk) + tmp_addr;
		tmp_mc->size = memc->size;
		tmp_addr += sizeof(struct mem_chunk) + tmp_mc->size;
		spinlock_lock(&freelist_lock);
		list_add(&tmp_mc->list, &freelist);
		spinlock_unlock(&freelist_lock);
	}

	return memc->start;
}

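/*
 * kfree() only recycles: the chunk is moved from the allocation
 * list back to the free list so a later kmalloc() of the same size
 * can reuse it.  Pages are never unmapped or returned to the
 * system.  Pointers that were not handed out by kmalloc() are
 * silently ignored.
 */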
void
kfree(void *addr)
{
	struct mem_chunk *memc, *tmp;
	struct list_head *pos;

	memc = get_mem_chunk(addr);

	spinlock_lock(&alloclist_lock);
	list_for_each(pos, &alloclist) {
		tmp = list_entry(pos, struct mem_chunk, list);
		if (tmp == memc) {
			list_del(&memc->list);
			spinlock_lock(&freelist_lock);
			list_add(&memc->list, &freelist);
			spinlock_unlock(&freelist_lock);
			break;
		}
	}
	spinlock_unlock(&alloclist_lock);
}

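/*
 * krealloc() follows the usual realloc contract: a NULL `addr'
 * degenerates to kmalloc(), a zero `size' with a valid `addr'
 * degenerates to kfree(), and otherwise the contents are copied
 * into a fresh allocation unless the rounded size is unchanged.
 */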
void *
krealloc(void *addr, size_t size)
{
	void *p;
	struct list_head *pos;
	struct mem_chunk *memc = NULL;

	if (!size && addr) {
		kfree(addr);
		return NULL;
	}

	/* If addr is NULL, allocate new space */
	if (!addr)
		return kmalloc(size);

	/* Look up the old base pointer `addr' in the list of live
	 * allocations */
	spinlock_lock(&alloclist_lock);
	list_for_each(pos, &alloclist) {
		memc = list_entry(pos, struct mem_chunk, list);
		if (addr == memc->start) {
			/* Is the new rounded size the same as the
			 * current one?  If so just return the old
			 * `addr' */
			if (roundup(size) == memc->size) {
				spinlock_unlock(&alloclist_lock);
				return addr;
			}
			break;
		}
		memc = NULL;
	}
	spinlock_unlock(&alloclist_lock);

	/* `addr' was never handed out by kmalloc(); nothing to copy
	 * from */
	if (!memc)
		return NULL;

	/* Allocate some space, copy over the old contents and then
	 * free the old block */
	p = kmalloc(size);
	if (!p)
		return NULL;

	if (memc->size < size)
		memcpy(p, addr, memc->size);
	else
		memcpy(p, addr, size);
	kfree(addr);

	return p;
}

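/*
 * kcalloc() is kmalloc() plus zeroing, with a guard against the
 * classic nmemb * size multiplication overflow.
 */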
void *
kcalloc(size_t nmemb, size_t size)
{
	void *p;

	/* guard against overflow in nmemb * size */
	if (size && nmemb > (size_t)-1 / size)
		return NULL;

	p = kmalloc(nmemb * size);
	if (!p)
		return NULL;
	memset(p, 0, nmemb * size);
	return p;
}

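/*
 * kdump() walks both lists and prints every chunk: list node
 * address (virtual and physical), header address, user address and
 * size.  Debugging aid only; note that it takes no locks.
 */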
void
kdump(void)
{
	struct list_head *pos, *tmp;
	struct mem_chunk *memc;

	kprintf("alloc list\n");
	list_for_each_safe(pos, tmp, &alloclist) {
		memc = list_entry(pos, struct mem_chunk, list);
		kprintf("%p (phys: %p): %p %p %d\n", pos, virt_to_phys(pos),
			memc, memc->start, memc->size);
	}

	kprintf("\nfree list\n");
	list_for_each_safe(pos, tmp, &freelist) {
		memc = list_entry(pos, struct mem_chunk, list);
		kprintf("%p (phys: %p): %p %p %d\n", pos, virt_to_phys(pos),
			memc, memc->start, memc->size);
	}
}

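/*
 * Runs before the allocator is first used: __constructor__ places
 * alloc_init() in the init-array section, which the kernel is
 * assumed to walk during early boot.
 */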
__attribute__ ((__constructor__))
static void
alloc_init(void)
{
	INIT_LIST_HEAD(&freelist);
	INIT_LIST_HEAD(&alloclist);
	heap_last = &_kernel_heap_start;
}