mmu.c (4484B)
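/*
 * MMU setup and page mapping using the ARM short-descriptor translation
 * table format. mmu_ttb is the 16KB-aligned first-level table: 4096
 * entries, each covering 1MB of the virtual address space. l2 statically
 * provides one 1KB second-level table (256 entries mapping 4KB small
 * pages) for every first-level slot.
 */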
#include <kernel.h>
#include <mmu.h>
#include <mm.h>

static u32 mmu_ttb[4096] __attribute__((__aligned__ (16 * 1024)));
static u32 l2[4096][256] __attribute__((__aligned__ (1024)));

void
mmu_init(void)
{
	int i;

	for (i = 0; i < 4096; i++)
		mmu_ttb[i] = L1_FAULT;

	asm volatile (
		/* invalidate TLB
		 * v1 is ignored */
		"mcr p15, 0, v1, c8, c7, 0 \n\t"
		/* completes the TLB invalidation */
		"dsb \n\t"
		/* set TTBCR */
		"mov v1, #0 \n\t"
		"mcr p15, 0, v1, c2, c0, 2 \n\t"
		/* set TTBR0 */
		"ldr v1, =mmu_ttb \n\t"
		"mcr p15, 0, v1, c2, c0, 0 \n\t"
		/* set DACR */
		"ldr v1, =0x55555555 \n\t"
		"mcr p15, 0, v1, c3, c0, 0 \n\t"
		/* make sure that SCTLR.AFE is disabled */
		"mrc p15, 0, v1, c1, c0, 0 \n\t"
		"bic v1, v1, #(1 << 29) \n\t"
		"mcr p15, 0, v1, c1, c0, 0 \n\t"
		/* invalidate TLB */
		"mcr p15, 0, v1, c8, c7, 0 \n\t"
		/* completes the TLB invalidation */
		"dsb \n\t"
		: : : "v1", "memory"
	);
}

void
mmu_enable(void)
{
	asm volatile (
		/* invalidate TLB */
		"mcr p15, 0, v1, c8, c7, 0 \n\t"
		/* completes the TLB invalidation */
		"dsb \n\t"
		/* enable MMU */
		"mrc p15, 0, v1, c1, c0, 0 \n\t"
		"orr v1, v1, #1 \n\t"
		"mcr p15, 0, v1, c1, c0, 0 \n\t"
		: : : "v1"
	);
}

void
mmu_disable(void)
{
	asm volatile (
		/* disable MMU */
		"mrc p15, 0, v1, c1, c0, 0 \n\t"
		"bic v1, v1, #1 \n\t"
		"mcr p15, 0, v1, c1, c0, 0 \n\t"
		: : : "v1"
	);
}

uintptr_t
virt_to_phys(void *virt)
{
	uint_t pte_idx, pde_idx;
	u32 *pde;
	uintptr_t virt_a;

	virt_a = (uintptr_t)virt;
	pde_idx = virt_a >> 20;

	switch (mmu_ttb[pde_idx] & L1_TYPE_MASK) {
	case 1: /* page table */
		pde = (u32*)(mmu_ttb[pde_idx] & ~0x3ff);
		pte_idx = (virt_a & 0xff000) >> 12;
		if (pde[pte_idx] & L2_TYPE_MASK)
			return ((pde[pte_idx] & ~0xfff) | (virt_a & 0xfff));
		else
			return 0; /* not mapped */
	case 2: /* section */
		return ((mmu_ttb[pde_idx] & ~0xfffff) | (virt_a & 0xfffff));
	case 0: /* not mapped */
	default:
		return 0;
	}
}

int
virt_is_mapped(void *virt)
{
	uint_t pte_idx, pde_idx;
	u32 *pde;
	uintptr_t virt_a;

	virt_a = (uintptr_t)virt;
	pde_idx = virt_a >> 20;

	switch (mmu_ttb[pde_idx] & L1_TYPE_MASK) {
	case 1: /* page table */
		pde = (u32*)(mmu_ttb[pde_idx] & ~0x3ff);
		pte_idx = (virt_a & 0xff000) >> 12;
		if (pde[pte_idx] & L2_TYPE_MASK)
			return 1;
		else
			return 0; /* fault */
	case 2: /* section */
		return 1;
	case 0: /* fault */
	default:
		return 0;
	}
}
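
/*
 * Worked example of the address split used by the walkers above and by
 * mmu_map_page() below, for an arbitrary illustrative address:
 * virt = 0x80123456
 *   pde_idx = 0x80123456 >> 20             = 0x801 (1MB first-level slot)
 *   pte_idx = (0x80123456 & 0xff000) >> 12 = 0x23  (4KB page within it)
 *   offset  = 0x80123456 & 0xfff           = 0x456 (carried through unchanged)
 */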

/* map physical memory to virtual memory */
int
mmu_map_page(void *phys, void *virt, uint_t npages, mmu_ap_t perms)
{
	u32 pte, pte_perms;
	u32 *pde;
	uintptr_t phys_a, virt_a;
	uint_t i, pte_idx, pde_idx;

	phys_a = (uintptr_t)phys;
	virt_a = (uintptr_t)virt;

	if (npages == 0 || phys_a & (PAGE_SIZE - 1) || virt_a & (PAGE_SIZE - 1))
		return -EINVAL;

	/* AP[2:0] encodings assume SCTLR.AFE == 0 (cleared in mmu_init);
	 * the mmu_ap_t names read privileged/user */
	switch (perms) {
	case MMU_AP_RW_RW:
		/* AP[2:0] = 011 */
		pte_perms = PT_AP0 | PT_AP1;
		break;
	case MMU_AP_RW_RO:
		/* AP[2:0] = 010 */
		pte_perms = PT_AP1;
		break;
	case MMU_AP_RO_RO:
		/* AP[2:0] = 111 */
		pte_perms = PT_AP2 | PT_AP1 | PT_AP0;
		break;
	case MMU_AP_RW_NONE:
		/* AP[2:0] = 001 */
		pte_perms = PT_AP0;
		break;
	case MMU_AP_RO_NONE:
		/* AP[2:0] = 101 */
		pte_perms = PT_AP2 | PT_AP0;
		break;
	case MMU_AP_NONE_NONE:
		/* AP[2:0] = 000 */
		pte_perms = 0;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < npages; i++) {
		pde_idx = virt_a >> 20;

		/* install and clear a second-level table on first use */
		if (!mmu_ttb[pde_idx]) {
			int j;
			mmu_ttb[pde_idx] = (u32)(&l2[pde_idx]) | L1_PAGE_TABLE;
			for (j = 0; j < 256; j++)
				l2[pde_idx][j] = L2_PAGE_FAULT;
		}

		pde = (u32*)(mmu_ttb[pde_idx] & ~0x3ff);
		pte_idx = (virt_a & 0xff000) >> 12;
		pte = (phys_a & 0xfffff000) | L2_SMALL_PAGE;
		pde[pte_idx] = pte | pte_perms;

		phys_a += PAGE_SIZE;
		virt_a += PAGE_SIZE;
	}

	/* invalidate TLB */
	asm volatile("mcr p15, 0, v1, c8, c7, 0 \n\t"
	             "dsb \n\t"
	             : : : "v1", "memory");

	return 0;
}

int
kmmap(void *virt, uint_t npages, mmu_ap_t perms)
{
	uint_t i;
	uintptr_t virt_a;
	void *pa;

	virt_a = (uintptr_t)virt;

	if (npages == 0 || virt_a & (PAGE_SIZE - 1))
		return -EINVAL;

	/* overflow */
	if (virt_a + npages * PAGE_SIZE < virt_a)
		return -EFAULT;

	for (i = 0; i < npages; i++) {
		if (virt_is_mapped((void*)virt_a)) {
			kprintf("WARNING: %p virtual address is already mapped\n",
			        (void*)virt_a);
			virt_a += PAGE_SIZE;
			continue;
		}
		pa = palloc(1);
		if (!pa)
			return -ENOMEM; /* assumes ENOMEM is defined alongside EINVAL/EFAULT */
		mmu_map_page(pa, (void*)virt_a, 1, perms);
		virt_a += PAGE_SIZE;
	}

	return 0;
}
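
/*
 * Usage sketch: bring the MMU up with an identity mapping for the region
 * the kernel executes from, then back a fresh virtual region with pages
 * from palloc() via kmmap(). The load address and region sizes below are
 * illustrative assumptions, not taken from this file, so the sketch is
 * left compiled out.
 */
#if 0
static void
mmu_example(void)
{
	/* identity-map 1MB (256 small pages) of kernel text/data,
	 * privileged RW with no user access; virt == phys, so execution
	 * continues seamlessly once translation turns on */
	mmu_init();
	mmu_map_page((void*)0x80000000, (void*)0x80000000, 256, MMU_AP_RW_NONE);
	mmu_enable();

	/* map a 16KB anonymous region for kernel use */
	kmmap((void*)0xd0000000, 4, MMU_AP_RW_RW);
}
#endif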