/* khypervisor v1 */
00001 #include <k-hypervisor-config.h> 00002 #include "mm.h" 00003 #include "vmm.h" 00004 #include "armv7_p15.h" 00005 #include "arch_types.h" 00006 00007 #include <config/memmap.cfg> 00008 #include <log/print.h> 00009 #include <log/uart_print.h> 00010 00011 00012 /* LPAE Memory region attributes, to match Linux's (non-LPAE) choices. 00013 * Indexed by the AttrIndex bits of a LPAE entry; 00014 * the 8-bit fields are packed little-endian into MAIR0 and MAIR1 00015 * 00016 * ai encoding 00017 * UNCACHED 000 0000 0000 -- Strongly Ordered 00018 * BUFFERABLE 001 0100 0100 -- Non-Cacheable 00019 * WRITETHROUGH 010 1010 1010 -- Write-through 00020 * WRITEBACK 011 1110 1110 -- Write-back 00021 * DEV_SHARED 100 0000 0100 -- Device 00022 * ?? 101 00023 * reserved 110 00024 * WRITEALLOC 111 1111 1111 -- Write-back write-allocate 00025 * 00026 * DEV_NONSHARED 100 (== DEV_SHARED) 00027 * DEV_WC 001 (== BUFFERABLE) 00028 * DEV_CACHED 011 (== WRITEBACK) 00029 */ 00030 #define INITIAL_MAIR0VAL 0xeeaa4400 00031 #define INITIAL_MAIR1VAL 0xff000004 00032 #define INITIAL_MAIRVAL (INITIAL_MAIR0VAL|INITIAL_MAIR1VAL<<32) 00033 00034 /* 00035 * Attribute Indexes. 00036 * 00037 * These are valid in the AttrIndx[2:0] field of an LPAE stage 1 page 00038 * table entry. They are indexes into the bytes of the MAIR* 00039 * registers, as defined above. 00040 * 00041 */ 00042 #define UNCACHED 0x0 00043 #define BUFFERABLE 0x1 00044 #define WRITETHROUGH 0x2 00045 #define WRITEBACK 0x3 00046 #define DEV_SHARED 0x4 00047 #define WRITEALLOC 0x7 00048 #define DEV_NONSHARED DEV_SHARED 00049 #define DEV_WC BUFFERABLE 00050 #define DEV_CACHED WRITEBACK 00051 00052 /* SCTLR System Control Register. */ 00053 /* HSCTLR is a subset of this. 
*/ 00054 #define SCTLR_TE (1<<30) 00055 #define SCTLR_AFE (1<<29) 00056 #define SCTLR_TRE (1<<28) 00057 #define SCTLR_NMFI (1<<27) 00058 #define SCTLR_EE (1<<25) 00059 #define SCTLR_VE (1<<24) 00060 #define SCTLR_U (1<<22) 00061 #define SCTLR_FI (1<<21) 00062 #define SCTLR_WXN (1<<19) 00063 #define SCTLR_HA (1<<17) 00064 #define SCTLR_RR (1<<14) 00065 #define SCTLR_V (1<<13) 00066 #define SCTLR_I (1<<12) 00067 #define SCTLR_Z (1<<11) 00068 #define SCTLR_SW (1<<10) 00069 #define SCTLR_B (1<<7) 00070 #define SCTLR_C (1<<2) 00071 #define SCTLR_A (1<<1) 00072 #define SCTLR_M (1<<0) 00073 #define SCTLR_BASE 0x00c50078 00074 #define HSCTLR_BASE 0x30c51878 00075 00076 /* HTTBR */ 00077 #define HTTBR_INITVAL 0x0000000000000000ULL 00078 #define HTTBR_BADDR_MASK 0x000000FFFFFFF000ULL 00079 #define HTTBR_BADDR_SHIFT 12 00080 00081 /* HTCR */ 00082 #define HTCR_INITVAL 0x80000000 00083 #define HTCR_SH0_MASK 0x00003000 00084 #define HTCR_SH0_SHIFT 12 00085 #define HTCR_ORGN0_MASK 0x00000C00 00086 #define HTCR_ORGN0_SHIFT 10 00087 #define HTCR_IRGN0_MASK 0x00000300 00088 #define HTCR_IRGN0_SHIFT 8 00089 #define HTCR_T0SZ_MASK 0x00000003 00090 #define HTCR_T0SZ_SHIFT 0 00091 00092 /* PL2 Stage 1 Level 1 */ 00093 #define HMM_L1_PTE_NUM 512 00094 00095 /* PL2 Stage 1 Level 2 */ 00096 #define HMM_L2_PTE_NUM 512 00097 00098 /* PL2 Stage 1 Level 3 */ 00099 #define HMM_L3_PTE_NUM 512 00100 00101 #define HEAP_ADDR CFG_MEMMAP_MON_OFFSET + 0x02000000 00102 #define HEAP_SIZE 0x0D000000 00103 00104 #define L2_ENTRY_MASK 0x1FF 00105 #define L2_SHIFT 21 00106 00107 #define L3_ENTRY_MASK 0x1FF 00108 #define L3_SHIFT 12 00109 00110 #define HEAP_END_ADDR HEAP_ADDR + HEAP_SIZE 00111 #define NALLOC 1024 00112 00113 static lpaed_t _hmm_pgtable[HMM_L1_PTE_NUM] __attribute((__aligned__(4096))); 00114 static lpaed_t _hmm_pgtable_l2[HMM_L2_PTE_NUM] __attribute((__aligned__(4096))); 00115 static lpaed_t _hmm_pgtable_l3[HMM_L2_PTE_NUM][HMM_L3_PTE_NUM] __attribute((__aligned__(4096))); 00116 00117 /* used 
malloc, free, sbrk */ 00118 typedef long Align; 00119 union header { 00120 struct { 00121 union header *ptr; /* next block if on free list */ 00122 unsigned int size; /* size of this block */ 00123 } s; 00124 /* force align of blocks */ 00125 Align x; 00126 }; 00127 /* free list block header */ 00128 typedef union header fl_bheader; 00129 00130 uint32_t mm_break; /* break point for sbrk() */ 00131 uint32_t mm_prev_break; /* old break point for sbrk() */ 00132 uint32_t last_valid_address; /* last mapping address */ 00133 static fl_bheader freep_base; /* empty list to get started */ 00134 static fl_bheader *freep; /* start of free list */ 00135 00136 /* malloc init */ 00137 void hmm_heap_init(void) 00138 { 00139 mm_break = HEAP_ADDR; 00140 mm_prev_break = HEAP_ADDR; 00141 last_valid_address = HEAP_ADDR; 00142 freep = 0; 00143 } 00144 00145 /* 00146 * Initialization of Host Monitor Memory Management 00147 * PL2 Stage1 Translation 00148 * VA32 -> PA 00149 */ 00150 00151 static void _hmm_init(void) 00152 { 00153 int i, j; 00154 uint64_t pa = 0x00000000ULL; 00155 /* 00156 * Partition 0: 0x00000000 ~ 0x3FFFFFFF - Peripheral - DEV_SHARED 00157 * Partition 1: 0x40000000 ~ 0x7FFFFFFF - Unused - UNCACHED 00158 * Partition 2: 0x80000000 ~ 0xBFFFFFFF - Guest - UNCACHED 00159 * Partition 3: 0xC0000000 ~ 0xFFFFFFFF - Monitor - LV2 translation table address 00160 */ 00161 _hmm_pgtable[0] = hvmm_mm_lpaed_l1_block(pa, DEV_SHARED); pa += 0x40000000; 00162 uart_print( "&_hmm_pgtable[0]:"); uart_print_hex32((uint32_t) &_hmm_pgtable[0]); uart_print("\n\r"); 00163 uart_print( "lpaed:"); uart_print_hex64(_hmm_pgtable[0].bits); uart_print("\n\r"); 00164 _hmm_pgtable[1] = hvmm_mm_lpaed_l1_block(pa, UNCACHED); pa += 0x40000000; 00165 uart_print( "&_hmm_pgtable[1]:"); uart_print_hex32((uint32_t) &_hmm_pgtable[1]); uart_print("\n\r"); 00166 uart_print( "lpaed:"); uart_print_hex64(_hmm_pgtable[1].bits); uart_print("\n\r"); 00167 _hmm_pgtable[2] = hvmm_mm_lpaed_l1_block(pa, UNCACHED); pa += 
0x40000000; 00168 uart_print( "&_hmm_pgtable[2]:"); uart_print_hex32((uint32_t) &_hmm_pgtable[2]); uart_print("\n\r"); 00169 uart_print( "lpaed:"); uart_print_hex64(_hmm_pgtable[2].bits); uart_print("\n\r"); 00170 /* _hmm_pgtable[3] refers Lv2 page table address. */ 00171 _hmm_pgtable[3] = hvmm_mm_lpaed_l1_table((uint32_t) _hmm_pgtable_l2); 00172 uart_print( "&_hmm_pgtable[3]:"); uart_print_hex32((uint32_t) &_hmm_pgtable[3]); uart_print("\n\r"); 00173 uart_print( "lpaed:"); uart_print_hex64(_hmm_pgtable[3].bits); uart_print("\n\r"); 00174 for( i = 0; i <HMM_L2_PTE_NUM; i++){ 00175 /* _hvmm_pgtable_lv2[i] refers Lv3 page table address. each element correspond 2MB */ 00176 _hmm_pgtable_l2[i] = hvmm_mm_lpaed_l2_table((uint32_t) _hmm_pgtable_l3[i]); 00177 /* _hvmm_pgtable_lv3[i][j] refers page, that size is 4KB */ 00178 for(j = 0; j < HMM_L3_PTE_NUM; pa += 0x1000 ,j++){ 00179 /* 0xF2000000 ~ 0xFF000000 - Heap memory 208MB */ 00180 if(pa >= HEAP_ADDR && pa < HEAP_ADDR + HEAP_SIZE){ 00181 _hmm_pgtable_l3[i][j] = hvmm_mm_lpaed_l3_table(pa, WRITEALLOC, 0); 00182 } 00183 else{ 00184 _hmm_pgtable_l3[i][j] = hvmm_mm_lpaed_l3_table(pa, UNCACHED, 1); 00185 } 00186 } 00187 } 00188 for ( i = 4; i < HMM_L1_PTE_NUM; i++ ) { 00189 _hmm_pgtable[i].pt.valid = 0; 00190 } 00191 } 00192 00193 int hvmm_mm_init(void) 00194 { 00195 /* 00196 * MAIR0, MAIR1 00197 * HMAIR0, HMAIR1 00198 * HTCR 00199 * HTCTLR 00200 * HTTBR 00201 * HTCTLR 00202 */ 00203 uint32_t mair, htcr, hsctlr, hcr; 00204 uint64_t httbr; 00205 uart_print( "[mm] mm_init: enter\n\r" ); 00206 00207 vmm_init(); 00208 _hmm_init(); 00209 00210 // MAIR/HMAIR 00211 uart_print(" --- MAIR ----\n\r" ); 00212 mair = read_mair0(); uart_print( "mair0:"); uart_print_hex32(mair); uart_print("\n\r"); 00213 mair = read_mair1(); uart_print( "mair1:"); uart_print_hex32(mair); uart_print("\n\r"); 00214 mair = read_hmair0(); uart_print( "hmair0:"); uart_print_hex32(mair); uart_print("\n\r"); 00215 mair = read_hmair1(); uart_print( "hmair1:"); 
uart_print_hex32(mair); uart_print("\n\r"); 00216 00217 write_mair0( INITIAL_MAIR0VAL ); 00218 write_mair1( INITIAL_MAIR1VAL ); 00219 write_hmair0( INITIAL_MAIR0VAL ); 00220 write_hmair1( INITIAL_MAIR1VAL ); 00221 00222 mair = read_mair0(); uart_print( "mair0:"); uart_print_hex32(mair); uart_print("\n\r"); 00223 mair = read_mair1(); uart_print( "mair1:"); uart_print_hex32(mair); uart_print("\n\r"); 00224 mair = read_hmair0(); uart_print( "hmair0:"); uart_print_hex32(mair); uart_print("\n\r"); 00225 mair = read_hmair1(); uart_print( "hmair1:"); uart_print_hex32(mair); uart_print("\n\r"); 00226 00227 // HTCR 00228 uart_print(" --- HTCR ----\n\r" ); 00229 htcr = read_htcr(); uart_print( "htcr:"); uart_print_hex32(htcr); uart_print("\n\r"); 00230 write_htcr( 0x80002500 ); 00231 htcr = read_htcr(); uart_print( "htcr:"); uart_print_hex32(htcr); uart_print("\n\r"); 00232 00233 // HSCTLR 00234 // i-Cache and Alignment Checking Enabled 00235 // MMU, D-cache, Write-implies-XN, Low-latency IRQs Disabled 00236 hsctlr = read_hsctlr(); uart_print( "hsctlr:"); uart_print_hex32(hsctlr); uart_print("\n\r"); 00237 hsctlr = HSCTLR_BASE | SCTLR_A; 00238 write_hsctlr( hsctlr ); 00239 hsctlr = read_hsctlr(); uart_print( "hsctlr:"); uart_print_hex32(hsctlr); uart_print("\n\r"); 00240 00241 00242 // HCR 00243 hcr = read_hcr(); uart_print( "hcr:"); uart_print_hex32(hcr); uart_print("\n\r"); 00244 00245 // HTCR 00246 /* 00247 * Shareability - SH0[13:12] = 0 - Not shared 00248 * Outer Cacheability - ORGN0[11:10] = 11b - Write Back no Write Allocate Cacheable 00249 * Inner Cacheability - IRGN0[9:8] = 11b - Same 00250 * T0SZ[2:0] = 0 - 2^32 Input Address 00251 */ 00252 /* Untested code commented */ 00253 /* 00254 htcr = read_htcr(); uart_print( "htcr:"); uart_print_hex32(htcr); uart_print("\n\r"); 00255 htcr &= ~HTCR_SH0_MASK; 00256 htcr |= (0x0 << HTCR_SH0_SHIFT) & HTCR_SH0_MASK; 00257 htcr &= ~HTCR_ORGN0_MASK; 00258 htcr |= (0x3 << HTCR_ORGN0_SHIFT) & HTCR_ORGN0_MASK; 00259 htcr &= 
~VTCR_IRGN0_MASK; 00260 htcr |= (0x3 << HTCR_IRGN0_SHIFT) & HTCR_IRGN0_MASK; 00261 htcr &= ~VTCR_T0SZ_MASK; 00262 htcr |= (0x0 << HTCR_T0SZ_SHIFT) & HTCR_T0SZ_MASK; 00263 write_htcr( htcr ); 00264 htcr = read_htcr(); uart_print( "htcr:"); uart_print_hex32(htcr); uart_print("\n\r"); 00265 */ 00266 00267 /* HTTBR = &__hmm_pgtable */ 00268 httbr = read_httbr(); uart_print( "httbr:" ); uart_print_hex64(httbr); uart_print("\n\r"); 00269 httbr &= 0xFFFFFFFF00000000ULL; 00270 httbr |= (uint32_t) &_hmm_pgtable; 00271 httbr &= HTTBR_BADDR_MASK; 00272 uart_print( "writing httbr:" ); uart_print_hex64(httbr); uart_print("\n\r"); 00273 write_httbr( httbr ); 00274 httbr = read_httbr(); uart_print( "read back httbr:" ); uart_print_hex64(httbr); uart_print("\n\r"); 00275 00276 /* Enable PL2 Stage 1 MMU */ 00277 00278 hsctlr = read_hsctlr(); uart_print( "hsctlr:"); uart_print_hex32(hsctlr); uart_print("\n\r"); 00279 00280 /* HSCTLR Enable MMU and D-cache */ 00281 // hsctlr |= (SCTLR_M |SCTLR_C); 00282 hsctlr |= (SCTLR_M); 00283 00284 /* Flush PTE writes */ 00285 asm("dsb"); 00286 00287 write_hsctlr( hsctlr ); 00288 00289 /* Flush iCache */ 00290 asm("isb"); 00291 00292 hsctlr = read_hsctlr(); uart_print( "hsctlr:"); uart_print_hex32(hsctlr); uart_print("\n\r"); 00293 00294 hmm_heap_init(); 00295 00296 uart_print( "[mm] mm_init: exit\n\r" ); 00297 00298 return HVMM_STATUS_SUCCESS; 00299 } 00300 00301 void hmm_flushTLB(void) 00302 { 00303 /* Invalidate entire unified TLB */ 00304 invalidate_unified_tlb(0); 00305 asm volatile("dsb"); 00306 asm volatile("isb"); 00307 } 00308 00309 lpaed_t* hmm_get_l3_table_entry(unsigned long virt, unsigned long npages) 00310 { 00311 int l2_index = (virt >> L2_SHIFT) & L2_ENTRY_MASK; 00312 int l3_index = (virt >> L3_SHIFT) & L3_ENTRY_MASK; 00313 int maxsize = ((HMM_L2_PTE_NUM * HMM_L3_PTE_NUM) - ( (l2_index + 1) * (l3_index + 1) ) + 1); 00314 if( maxsize < npages ) { 00315 printh("%s[%d] : Map size \"pages\" is exceeded memory size\n", __FUNCTION__, 
__LINE__); 00316 if(maxsize > 0){ 00317 printh("%s[%d] : Available pages are %d\n", maxsize); 00318 } 00319 else{ 00320 printh("%s[%d] : Do not have available pages for map\n"); 00321 } 00322 return 0; 00323 } 00324 return &_hmm_pgtable_l3[l2_index][l3_index]; 00325 } 00326 00327 void hmm_umap(unsigned long virt, unsigned long npages) 00328 { 00329 int i; 00330 lpaed_t* map_table_p = hmm_get_l3_table_entry( virt, npages ); 00331 for( i = 0; i < npages; i++){ 00332 lpaed_stage1_disable_l3_table( &map_table_p[i] ); 00333 } 00334 hmm_flushTLB(); 00335 } 00336 00337 void hmm_map(unsigned long phys, unsigned long virt, unsigned long npages) 00338 { 00339 int i; 00340 lpaed_t* map_table_p = hmm_get_l3_table_entry( virt, npages ); 00341 for( i = 0; i < npages; i++){ 00342 lpaed_stage1_conf_l3_table( &map_table_p[i], (uint64_t)phys, 1 ); 00343 } 00344 hmm_flushTLB(); 00345 } 00346 00347 /* General-purpose sbrk, basic memory management system calls 00348 * Returns -1 if there was no space. 00349 */ 00350 void *hmm_sbrk(unsigned int incr) 00351 { 00352 unsigned int required_addr; 00353 unsigned int virt; 00354 unsigned int required_pages = 0; 00355 00356 mm_prev_break = mm_break; 00357 virt = mm_break; 00358 mm_break += incr; 00359 if( mm_break > last_valid_address ){ 00360 required_addr = mm_break - last_valid_address; 00361 for( ;required_addr > 0x0; required_addr -= 0x1000){ 00362 if( last_valid_address + 0x1000 > HEAP_END_ADDR ){ 00363 printh("%s[%d] required address is exceeded heap memory size\n", __FUNCTION__, __LINE__); 00364 return (void *)-1; 00365 } 00366 last_valid_address += 0x1000; 00367 required_pages++; 00368 } 00369 hmm_map(virt, virt, required_pages); 00370 } 00371 return (void *)mm_prev_break; 00372 } 00373 00374 void hmm_free(void* ap) 00375 { 00376 fl_bheader *bp, *p; 00377 bp = (fl_bheader *)ap - 1; /* point to block header */ 00378 for (p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr){ 00379 if(p >= p->s.ptr && (bp > p || bp < p->s.ptr)){ 00380 
break; /* freed block at start or end of arena */ 00381 } 00382 } 00383 if(bp + bp->s.size == p->s.ptr) { /* join to upper nbr */ 00384 bp->s.size += p->s.ptr->s.size; 00385 bp->s.ptr = p->s.ptr->s.ptr; 00386 } else 00387 bp->s.ptr = p->s.ptr; 00388 if (p + p->s.size == bp) { /* join to lower nbr */ 00389 p->s.size += bp->s.size; 00390 p->s.ptr = bp->s.ptr; 00391 } else 00392 p->s.ptr = bp; 00393 freep = p; 00394 } 00395 00396 static fl_bheader *morecore(unsigned int nu) 00397 { 00398 char *cp; 00399 fl_bheader *up; 00400 if ( nu < NALLOC ) 00401 nu = NALLOC; 00402 cp = hmm_sbrk(nu * sizeof(fl_bheader)); 00403 if ( cp == (char *) -1 ) /* no space at all */ 00404 return 0; 00405 up = (fl_bheader *)cp; 00406 up->s.size = nu; 00407 hmm_free((void*)(up+1)); 00408 return freep; 00409 } 00410 00411 void* hmm_malloc(unsigned long size) 00412 { 00413 fl_bheader *p, *prevp; 00414 unsigned int nunits; 00415 nunits = (size + sizeof(fl_bheader) - 1)/sizeof(fl_bheader) + 1; 00416 if(nunits < 2){ 00417 return 0; 00418 } 00419 00420 if ((prevp = freep) == 0 ) { /* no free list yet */ 00421 freep_base.s.ptr = freep = prevp = &freep_base; 00422 freep_base.s.size = 0; 00423 } 00424 for ( p = prevp->s.ptr; ; prevp = p, p = p->s.ptr) { 00425 if ( p->s.size >= nunits ) { /* big enough */ 00426 if ( p->s.size == nunits ) /* exactly */ 00427 prevp->s.ptr = p->s.ptr; 00428 else { /* allocate tail end */ 00429 p->s.size -= nunits; 00430 p += p->s.size; 00431 p->s.size = nunits; 00432 } 00433 freep = prevp; 00434 return (void *)(p+1); 00435 } 00436 if ( p == freep ) /* wrapped around free list */ 00437 if (( p = morecore(nunits)) == 0 ) 00438 return 0; /* none avaliable memory left */ 00439 } 00440 } 00441