/* Virtual Machine Memory Management Module */

#include <k-hypervisor-config.h>
#include <arch_types.h>
#include "vmm.h"
#include <armv7_p15.h>
#include <hvmm_trace.h>
#include <gic_regs.h>

#include <config/cfg_platform.h>
#include <log/print.h>

/* Stage 2 Level 1 */
#define VMM_L1_PTE_NUM          4
#define VMM_L1_PADDING_PTE_NUM  (512 - VMM_L1_PTE_NUM)
/* Stage 2 Level 2 */
#define VMM_L2_PTE_NUM          512
#define VMM_L3_PTE_NUM          512
#define VMM_L2L3_PTE_NUM_TOTAL  (VMM_L2_PTE_NUM * VMM_L3_PTE_NUM + VMM_L2_PTE_NUM)
#define VMM_PTE_NUM_TOTAL       (VMM_L1_PTE_NUM + VMM_L1_PADDING_PTE_NUM \
                                 + VMM_L2L3_PTE_NUM_TOTAL * VMM_L1_PTE_NUM)
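/*
 * Worked sizing example (arithmetic only, derived from the constants above):
 * each per-guest table array holds
 *     VMM_L1_PTE_NUM + VMM_L1_PADDING_PTE_NUM        =       512 entries (padded L1)
 *   + VMM_L2L3_PTE_NUM_TOTAL * VMM_L1_PTE_NUM
 *     = (512 * 512 + 512) * 4                        = 1,050,624 entries (L2 + L3)
 *   so VMM_PTE_NUM_TOTAL                             = 1,051,136 entries.
 * At 8 bytes per LPAE descriptor that is roughly 8MB of statically
 * allocated translation tables per guest.
 */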
/* VTTBR */
#define VTTBR_INITVAL                                   0x0000000000000000ULL
#define VTTBR_VMID_MASK                                 0x00FF000000000000ULL
#define VTTBR_VMID_SHIFT                                48
#define VTTBR_BADDR_MASK                                0x000000FFFFFFF000ULL
#define VTTBR_BADDR_SHIFT                               12
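/*
 * A minimal sketch (compiled out, hypothetical helper name) of how a VTTBR
 * value is composed from a VMID and a table base address; it mirrors what
 * vmm_set_vmid_ttbl() does at the bottom of this file.
 */
#if 0
static uint64_t vttbr_compose(vmid_t vmid, lpaed_t *ttbl)
{
    uint64_t vttbr = VTTBR_INITVAL;
    /* VMID in bits [55:48], base address in bits [39:12] */
    vttbr |= ((uint64_t) vmid << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
    vttbr |= ((uint64_t) (uint32_t) ttbl) & VTTBR_BADDR_MASK;
    return vttbr;
}
#endif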
/* VTCR */
#define VTCR_INITVAL                                    0x80000000
#define VTCR_SH0_MASK                                   0x00003000
#define VTCR_SH0_SHIFT                                  12
#define VTCR_ORGN0_MASK                                 0x00000C00
#define VTCR_ORGN0_SHIFT                                10
#define VTCR_IRGN0_MASK                                 0x00000300
#define VTCR_IRGN0_SHIFT                                8
#define VTCR_SL0_MASK                                   0x000000C0
#define VTCR_SL0_SHIFT                                  6
#define VTCR_S_MASK                                     0x00000010
#define VTCR_S_SHIFT                                    4
/* T0SZ is a 4-bit field, VTCR[3:0] (vmm_init_mmu() reads it as such) */
#define VTCR_T0SZ_MASK                                  0x0000000F
#define VTCR_T0SZ_SHIFT                                 0

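/*
 * Field layout sketch (assumption: ARMv7 Virtualization Extensions VTCR):
 *   SH0[13:12] ORGN0[11:10] IRGN0[9:8] SL0[7:6] S[4] T0SZ[3:0]
 * For example, SL0 = 1 (start the walk at level 1), ORGN0 = IRGN0 = 3
 * (write-back cacheable walks) and T0SZ = 0 (full 32-bit IPA space) encode as
 *   (1 << VTCR_SL0_SHIFT) | (3 << VTCR_ORGN0_SHIFT) | (3 << VTCR_IRGN0_SHIFT)
 *   = 0x0F40,
 * which is what vmm_init_mmu() below ORs on top of the reset value.
 */
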
extern uint32_t guest_bin_start;
extern uint32_t guest2_bin_start;

/*
 * Stage 2 Translation Tables
 * - VTCR.T0SZ = 0 gives a full 2^32-byte input address range.
 * - vmm_init_mmu() sets VTCR.SL0 = 1, so the walk starts at the level 1
 *   table (four 1GB entries); VTTBR.BADDR[31:x] with x = 5 - T0SZ then
 *   only requires 32-byte alignment of the 4-entry level 1 table.
 * - Statically allocated for now.
 */

static lpaed_t *_vmid_ttbl[NUM_GUESTS_STATIC];

static lpaed_t _ttbl_guest0[VMM_PTE_NUM_TOTAL] __attribute__((__aligned__(4096)));
static lpaed_t _ttbl_guest1[VMM_PTE_NUM_TOTAL] __attribute__((__aligned__(4096)));

struct memmap_desc {
    char *label;
    uint64_t va;
    uint64_t pa;
    uint32_t size;
    lpaed_stage2_memattr_t attr;
};

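/*
 * A minimal sketch of a custom region list (compiled out; the label and
 * addresses are hypothetical). 'va' is the offset of the region within its
 * 1GB level 1 slot and 'pa' is the machine address backing it;
 * vmm_init_ttbl2() stops at the zero-label terminator.
 */
#if 0
static struct memmap_desc example_shmem_md[] = {
    /* label, ipa(offset within the 1GB slot), pa, size, attr */
    { "shmem", 0x00000000, 0x70000000, 0x00100000,
      LPAED_STAGE2_MEMATTR_NORMAL_OWT | LPAED_STAGE2_MEMATTR_NORMAL_IWT },
    { 0, 0, 0, 0, 0 },  /* a zero label terminates the list */
};
#endif
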
static struct memmap_desc guest_md_empty[] = {
    { 0, 0, 0, 0, 0 },
};

static struct memmap_desc guest_device_md0[] = {
    /* label, ipa, pa, size, attr */
    CFG_GUEST0_DEVICE_MEMORY,
    { 0, 0, 0, 0, 0 },
};

static struct memmap_desc guest_device_md1[] = {
    /* label, ipa, pa, size, attr */
    CFG_GUEST1_DEVICE_MEMORY,
    { 0, 0, 0, 0, 0 },
};

static struct memmap_desc guest_memory_md0[] = {
    /* 768MB (0x30000000 bytes); pa is patched in vmm_init() */
    { "start", 0x00000000, 0, 0x30000000,
      LPAED_STAGE2_MEMATTR_NORMAL_OWT | LPAED_STAGE2_MEMATTR_NORMAL_IWT },
    { 0, 0, 0, 0, 0 },
};

static struct memmap_desc guest_memory_md1[] = {
    /* 256MB (0x10000000 bytes); pa is patched in vmm_init() */
    { "start", 0x00000000, 0, 0x10000000,
      LPAED_STAGE2_MEMATTR_NORMAL_OWT | LPAED_STAGE2_MEMATTR_NORMAL_IWT },
    { 0, 0, 0, 0, 0 },
};

/* Memory Map for Guest 0: one entry per 1GB level 1 slot */
static struct memmap_desc *guest_mdlist0[] = {
    &guest_device_md0[0],   /* 0x0000_0000 */
    &guest_md_empty[0],     /* 0x4000_0000 */
    &guest_memory_md0[0],   /* 0x8000_0000 */
    &guest_md_empty[0],     /* 0xC000_0000 */
    0
};

/* Memory Map for Guest 1 */
static struct memmap_desc *guest_mdlist1[] = {
    &guest_device_md1[0],
    &guest_md_empty[0],
    &guest_memory_md1[0],
    &guest_md_empty[0],
    0
};

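/*
 * Resulting guest 0 IPA layout (each mdlist slot i covers the 1GB region
 * starting at i * 0x40000000; md->va is an offset within that region):
 *   slot 0: 0x0000_0000  device memory (CFG_GUEST0_DEVICE_MEMORY)
 *   slot 1: 0x4000_0000  unmapped
 *   slot 2: 0x8000_0000  768MB guest RAM, backed by guest_bin_start
 *   slot 3: 0xC000_0000  unmapped
 */
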
/* Returns the address of the L3 translation table for entry 'index_l2'
 * of the given L2 table:
 *   lpaed_t *TTBL_L3(lpaed_t *ttbl_l2, uint32_t index_l2);
 */
#define TTBL_L3(ttbl_l2, index_l2) \
    (&(ttbl_l2)[VMM_L2_PTE_NUM + (VMM_L3_PTE_NUM * (index_l2))])
/* Returns the address of the L2 translation table for entry 'index_l1'
 * of the given L1 table */
#define TTBL_L2(ttbl_l1, index_l1) \
    (&(ttbl_l1)[(VMM_L1_PTE_NUM + VMM_L1_PADDING_PTE_NUM) \
                + (VMM_L2L3_PTE_NUM_TOTAL * (index_l1))])

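/*
 * Index arithmetic example (from the constants above): within one guest's
 * table array, entries [0..511] are the padded L1 table, so
 *   TTBL_L2(ttbl, 0)  == &ttbl[512]          (first L2 table)
 *   TTBL_L3(ttbl2, 0) == &ttbl2[512]         (its first L3 table)
 *   TTBL_L3(ttbl2, 1) == &ttbl2[512 + 512]   (one L3 table per 2MB block)
 */
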
static void vmm_ttbl3_map(lpaed_t *ttbl3, uint64_t offset, uint32_t pages,
                          uint64_t pa, lpaed_stage2_memattr_t mattr)
{
    int index_l3 = 0;
    int index_l3_last = 0;

    printh("%s[%d]: ttbl3:%x offset:%x pte:%x pages:%d, pa:%x\n",
           __FUNCTION__, __LINE__, (uint32_t) ttbl3, (uint32_t) offset,
           (uint32_t) &ttbl3[offset], pages, (uint32_t) pa);

    /* 'offset' is an L3 page index, not a byte offset */
    index_l3 = offset;
    index_l3_last = index_l3 + pages;

    /* map 'pages' consecutive 4KB pages starting at 'pa' */
    for (; index_l3 < index_l3_last; index_l3++) {
        lpaed_stage2_map_page(&ttbl3[index_l3], pa, mattr);
        pa += LPAE_PAGE_SIZE;
    }
}

static void vmm_ttbl3_unmap(lpaed_t *ttbl3, uint64_t offset, uint32_t pages)
{
    int index_l3 = 0;
    int index_l3_last = 0;

    /* Mark the pages 'invalid'. Note: unlike vmm_ttbl3_map(), 'offset'
     * here is a byte offset and is converted to a page index. */
    index_l3 = offset >> LPAE_PAGE_SHIFT;
    index_l3_last = index_l3 + pages;

    for (; index_l3 < index_l3_last; index_l3++) {
        ttbl3[index_l3].pt.valid = 0;
    }
}

/*
 * va_offset: 0 ~ (1GB - size), start of the contiguous virtual address
 *      range within this level 1 block (1GB), L2 block size (2MB) aligned
 * size: <= 1GB, page size aligned
 */
static void vmm_ttbl2_unmap(lpaed_t *ttbl2, uint64_t va_offset, uint32_t size)
{
    int index_l2 = 0;
    int index_l2_last = 0;
    int num_blocks = 0;

    /* Initialize the address space with the 'invalid' state */
    num_blocks = size >> LPAE_BLOCK_L2_SHIFT;
    index_l2 = va_offset >> LPAE_BLOCK_L2_SHIFT;
    index_l2_last = index_l2 + num_blocks;

    for (; index_l2 < index_l2_last; index_l2++) {
        ttbl2[index_l2].pt.valid = 0;
    }

    size &= LPAE_BLOCK_L2_MASK;
    if (size) {
        /* last, partial block */
        lpaed_t *ttbl3 = TTBL_L3(ttbl2, index_l2);
        vmm_ttbl3_unmap(ttbl3, 0x00000000, size >> LPAE_PAGE_SHIFT);
    }
}

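/*
 * Worked example (from vmm_init_ttbl2() below):
 * vmm_ttbl2_unmap(ttbl2, 0, 0x40000000) invalidates all 512 L2 block
 * entries (0x40000000 >> 21 == 512) and, since the size is block aligned,
 * touches no L3 table.
 */
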
static void vmm_ttbl2_map(lpaed_t *ttbl2, uint64_t va_offset, uint64_t pa,
                          uint32_t size, lpaed_stage2_memattr_t mattr)
{
    uint64_t block_offset;
    uint32_t index_l2;
    uint32_t index_l2_last;
    uint32_t num_blocks;
    uint32_t pages;
    lpaed_t *ttbl3;
    int i;

    HVMM_TRACE_ENTER();
    printh("ttbl2:%x va_offset:%x pa:%x size:%d\n",
           (uint32_t) ttbl2, (uint32_t) va_offset, (uint32_t) pa, size);

    index_l2 = va_offset >> LPAE_BLOCK_L2_SHIFT;
    block_offset = va_offset & LPAE_BLOCK_L2_MASK;
    printh("- index_l2:%d block_offset:%x\n", index_l2, (uint32_t) block_offset);

    /* head: a partial block, mapped page by page */
    if (block_offset) {
        uint64_t offset;
        offset = block_offset >> LPAE_PAGE_SHIFT;
        pages = size >> LPAE_PAGE_SHIFT;
        if (pages > VMM_L3_PTE_NUM - offset) {
            /* cap to what still fits in this block's L3 table */
            pages = VMM_L3_PTE_NUM - offset;
        }
        ttbl3 = TTBL_L3(ttbl2, index_l2);
        vmm_ttbl3_map(ttbl3, offset, pages, pa, mattr);
        lpaed_stage2_enable_l2_table(&ttbl2[index_l2]);

        size -= pages * LPAE_PAGE_SIZE;
        pa += pages * LPAE_PAGE_SIZE;
        index_l2++;
    }

    /* body: n full 2MB blocks */
    if (size > 0) {
        num_blocks = size >> LPAE_BLOCK_L2_SHIFT;
        index_l2_last = index_l2 + num_blocks;
        printh("- index_l2_last:%d num_blocks:%d size:%d\n",
               index_l2_last, (uint32_t) num_blocks, size);

        for (i = index_l2; i < index_l2_last; i++) {
            lpaed_stage2_enable_l2_table(&ttbl2[i]);
            vmm_ttbl3_map(TTBL_L3(ttbl2, i), 0, VMM_L3_PTE_NUM, pa, mattr);
            pa += LPAE_BLOCK_L2_SIZE;
            size -= LPAE_BLOCK_L2_SIZE;
        }
    }

    /* tail: the remaining partial block */
    if (size > 0) {
        pages = size >> LPAE_PAGE_SHIFT;
        printh("- pages:%d size:%d\n", pages, size);
        if (pages) {
            ttbl3 = TTBL_L3(ttbl2, index_l2_last);
            vmm_ttbl3_map(ttbl3, 0, pages, pa, mattr);
            lpaed_stage2_enable_l2_table(&ttbl2[index_l2_last]);
        }
    }
    HVMM_TRACE_EXIT();
}

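/*
 * Worked example (hypothetical numbers): mapping size = 5MB at
 * va_offset = 1MB splits as
 *   head: index_l2 = 0, block_offset = 1MB -> offset = 256 pages,
 *         pages capped to 512 - 256 = 256 (the last 1MB of block 0)
 *   body: size is now 4MB -> two full 2MB blocks at index_l2 = 1, 2
 *   tail: size is now 0 -> nothing left to map
 */
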
static void vmm_ttbl2_init_entries(lpaed_t *ttbl2)
{
    int i, j;
    HVMM_TRACE_ENTER();

    lpaed_t *ttbl3;
    for (i = 0; i < VMM_L2_PTE_NUM; i++) {
        ttbl3 = TTBL_L3(ttbl2, i);
        printh("ttbl2[%d]:%x ttbl3[]:%x\n", i, (uint32_t) &ttbl2[i], (uint32_t) ttbl3);
        lpaed_stage2_conf_l2_table(&ttbl2[i], (uint64_t) ((uint32_t) ttbl3), 0);
        for (j = 0; j < VMM_L3_PTE_NUM; j++) {
            ttbl3[j].pt.valid = 0;
        }
    }

    HVMM_TRACE_EXIT();
}

static void vmm_init_ttbl2(lpaed_t *ttbl2, struct memmap_desc *md)
{
    int i = 0;
    HVMM_TRACE_ENTER();
    printh(" - ttbl2:%x\n", (uint32_t) ttbl2);
    if (((uint64_t) ((uint32_t) ttbl2)) & 0x0FFFULL) {
        printh(" - error: invalid ttbl2 address alignment\n");
    }

    /* construct the L2-L3 table hierarchy with invalid pages */
    vmm_ttbl2_init_entries(ttbl2);

    vmm_ttbl2_unmap(ttbl2, 0x00000000, 0x40000000);

    while (md[i].label != 0) {
        vmm_ttbl2_map(ttbl2, md[i].va, md[i].pa, md[i].size, md[i].attr);
        i++;
    }
    HVMM_TRACE_EXIT();
}

static void vmm_init_ttbl(lpaed_t *ttbl, struct memmap_desc *mdlist[])
{
    int i = 0;
    HVMM_TRACE_ENTER();

    while (mdlist[i]) {
        struct memmap_desc *md = mdlist[i];

        if (md[0].label == 0) {
            /* empty region list: configure the L1 entry with no L2 table */
            lpaed_stage2_conf_l1_table(&ttbl[i], 0, 0);
        } else {
            lpaed_stage2_conf_l1_table(&ttbl[i],
                    (uint64_t) ((uint32_t) TTBL_L2(ttbl, i)), 1);
            vmm_init_ttbl2(TTBL_L2(ttbl, i), md);
        }
        i++;
    }

    HVMM_TRACE_EXIT();
}

static void vmm_init_mmu(void)
{
    uint32_t vtcr;
    uint64_t vttbr;

    HVMM_TRACE_ENTER();

    vtcr = read_vtcr();
    uart_print("vtcr:"); uart_print_hex32(vtcr); uart_print("\n\r");

    /* start lookup at the level 1 table: SL0 = 1 */
    vtcr &= ~VTCR_SL0_MASK;
    vtcr |= (0x01 << VTCR_SL0_SHIFT) & VTCR_SL0_MASK;
    /* outer/inner write-back cacheable table walks */
    vtcr &= ~VTCR_ORGN0_MASK;
    vtcr |= (0x3 << VTCR_ORGN0_SHIFT) & VTCR_ORGN0_MASK;
    vtcr &= ~VTCR_IRGN0_MASK;
    vtcr |= (0x3 << VTCR_IRGN0_SHIFT) & VTCR_IRGN0_MASK;
    write_vtcr(vtcr);
    vtcr = read_vtcr();
    uart_print("vtcr:"); uart_print_hex32(vtcr); uart_print("\n\r");
    {
        uint32_t sl0 = (vtcr & VTCR_SL0_MASK) >> VTCR_SL0_SHIFT;
        uint32_t t0sz = vtcr & VTCR_T0SZ_MASK;
        uint32_t baddr_x = (sl0 == 0 ? 14 - t0sz : 5 - t0sz);
        uart_print("vttbr.baddr.x:"); uart_print_hex32(baddr_x); uart_print("\n\r");
    }

    /* VTTBR */
    vttbr = read_vttbr();
    uart_print("vttbr:"); uart_print_hex64(vttbr); uart_print("\n\r");

    HVMM_TRACE_EXIT();
}

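/*
 * Worked example of the baddr_x computation above: with SL0 = 1 and
 * T0SZ = 0 the stage 2 walk starts at a 4-entry (32-byte) level 1 table,
 * so x = 5 - 0 = 5 and VTTBR.BADDR only needs bits [31:5], which the 4KB
 * alignment of _ttbl_guest0/_ttbl_guest1 comfortably satisfies.
 */
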
/*
 * Initialization of Virtual Machine Memory Management
 * Stage 2 Translation
 */
void vmm_init(void)
{
    /*
     * Initializes the translation tables for Stage 2 translation (IPA -> PA)
     */
    int i;

    HVMM_TRACE_ENTER();
    for (i = 0; i < NUM_GUESTS_STATIC; i++) {
        _vmid_ttbl[i] = 0;
    }

    _vmid_ttbl[0] = &_ttbl_guest0[0];
    _vmid_ttbl[1] = &_ttbl_guest1[0];

    /*
     * Guest RAM sits at IPA 0x8000_0000 (mdlist slot 2);
     * guest 0 is backed by guest_bin_start, guest 1 by guest2_bin_start.
     */
    guest_memory_md0[0].pa = (uint64_t) ((uint32_t) &guest_bin_start);
    guest_memory_md1[0].pa = (uint64_t) ((uint32_t) &guest2_bin_start);

    vmm_init_ttbl(&_ttbl_guest0[0], &guest_mdlist0[0]);
    vmm_init_ttbl(&_ttbl_guest1[0], &guest_mdlist1[0]);

    vmm_init_mmu();

    HVMM_TRACE_EXIT();
}

/* Translation table for the specified vmid */
lpaed_t *vmm_vmid_ttbl(vmid_t vmid)
{
    lpaed_t *ttbl = 0;
    if (vmid < NUM_GUESTS_STATIC) {
        ttbl = _vmid_ttbl[vmid];
    }
    return ttbl;
}

/* Enable/Disable Stage 2 translation */
void vmm_stage2_enable(int enable)
{
    uint32_t hcr;

    /* HCR.VM[0] = enable */
    hcr = read_hcr();
    if (enable) {
        hcr |= (0x1);
    } else {
        hcr &= ~(0x1);
    }
    write_hcr(hcr);
}

hvmm_status_t vmm_set_vmid_ttbl(vmid_t vmid, lpaed_t *ttbl)
{
    uint64_t vttbr;

    /*
     * VTTBR.VMID = vmid
     * VTTBR.BADDR = ttbl
     */
    vttbr = read_vttbr();
#if 0 /* suppressed: floods the log */
    uart_print("current vttbr:"); uart_print_hex64(vttbr); uart_print("\n\r");
#endif
    vttbr &= ~(VTTBR_VMID_MASK);
    vttbr |= ((uint64_t) vmid << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;

    vttbr &= ~(VTTBR_BADDR_MASK);
    vttbr |= ((uint64_t) (uint32_t) ttbl) & VTTBR_BADDR_MASK;
    write_vttbr(vttbr);

    vttbr = read_vttbr();
#if 0 /* suppressed: floods the log */
    uart_print("changed vttbr:"); uart_print_hex64(vttbr); uart_print("\n\r");
#endif
    return HVMM_STATUS_SUCCESS;
}
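
/*
 * A minimal usage sketch (illustrative, compiled out): one possible way to
 * switch the current VM context to a given guest by loading its tables and
 * turning stage 2 translation on. The disable/enable bracketing is an
 * assumption of this sketch, not something this file mandates.
 */
#if 0
static void example_switch_to_guest(vmid_t vmid)
{
    lpaed_t *ttbl = vmm_vmid_ttbl(vmid);    /* per-guest table, or 0 */
    if (ttbl) {
        vmm_stage2_enable(0);               /* HCR.VM = 0 while switching */
        vmm_set_vmid_ttbl(vmid, ttbl);      /* program VTTBR.VMID/BADDR */
        vmm_stage2_enable(1);               /* HCR.VM = 1 */
    }
}
#endif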