diff --git a/machine/mtrap.c b/machine/mtrap.c index 42737d7..7bd772f 100644 --- a/machine/mtrap.c +++ b/machine/mtrap.c @@ -196,11 +196,26 @@ void mcall_trap(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc) retval = 0;//sm_destroy_enclave(regs, arg0,arg1); break; case SBI_ENCLAVE_OCALL: - retval = 0;//sm_enclave_ocall(regs, arg0); + retval = sm_enclave_ocall(regs, arg0, arg1, arg2); break; case SBI_EXIT_ENCLAVE: retval = sm_exit_enclave(regs, arg0); break; + case SBI_CREATE_SERVER_ENCLAVE: + retval = sm_create_server_enclave(arg0); + break; + case SBI_DESTROY_SERVER_ENCLAVE: + retval = sm_destroy_server_enclave(regs, arg0); + break; + case SBI_ACQUIRE_SERVER: + retval = sm_server_enclave_acquire(regs, arg0); + break; + case SBI_CALL_ENCLAVE: + retval = sm_call_enclave(regs, arg0, arg1); + break; + case SBI_ENCLAVE_RETURN: + retval = sm_enclave_return(regs, arg0); + break; //TODO: delete this SBI_CALL case SBI_DEBUG_PRINT: printm("SBI_DEBUG_PRINT\r\n"); diff --git a/sm/enclave.c b/sm/enclave.c index f3f0fe7..59ae08a 100644 --- a/sm/enclave.c +++ b/sm/enclave.c @@ -1,4 +1,5 @@ #include "enclave.h" +#include "enclave_vm.h" #include "sm.h" #include "math.h" #include @@ -8,6 +9,14 @@ static struct cpu_state_t cpus[MAX_HARTS] = {{0,}, }; //spinlock static spinlock_t enclave_metadata_lock = SPINLOCK_INIT; +void acquire_enclave_metadata_lock() +{ + spinlock_lock(&enclave_metadata_lock); +} +void release_enclave_metadata_lock() +{ + spinlock_unlock(&enclave_metadata_lock); +} //enclave metadata struct link_mem_t* enclave_metadata_head = NULL; @@ -31,6 +40,12 @@ int copy_word_to_host(unsigned int* ptr, uintptr_t value) return 0; } +int copy_dword_to_host(uintptr_t* ptr, uintptr_t value) +{ + *ptr = value; + return 0; +} + static void enter_enclave_world(int eid) { cpus[read_csr(mhartid)].in_enclave = 1; @@ -44,6 +59,11 @@ static int get_enclave_id() return cpus[read_csr(mhartid)].eid; } +int get_curr_enclave_id() +{ + return cpus[read_csr(mhartid)].eid; +} + static void 
exit_enclave_world() { cpus[read_csr(mhartid)].in_enclave = 0; @@ -85,17 +105,24 @@ struct link_mem_t* init_mem_link(unsigned long mem_size, unsigned long slab_size { int retval = 0; struct link_mem_t* head; + unsigned long resp_size = 0; - head = (struct link_mem_t*)mm_alloc(mem_size, NULL); + head = (struct link_mem_t*)mm_alloc(mem_size, &resp_size); if(head == NULL) return NULL; else - memset((void*)head, 0, mem_size); + memset((void*)head, 0, resp_size); + + if(resp_size <= sizeof(struct link_mem_t) + slab_size) + { + mm_free(head, resp_size); + return NULL; + } - head->mem_size = mem_size; + head->mem_size = resp_size; head->slab_size = slab_size; - head->slab_num = (mem_size - sizeof(struct link_mem_t)) / slab_size; + head->slab_num = (resp_size - sizeof(struct link_mem_t)) / slab_size; void* align_addr = (char*)head + sizeof(struct link_mem_t); head->addr = (char*)size_up_align((unsigned long)align_addr, slab_size); head->next_link_mem = NULL; @@ -107,21 +134,30 @@ struct link_mem_t* add_link_mem(struct link_mem_t** tail) { struct link_mem_t* new_link_mem; int retval = 0; + unsigned long resp_size = 0; - new_link_mem = (struct link_mem_t*)mm_alloc((*tail)->mem_size, NULL); + new_link_mem = (struct link_mem_t*)mm_alloc((*tail)->mem_size, &resp_size); if (new_link_mem == NULL) return NULL; else - memset((void*)new_link_mem, 0, (*tail)->mem_size); + memset((void*)new_link_mem, 0, resp_size); + + if(resp_size <= sizeof(struct link_mem_t) + (*tail)->slab_size) + { + mm_free(new_link_mem, resp_size); + return NULL; + } (*tail)->next_link_mem = new_link_mem; - new_link_mem->mem_size = (*tail)->mem_size; - new_link_mem->slab_num = (*tail)->slab_num; + new_link_mem->mem_size = resp_size; + new_link_mem->slab_num = (resp_size - sizeof(struct link_mem_t)) / (*tail)->slab_size; new_link_mem->slab_size = (*tail)->slab_size; void* align_addr = (char*)new_link_mem + sizeof(struct link_mem_t); new_link_mem->addr = (char*)size_up_align((unsigned long)align_addr, (*tail)->slab_size); 
new_link_mem->next_link_mem = NULL; + + *tail = new_link_mem; return new_link_mem; } @@ -154,14 +189,18 @@ int remove_link_mem(struct link_mem_t** head, struct link_mem_t* ptr) return retval; } -static struct enclave_t* alloc_enclave() +/** + * \brief alloc an enclave_t structure from encalve_metadata_head + * + * eid represents the location in the list + * sometimes you may need to acquire lock before calling this function + */ +struct enclave_t* alloc_enclave() { struct link_mem_t *cur, *next; struct enclave_t* enclave = NULL; int i, found, eid; - spinlock_lock(&enclave_metadata_lock); - //enclave metadata list hasn't be initialized yet if(enclave_metadata_head == NULL) { @@ -212,18 +251,16 @@ static struct enclave_t* alloc_enclave() } alloc_eid_out: - spinlock_unlock(&enclave_metadata_lock); return enclave; } -static int free_enclave(int eid) +//sometimes you may need to acquire lock before calling this function +int free_enclave(int eid) { struct link_mem_t *cur, *next; struct enclave_t *enclave = NULL; int i, found, count, ret_val; - spinlock_lock(&enclave_metadata_lock); - found = 0; count = 0; for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem) @@ -247,19 +284,16 @@ static int free_enclave(int eid) ret_val = -1; } - spinlock_unlock(&enclave_metadata_lock); - return ret_val; } +//sometimes you may need to acquire lock before calling this function struct enclave_t* get_enclave(int eid) { struct link_mem_t *cur, *next; struct enclave_t *enclave; int i, found, count; - spinlock_lock(&enclave_metadata_lock); - found = 0; count = 0; for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem) @@ -281,10 +315,29 @@ struct enclave_t* get_enclave(int eid) enclave = NULL; } - spinlock_unlock(&enclave_metadata_lock); return enclave; } +/** + * \brief this function is used to handle IPC in enclave, + * it will return the last enclave in the chain. + * This is used to help us identify the real executing encalve. 
+ */ +struct enclave_t* __get_real_enclave(int eid) +{ + struct enclave_t* enclave = get_enclave(eid); + if(!enclave) + return NULL; + + struct enclave_t* real_enclave = NULL; + if(enclave->cur_callee_eid == -1) + real_enclave = enclave; + else + real_enclave = get_enclave(enclave->cur_callee_eid); + + return real_enclave; +} + int swap_from_host_to_enclave(uintptr_t* host_regs, struct enclave_t* enclave) { //grant encalve access to memory @@ -370,22 +423,114 @@ int swap_from_enclave_to_host(uintptr_t* regs, struct enclave_t* enclave) return 0; } +static int __enclave_call(uintptr_t* regs, struct enclave_t* top_caller_enclave, struct enclave_t* caller_enclave, struct enclave_t* callee_enclave) +{ + //move caller's host context to callee's host context + uintptr_t encl_ptbr = callee_enclave->thread_context.encl_ptbr; + memcpy((void*)(&(callee_enclave->thread_context)), (void*)(&(caller_enclave->thread_context)), sizeof(struct thread_state_t)); + callee_enclave->thread_context.encl_ptbr = encl_ptbr; + callee_enclave->host_ptbr = caller_enclave->host_ptbr; + callee_enclave->ocall_func_id = caller_enclave->ocall_func_id; + callee_enclave->ocall_arg0 = caller_enclave->ocall_arg0; + callee_enclave->ocall_arg1 = caller_enclave->ocall_arg1; + callee_enclave->ocall_syscall_num = caller_enclave->ocall_syscall_num; + + //save caller's enclave context on its prev_state + swap_prev_state(&(caller_enclave->thread_context), regs); + caller_enclave->thread_context.prev_stvec = read_csr(stvec); + caller_enclave->thread_context.prev_mie = read_csr(mie); + caller_enclave->thread_context.prev_mideleg = read_csr(mideleg); + caller_enclave->thread_context.prev_medeleg = read_csr(medeleg); + caller_enclave->thread_context.prev_mepc = read_csr(mepc); + + //clear callee's enclave context + memset((void*)regs, 0, sizeof(struct general_registers_t)); + + //different platforms have differnt ptbr switch methods + switch_to_enclave_ptbr(&(callee_enclave->thread_context), 
callee_enclave->thread_context.encl_ptbr); + + //callee use caller's mie/mip + clear_csr(mip, MIP_MTIP); + clear_csr(mip, MIP_STIP); + clear_csr(mip, MIP_SSIP); + clear_csr(mip, MIP_SEIP); + + //transfer control to the callee enclave + write_csr(mepc, callee_enclave->entry_point); + + //mark that cpu is in callee enclave world now + enter_enclave_world(callee_enclave->eid); + + top_caller_enclave->cur_callee_eid = callee_enclave->eid; + caller_enclave->cur_callee_eid = callee_enclave->eid; + callee_enclave->caller_eid = caller_enclave->eid; + callee_enclave->top_caller_eid = top_caller_enclave->eid; + + __asm__ __volatile__ ("sfence.vma" : : : "memory"); + + return 0; +} + +static int __enclave_return(uintptr_t* regs, struct enclave_t* callee_enclave, struct enclave_t* caller_enclave, struct enclave_t* top_caller_enclave) +{ + //restore caller's context + memcpy((void*)regs, (void*)(&(caller_enclave->thread_context.prev_state)), sizeof(struct general_registers_t)); + swap_prev_stvec(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_stvec); + swap_prev_mie(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_mie); + swap_prev_mideleg(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_mideleg); + swap_prev_medeleg(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_medeleg); + swap_prev_mepc(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_mepc); + + //restore caller's host context + memcpy((void*)(&(caller_enclave->thread_context.prev_state)), (void*)(&(callee_enclave->thread_context.prev_state)), sizeof(struct general_registers_t)); + + //clear callee's enclave context + uintptr_t encl_ptbr = callee_enclave->thread_context.encl_ptbr; + memset((void*)(&(callee_enclave->thread_context)), 0, sizeof(struct thread_state_t)); + callee_enclave->thread_context.encl_ptbr = encl_ptbr; + callee_enclave->host_ptbr = 0; + callee_enclave->ocall_func_id = NULL; + 
callee_enclave->ocall_arg0 = NULL; + callee_enclave->ocall_arg1 = NULL; + callee_enclave->ocall_syscall_num = NULL; + + //different platforms have differnt ptbr switch methods + switch_to_enclave_ptbr(&(caller_enclave->thread_context), caller_enclave->thread_context.encl_ptbr); + + clear_csr(mip, MIP_MTIP); + clear_csr(mip, MIP_STIP); + clear_csr(mip, MIP_SSIP); + clear_csr(mip, MIP_SEIP); + + //mark that cpu is in caller enclave world now + enter_enclave_world(caller_enclave->eid); + top_caller_enclave->cur_callee_eid = caller_enclave->eid; + caller_enclave->cur_callee_eid = -1; + callee_enclave->caller_eid = -1; + callee_enclave->top_caller_eid = -1; + + __asm__ __volatile__ ("sfence.vma" : : : "memory"); + + return 0; +} + uintptr_t create_enclave(struct enclave_sbi_param_t create_args) { - struct enclave_t* enclave; + struct enclave_t* enclave = NULL; + uintptr_t ret = 0; + acquire_enclave_metadata_lock(); + enclave = alloc_enclave(); if(!enclave) { - printm("M mode: create_enclave: enclave allocation is failed \r\n"); - return -1UL; + printm("M mode: create_enclave: enclave allocation is failed \r\n"); + ret = ENCLAVE_NO_MEMORY; + goto failed; } //TODO: check whether enclave memory is out of bound //TODO: verify enclave page table layout - - spinlock_lock(&enclave_metadata_lock); - enclave->paddr = create_args.paddr; enclave->size = create_args.size; enclave->entry_point = create_args.entry_point; @@ -396,16 +541,89 @@ uintptr_t create_enclave(struct enclave_sbi_param_t create_args) enclave->ocall_arg0 = create_args.ecall_arg1; enclave->ocall_arg1 = create_args.ecall_arg2; enclave->ocall_syscall_num = create_args.ecall_arg3; + enclave->kbuffer = create_args.kbuffer; + enclave->kbuffer_size = create_args.kbuffer_size; enclave->host_ptbr = read_csr(satp); - enclave->thread_context.encl_ptbr = (create_args.paddr >> (RISCV_PGSHIFT) | SATP_MODE_CHOICE); - enclave->root_page_table = (unsigned long*)create_args.paddr; + enclave->root_page_table = create_args.paddr + 
RISCV_PGSIZE; + enclave->thread_context.encl_ptbr = ((create_args.paddr + RISCV_PGSIZE) >> RISCV_PGSHIFT) | SATP_MODE_CHOICE; + enclave->type = NORMAL_ENCLAVE; enclave->state = FRESH; - - spinlock_unlock(&enclave_metadata_lock); + enclave->caller_eid = -1; + enclave->top_caller_eid = -1; + enclave->cur_callee_eid = -1; + + //traverse vmas + struct pm_area_struct* pma = (struct pm_area_struct*)(create_args.paddr); + struct vm_area_struct* vma = (struct vm_area_struct*)(create_args.paddr + sizeof(struct pm_area_struct)); + pma->paddr = create_args.paddr; + pma->size = create_args.size; + pma->free_mem = create_args.free_mem; + if(pma->free_mem < pma->paddr || pma->free_mem >= pma->paddr+pma->size + || pma->free_mem & ((1<pm_next = NULL; + enclave->pma_list = pma; + traverse_vmas(enclave->root_page_table, vma); + //FIXME: here we assume there are exactly text(include text/data/bss) vma and stack vma + while(vma) + { + if(vma->va_start == ENCLAVE_DEFAULT_TEXT_BASE) + { + enclave->text_vma = vma; + } + if(vma->va_end == ENCLAVE_DEFAULT_STACK_BASE) + { + enclave->stack_vma = vma; + enclave->_stack_top = enclave->stack_vma->va_start; + } + vma->pma = pma; + vma = vma->vm_next; + } + if(enclave->text_vma) + enclave->text_vma->vm_next = NULL; + if(enclave->stack_vma) + enclave->stack_vma->vm_next = NULL; + enclave->_heap_top = ENCLAVE_DEFAULT_HEAP_BASE; + enclave->heap_vma = NULL; + enclave->mmap_vma = NULL; + + enclave->free_pages = NULL; + enclave->free_pages_num = 0; + uintptr_t free_mem = create_args.paddr + create_args.size - RISCV_PGSIZE; + while(free_mem >= create_args.free_mem) + { + struct page_t *page = (struct page_t*)free_mem; + page->paddr = free_mem; + page->next = enclave->free_pages; + enclave->free_pages = page; + enclave->free_pages_num += 1; + free_mem -= RISCV_PGSIZE; + } + + //check kbuffer + if(create_args.kbuffer_size < RISCV_PGSIZE || create_args.kbuffer & (RISCV_PGSIZE-1) || create_args.kbuffer_size & (RISCV_PGSIZE-1)) + { + ret = ENCLAVE_ERROR; + 
printm("check kbuffer fail: ENCLAVE_ERROR"); + goto failed; + } + mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_KBUFFER, create_args.kbuffer, create_args.kbuffer_size); copy_word_to_host((unsigned int*)create_args.eid_ptr, enclave->eid); + release_enclave_metadata_lock(); - return 0; + return ret; + +failed: + if(enclave) + { + free_enclave(enclave->eid); + } + release_enclave_metadata_lock(); + return ret; } uintptr_t run_enclave(uintptr_t* regs, unsigned int eid) @@ -413,21 +631,27 @@ struct enclave_t* enclave; uintptr_t retval = 0; + acquire_enclave_metadata_lock(); + enclave = get_enclave(eid); if(!enclave) { printm("M mode: run_enclave: wrong enclave id\r\n"); - return -1UL; + retval = -1UL; + goto run_enclave_out; } - - spinlock_lock(&enclave_metadata_lock); - if(enclave->state != FRESH) { printm("M mode: run_enclave: enclave is not initialized or already used\r\n"); retval = -1UL; goto run_enclave_out; } + if(enclave->type == SERVER_ENCLAVE) + { + printm("M mode: run_enclave: server enclave is no need to run\r\n"); + retval = -1UL; + goto run_enclave_out; + } if(enclave->host_ptbr != read_csr(satp)) { printm("M mode: run_enclave: enclave doesn't belong to current host process\r\n"); @@ -448,7 +672,7 @@ uintptr_t run_enclave(uintptr_t* regs, unsigned int eid) set_csr(mie, MIP_MTIP); //set default stack - regs[2] = ENCLAVE_DEFAULT_STACK; + regs[2] = ENCLAVE_DEFAULT_STACK_BASE; //pass parameters regs[11] = (uintptr_t)enclave->entry_point; @@ -458,21 +682,23 @@ uintptr_t run_enclave(uintptr_t* regs, unsigned int eid) enclave->state = RUNNING; run_enclave_out: - spinlock_unlock(&enclave_metadata_lock); + release_enclave_metadata_lock(); return retval; } uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid) { uintptr_t retval = 0; + + acquire_enclave_metadata_lock(); + struct enclave_t *enclave = get_enclave(eid); if(!enclave) { printm("M mode: stop_enclave: wrong 
enclave id%d\r\n", eid); + release_enclave_metadata_lock(); return -1UL; } - - spinlock_lock(&enclave_metadata_lock); if(enclave->host_ptbr != read_csr(satp)) { @@ -489,13 +714,16 @@ uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid) enclave->state = STOPPED; stop_enclave_out: - spinlock_unlock(&enclave_metadata_lock); + release_enclave_metadata_lock(); return retval; } uintptr_t resume_from_stop(uintptr_t* regs, unsigned int eid) { uintptr_t retval = 0; + + acquire_enclave_metadata_lock(); + struct enclave_t* enclave = get_enclave(eid); if(!enclave) { @@ -503,8 +731,7 @@ + release_enclave_metadata_lock(); return -1UL; } - spinlock_lock(&enclave_metadata_lock); - if(enclave->host_ptbr != read_csr(satp)) { printm("M mode: resume_from_stop: enclave doesn't belong to current host process\r\n"); @@ -522,22 +748,24 @@ enclave->state = RUNNABLE; resume_from_stop_out: - spinlock_unlock(&enclave_metadata_lock); + release_enclave_metadata_lock(); return retval; } uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid) { uintptr_t retval = 0; - struct enclave_t* enclave = get_enclave(eid); + + acquire_enclave_metadata_lock(); + + struct enclave_t* enclave = __get_real_enclave(eid); if(!enclave) { printm("M mode: resume_enclave: wrong enclave id%d\r\n", eid); + release_enclave_metadata_lock(); return -1UL; } - spinlock_lock(&enclave_metadata_lock); - if(enclave->host_ptbr != read_csr(satp)) { printm("M mode: resume_enclave: enclave doesn't belong to current host process\r\n"); @@ -578,7 +805,83 @@ retval = regs[10]; resume_enclave_out: - spinlock_unlock(&enclave_metadata_lock); + release_enclave_metadata_lock(); + return retval; +} + +uintptr_t mmap_after_resume(struct enclave_t *enclave, uintptr_t paddr, uintptr_t size) +{ + uintptr_t retval = 0; + uintptr_t vaddr = enclave->thread_context.prev_state.a1; + if(!vaddr) + { + vaddr = ENCLAVE_DEFAULT_MMAP_BASE - (size - RISCV_PGSIZE); + } + 
struct pm_area_struct *pma = (struct pm_area_struct*)paddr; + struct vm_area_struct *vma = (struct vm_area_struct*)(paddr + sizeof(struct pm_area_struct)); + pma->paddr = paddr; + pma->size = size; + pma->pm_next = NULL; + vma->va_start = vaddr; + vma->va_end = vaddr + size - RISCV_PGSIZE; + vma->vm_next = NULL; + vma->pma = pma; + if(insert_vma(&(enclave->mmap_vma), vma, ENCLAVE_DEFAULT_MMAP_BASE) < 0) + { + vma->va_end = enclave->mmap_vma->va_start; + vma->va_start = vma->va_end - (size - RISCV_PGSIZE); + vma->vm_next = enclave->mmap_vma; + enclave->mmap_vma = vma; + } + insert_pma(&(enclave->pma_list), pma); + mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), vma->va_start, paddr + RISCV_PGSIZE, size - RISCV_PGSIZE); + retval = vma->va_start; + return retval; +} + +//host use this function to re-enter enclave world +uintptr_t resume_from_ocall(uintptr_t* regs, unsigned int eid) +{ + uintptr_t retval = 0; + uintptr_t ocall_func_id = regs[12]; + struct enclave_t* enclave = NULL; + + acquire_enclave_metadata_lock(); + + enclave = __get_real_enclave(eid); + if(!enclave || enclave->state != OCALLING || enclave->host_ptbr != read_csr(satp)) + { + retval = -1UL; + goto out; + } + + switch(ocall_func_id) + { + case OCALL_MMAP: + retval = mmap_after_resume(enclave, regs[13], regs[14]); + if(retval == -1UL) + goto out; + break; + case OCALL_UNMAP: + retval = 0; + break; + case OCALL_SYS_WRITE: + retval = enclave->thread_context.prev_state.a0; + break; + default: + retval = 0; + break; + } + + if(swap_from_host_to_enclave(regs, enclave) < 0) + { + retval = -1UL; + goto out; + } + enclave->state = RUNNING; + +out: + release_enclave_metadata_lock(); return retval; } @@ -596,6 +899,8 @@ uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval) return -1; } + acquire_enclave_metadata_lock(); + eid = get_enclave_id(); enclave = get_enclave(eid); if(!enclave) { @@ -604,8 +909,7 @@ + release_enclave_metadata_lock(); return -1UL; } - 
spinlock_lock(&enclave_metadata_lock); - if(check_enclave_authentication(enclave) < 0) { printm("M mode: exit_enclave: current enclave's eid is not %d\r\n", eid); @@ -619,18 +922,127 @@ uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval) //TODO: support multiple memory region memset((void*)(enclave->paddr), 0, enclave->size); mm_free((void*)(enclave->paddr), enclave->size); - - spinlock_unlock(&enclave_metadata_lock); //free enclave struct free_enclave(eid); + + release_enclave_metadata_lock(); return 0; } +uintptr_t enclave_mmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size) +{ + uintptr_t ret = 0; + int eid = get_curr_enclave_id(); + struct enclave_t* enclave = NULL; + if(check_in_enclave_world() < 0) + return -1; + if(vaddr) + { + if(vaddr & (RISCV_PGSIZE - 1) || size < RISCV_PGSIZE || size & (RISCV_PGSIZE - 1)) + return -1; + } + + acquire_enclave_metadata_lock(); + + enclave = get_enclave(eid); + if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->state != RUNNING) + { + ret = -1UL; + goto out; + } + + copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_MMAP); + copy_dword_to_host((uintptr_t*)enclave->ocall_arg1, size + RISCV_PGSIZE); + + swap_from_enclave_to_host(regs, enclave); + enclave->state = OCALLING; + ret = ENCLAVE_OCALL; + +out: + release_enclave_metadata_lock(); + return ret; +} + +uintptr_t enclave_unmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size) +{ + uintptr_t ret = 0; + int eid = get_curr_enclave_id(); + struct enclave_t* enclave = NULL; + struct vm_area_struct *vma = NULL; + struct pm_area_struct *pma = NULL; + if(check_in_enclave_world() < 0) + return -1; + + acquire_enclave_metadata_lock(); + + enclave = get_enclave(eid); + if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->state != RUNNING) + { + ret = -1UL; + goto out; + } + + vma = find_vma(enclave->mmap_vma, vaddr, size); + if(!vma) + { + ret = -1UL; + goto out; + } + pma = vma->pma; + delete_vma(&(enclave->mmap_vma), vma); 
+ delete_pma(&(enclave->pma_list), pma); + vma->vm_next = NULL; + pma->pm_next = NULL; + unmap((uintptr_t*)(enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start); + + copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_UNMAP); + copy_dword_to_host((uintptr_t*)enclave->ocall_arg0, pma->paddr); + copy_dword_to_host((uintptr_t*)enclave->ocall_arg1, pma->size); + + swap_from_enclave_to_host(regs, enclave); + enclave->state = OCALLING; + ret = ENCLAVE_OCALL; + +out: + release_enclave_metadata_lock(); + return ret; +} + +uintptr_t enclave_sys_write(uintptr_t* regs) +{ + uintptr_t ret = 0; + int eid = get_curr_enclave_id(); + struct enclave_t* enclave = NULL; + if(check_in_enclave_world() < 0) + return -1; + + acquire_enclave_metadata_lock(); + + enclave = get_enclave(eid); + if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING) + { + ret = -1UL; + goto out; + } + + copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_SYS_WRITE); + + swap_from_enclave_to_host(regs, enclave); + enclave->state = OCALLING; + ret = ENCLAVE_OCALL; +out: + release_enclave_metadata_lock(); + return ret; +} + uintptr_t do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) { uintptr_t retval = 0; + + acquire_enclave_metadata_lock(); + unsigned int eid = get_enclave_id(); struct enclave_t *enclave = get_enclave(eid); if(!enclave) { @@ -639,8 +1051,7 @@ + release_enclave_metadata_lock(); return -1UL; } - spinlock_lock(&enclave_metadata_lock); - //TODO: check whether this enclave is destroyed if(enclave->state == DESTROYED) { @@ -658,6 +1068,242 @@ uintptr_t do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) regs[10] = ENCLAVE_TIMER_IRQ; timer_irq_out: - spinlock_unlock(&enclave_metadata_lock); + release_enclave_metadata_lock(); + return retval; +} + +uintptr_t call_enclave(uintptr_t* regs, unsigned int callee_eid, uintptr_t arg) +{ + printm("call_enclave start!\r\n"); + 
struct enclave_t* top_caller_enclave = NULL; + struct enclave_t* caller_enclave = NULL; + struct enclave_t* callee_enclave = NULL; + struct vm_area_struct* vma = NULL; + struct pm_area_struct* pma = NULL; + uintptr_t retval = 0; + int caller_eid = get_curr_enclave_id(); + if(check_in_enclave_world() < 0) + return -1; + + acquire_enclave_metadata_lock(); + caller_enclave = get_enclave(caller_eid); + if(!caller_enclave || caller_enclave->state != RUNNING || check_enclave_authentication(caller_enclave) != 0) + { + printm("M mode: call_enclave: enclave%d can not execute call_enclave!\r\n", caller_eid); + retval = -1UL; + goto out; + } + if(caller_enclave->caller_eid != -1) + top_caller_enclave = get_enclave(caller_enclave->top_caller_eid); + else + top_caller_enclave = caller_enclave; + if(!top_caller_enclave || top_caller_enclave->state != RUNNING) + { + printm("M mode: call_enclave: enclave%d can not execute call_enclave!\r\n", caller_eid); + retval = -1UL; + goto out; + } + + callee_enclave = get_enclave(callee_eid); + if(!callee_enclave || callee_enclave->type != SERVER_ENCLAVE || callee_enclave->caller_eid != -1 || callee_enclave->state != RUNNABLE) + { + printm("M mode: call_enclave: enclave%d can not be accessed!\r\n", callee_eid); + retval = -1UL; + goto out; + } + + struct call_enclave_arg_t call_arg; + struct call_enclave_arg_t* call_arg0 = va_to_pa((uintptr_t*)(caller_enclave->root_page_table), (void*)arg); + if(!call_arg0) + { + retval = -1UL; + goto out; + } + + copy_from_host(&call_arg, call_arg0, sizeof(struct call_enclave_arg_t)); + if(call_arg.req_vaddr != 0) + { + if(call_arg.req_vaddr & (RISCV_PGSIZE - 1) || call_arg.req_size < RISCV_PGSIZE || call_arg.req_size & (RISCV_PGSIZE - 1)) + { + retval = -1UL; + goto out; + } + vma = find_vma(caller_enclave->mmap_vma, call_arg.req_vaddr, call_arg.req_size); + if(!vma) + { + retval = -1UL; + goto out; + } + pma = vma->pma; + delete_vma(&(caller_enclave->mmap_vma), vma); + 
delete_pma(&(caller_enclave->pma_list), pma); + vma->vm_next = NULL; + pma->pm_next = NULL; + unmap((uintptr_t*)(caller_enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start); + if(insert_vma(&(callee_enclave->mmap_vma), vma, ENCLAVE_DEFAULT_MMAP_BASE) < 0) + { + vma->va_end = callee_enclave->mmap_vma->va_start; + vma->va_start = vma->va_end - (pma->size - RISCV_PGSIZE); + vma->vm_next = callee_enclave->mmap_vma; + callee_enclave->mmap_vma = vma; + } + insert_pma(&(callee_enclave->pma_list), pma); + mmap((uintptr_t*)(callee_enclave->root_page_table), &(callee_enclave->free_pages), vma->va_start, pma->paddr + RISCV_PGSIZE, pma->size - RISCV_PGSIZE); + } + + if(__enclave_call(regs, top_caller_enclave, caller_enclave, callee_enclave) < 0) + { + printm("M mode: call_enclave: enclave can not be run\r\n"); + retval = -1UL; + goto out; + } + + //set return address to enclave + write_csr(mepc, (uintptr_t)(callee_enclave->entry_point)); + + //enable timer interrupt + set_csr(mie, MIP_MTIP); + + //set default stack + regs[2] = ENCLAVE_DEFAULT_STACK_BASE; + + //map kbuffer + mmap((uintptr_t*)(callee_enclave->root_page_table), &(callee_enclave->free_pages), ENCLAVE_DEFAULT_KBUFFER, top_caller_enclave->kbuffer, top_caller_enclave->kbuffer_size); + + //pass parameters + regs[10] = call_arg.req_arg; + if(call_arg.req_vaddr) + regs[11] = vma->va_start; + else + regs[11] = 0; + regs[12] = call_arg.req_size; + retval = call_arg.req_arg; + + callee_enclave->state = RUNNING; + +out: + release_enclave_metadata_lock(); + printm("call_enclave over!\r\n"); return retval; } + +uintptr_t enclave_return(uintptr_t* regs, uintptr_t arg) +{ + printm("enclave_return start!\r\n"); + struct enclave_t *enclave = NULL; + struct enclave_t *caller_enclave = NULL; + struct enclave_t *top_caller_enclave = NULL; + int eid = 0; + uintptr_t ret = 0; + struct vm_area_struct *vma = NULL; + struct pm_area_struct *pma = NULL; + + if(check_in_enclave_world() < 0) + { + printm("M mode: 
enclave_return: cpu is not in enclave world now\r\n"); + return -1UL; + } + + acquire_enclave_metadata_lock(); + + eid = get_curr_enclave_id(); + enclave = get_enclave(eid); + if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->type != SERVER_ENCLAVE) + { + printm("M mode: enclave_return: enclave%d can not return!\r\n", eid); + ret = -1UL; + goto out; + } + struct call_enclave_arg_t ret_arg; + struct call_enclave_arg_t* ret_arg0 = va_to_pa((uintptr_t*)(enclave->root_page_table), (void*)arg); + if(!ret_arg0) + { + ret = -1UL; + goto out; + } + copy_from_host(&ret_arg, ret_arg0, sizeof(struct call_enclave_arg_t)); + + caller_enclave = get_enclave(enclave->caller_eid); + top_caller_enclave = get_enclave(enclave->top_caller_eid); + __enclave_return(regs, enclave, caller_enclave, top_caller_enclave); + unmap((uintptr_t*)(enclave->root_page_table), ENCLAVE_DEFAULT_KBUFFER, top_caller_enclave->kbuffer_size); + + //there is no need to check call_arg's validity again as it is already checked when executing call_enclave() + struct call_enclave_arg_t *call_arg = va_to_pa((uintptr_t*)(caller_enclave->root_page_table), (void*)(regs[11])); + +restore_req_addr: + if(!call_arg->req_vaddr || !ret_arg.req_vaddr || ret_arg.req_vaddr & (RISCV_PGSIZE - 1) + || ret_arg.req_size < call_arg->req_size || ret_arg.req_size & (RISCV_PGSIZE - 1)) + { + call_arg->req_vaddr = 0; + goto restore_resp_addr; + } + vma = find_vma(enclave->mmap_vma, ret_arg.req_vaddr, ret_arg.req_size); + if(!vma) + { + call_arg->req_vaddr = 0; + goto restore_resp_addr; + } + pma = vma->pma; + delete_vma(&(enclave->mmap_vma), vma); + delete_pma(&(enclave->pma_list), pma); + unmap((uintptr_t*)(enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start); + vma->va_start = call_arg->req_vaddr; + vma->va_end = vma->va_start + pma->size - RISCV_PGSIZE; + vma->vm_next = NULL; + pma->pm_next = NULL; + if(insert_vma(&(caller_enclave->mmap_vma), vma, ENCLAVE_DEFAULT_MMAP_BASE) < 0) + { + 
vma->va_end = caller_enclave->mmap_vma->va_start; + vma->va_start = vma->va_end - (pma->size - RISCV_PGSIZE); + vma->vm_next = caller_enclave->mmap_vma; + caller_enclave->mmap_vma = vma; + } + insert_pma(&(caller_enclave->pma_list), pma); + mmap((uintptr_t*)(caller_enclave->root_page_table), &(caller_enclave->free_pages), vma->va_start, pma->paddr + RISCV_PGSIZE, pma->size - RISCV_PGSIZE); + call_arg->req_vaddr = vma->va_start; + +restore_resp_addr: + if(!ret_arg.resp_vaddr || ret_arg.resp_vaddr & (RISCV_PGSIZE - 1) + || ret_arg.resp_size < RISCV_PGSIZE || ret_arg.resp_size & (RISCV_PGSIZE - 1)) + { + call_arg->resp_vaddr = 0; + call_arg->resp_size = 0; + goto restore_return_val; + } + + vma = find_vma(enclave->mmap_vma, ret_arg.resp_vaddr, ret_arg.resp_size); + if(!vma) + { + call_arg->resp_vaddr = 0; + call_arg->resp_size = 0; + goto restore_return_val; + } + + pma = vma->pma; + delete_vma(&(enclave->mmap_vma), vma); + delete_pma(&(enclave->pma_list), pma); + unmap((uintptr_t*)(enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start); + vma->vm_next = NULL; + pma->pm_next = NULL; + if(caller_enclave->mmap_vma) + vma->va_end = caller_enclave->mmap_vma->va_start; + else + vma->va_end = ENCLAVE_DEFAULT_MMAP_BASE; + vma->va_start = vma->va_end - (pma->size - RISCV_PGSIZE); + vma->vm_next = caller_enclave->mmap_vma; + caller_enclave->mmap_vma = vma; + insert_pma(&(caller_enclave->pma_list), pma); + mmap((uintptr_t*)(caller_enclave->root_page_table), &(caller_enclave->free_pages), vma->va_start, pma->paddr + RISCV_PGSIZE, pma->size - RISCV_PGSIZE); + call_arg->resp_vaddr = vma->va_start; + call_arg->resp_size = ret_arg.resp_size; + +restore_return_val: + call_arg->resp_val = ret_arg.resp_val; + enclave->state = RUNNABLE; + ret = 0; +out: + release_enclave_metadata_lock(); + printm("enclave_return over!\r\n"); + return ret; +} diff --git a/sm/enclave.h b/sm/enclave.h index 8c9641e..5e327e0 100644 --- a/sm/enclave.h +++ b/sm/enclave.h @@ -32,8 +32,33 @@ 
typedef enum RUNNABLE, RUNNING, STOPPED, + OCALLING } enclave_state_t; +struct vm_area_struct +{ + unsigned long va_start; + unsigned long va_end; + + struct vm_area_struct *vm_next; + struct pm_area_struct *pma; +}; + +struct pm_area_struct +{ + unsigned long paddr; + unsigned long size; + unsigned long free_mem; + + struct pm_area_struct *pm_next; +}; + +struct page_t +{ + uintptr_t paddr; + struct page_t *next; +}; + /* * enclave memory [paddr, paddr + size] * free_mem @ unused memory address in enclave mem @@ -41,8 +66,22 @@ typedef enum struct enclave_t { unsigned int eid; + enclave_type_t type; enclave_state_t state; + ///vm_area_struct lists + struct vm_area_struct* text_vma; + struct vm_area_struct* stack_vma; + uintptr_t _stack_top; ///lowest address of stack area + struct vm_area_struct* heap_vma; + uintptr_t _heap_top; ///highest address of heap area + struct vm_area_struct* mmap_vma; + + ///pm_area_struct list + struct pm_area_struct* pma_list; + struct page_t* free_pages; + uintptr_t free_pages_num; + //memory region of enclave unsigned long paddr; unsigned long size; @@ -54,12 +93,18 @@ struct enclave_t unsigned long* enclave_mem_metadata_page; //root page table of enclave - unsigned long* root_page_table; + unsigned long root_page_table; + //root page table register for host unsigned long host_ptbr; + //entry point of enclave unsigned long entry_point; + ///shared mem with kernel + unsigned long kbuffer;//paddr + unsigned long kbuffer_size; + unsigned long* ocall_func_id; unsigned long* ocall_arg0; unsigned long* ocall_arg1; @@ -72,6 +117,9 @@ struct enclave_t //enclave thread context //TODO: support multiple threads struct thread_state_t thread_context; + unsigned int top_caller_eid; + unsigned int caller_eid; + unsigned int cur_callee_eid; }; struct cpu_state_t @@ -80,8 +128,22 @@ struct cpu_state_t int eid; }; +void acquire_enclave_metadata_lock(); +void release_enclave_metadata_lock(); + +int get_curr_enclave_id(); +struct enclave_t* 
get_enclave(int eid); + uintptr_t copy_from_host(void* dest, void* src, size_t size); uintptr_t copy_to_host(void* dest, void* src, size_t size); +int copy_word_to_host(unsigned int* ptr, uintptr_t value); +int copy_dword_to_host(uintptr_t* ptr, uintptr_t value); + +struct link_mem_t* init_mem_link(unsigned long mem_size, unsigned long slab_size); +struct link_mem_t* add_link_mem(struct link_mem_t** tail); + +struct enclave_t* alloc_enclave(); +int free_enclave(int eid); uintptr_t create_enclave(struct enclave_sbi_param_t create_args); uintptr_t run_enclave(uintptr_t* regs, unsigned int eid); @@ -90,5 +152,23 @@ uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid); uintptr_t resume_from_stop(uintptr_t* regs, unsigned int eid); uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval); uintptr_t do_timer_irq(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc); +uintptr_t resume_from_ocall(uintptr_t* regs, unsigned int eid); + +uintptr_t enclave_mmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size); +uintptr_t enclave_unmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size); +uintptr_t enclave_sys_write(uintptr_t *regs); + +struct call_enclave_arg_t +{ + uintptr_t req_arg; + uintptr_t req_vaddr; + uintptr_t req_size; + uintptr_t resp_val; + uintptr_t resp_vaddr; + uintptr_t resp_size; +}; + +uintptr_t call_enclave(uintptr_t *regs, unsigned int enclave_id, uintptr_t arg); +uintptr_t enclave_return(uintptr_t *regs, uintptr_t arg); #endif /* _ENCLAVE_H */ diff --git a/sm/enclave_args.h b/sm/enclave_args.h index 6516f70..1e691b7 100644 --- a/sm/enclave_args.h +++ b/sm/enclave_args.h @@ -2,6 +2,8 @@ #define _ENCLAVE_ARGS_H #include "thread.h" +#define NAME_LEN 16 + struct mm_alloc_arg_t { unsigned long req_size; @@ -9,6 +11,12 @@ struct mm_alloc_arg_t unsigned long resp_size; }; +typedef enum +{ + NORMAL_ENCLAVE = 0, + SERVER_ENCLAVE = 1 +} enclave_type_t; + /* * enclave memory [paddr, paddr + size] * free_mem @ unused memory address in enclave mem @@ -16,12 
+24,18 @@ struct mm_alloc_arg_t struct enclave_sbi_param_t { unsigned int *eid_ptr; + char name[NAME_LEN]; + enclave_type_t type; + unsigned long paddr; unsigned long size; unsigned long entry_point; unsigned long untrusted_ptr; unsigned long untrusted_size; unsigned long free_mem; + //enclave shared mem with kernel + unsigned long kbuffer;//paddr + unsigned long kbuffer_size; unsigned long *ecall_arg0; unsigned long *ecall_arg1; unsigned long *ecall_arg2; diff --git a/sm/enclave_vm.c b/sm/enclave_vm.c new file mode 100644 index 0000000..0e3afd0 --- /dev/null +++ b/sm/enclave_vm.c @@ -0,0 +1,403 @@ +#include "vm.h" +#include "mtrap.h" +#include "enclave_vm.h" + +/** + * \brief internal functions of check_enclave_layout, it will recursively check the region + * + * \param page_table is an PT page (physical addr), it could be non-root PT page + * \param vaddr is the start virtual addr of the PTE in page_table + * \param level is the PT page level of page_table + */ +static int __check_enclave_layout(uintptr_t page_table, uintptr_t va_start, uintptr_t va_end, uintptr_t pa_start, uintptr_t pa_end, uintptr_t vaddr, int level) +{ + if(level < 0) + { + return -1; + } + + uintptr_t* pte = (uintptr_t*)page_table; + uintptr_t region_size = RISCV_PGSIZE * (1 << (level*RISCV_PGLEVEL_BITS)); + for(int i=0; i < (RISCV_PGSIZE/sizeof(uintptr_t)); ++i) + { + uintptr_t addr0 = vaddr + i*region_size; + uintptr_t addr1 = addr0 + region_size; + if(addr1 <= va_start || addr0 >= va_end) + { + continue; + } + + if(PTE_VALID(pte[i])) + { + if(PTE_ILLEGAL(pte[i])) + { + return -1; + } + addr0 = PTE_TO_PFN(pte[i]) << RISCV_PGSHIFT; + addr1 = addr0 + region_size; + if(IS_LEAF_PTE(pte[i])) + { + if(!(addr0 >= pa_start && addr1 <= pa_end)) + { + // printm("here: addr0: %lx, addr1: 0x%lx, pa_start: 0x%lx, pa_end: 0x%lx\r\n", addr0, addr1, pa_start, pa_end); + return -1; + } + } + else if(__check_enclave_layout(PTE_TO_PFN(pte[i]) << RISCV_PGSHIFT, va_start, va_end, + pa_start, pa_end, addr0, 
level-1)) + { + return -1; + } + } + } + return 0; +} + +/** + * \brief check whether a VM region is mapped (only) to a PM region + * + * \param root_page_table is the root of the pgae table + * \param va_start is the start of the VM region + * \param va_end is the end of the VM region + * \param pa_start is the start of the PM region + * \param pa_end is the end of the PM region + */ +int check_enclave_layout(uintptr_t root_page_table, uintptr_t va_start, uintptr_t va_end, uintptr_t pa_start, uintptr_t pa_end) +{ + return __check_enclave_layout(root_page_table, va_start, va_end, pa_start, pa_end, 0, RISCV_PGLEVELS-1); +} + +static void __traverse_vmas(uintptr_t page_table, struct vm_area_struct *vma_list, int *vma_num, uintptr_t *va_start, uintptr_t *va_end, uintptr_t vaddr, int level) +{ + if(level < 0) + { + return; + } + + uintptr_t *pte = (uintptr_t*)page_table; + uintptr_t region_size = RISCV_PGSIZE * (1 << (level * RISCV_PGLEVEL_BITS)); + for(int i = 0; i < (RISCV_PGSIZE / sizeof(uintptr_t)); ++i) + { + if(!PTE_VALID(pte[i])) + { + if((*va_start) && (*va_end)) + { + vma_list[*vma_num].va_start = *va_start; + vma_list[*vma_num].va_end = *va_end; + vma_list[*vma_num].vm_next = (struct vm_area_struct*)(&vma_list[*vma_num + 1]); + //printm("here1:vma_num:%d, va_start:0x%lx, va_end:0x%lx\r\n", *vma_num, *va_start, *va_end); + *va_start = 0; + *va_end = 0; + *vma_num += 1; + } + continue; + } + + if(IS_LEAF_PTE(pte[i])) + { + if(!(*va_start)) + { + *va_start = vaddr + i*region_size; + } + *va_end = vaddr + (i+1)*region_size; + } + else + { + __traverse_vmas(PTE_TO_PFN(pte[i]) << RISCV_PGSHIFT, vma_list, vma_num, + va_start, va_end, vaddr + i * region_size, level - 1); + } + } + + if(level == (RISCV_PGLEVELS - 1) && (*va_start) && (*va_end)) + { + vma_list[*vma_num].va_start = *va_start; + vma_list[*vma_num].va_end = *va_end; + vma_list[*vma_num].vm_next = 0; + //printm("here2:vma_num:%d, va_start:0x%lx, va_end:0x%lx\r\n", *vma_num, *va_start, *va_end); + *va_start 
= 0; + *va_end = 0; + *vma_num += 1; + } + else if(level == (RISCV_PGLEVELS - 1) && *vma_num) + { + //printm("here3:vma_num:%d, va_start:0x%lx, va_end:0x%lx\r\n", *vma_num, vma_list[*vma_num-1].va_start, vma_list[*vma_num-1].va_end); + vma_list[*vma_num - 1].vm_next = 0; + } +} + +//should only be called during create_enclave as two vma may be mistakely regarded as one +//after monitor map new pages for enclave +void traverse_vmas(uintptr_t root_page_table, struct vm_area_struct *vma_list) +{ + uintptr_t va_start = 0; + uintptr_t va_end = 0; + int vma_num = 0; + __traverse_vmas(root_page_table, vma_list, &vma_num, &va_start, &va_end, 0, RISCV_PGLEVELS - 1); + //printm("traverse_vmas: vma_num is %d\r\n", vma_num); +} + +void* __va_to_pa(uintptr_t* page_table, uintptr_t *va, int level) +{ + if(!page_table || level<0) + return NULL; + + uintptr_t page_size_bits = RISCV_PGSHIFT + level * RISCV_PGLEVEL_BITS; + uintptr_t pos = (((uintptr_t)va) >> page_size_bits) & ((1<va_end > up_bound) + return -1; + + struct vm_area_struct* first_vma = *vma_list; + if(!first_vma || (first_vma->va_start >= vma->va_end)) + { + vma->vm_next = first_vma; + *vma_list = vma; + return 0; + } + + int found = 0; + struct vm_area_struct* second_vma = first_vma->vm_next; + while(second_vma) + { + if((first_vma->va_end <= vma->va_start) && (second_vma->va_start >= vma->va_end)) + { + vma->vm_next = second_vma; + first_vma->vm_next = vma; + found = 1; + break; + } + first_vma = second_vma; + second_vma = second_vma->vm_next; + } + if(!found) + { + if(first_vma && (first_vma->va_end <= vma->va_start)) + { + first_vma->vm_next = vma; + vma->vm_next = NULL; + return 0; + } + return -1; + } + + return 0; +} + +int delete_vma(struct vm_area_struct **vma_list, struct vm_area_struct *vma) +{ + struct vm_area_struct *last_vma = (struct vm_area_struct*)(*vma_list); + if(last_vma->va_start <= vma->va_start && last_vma->va_end >= vma->va_end) + { + *vma_list = last_vma->vm_next; + vma->vm_next = NULL; + 
last_vma->vm_next = NULL; + return 0; + } + + struct vm_area_struct *cur_vma = last_vma->vm_next; + while(cur_vma) + { + if(cur_vma->va_start <= vma->va_start && cur_vma->va_end >= vma->va_end) + { + last_vma->vm_next = cur_vma->vm_next; + vma->vm_next = NULL; + cur_vma->vm_next = NULL; + return 0; + } + last_vma = cur_vma; + cur_vma = cur_vma->vm_next; + } + + return -1; +} + +struct vm_area_struct* find_vma(struct vm_area_struct *vma_list, uintptr_t vaddr, uintptr_t size) +{ + uintptr_t va_start = vaddr; + uintptr_t va_end = vaddr + size; + struct vm_area_struct *vma = vma_list; + while(vma) + { + if(vma->va_start <= va_start && vma->va_end >= va_end) + { + return vma; + } + vma = vma->vm_next; + } + return NULL; +} + +int insert_pma(struct pm_area_struct **pma_list, struct pm_area_struct *pma) +{ + pma->pm_next = *pma_list; + *pma_list = pma; + return 0; +} + +int delete_pma(struct pm_area_struct **pma_list, struct pm_area_struct *pma) +{ + struct pm_area_struct *last_pma = *pma_list; + if(last_pma->paddr == pma->paddr && last_pma->size == pma->size) + { + *pma_list = last_pma->pm_next; + pma->pm_next = NULL; + last_pma->pm_next = NULL; + return 0; + } + + struct pm_area_struct *cur_pma = last_pma->pm_next; + while(cur_pma) + { + if(cur_pma->paddr == pma->paddr && cur_pma->size == pma->size) + { + last_pma->pm_next = cur_pma->pm_next; + pma->pm_next = NULL; + cur_pma->pm_next = NULL; + return 0; + } + last_pma = cur_pma; + cur_pma = cur_pma->pm_next; + } + + return -1; +} + +static uintptr_t *__pte_walk_create(uintptr_t *page_table, struct page_t **free_pages, uintptr_t va, int level) +{ + uintptr_t pos = (va >> (RISCV_PGSHIFT + level * RISCV_PGLEVEL_BITS)) & ((1<paddr; + *free_pages = (*free_pages)->next; + page_table[pos] = pte_create(paddr>>RISCV_PGSHIFT, PTE_V); + } + return __pte_walk_create((uintptr_t*)(PTE_TO_PFN(page_table[pos]) << RISCV_PGSHIFT), + free_pages, va, level - 1); +} + +static uintptr_t *pte_walk_create(uintptr_t *root_page_table, struct 
page_t **free_pages, uintptr_t va) +{ + return __pte_walk_create(root_page_table, free_pages, va, RISCV_PGLEVELS - 1); +} + +static uintptr_t *__pte_walk(uintptr_t *page_table, uintptr_t va, int level) +{ + uintptr_t pos = (va >> (RISCV_PGSHIFT + level * RISCV_PGLEVEL_BITS)) & ((1<>RISCV_PGSHIFT, flag | PTE_V); + return 0; +} + +static int unmap_one_page(uintptr_t *root_page_table, uintptr_t va) +{ + uintptr_t *pte = pte_walk(root_page_table, va); + if(!pte) + return -1; + *pte = 0; + return 0; +} + +int mmap(uintptr_t* root_page_table, struct page_t **free_pages, uintptr_t vaddr, uintptr_t paddr, uintptr_t size) +{ + uintptr_t va = vaddr; + uintptr_t pa = paddr; + uintptr_t va_end = vaddr + size; + while(va < va_end) + { + if(map_one_page(root_page_table, free_pages, va, pa, PTE_D | PTE_A | PTE_R | PTE_W | PTE_U | PTE_V) != 0) + { + printm("mmap failed\r\n"); + return -1; + } + va += RISCV_PGSIZE; + pa += RISCV_PGSIZE; + } + return 0; +} + +int unmap(uintptr_t* root_page_table, uintptr_t vaddr, uintptr_t size) +{ + uintptr_t va = vaddr; + uintptr_t va_end = vaddr + size; + while(va < va_end) + { + unmap_one_page(root_page_table, va); + va += RISCV_PGSIZE; + } + return 0; +} diff --git a/sm/enclave_vm.h b/sm/enclave_vm.h new file mode 100644 index 0000000..84d7063 --- /dev/null +++ b/sm/enclave_vm.h @@ -0,0 +1,60 @@ +#ifndef _ENCLAVE_VM_H +#define _ENCLAVE_VM_H + +#include "enclave.h" +#include "encoding.h" +#include "vm.h" + +//default layout of enclave +//##################### +//# reserved for # +//# s mode # +//##################### 0xffffffe000000000 //actually this is the start address of kernel's image +//# hole # +//##################### 0x0000004000000000 +//# # +//# stack # +//# # +//##################### 0x0000003000000000 +//# # +//# mmap # +//# # +//# # +//# heap # +//# # +//##################### 0x0000002000000000 +//# untrusted memory # +//# shared with host # +//##################### 0x0000001000000000 +//# code & data # +//##################### 
0x0000000000001000 +//# hole # +//##################### 0x0 + +#define ENCLAVE_DEFAULT_KBUFFER_SIZE 0x1000UL +#define ENCLAVE_DEFAULT_KBUFFER 0xffffffe000000000UL +#define ENCLAVE_DEFAULT_STACK_BASE 0x0000004000000000UL +#define ENCLAVE_DEFAULT_MMAP_BASE 0x0000003000000000UL +#define ENCLAVE_DEFAULT_HEAP_BASE 0x0000002000000000UL +#define ENCLAVE_DEFAULT_TEXT_BASE 0x0000000000001000UL + +#define PTE_VALID(pte) (pte & PTE_V) +#define PTE_ILLEGAL(pte) ((pte & PTE_V) && (pte & PTE_W) && !(pte & PTE_R)) +#define PTE_TO_PFN(pte) (pte >> PTE_PPN_SHIFT) +#define IS_LEAF_PTE(pte) ((pte & PTE_V) && (pte & PTE_R || pte & PTE_X)) +#define RISCV_PGLEVELS ((VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS) + +void traverse_vmas(uintptr_t root_page_table, struct vm_area_struct *vma); +int insert_vma(struct vm_area_struct **vma_list, struct vm_area_struct *vma, uintptr_t up_bound); +int delete_vma(struct vm_area_struct **vma_list, struct vm_area_struct *vma); +struct vm_area_struct* find_vma(struct vm_area_struct *vma_list, uintptr_t vaddr, uintptr_t size); +int insert_pma(struct pm_area_struct **pma_list, struct pm_area_struct *pma); +int delete_pma(struct pm_area_struct **pma_list, struct pm_area_struct *pma); + +int check_enclave_layout(uintptr_t root_page_table, uintptr_t va_start, uintptr_t va_end, uintptr_t pa_start, uintptr_t pa_end); +void* va_to_pa(uintptr_t* root_page_table, void* va); + +int mmap(uintptr_t* root_page_table, struct page_t **free_pages, uintptr_t vaddr, uintptr_t paddr, uintptr_t size); +int unmap(uintptr_t* root_page_table, uintptr_t vaddr, uintptr_t size); + +#endif /* _ENCLAVE_VM_H */ diff --git a/sm/server_enclave.c b/sm/server_enclave.c new file mode 100644 index 0000000..9bbf988 --- /dev/null +++ b/sm/server_enclave.c @@ -0,0 +1,310 @@ +#include "sm.h" +#include "enclave.h" +#include "enclave_vm.h" +#include "server_enclave.h" +#include "ipi.h" +#include TARGET_PLATFORM_HEADER + +struct link_mem_t* server_enclave_head = NULL; +struct link_mem_t* 
server_enclave_tail = NULL; + +static int server_name_cmp(char* name1, char* name2) +{ + for(int i=0; inext_link_mem) + { + for(int i = 0; i < (cur->slab_num); i++) + { + server_enclave = (struct server_enclave_t*)(cur->addr) + i; + if(server_enclave->entity && server_name_cmp(server_name, server_enclave->server_name)==0) + { + printm("server already existed!\r\n"); + server_enclave = (void*)(-1UL); + goto failed; + } + } + } + + found = 0; + for(cur = server_enclave_head; cur != NULL; cur = cur->next_link_mem) + { + for(int i = 0; i < (cur->slab_num); i++) + { + server_enclave = (struct server_enclave_t*)(cur->addr) + i; + if(!(server_enclave->entity)) + { + memcpy(server_enclave->server_name, server_name, NAME_LEN); + server_enclave->entity = enclave; + found = 1; + break; + } + } + if(found) + break; + } + + //don't have enough enclave metadata + if(!found) + { + next = add_link_mem(&server_enclave_tail); + if(next == NULL) + { + printm("M mode: __alloc_server_enclave: don't have enough mem\r\n"); + server_enclave = NULL; + goto failed; + } + server_enclave = (struct server_enclave_t*)(next->addr); + memcpy(server_enclave->server_name, server_name, NAME_LEN); + server_enclave->entity = enclave; + } + + return server_enclave; + + failed: + if(enclave) + free_enclave(enclave->eid); + if(server_enclave) + memset((void*)server_enclave, 0, sizeof(struct server_enclave_t)); + + return NULL; +} + + +static struct server_enclave_t* __get_server_enclave_by_name(char* server_name) +{ + struct link_mem_t *cur; + struct server_enclave_t *server_enclave; + int i, found; + + found = 0; + for(cur = server_enclave_head; cur != NULL; cur = cur->next_link_mem) + { + for(int i=0; i < (cur->slab_num); ++i) + { + server_enclave = (struct server_enclave_t*)(cur->addr) + i; + if(server_enclave->entity && server_name_cmp(server_enclave->server_name, server_name)==0) + { + found = 1; + break; + } + } + } + + //haven't alloc this eid + if(!found) + { + printm("M mode: 
__get_server_enclave_by_name: haven't alloc this enclave:%s\r\n", server_name); + server_enclave = NULL; + } + + return server_enclave; +} + +/**************************************************************/ +/* called by host */ +/**************************************************************/ +uintptr_t create_server_enclave(struct enclave_sbi_param_t create_args) +{ + struct enclave_t* enclave = NULL; + struct server_enclave_t* server_enclave = NULL; + uintptr_t ret = 0; + + acquire_enclave_metadata_lock(); + + if((create_args.paddr & (RISCV_PGSIZE - 1)) || (create_args.size & (RISCV_PGSIZE - 1)) || create_args.size < RISCV_PGSIZE) + { + ret = ENCLAVE_ERROR; + goto failed; + } + + if(check_enclave_layout(create_args.paddr + RISCV_PGSIZE, 0, -1UL, create_args.paddr, create_args.paddr + create_args.size) != 0) + { + ret = ENCLAVE_ERROR; + goto failed; + } + + server_enclave = __alloc_server_enclave(create_args.name); + if(server_enclave == (void*)(-1UL)) + { + ret = ENCLAVE_ERROR; + goto failed; + } + if(!server_enclave) + { + //printm("create_server_enclave: no mem\r\n"); + ret = ENCLAVE_NO_MEMORY; + goto failed; + } + + enclave = server_enclave->entity; + enclave->paddr = create_args.paddr; + enclave->size = create_args.size; + enclave->entry_point = create_args.entry_point; + enclave->free_mem = create_args.free_mem; + enclave->ocall_func_id = create_args.ecall_arg0; + enclave->ocall_arg0 = create_args.ecall_arg1; + enclave->ocall_arg1 = create_args.ecall_arg2; + enclave->ocall_syscall_num = create_args.ecall_arg3; + enclave->host_ptbr = read_csr(satp); + enclave->root_page_table = create_args.paddr + RISCV_PGSIZE; + enclave->thread_context.encl_ptbr = ((create_args.paddr + RISCV_PGSIZE) >> (RISCV_PGSHIFT) | SATP_MODE_CHOICE); + enclave->type = SERVER_ENCLAVE; + //we directly set server_enclave's state as RUNNABLE as it won't be called by run_enclave call + enclave->state = RUNNABLE; + enclave->caller_eid = -1; + enclave->top_caller_eid = -1; + 
enclave->cur_callee_eid = -1; + + //traverse vmas + struct pm_area_struct* pma = (struct pm_area_struct*)(create_args.paddr); + struct vm_area_struct* vma = (struct vm_area_struct*)(create_args.paddr + sizeof(struct pm_area_struct)); + pma->paddr = create_args.paddr; + pma->size = create_args.size; + pma->free_mem = create_args.free_mem; + if(pma->free_mem < pma->paddr || pma->free_mem >= pma->paddr+pma->size + || pma->free_mem & ((1<pm_next = NULL; + enclave->pma_list = pma; + traverse_vmas(enclave->root_page_table, vma); + + while(vma) + { + if(vma->va_start == ENCLAVE_DEFAULT_TEXT_BASE) + { + enclave->text_vma = vma; + } + if(vma->va_end == ENCLAVE_DEFAULT_STACK_BASE) + { + enclave->stack_vma = vma; + enclave->_stack_top = enclave->stack_vma->va_start; + } + vma->pma = pma; + vma = vma->vm_next; + } + if(enclave->text_vma) + enclave->text_vma->vm_next = NULL; + if(enclave->stack_vma) + enclave->stack_vma->vm_next = NULL; + enclave->_heap_top = ENCLAVE_DEFAULT_HEAP_BASE; + enclave->heap_vma = NULL; + enclave->mmap_vma = NULL; + + enclave->free_pages = NULL; + enclave->free_pages_num = 0; + uintptr_t free_mem = create_args.paddr + create_args.size - RISCV_PGSIZE; + while(free_mem >= create_args.free_mem) + { + struct page_t *page = (struct page_t*)free_mem; + page->paddr = free_mem; + page->next = enclave->free_pages; + enclave->free_pages = page; + enclave->free_pages_num += 1; + free_mem -= RISCV_PGSIZE; + } + + copy_word_to_host((unsigned int*)create_args.eid_ptr, enclave->eid); + release_enclave_metadata_lock(); + return ret; + +failed: + release_enclave_metadata_lock(); + printm("M MODE: acquire encalve failed\r\n"); + return ret; +} + +//host call this function to destroy an existing enclave +uintptr_t destroy_server_enclave(uintptr_t* regs, unsigned int eid) +{ + return 0; +} + +/**************************************************************/ +/* called by enclave */ +/**************************************************************/ +uintptr_t 
acquire_server_enclave(uintptr_t *regs, char* server_name_u) +{ + uintptr_t ret = 0; + struct enclave_t *enclave = NULL; + struct server_enclave_t *server_enclave = NULL; + char *server_name = NULL; + int eid = 0; + if(check_in_enclave_world() < 0) + { + return -1UL; + } + + acquire_enclave_metadata_lock(); + + eid = get_curr_enclave_id(); + enclave = get_enclave(eid); + if(!enclave) + { + ret = -1UL; + goto failed; + } + + server_name = va_to_pa((uintptr_t*)(enclave->root_page_table), server_name_u); + if(!server_name) + { + ret = -1UL; + goto failed; + } + printm("server_enclave: after get server_name server_name is: %s\r\n", server_name); + + server_enclave = __get_server_enclave_by_name(server_name); + if(!server_enclave) + { + ret = -1UL; + goto failed; + } + ret = server_enclave->entity->eid; + + release_enclave_metadata_lock(); + printm("M MODE: acquire encalve success ret %d\r\n", ret); + return ret; + +failed: + release_enclave_metadata_lock(); + printm("M MODE: acquire encalve failed\r\n"); + return ret; +} diff --git a/sm/server_enclave.h b/sm/server_enclave.h new file mode 100644 index 0000000..4e3aad4 --- /dev/null +++ b/sm/server_enclave.h @@ -0,0 +1,19 @@ +#ifndef _SERVER_ENCLAVE_H +#define _SERVER_ENCLAVE_H + +#include "enclave.h" +#include "enclave_args.h" + +struct server_enclave_t +{ + char server_name[NAME_LEN]; + struct enclave_t* entity; +}; + +#define SERVERS_PER_METADATA_REGION 100 + +uintptr_t create_server_enclave(struct enclave_sbi_param_t create_args); +uintptr_t destroy_server_enclave(uintptr_t* regs, unsigned int eid); +uintptr_t acquire_server_enclave(uintptr_t *regs, char *server_name); + +#endif /* _SERVER_ENCLAVE_H */ diff --git a/sm/sm.c b/sm/sm.c index d14d5d9..80ec021 100644 --- a/sm/sm.c +++ b/sm/sm.c @@ -3,6 +3,7 @@ #include "pmp.h" #include "enclave.h" #include "math.h" +#include "server_enclave.h" static int sm_initialized = 0; static spinlock_t sm_init_lock = SPINLOCK_INIT; @@ -122,7 +123,6 @@ uintptr_t 
sm_resume_enclave(uintptr_t* regs, unsigned long eid) { uintptr_t retval = 0; uintptr_t resume_func_id = regs[11]; - switch(resume_func_id) { case RESUME_FROM_TIMER_IRQ: @@ -136,6 +136,9 @@ uintptr_t sm_resume_enclave(uintptr_t* regs, unsigned long eid) //printm("resume from stop\r\n"); retval = resume_from_stop(regs, eid); break; + case RESUME_FROM_OCALL: + retval = resume_from_ocall(regs, eid); + break; default: break; } @@ -152,6 +155,28 @@ uintptr_t sm_exit_enclave(uintptr_t* regs, unsigned long retval) return ret; } +uintptr_t sm_enclave_ocall(uintptr_t* regs, uintptr_t ocall_id, uintptr_t arg0, uintptr_t arg1) +{ + // printm("into sm_enclave_ocall: %d\r\n", ocall_id); + uintptr_t ret = 0; + switch(ocall_id) + { + case OCALL_MMAP: + ret = enclave_mmap(regs, arg0, arg1); + break; + case OCALL_UNMAP: + ret = enclave_unmap(regs, arg0, arg1); + break; + case OCALL_SYS_WRITE: + ret = enclave_sys_write(regs); + break; + default: + ret = -1UL; + break; + } + return ret; +} + uintptr_t sm_do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) { uintptr_t ret; @@ -160,3 +185,54 @@ uintptr_t sm_do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) return ret; } + +uintptr_t sm_server_enclave_acquire(uintptr_t *regs, uintptr_t server_name) +{ + uintptr_t ret = 0; + + ret = acquire_server_enclave(regs, (char*)server_name); + + return ret; +} + +uintptr_t sm_call_enclave(uintptr_t* regs, uintptr_t eid, uintptr_t arg) +{ + uintptr_t retval = 0; + + retval = call_enclave(regs, (unsigned int)eid, arg); + + return retval; +} + +uintptr_t sm_enclave_return(uintptr_t* regs, uintptr_t arg) +{ + uintptr_t ret = 0; + + ret = enclave_return(regs, arg); + + return ret; +} + +uintptr_t sm_create_server_enclave(uintptr_t enclave_sbi_param) +{ + struct enclave_sbi_param_t enclave_sbi_param_local; + uintptr_t retval = 0; + retval = copy_from_host(&enclave_sbi_param_local, + (struct enclave_sbi_param_t*)enclave_sbi_param, + sizeof(struct enclave_sbi_param_t)); + 
if(retval != 0) + return ENCLAVE_ERROR; + + retval = create_server_enclave(enclave_sbi_param_local); + + return retval; +} + +uintptr_t sm_destroy_server_enclave(uintptr_t *regs, uintptr_t enclave_id) +{ + uintptr_t ret = 0; + + ret = destroy_server_enclave(regs, enclave_id); + + return ret; +} diff --git a/sm/sm.h b/sm/sm.h index 4320e58..7d1fbcb 100644 --- a/sm/sm.h +++ b/sm/sm.h @@ -27,6 +27,11 @@ #define SBI_ENCLAVE_OCALL 90 #define SBI_EXIT_ENCLAVE 89 #define SBI_DEBUG_PRINT 88 +#define SBI_ACQUIRE_SERVER 87 +#define SBI_CALL_ENCLAVE 86 +#define SBI_ENCLAVE_RETURN 85 +#define SBI_CREATE_SERVER_ENCLAVE 84 +#define SBI_DESTROY_SERVER_ENCLAVE 83 //Error code of SBI_ALLOC_ENCLAVE_MEM #define ENCLAVE_NO_MEMORY -2 @@ -34,9 +39,21 @@ #define ENCLAVE_SUCCESS 0 #define ENCLAVE_TIMER_IRQ 1 +//Error code of SBI_RUN_ENCLAVE +//#define ENCLAVE_ERROR -1 +#define ENCLAVE_SUCCESS 0 +#define ENCLAVE_TIMER_IRQ 1 +#define ENCLAVE_OCALL 2 + //error code of SBI_RESUME_RNCLAVE #define RESUME_FROM_TIMER_IRQ 2000 #define RESUME_FROM_STOP 2003 +#define RESUME_FROM_OCALL 2004 + +//ENCLAVE OCALL NUMVERS +#define OCALL_MMAP 1 +#define OCALL_UNMAP 2 +#define OCALL_SYS_WRITE 3 void sm_init(); @@ -60,7 +77,7 @@ uintptr_t sm_resume_enclave(uintptr_t *regs, uintptr_t enclave_id); uintptr_t sm_destroy_enclave(uintptr_t *regs, uintptr_t enclave_id, uintptr_t destroy_flag); -uintptr_t sm_enclave_ocall(uintptr_t *regs, uintptr_t ocall_func_id, uintptr_t arg); +uintptr_t sm_enclave_ocall(uintptr_t *regs, uintptr_t ocall_func_id, uintptr_t arg0, uintptr_t arg1); uintptr_t sm_exit_enclave(uintptr_t *regs, unsigned long retval); @@ -68,4 +85,14 @@ uintptr_t sm_do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc); int check_in_enclave_world(); +uintptr_t sm_server_enclave_acquire(uintptr_t *regs, uintptr_t server_name); + +uintptr_t sm_call_enclave(uintptr_t *regs, uintptr_t enclave_id, uintptr_t arg); + +uintptr_t sm_enclave_return(uintptr_t *regs, uintptr_t arg); + +uintptr_t 
sm_create_server_enclave(uintptr_t enclave_create_args); + +uintptr_t sm_destroy_server_enclave(uintptr_t *regs, uintptr_t enclave_id); + #endif /* _SM_H */ diff --git a/sm/sm.mk.in b/sm/sm.mk.in index 7b016eb..f8fea96 100644 --- a/sm/sm.mk.in +++ b/sm/sm.mk.in @@ -5,7 +5,9 @@ sm_hdrs = \ enclave.h \ platform/@TARGET_PLATFORM@/platform.h \ thread.h \ - math.h + math.h \ + server_enclave.h \ + enclave_vm.h sm_c_srcs = \ ipi.c \ @@ -14,7 +16,9 @@ sm_c_srcs = \ sm.c \ enclave.c \ thread.c \ - math.c + math.c \ + server_enclave.c \ + enclave_vm.c sm_asm_srcs = \ diff --git a/sm/thread.h b/sm/thread.h index 1d3db91..6cbc1e8 100644 --- a/sm/thread.h +++ b/sm/thread.h @@ -3,27 +3,6 @@ #include -//default layout of enclave -//##################### -//# reserved for # -//# s mode # -//##################### 0xffffffe000000000 -//# hole # -//##################### 0x0000004000000000 -//# stack # -//# # -//# heap # -//##################### 0x0000002000000000 -//# untrusted memory # -//# shared with host # -//##################### 0x0000001000000000 -//# code & data # -//##################### 0x0000000000001000 -//# hole # -//##################### 0x0 - -#define ENCLAVE_DEFAULT_STACK 0x0000004000000000; - #define N_GENERAL_REGISTERS 32 struct general_registers_t