diff --git a/0001-penglai-opensbi-version-0.2.patch b/0001-penglai-opensbi-version-0.2.patch new file mode 100644 index 0000000..6e0ed83 --- /dev/null +++ b/0001-penglai-opensbi-version-0.2.patch @@ -0,0 +1,12865 @@ +From 5e48eadf63bf7fabb61271b7e65129646cfc01b6 Mon Sep 17 00:00:00 2001 +From: anonymous <2748250768@qq.com> +Date: Thu, 18 Mar 2021 13:41:07 +0800 +Subject: [PATCH] penglai opensbi version 0.2 + +--- + include/sbi/riscv_encoding.h | 20 + + include/sbi/riscv_locks.h | 2 + + include/sbi/sbi_console.h | 2 + + include/sbi/sbi_ecall.h | 3 + + include/sbi/sbi_ecall_interface.h | 3 + + include/sbi/sbi_ipi.h | 10 + + include/sbi/sbi_ipi_destroy_enclave.h | 18 + + include/sbi/sbi_math.h | 74 + + include/sbi/sbi_platform.h | 1 + + include/sbi/sbi_pmp.h | 10 + + include/sbi/sbi_tvm.h | 16 + + include/sbi/sbi_types.h | 1 + + include/sm/attest.h | 16 + + include/sm/enclave.h | 250 ++ + include/sm/enclave_args.h | 155 + + include/sm/enclave_mm.h | 25 + + include/sm/enclave_vm.h | 88 + + include/sm/encoding.h | 1472 +++++++++ + include/sm/gm/big.h | 88 + + include/sm/gm/ecc.h | 38 + + include/sm/gm/random.h | 8 + + include/sm/gm/sm2.h | 34 + + include/sm/gm/sm3.h | 103 + + include/sm/gm/typedef.h | 164 + + include/sm/ipi.h | 26 + + include/sm/platform/pt_area/platform.h | 9 + + include/sm/platform/pt_area/platform_thread.h | 18 + + include/sm/pmp.h | 66 + + include/sm/relay_page.h | 12 + + include/sm/server_enclave.h | 21 + + include/sm/sm.h | 144 + + include/sm/thread.h | 66 + + include/sm/vm.h | 36 + + lib/sbi/objects.mk | 20 + + lib/sbi/sbi_ecall.c | 115 +- + lib/sbi/sbi_ecall_penglai.c | 98 + + lib/sbi/sbi_hart.c | 9 +- + lib/sbi/sbi_illegal_insn.c | 59 + + lib/sbi/sbi_init.c | 27 + + lib/sbi/sbi_ipi.c | 30 + + lib/sbi/sbi_ipi_destroy_enclave.c | 142 + + lib/sbi/sbi_pmp.c | 123 + + lib/sbi/sbi_trap.c | 66 +- + lib/sbi/sbi_tvm.c | 141 + + lib/sbi/sm/.gitignore | 1 + + lib/sbi/sm/attest.c | 124 + + lib/sbi/sm/enclave.c | 2725 +++++++++++++++++ + lib/sbi/sm/enclave_mm.c | 195 ++ + lib/sbi/sm/enclave_vm.c | 596 ++++ + lib/sbi/sm/gm/big.c | 853 ++++++ + lib/sbi/sm/gm/ecc.c | 356 +++ + lib/sbi/sm/gm/random.c | 18 + + lib/sbi/sm/gm/sm2.c | 603 ++++ + lib/sbi/sm/gm/sm3.c | 325 ++ + lib/sbi/sm/platform/pt_area/platform.c | 31 + + lib/sbi/sm/platform/pt_area/platform_thread.c | 56 + + lib/sbi/sm/pmp.c | 240 ++ + lib/sbi/sm/relay_page.c | 221 ++ + lib/sbi/sm/server_enclave.c | 469 +++ + lib/sbi/sm/sm.ac | 9 + + lib/sbi/sm/sm.c | 1374 +++++++++ + lib/sbi/sm/thread.c | 101 + + 62 files changed, 12104 insertions(+), 22 deletions(-) + create mode 100644 include/sbi/sbi_ipi_destroy_enclave.h + create mode 100644 include/sbi/sbi_pmp.h + create mode 100644 include/sbi/sbi_tvm.h + create mode 100644 include/sm/attest.h + create mode 100644 include/sm/enclave.h + create mode 100644 include/sm/enclave_args.h + create mode 100644 include/sm/enclave_mm.h + create mode 100644 include/sm/enclave_vm.h + create mode 100644 include/sm/encoding.h + create mode 100644 include/sm/gm/big.h + create mode 100644 include/sm/gm/ecc.h + create mode 100644 include/sm/gm/random.h + create mode 100644 include/sm/gm/sm2.h + create mode 100644 include/sm/gm/sm3.h + create mode 100644 include/sm/gm/typedef.h + create mode 100644 include/sm/ipi.h + create mode 100644 include/sm/platform/pt_area/platform.h + create mode 100644 include/sm/platform/pt_area/platform_thread.h + create mode 100644 include/sm/pmp.h + create mode 100644 include/sm/relay_page.h + create mode 100644 include/sm/server_enclave.h + create mode 100644 
include/sm/sm.h
+ create mode 100644 include/sm/thread.h
+ create mode 100644 include/sm/vm.h
+ create mode 100644 lib/sbi/sbi_ecall_penglai.c
+ create mode 100644 lib/sbi/sbi_ipi_destroy_enclave.c
+ create mode 100644 lib/sbi/sbi_pmp.c
+ create mode 100644 lib/sbi/sbi_tvm.c
+ create mode 100644 lib/sbi/sm/.gitignore
+ create mode 100644 lib/sbi/sm/attest.c
+ create mode 100644 lib/sbi/sm/enclave.c
+ create mode 100644 lib/sbi/sm/enclave_mm.c
+ create mode 100644 lib/sbi/sm/enclave_vm.c
+ create mode 100644 lib/sbi/sm/gm/big.c
+ create mode 100644 lib/sbi/sm/gm/ecc.c
+ create mode 100644 lib/sbi/sm/gm/random.c
+ create mode 100644 lib/sbi/sm/gm/sm2.c
+ create mode 100644 lib/sbi/sm/gm/sm3.c
+ create mode 100644 lib/sbi/sm/platform/pt_area/platform.c
+ create mode 100644 lib/sbi/sm/platform/pt_area/platform_thread.c
+ create mode 100644 lib/sbi/sm/pmp.c
+ create mode 100644 lib/sbi/sm/relay_page.c
+ create mode 100644 lib/sbi/sm/server_enclave.c
+ create mode 100644 lib/sbi/sm/sm.ac
+ create mode 100644 lib/sbi/sm/sm.c
+ create mode 100644 lib/sbi/sm/thread.c
+
+diff --git a/include/sbi/riscv_encoding.h b/include/sbi/riscv_encoding.h
+index 827c86c..7a4908c 100644
+--- a/include/sbi/riscv_encoding.h
++++ b/include/sbi/riscv_encoding.h
+@@ -577,6 +577,26 @@
+ (s32)(((insn) >> 7) & 0x1f))
+ #define MASK_FUNCT3 0x7000
+
++/* penglai definitions */
++#define PTE_PPN_SHIFT 10
++#define PTE_V 0x001 // Valid
++#define PTE_R 0x002 // Read
++#define PTE_W 0x004 // Write
++#define PTE_X 0x008 // Execute
++#define PTE_U 0x010 // User
++#define PTE_G 0x020 // Global
++#define PTE_A 0x040 // Accessed
++#define PTE_D 0x080 // Dirty
++#define PTE_SOFT 0x300 // Reserved for Software
++
++//TODO:
++//riscv64 page config
++#define RISCV_PGSHIFT 12
++#define RISCV_PGSIZE (1 << RISCV_PGSHIFT)
++#define RISCV_PTENUM 512
++#define RISCV_PGLEVEL_BITS 9
++
+ /* clang-format on */
+
++#define DRAM_BASE 0x80000000
+ #endif
+diff --git a/include/sbi/riscv_locks.h b/include/sbi/riscv_locks.h
+index 55da7c0..60076e7 100644
+--- a/include/sbi/riscv_locks.h
++++ b/include/sbi/riscv_locks.h
+@@ -14,6 +14,8 @@ typedef struct {
+ volatile long lock;
+ } spinlock_t;
+
++#define SPINLOCK_INIT {0}
++
+ #define __RISCV_SPIN_UNLOCKED 0
+
+ #define SPIN_LOCK_INIT(_lptr) (_lptr)->lock = __RISCV_SPIN_UNLOCKED
+diff --git a/include/sbi/sbi_console.h b/include/sbi/sbi_console.h
+index 7d648f0..e64a862 100644
+--- a/include/sbi/sbi_console.h
++++ b/include/sbi/sbi_console.h
+@@ -36,4 +36,6 @@ struct sbi_scratch;
+
+ int sbi_console_init(struct sbi_scratch *scratch);
+
++#define sbi_bug(fmt, ...) sbi_printf("[ERROR] "fmt, ##__VA_ARGS__)
++#define sbi_debug(fmt, ...) 
sbi_printf("[DEBUG] "fmt, ##__VA_ARGS__) + #endif +diff --git a/include/sbi/sbi_ecall.h b/include/sbi/sbi_ecall.h +index 3273ba6..6111ec9 100644 +--- a/include/sbi/sbi_ecall.h ++++ b/include/sbi/sbi_ecall.h +@@ -37,6 +37,7 @@ extern struct sbi_ecall_extension ecall_rfence; + extern struct sbi_ecall_extension ecall_ipi; + extern struct sbi_ecall_extension ecall_vendor; + extern struct sbi_ecall_extension ecall_hsm; ++extern struct sbi_ecall_extension ecall_pengali; + + u16 sbi_ecall_version_major(void); + +@@ -52,6 +53,8 @@ int sbi_ecall_register_extension(struct sbi_ecall_extension *ext); + + void sbi_ecall_unregister_extension(struct sbi_ecall_extension *ext); + ++int enclave_call_trap(struct sbi_trap_regs* regs); ++ + int sbi_ecall_handler(struct sbi_trap_regs *regs); + + int sbi_ecall_init(void); +diff --git a/include/sbi/sbi_ecall_interface.h b/include/sbi/sbi_ecall_interface.h +index af30500..123c1e5 100644 +--- a/include/sbi/sbi_ecall_interface.h ++++ b/include/sbi/sbi_ecall_interface.h +@@ -27,6 +27,9 @@ + #define SBI_EXT_IPI 0x735049 + #define SBI_EXT_RFENCE 0x52464E43 + #define SBI_EXT_HSM 0x48534D ++//TODO: ++//Why this magic number ++#define SBI_EXT_PENGLAI 0x100100 + + /* SBI function IDs for BASE extension*/ + #define SBI_EXT_BASE_GET_SPEC_VERSION 0x0 +diff --git a/include/sbi/sbi_ipi.h b/include/sbi/sbi_ipi.h +index 617872c..b051bd3 100644 +--- a/include/sbi/sbi_ipi.h ++++ b/include/sbi/sbi_ipi.h +@@ -11,6 +11,7 @@ + #define __SBI_IPI_H__ + + #include ++#include + + /* clang-format off */ + +@@ -47,6 +48,13 @@ struct sbi_ipi_event_ops { + * remote HART after IPI is triggered. + */ + void (* process)(struct sbi_scratch *scratch); ++ ++ /** ++ * Process callback to handle IPI event in the enclave ++ * Note: This is a mandatory callback and it is called on the ++ * remote HART after IPI is triggered. ++ */ ++ void (* e_process)(struct sbi_scratch *scratch, struct sbi_trap_regs* regs); + }; + + int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data); +@@ -63,6 +71,8 @@ int sbi_ipi_send_halt(ulong hmask, ulong hbase); + + void sbi_ipi_process(void); + ++void sbi_ipi_process_in_enclave(struct sbi_trap_regs* regs); ++ + int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot); + + void sbi_ipi_exit(struct sbi_scratch *scratch); +diff --git a/include/sbi/sbi_ipi_destroy_enclave.h b/include/sbi/sbi_ipi_destroy_enclave.h +new file mode 100644 +index 0000000..da8c9a3 +--- /dev/null ++++ b/include/sbi/sbi_ipi_destroy_enclave.h +@@ -0,0 +1,18 @@ ++#ifndef __SBI_IPI_DESTROY_ENCLAVE_H__ ++#define __SBI_IPI_DESTROY_ENCLAVE_H__ ++ ++#include ++#include ++ ++struct ipi_destroy_enclave_data_t ++{ ++ ulong host_ptbr; ++ int enclave_id; ++ struct sbi_hartmask smask; ++}; ++ ++struct sbi_scratch; ++int sbi_ipi_destroy_enclave_init(struct sbi_scratch *scratch, bool cold_boot); ++int sbi_send_ipi_destroy_enclave(ulong hmask, ulong hbase, struct ipi_destroy_enclave_data_t* ipi_destroy_enclave_data); ++void set_ipi_destroy_enclave_and_sync(u32 remote_hart,ulong host_ptbr, int enclave_id); ++#endif +\ No newline at end of file +diff --git a/include/sbi/sbi_math.h b/include/sbi/sbi_math.h +index 564fd58..664ab0a 100644 +--- a/include/sbi/sbi_math.h ++++ b/include/sbi/sbi_math.h +@@ -12,4 +12,78 @@ + + unsigned long log2roundup(unsigned long x); + ++#define ilog2(n) \ ++( \ ++ (n) < 2 ? 0 : \ ++ (n) & (1ULL << 63) ? 63 : \ ++ (n) & (1ULL << 62) ? 62 : \ ++ (n) & (1ULL << 61) ? 61 : \ ++ (n) & (1ULL << 60) ? 60 : \ ++ (n) & (1ULL << 59) ? 59 : \ ++ (n) & (1ULL << 58) ? 
58 : \ ++ (n) & (1ULL << 57) ? 57 : \ ++ (n) & (1ULL << 56) ? 56 : \ ++ (n) & (1ULL << 55) ? 55 : \ ++ (n) & (1ULL << 54) ? 54 : \ ++ (n) & (1ULL << 53) ? 53 : \ ++ (n) & (1ULL << 52) ? 52 : \ ++ (n) & (1ULL << 51) ? 51 : \ ++ (n) & (1ULL << 50) ? 50 : \ ++ (n) & (1ULL << 49) ? 49 : \ ++ (n) & (1ULL << 48) ? 48 : \ ++ (n) & (1ULL << 47) ? 47 : \ ++ (n) & (1ULL << 46) ? 46 : \ ++ (n) & (1ULL << 45) ? 45 : \ ++ (n) & (1ULL << 44) ? 44 : \ ++ (n) & (1ULL << 43) ? 43 : \ ++ (n) & (1ULL << 42) ? 42 : \ ++ (n) & (1ULL << 41) ? 41 : \ ++ (n) & (1ULL << 40) ? 40 : \ ++ (n) & (1ULL << 39) ? 39 : \ ++ (n) & (1ULL << 38) ? 38 : \ ++ (n) & (1ULL << 37) ? 37 : \ ++ (n) & (1ULL << 36) ? 36 : \ ++ (n) & (1ULL << 35) ? 35 : \ ++ (n) & (1ULL << 34) ? 34 : \ ++ (n) & (1ULL << 33) ? 33 : \ ++ (n) & (1ULL << 32) ? 32 : \ ++ (n) & (1ULL << 31) ? 31 : \ ++ (n) & (1ULL << 30) ? 30 : \ ++ (n) & (1ULL << 29) ? 29 : \ ++ (n) & (1ULL << 28) ? 28 : \ ++ (n) & (1ULL << 27) ? 27 : \ ++ (n) & (1ULL << 26) ? 26 : \ ++ (n) & (1ULL << 25) ? 25 : \ ++ (n) & (1ULL << 24) ? 24 : \ ++ (n) & (1ULL << 23) ? 23 : \ ++ (n) & (1ULL << 22) ? 22 : \ ++ (n) & (1ULL << 21) ? 21 : \ ++ (n) & (1ULL << 20) ? 20 : \ ++ (n) & (1ULL << 19) ? 19 : \ ++ (n) & (1ULL << 18) ? 18 : \ ++ (n) & (1ULL << 17) ? 17 : \ ++ (n) & (1ULL << 16) ? 16 : \ ++ (n) & (1ULL << 15) ? 15 : \ ++ (n) & (1ULL << 14) ? 14 : \ ++ (n) & (1ULL << 13) ? 13 : \ ++ (n) & (1ULL << 12) ? 12 : \ ++ (n) & (1ULL << 11) ? 11 : \ ++ (n) & (1ULL << 10) ? 10 : \ ++ (n) & (1ULL << 9) ? 9 : \ ++ (n) & (1ULL << 8) ? 8 : \ ++ (n) & (1ULL << 7) ? 7 : \ ++ (n) & (1ULL << 6) ? 6 : \ ++ (n) & (1ULL << 5) ? 5 : \ ++ (n) & (1ULL << 4) ? 4 : \ ++ (n) & (1ULL << 3) ? 3 : \ ++ (n) & (1ULL << 2) ? 2 : \ ++ 1 \ ++) ++ ++#define power_2_align(n) (1 << (ilog2(n-1)+1)) ++ ++#define size_down_align(n, size) (n - ((n) % (size))) ++ ++#define size_up_align(n, size) (size_down_align(n, size) + ((n) % (size) ? 
(size) : 0))
++
+ #endif
+diff --git a/include/sbi/sbi_platform.h b/include/sbi/sbi_platform.h
+index 8087148..82de38e 100644
+--- a/include/sbi/sbi_platform.h
++++ b/include/sbi/sbi_platform.h
+@@ -45,6 +45,7 @@
+ #include 
+ #include 
+ #include 
++#include 
+
+ /** Possible feature flags of a platform */
+ enum sbi_platform_features {
+diff --git a/include/sbi/sbi_pmp.h b/include/sbi/sbi_pmp.h
+new file mode 100644
+index 0000000..4b4523b
+--- /dev/null
++++ b/include/sbi/sbi_pmp.h
+@@ -0,0 +1,10 @@
++#ifndef __SBI_PMP_H__
++#define __SBI_PMP_H__
++
++#include "sm/pmp.h"
++#include <sbi/sbi_types.h>
++#include <sbi/sbi_hartmask.h>
++struct sbi_scratch;
++int sbi_pmp_init(struct sbi_scratch *scratch, bool cold_boot);
++int sbi_send_pmp(ulong hmask, ulong hbase, struct pmp_data_t* pmp_data);
++#endif
+\ No newline at end of file
+diff --git a/include/sbi/sbi_tvm.h b/include/sbi/sbi_tvm.h
+new file mode 100644
+index 0000000..04839c6
+--- /dev/null
++++ b/include/sbi/sbi_tvm.h
+@@ -0,0 +1,16 @@
++#ifndef __SBI_TVM_H__
++#define __SBI_TVM_H__
++
++#include <sbi/sbi_types.h>
++#include <sbi/sbi_hartmask.h>
++
++struct tvm_data_t
++{
++ struct sbi_hartmask smask;
++};
++
++struct sbi_scratch;
++int sbi_tvm_init(struct sbi_scratch *scratch, bool cold_boot);
++int sbi_send_tvm(ulong hmask, ulong hbase, struct tvm_data_t* tvm_data);
++void set_tvm_and_sync();
++#endif
+\ No newline at end of file
+diff --git a/include/sbi/sbi_types.h b/include/sbi/sbi_types.h
+index 0952d5c..b004419 100644
+--- a/include/sbi/sbi_types.h
++++ b/include/sbi/sbi_types.h
+@@ -47,6 +47,7 @@ typedef unsigned long long uint64_t;
+ typedef int bool;
+ typedef unsigned long ulong;
+ typedef unsigned long uintptr_t;
++typedef long intptr_t;
+ typedef unsigned long size_t;
+ typedef long ssize_t;
+ typedef unsigned long virtual_addr_t;
+diff --git a/include/sm/attest.h b/include/sm/attest.h
+new file mode 100644
+index 0000000..df18eac
+--- /dev/null
++++ b/include/sm/attest.h
+@@ -0,0 +1,16 @@
++#ifndef _ATTEST_H
++#define _ATTEST_H
++
++#include "sm/enclave.h"
++
++void hash_enclave(struct enclave_t* enclave, void* hash, uintptr_t nonce);
++
++void hash_shadow_enclave(struct shadow_enclave_t* enclave, void* hash, uintptr_t nonce);
++
++void update_hash_shadow_enclave(struct shadow_enclave_t *enclave, void* hash, uintptr_t nonce_arg);
++
++void sign_enclave(void* signature, void* hash);
++
++int verify_enclave(void* signature, void* hash);
++
++#endif /* _ATTEST_H */
+diff --git a/include/sm/enclave.h b/include/sm/enclave.h
+new file mode 100644
+index 0000000..39275e5
+--- /dev/null
++++ b/include/sm/enclave.h
+@@ -0,0 +1,250 @@
++#ifndef _ENCLAVE_H
++#define _ENCLAVE_H
++
++#include "sbi/riscv_encoding.h"
++#include "sm/enclave_args.h"
++#include "sbi/riscv_atomic.h"
++#include "sbi/riscv_locks.h"
++#include "sbi/sbi_string.h"
++#include "sbi/riscv_asm.h"
++#include "sbi/sbi_types.h"
++#include "sm/thread.h"
++#include "sm/vm.h"
++
++
++
++#define ENCLAVES_PER_METADATA_REGION 100
++#define ENCLAVE_METADATA_REGION_SIZE ((sizeof(struct enclave_t)) * ENCLAVES_PER_METADATA_REGION)
++#define SHADOW_ENCLAVE_METADATA_REGION_SIZE ((sizeof(struct shadow_enclave_t)) * ENCLAVES_PER_METADATA_REGION)
++#define RELAY_PAGE_NUM 5
++#define MAX_HARTS 8
++#define ENCLAVE_MODE 1
++#define NORMAL_MODE 0
++
++#define SET_ENCLAVE_METADATA(point, enclave, create_args, struct_type, base) do { \
++ enclave->entry_point = point; \
++ enclave->ocall_func_id = ((struct_type)create_args)->ecall_arg0; \
++ enclave->ocall_arg0 = ((struct_type)create_args)->ecall_arg1; \
++ enclave->ocall_arg1 = 
((struct_type)create_args)->ecall_arg2; \ ++ enclave->ocall_syscall_num = ((struct_type)create_args)->ecall_arg3; \ ++ enclave->kbuffer = ((struct_type)create_args)->kbuffer; \ ++ enclave->kbuffer_size = ((struct_type)create_args)->kbuffer_size; \ ++ enclave->shm_paddr = ((struct_type)create_args)->shm_paddr; \ ++ enclave->shm_size = ((struct_type)create_args)->shm_size; \ ++ enclave->host_ptbr = csr_read(CSR_SATP); \ ++ enclave->root_page_table = ((struct_type)create_args)->base + RISCV_PGSIZE; \ ++ enclave->thread_context.encl_ptbr = ((((struct_type)create_args)->base+RISCV_PGSIZE) >> RISCV_PGSHIFT) | SATP_MODE_CHOICE; \ ++ enclave->type = NORMAL_ENCLAVE; \ ++ enclave->state = FRESH; \ ++ enclave->caller_eid = -1; \ ++ enclave->top_caller_eid = -1; \ ++ enclave->cur_callee_eid = -1; \ ++ sbi_memcpy(enclave->enclave_name, ((struct_type)create_args)->name, NAME_LEN); \ ++} while(0) ++ ++struct link_mem_t ++{ ++ unsigned long mem_size; ++ unsigned long slab_size; ++ unsigned long slab_num; ++ char* addr; ++ struct link_mem_t* next_link_mem; ++}; ++ ++typedef enum ++{ ++ DESTROYED = -1, ++ INVALID = 0, ++ FRESH = 1, ++ RUNNABLE, ++ RUNNING, ++ STOPPED, ++ ATTESTING, ++ OCALLING ++} enclave_state_t; ++ ++struct vm_area_struct ++{ ++ unsigned long va_start; ++ unsigned long va_end; ++ ++ struct vm_area_struct *vm_next; ++ struct pm_area_struct *pma; ++}; ++ ++struct pm_area_struct ++{ ++ unsigned long paddr; ++ unsigned long size; ++ unsigned long free_mem; ++ ++ struct pm_area_struct *pm_next; ++}; ++ ++struct page_t ++{ ++ uintptr_t paddr; ++ struct page_t *next; ++}; ++ ++struct enclave_t ++{ ++ unsigned int eid; ++ enclave_type_t type; ++ enclave_state_t state; ++ ++ ///vm_area_struct lists ++ struct vm_area_struct* text_vma; ++ struct vm_area_struct* stack_vma; ++ uintptr_t _stack_top; ///lowest address of stack area ++ struct vm_area_struct* heap_vma; ++ uintptr_t _heap_top; ///highest address of heap area ++ struct vm_area_struct* mmap_vma; ++ ++ ///pm_area_struct list ++ struct pm_area_struct* pma_list; ++ struct page_t* free_pages; ++ uintptr_t free_pages_num; ++ ++ ///root page table of enclave ++ unsigned long root_page_table; ++ ++ ///root page table register for host ++ unsigned long host_ptbr; ++ ++ ///entry point of enclave ++ unsigned long entry_point; ++ ++ ///shared mem with kernel ++ unsigned long kbuffer;//paddr ++ unsigned long kbuffer_size; ++ ++ ///shared mem with host ++ unsigned long shm_paddr; ++ unsigned long shm_size; ++ ++ // host memory arg ++ unsigned long mm_arg_paddr[RELAY_PAGE_NUM]; ++ unsigned long mm_arg_size[RELAY_PAGE_NUM]; ++ ++ unsigned long* ocall_func_id; ++ unsigned long* ocall_arg0; ++ unsigned long* ocall_arg1; ++ unsigned long* ocall_syscall_num; ++ ++ // enclave thread context ++ // TODO: support multiple threads ++ struct thread_state_t thread_context; ++ unsigned int top_caller_eid; ++ unsigned int caller_eid; ++ unsigned int cur_callee_eid; ++ unsigned char hash[HASH_SIZE]; ++ char enclave_name[NAME_LEN]; ++}; ++ ++struct shadow_enclave_t ++{ ++ unsigned int eid; ++ ++ enclave_state_t state; ++ struct page_t* free_pages; ++ uintptr_t free_pages_num; ++ ++ ///root page table of enclave ++ unsigned long root_page_table; ++ ++ ///root page table register for host ++ unsigned long host_ptbr; ++ ++ ///entry point of enclave ++ unsigned long entry_point; ++ struct thread_state_t thread_context; ++ unsigned char hash[HASH_SIZE]; ++}; ++ ++/** ++ * cpu state ++ */ ++struct cpu_state_t ++{ ++ int in_enclave; /// whether current hart is in enclave-mode 
++ int eid; /// the eid of current enclave if the hart in enclave-mode ++}; ++ ++void acquire_enclave_metadata_lock(); ++void release_enclave_metadata_lock(); ++ ++int cpu_in_enclave(int i); ++int cpu_eid(int i); ++int check_in_enclave_world(); ++int get_curr_enclave_id(); ++struct enclave_t* __get_enclave(int eid); ++ ++uintptr_t copy_from_host(void* dest, void* src, size_t size); ++uintptr_t copy_to_host(void* dest, void* src, size_t size); ++int copy_word_to_host(unsigned int* ptr, uintptr_t value); ++int copy_dword_to_host(uintptr_t* ptr, uintptr_t value); ++ ++struct link_mem_t* init_mem_link(unsigned long mem_size, unsigned long slab_size); ++struct link_mem_t* add_link_mem(struct link_mem_t** tail); ++ ++struct enclave_t* __alloc_enclave(); ++int __free_enclave(int eid); ++void free_enclave_memory(struct pm_area_struct *pma); ++ ++uintptr_t create_enclave(enclave_create_param_t create_args); ++uintptr_t attest_enclave(uintptr_t eid, uintptr_t report, uintptr_t nonce); ++uintptr_t attest_shadow_enclave(uintptr_t eid, uintptr_t report, uintptr_t nonce); ++uintptr_t run_enclave(uintptr_t* regs, unsigned int eid, uintptr_t addr, uintptr_t size); ++uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid); ++uintptr_t wake_enclave(uintptr_t* regs, unsigned int eid); ++uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid); ++uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid); ++uintptr_t resume_from_ocall(uintptr_t* regs, unsigned int eid); ++ ++uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval); ++uintptr_t enclave_mmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size); ++uintptr_t enclave_unmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size); ++ ++uintptr_t create_shadow_enclave(enclave_create_param_t create_args); ++uintptr_t run_shadow_enclave(uintptr_t* regs, unsigned int eid, shadow_enclave_run_param_t enclave_run_param, uintptr_t addr, uintptr_t size); ++ ++struct call_enclave_arg_t ++{ ++ uintptr_t req_arg; ++ uintptr_t resp_val; ++ uintptr_t req_vaddr; ++ uintptr_t req_size; ++ uintptr_t resp_vaddr; ++ uintptr_t resp_size; ++}; ++uintptr_t call_enclave(uintptr_t *regs, unsigned int enclave_id, uintptr_t arg); ++uintptr_t enclave_return(uintptr_t *regs, uintptr_t arg); ++uintptr_t enclave_sys_write(uintptr_t *regs); ++uintptr_t enclave_sbrk(uintptr_t* regs, intptr_t size); ++uintptr_t enclave_read_sec(uintptr_t *regs, uintptr_t sec); ++uintptr_t enclave_write_sec(uintptr_t *regs, uintptr_t sec); ++uintptr_t enclave_return_relay_page(uintptr_t *regs); ++uintptr_t do_timer_irq(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc); ++uintptr_t do_yield(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc); ++uintptr_t ipi_stop_enclave(uintptr_t *regs, uintptr_t host_ptbr, int eid); ++uintptr_t ipi_destroy_enclave(uintptr_t *regs, uintptr_t host_ptbr, int eid); ++ ++ ++//relay page ++#define ENTRY_PER_METADATA_REGION 100 ++#define ENTRY_PER_RELAY_PAGE_REGION 20 ++ ++struct relay_page_entry_t ++{ ++ char enclave_name[NAME_LEN]; ++ unsigned long addr; ++ unsigned long size; ++}; ++uintptr_t change_relay_page_ownership(unsigned long relay_page_addr, unsigned long relay_page_size, char *enclave_name); ++struct relay_page_entry_t* __get_relay_page_by_name(char* enclave_name, int *slab_index, int *link_mem_index); ++int __free_relay_page_entry(unsigned long relay_page_addr, unsigned long relay_page_size); ++struct relay_page_entry_t* __alloc_relay_page_entry(char *enclave_name, unsigned long relay_page_addr, unsigned long relay_page_size); ++ ++ ++#endif /* _ENCLAVE_H 
*/ +diff --git a/include/sm/enclave_args.h b/include/sm/enclave_args.h +new file mode 100644 +index 0000000..14e2c72 +--- /dev/null ++++ b/include/sm/enclave_args.h +@@ -0,0 +1,155 @@ ++#ifndef _ENCLAVE_ARGS_H ++#define _ENCLAVE_ARGS_H ++#include "sm/thread.h" ++#define HASH_SIZE 32 ++#define PRIVATE_KEY_SIZE 32 ++#define PUBLIC_KEY_SIZE 64 ++#define SIGNATURE_SIZE 64 ++ ++#define MANU_PUB_KEY (void*)((unsigned long)0x801ff000) ++#define DEV_PUB_KEY (MANU_PUB_KEY + PUBLIC_KEY_SIZE) ++#define DEV_PRI_KEY (DEV_PUB_KEY + PUBLIC_KEY_SIZE) ++#define SM_PUB_KEY (DEV_PRI_KEY + PRIVATE_KEY_SIZE) ++#define SM_PRI_KEY (SM_PUB_KEY + PUBLIC_KEY_SIZE) ++#define SM_HASH (SM_PRI_KEY + PRIVATE_KEY_SIZE) ++#define SM_SIGNATURE (SM_HASH + HASH_SIZE) ++ ++struct mm_alloc_arg_t ++{ ++ unsigned long req_size; ++ uintptr_t resp_addr; ++ unsigned long resp_size; ++}; ++ ++struct sm_report_t ++{ ++ unsigned char hash[HASH_SIZE]; ++ unsigned char signature[SIGNATURE_SIZE]; ++ unsigned char sm_pub_key[PUBLIC_KEY_SIZE]; ++}; ++ ++struct enclave_report_t ++{ ++ unsigned char hash[HASH_SIZE]; ++ unsigned char signature[SIGNATURE_SIZE]; ++ uintptr_t nonce; ++}; ++ ++struct report_t ++{ ++ struct sm_report_t sm; ++ struct enclave_report_t enclave; ++ unsigned char dev_pub_key[PUBLIC_KEY_SIZE]; ++}; ++ ++struct signature_t ++{ ++ unsigned char r[PUBLIC_KEY_SIZE/2]; ++ unsigned char s[PUBLIC_KEY_SIZE/2]; ++}; ++ ++struct pt_entry_t ++{ ++ unsigned long pte_addr; ++ unsigned long pte; ++}; ++ ++#if __riscv_xlen == 64 ++ ++#define NAME_LEN 16 ++ ++typedef enum ++{ ++ NORMAL_ENCLAVE = 0, ++ SERVER_ENCLAVE = 1 ++} enclave_type_t; ++ ++typedef struct enclave_create_param ++{ ++ unsigned int *eid_ptr; ++ char name[NAME_LEN]; ++ enclave_type_t type; ++ ++ unsigned long paddr; ++ unsigned long size; ++ ++ unsigned long entry_point; ++ ++ unsigned long free_mem; ++ ++ //enclave shared mem with kernel ++ unsigned long kbuffer;//paddr ++ unsigned long kbuffer_size; ++ ++ //enclave shared mem with host ++ unsigned long shm_paddr; ++ unsigned long shm_size; ++ ++ unsigned long *ecall_arg0; ++ unsigned long *ecall_arg1; ++ unsigned long *ecall_arg2; ++ unsigned long *ecall_arg3; ++} enclave_create_param_t; ++ ++typedef struct shadow_enclave_run_param ++{ ++ unsigned long sptbr; ++ unsigned long free_page; ++ unsigned long size; ++ unsigned int *eid_ptr; ++ ++ unsigned long kbuffer;//paddr ++ unsigned long kbuffer_size; ++ ++ unsigned long shm_paddr; ++ unsigned long shm_size; ++ ++ unsigned long schrodinger_paddr; ++ unsigned long schrodinger_size; ++ ++ unsigned long *ecall_arg0; ++ unsigned long *ecall_arg1; ++ unsigned long *ecall_arg2; ++ unsigned long *ecall_arg3; ++ char name[NAME_LEN]; ++} shadow_enclave_run_param_t; ++ ++#else ++ ++#define ATTRIBUTE_R 0x1 ++#define ATTRIBUTE_W 0x2 ++#define ATTRIBUTE_X 0x4 ++#define DEFAULT_EAPP_REGIONS_NUM 5 ++ ++struct region_t { ++ unsigned long base; ++ unsigned long size; ++ unsigned long attributes; ++}; ++ ++struct eapp_t { ++ unsigned long offset; ++ unsigned long size; ++ unsigned long uuid; ++ struct region_t regions[DEFAULT_EAPP_REGIONS_NUM]; ++}; ++ ++typedef struct enclave_create_param ++{ ++ unsigned long uuid; ++ unsigned long *eid_ptr; ++ ++ unsigned long untrusted_ptr; ++ unsigned long untrusted_size; ++}enclave_create_param_t; ++ ++struct init_enclave_create_param_t ++{ ++ unsigned long uuid; ++ unsigned long entry_point; ++ struct region_t regions[DEFAULT_EAPP_REGIONS_NUM]; ++}; ++ ++#endif /* __riscv_xlen == 64 */ ++ ++#endif /* _ENCLAVE_ARGS_H */ +diff --git 
a/include/sm/enclave_mm.h b/include/sm/enclave_mm.h
+new file mode 100644
+index 0000000..5d7a1d3
+--- /dev/null
++++ b/include/sm/enclave_mm.h
+@@ -0,0 +1,25 @@
++#ifndef _ENCLAVE_MM_H
++#define _ENCLAVE_MM_H
++
++#include "sbi/sbi_types.h"
++#include "sm/enclave.h"
++
++struct mm_region_list_t
++{
++ uintptr_t paddr;
++ unsigned long size;
++ struct mm_region_list_t *next;
++};
++
++int check_and_set_secure_memory(unsigned long paddr, unsigned long size);
++int __free_secure_memory(unsigned long paddr, unsigned long size);
++int free_secure_memory(unsigned long paddr, unsigned long size);
++
++uintptr_t mm_init(uintptr_t paddr, unsigned long size);
++void* mm_alloc(unsigned long req_size, unsigned long* resp_size);
++int mm_free(void* paddr, unsigned long size);
++
++int grant_enclave_access(struct enclave_t* enclave);
++int retrieve_enclave_access(struct enclave_t *enclave);
++
++#endif /* _ENCLAVE_MM_H */
+diff --git a/include/sm/enclave_vm.h b/include/sm/enclave_vm.h
+new file mode 100644
+index 0000000..0c74064
+--- /dev/null
++++ b/include/sm/enclave_vm.h
+@@ -0,0 +1,88 @@
++#ifndef _ENCLAVE_VM_H
++#define _ENCLAVE_VM_H
++
++#include "enclave.h"
++// #include "encoding.h"
++#include "vm.h"
++
++/* default layout of enclave */
++//#####################
++//# reserved for #
++//# s mode #
++//##################### 0xffffffe000000000 //actually this is the start address of kernel's image
++//# hole #
++//##################### 0x0000004000000000
++//# shared mem #
++//# with host #
++//##################### 0x0000003900000000
++//# #
++//# host mm arg #
++//# #
++//##################### 0x0000003800000000
++//# #
++//# stack #
++//# #
++//##################### 0x0000003000000000
++//# mmap #
++//# #
++//##################### brk
++//# #
++//# heap #
++//# #
++//##################### 0x0000001000000000
++//# #
++//# text/code/bss #
++//# #
++//##################### 0x0000000000001000 //not fixed, depends on enclave's lds
++//# hole #
++//##################### 0x0
++
++#define ENCLAVE_DEFAULT_KBUFFER_SIZE 0x1000UL
++#define ENCLAVE_DEFAULT_KBUFFER 0xffffffe000000000UL
++#define ENCLAVE_DEFAULT_SHM_BASE 0x0000003900000000UL
++#define ENCLAVE_DEFAULT_MM_ARG_BASE 0x0000003800000000UL
++#define ENCLAVE_DEFAULT_STACK_BASE 0x0000003800000000UL
++#define ENCLAVE_DEFAULT_STACK_SIZE 128*1024
++#define ENCLAVE_DEFAULT_MMAP_BASE 0x0000003000000000UL
++#define ENCLAVE_DEFAULT_HEAP_BASE 0x0000001000000000UL
++#define ENCLAVE_DEFAULT_TEXT_BASE 0x0000000000001000UL
++
++#define PAGE_UP(paddr) (((paddr) + RISCV_PGSIZE - 1) & (~(RISCV_PGSIZE - 1)))
++#define PAGE_DOWN(paddr) ((paddr) & (~(RISCV_PGSIZE - 1)))
++#define PADDR_TO_PFN(paddr) ((paddr) >> RISCV_PGSHIFT)
++#define PAGE_PFN_SHIFT 10
++#define PAGE_ATTRIBUTION(pte) (pte & ((1<<PAGE_PFN_SHIFT)-1))
++#define PTE_TO_PFN(pte) ((pte) >> PTE_PPN_SHIFT)
++#define PFN_TO_PTE(pfn, attribution) ((pfn<<PAGE_PFN_SHIFT) | attribution)
++#define PTE_TO_PA(pte) (((pte) >> PTE_PPN_SHIFT)<<RISCV_PGSHIFT)
++#define PA_TO_PTE(paddr, attribution) ((((paddr)>>RISCV_PGSHIFT)<<PTE_PPN_SHIFT) | attribution)
++
++#endif /* _ENCLAVE_VM_H */
+diff --git a/include/sm/gm/big.h b/include/sm/gm/big.h
+new file mode 100644
+--- /dev/null
++++ b/include/sm/gm/big.h
+@@ -0,0 +1,88 @@
++#ifndef _BIG_H_
++#define _BIG_H_
++
++#include "sm/gm/typedef.h"
++
++/* Computes result = left * right % curve->p. */
++void vli_mod_mult_fast(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits);
++
++/* Computes result = left^2 % curve->p. */
++void vli_mod_square_fast(u64 *result, u64 *left, u64 *mod, u8 ndigits);
++
++/* Computes result = in >> c, returning carry. Can modify in place
++ * (if result == in). 0 < shift < 64.
++ */
++u64 vli_rshift(u64 *result, u64 *in, u32 shift, u8 ndigits);
++
++/* Computes result = left + right, returning carry. Can modify in place. */
++u64 vli_add(u64 *result, u64 *left, u64 *right, u8 ndigits);
++
++/* Computes result = left - right, returning borrow. Can modify in place. 
*/ ++u64 vli_sub(u64 *result, u64 *left, u64 *right, u8 ndigits); ++ ++/* Computes result = left * right. */ ++void vli_mult(u64 *result, u64 *left, u64 *right, u8 ndigits); ++ ++/* Computes result = left^2. */ ++void vli_square(u64 *result, u64 *left, u8 ndigits); ++ ++/* Computes result = (left + right) % mod. ++ Assumes that left < mod and right < mod, result != mod. */ ++void vli_mod_add(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits); ++ ++/* Computes result = (left - right) % mod. ++ Assumes that left < mod and right < mod, result != mod. */ ++void vli_mod_sub(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits); ++ ++/* Computes result = (left * right) % mod. */ ++void vli_mod_mult(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits); ++ ++/* Computes result = left^2 % mod. */ ++void vli_mod_square(u64 *result, u64 *left, u64 *mod, u8 ndigits); ++ ++/* Computes result = left^p % mod. */ ++void vli_mod_exp(u64 *result, u64 *left, u64 *p, u64 *mod, u8 ndigits); ++ ++/* Computes result = (product) % mod. */ ++void vli_mod(u64 *result, u64 *product, u64 *mod, u8 ndigits); ++ ++/* Computes result = (1 / input) % mod. All VLIs are the same size. ++ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide" ++ * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf ++ */ ++void vli_mod_inv(u64 *result, u64 *input, u64 *mod, u8 ndigits); ++ ++/* Computes result = (left / right). ++ * remainder = (left % right). ++ */ ++void vli_div(u64 *result, u64 *remainder, u64 *left, u64 cdigits, u64 *right, u8 ddigits); ++ ++#endif +diff --git a/include/sm/gm/ecc.h b/include/sm/gm/ecc.h +new file mode 100644 +index 0000000..0d652de +--- /dev/null ++++ b/include/sm/gm/ecc.h +@@ -0,0 +1,38 @@ ++#ifndef _ECC_H_ ++#define _ECC_H_ ++ ++#include "sm/gm/typedef.h" ++ ++#define ECC_WORDSIZE 8 ++#define ECC_NUMBITS 256 ++#define ECC_NUMWORD (ECC_NUMBITS/ECC_WORDSIZE) //32 ++ ++#define ECC_MAX_DIGITS 4 ++ ++#define SWAP(a,b) { u32 t = a; a = b; b = t;} ++ ++typedef struct ecc_point ++{ ++ u64 x[ECC_MAX_DIGITS]; ++ u64 y[ECC_MAX_DIGITS]; ++} ecc_point; ++ ++struct ecc_curve { ++ u8 ndigits; ++ struct ecc_point g; ++ u64 p[ECC_MAX_DIGITS]; ++ u64 n[ECC_MAX_DIGITS]; ++ u64 h[ECC_MAX_DIGITS]; ++ u64 a[ECC_MAX_DIGITS]; ++ u64 b[ECC_MAX_DIGITS]; ++}; ++ ++void ecc_bytes2native(u64 *native, void *bytes, u8 ndigits); ++void ecc_native2bytes(void *bytes, u64 *native, u8 ndigits); ++ ++void ecc_point_add(struct ecc_curve *curve, ecc_point *result, ecc_point *x, ecc_point *y); ++void ecc_point_mult(struct ecc_curve *curve, ecc_point *result, ecc_point *point, u64 *scalar, u64 *initialZ); ++void ecc_point_mult2(struct ecc_curve *curve, ecc_point *result, ecc_point *g, ecc_point *p, u64 *s, u64 *t); ++int ecc_point_is_zero(struct ecc_curve *curve, ecc_point *point); ++ ++#endif +diff --git a/include/sm/gm/random.h b/include/sm/gm/random.h +new file mode 100644 +index 0000000..66bc9f4 +--- /dev/null ++++ b/include/sm/gm/random.h +@@ -0,0 +1,8 @@ ++#ifndef _RANDOM_H_ ++#define _RANDOM_H_ ++ ++#include "sm/gm/typedef.h" ++ ++int vli_get_random(u8 *p_data, u32 len); ++ ++#endif +diff --git a/include/sm/gm/sm2.h b/include/sm/gm/sm2.h +new file mode 100644 +index 0000000..6f10b40 +--- /dev/null ++++ b/include/sm/gm/sm2.h +@@ -0,0 +1,34 @@ ++#ifndef _SM2_H_ ++#define _SM2_H_ ++ ++#include "sm/gm/typedef.h" ++#include "sm/gm/ecc.h" ++ ++int sm2_make_prikey(u8 *prikey); ++int sm2_make_pubkey(u8 *prikey, ecc_point *pubkey); ++int sm2_make_keypair(u8 *prikey, ecc_point *pubkey); ++int sm2_sign(u8 
*r, u8 *s, u8 *pri, u8 *hash); ++int sm2_verify(ecc_point *pubkey, u8 *hash, u8 *r, u8 *s); ++ ++int sm2_encrypt(ecc_point *pubKey, u8 *M, u32 Mlen, u8 *C, u32 *Clen); ++int sm2_decrypt(u8 *prikey, u8 *C, u32 Clen, u8 *M, u32 *Mlen); ++ ++void sm3_z(u8 *id, u32 idlen, ecc_point *pub, u8 *hash); ++int sm2_shared_point(u8* selfPriKey, u8* selfTempPriKey, ecc_point* selfTempPubKey, ++ ecc_point *otherPubKey, ecc_point* otherTempPubKey, ecc_point *key); ++int sm2_shared_key(ecc_point *point, u8 *ZA, u8 *ZB, u32 keyLen, u8 *key); ++int sm2_point_mult(ecc_point *G, u8 *k, ecc_point *P); ++ ++ ++int ECC_KeyEx_Init_I(u8 *pri, ecc_point *pub); ++ ++int ECC_KeyEx_Re_I(u8 *rb, u8 *dB, ecc_point *RA, ecc_point *PA, ++ u8* ZA, u8 *ZB, u8 *K, u32 klen, ecc_point *RB, ++ ecc_point *V, u8* hash); ++ ++int ECC_KeyEx_Init_II(u8* ra, u8* dA, ecc_point* RA, ecc_point* RB, ecc_point* PB, u8 ++ ZA[],u8 ZB[],u8 SB[],u8 K[], u32 klen,u8 SA[]); ++ ++int ECC_KeyEx_Re_II(ecc_point *V,ecc_point *RA,ecc_point *RB,u8 ZA[],u8 ZB[],u8 SA[]); ++ ++#endif /* _SM2_H_ */ +diff --git a/include/sm/gm/sm3.h b/include/sm/gm/sm3.h +new file mode 100644 +index 0000000..7d1e9da +--- /dev/null ++++ b/include/sm/gm/sm3.h +@@ -0,0 +1,103 @@ ++#ifndef _SM3_H ++#define _SM3_H ++ ++#include "sm/gm/typedef.h" ++ ++#define SM3_DATA_LEN 32 ++ ++/** ++ * \brief SM3 context structure ++ */ ++struct sm3_context ++{ ++ unsigned long total[2]; /*!< number of bytes processed */ ++ unsigned long state[8]; /*!< intermediate digest state */ ++ unsigned char buffer[64]; /*!< data block being processed */ ++ ++ unsigned char ipad[64]; /*!< HMAC: inner padding */ ++ unsigned char opad[64]; /*!< HMAC: outer padding */ ++}; ++ ++/** ++ * \brief SM3 context setup ++ * ++ * \param ctx context to be initialized ++ */ ++void sm3_init(struct sm3_context *ctx); ++ ++/** ++ * \brief SM3 process buffer ++ * ++ * \param ctx SM3 context ++ * \param input buffer holding the data ++ * \param ilen length of the input data ++ */ ++void sm3_update(struct sm3_context *ctx, unsigned char *input, int ilen); ++ ++/** ++ * \brief SM3 final digest ++ * ++ * \param ctx SM3 context ++ */ ++void sm3_final(struct sm3_context *ctx, unsigned char output[32]); ++ ++/** ++ * \brief Output = SM3( input buffer ) ++ * ++ * \param input buffer holding the data ++ * \param ilen length of the input data ++ * \param output SM3 checksum result ++ */ ++void sm3(unsigned char *input, int ilen, unsigned char output[32]); ++ ++/** ++ * \brief Output = SM3( file contents ) ++ * ++ * \param path input file name ++ * \param output SM3 checksum result ++ * ++ * \return 0 if successful, 1 if fopen failed, ++ * or 2 if fread failed ++ */ ++int sm3_file(char *path, unsigned char output[32]); ++ ++/** ++ * \brief SM3 HMAC context setup ++ * ++ * \param ctx HMAC context to be initialized ++ * \param key HMAC secret key ++ * \param keylen length of the HMAC key ++ */ ++void sm3_hmac_init(struct sm3_context *ctx, unsigned char *key, int keylen); ++ ++/** ++ * \brief SM3 HMAC process buffer ++ * ++ * \param ctx HMAC context ++ * \param input buffer holding the data ++ * \param ilen length of the input data ++ */ ++void sm3_hmac_update(struct sm3_context *ctx, unsigned char *input, int ilen); ++ ++/** ++ * \brief SM3 HMAC final digest ++ * ++ * \param ctx HMAC context ++ * \param output SM3 HMAC checksum result ++ */ ++void sm3_hmac_final(struct sm3_context *ctx, unsigned char output[32]); ++ ++/** ++ * \brief Output = HMAC-SM3( hmac key, input buffer ) ++ * ++ * \param key HMAC secret key ++ * \param 
keylen length of the HMAC key ++ * \param input buffer holding the data ++ * \param ilen length of the input data ++ * \param output HMAC-SM3 result ++ */ ++void sm3_hmac(unsigned char *key, int keylen, ++ unsigned char *input, int ilen, ++ unsigned char output[32]); ++ ++#endif /* _SM3_H */ +diff --git a/include/sm/gm/typedef.h b/include/sm/gm/typedef.h +new file mode 100644 +index 0000000..4e04895 +--- /dev/null ++++ b/include/sm/gm/typedef.h +@@ -0,0 +1,164 @@ ++#ifndef __TYPEDEF_H__ ++#define __TYPEDEF_H__ ++#include "sbi/sbi_types.h" ++ ++typedef unsigned char u8; ++typedef unsigned short u16; ++typedef unsigned int u32; ++typedef unsigned int uint; ++// typedef unsigned long long u64; ++ ++typedef char s8; ++typedef short s16; ++typedef int s32; ++// typedef long long s64; ++ ++#define be64_to_le64(x) ((u64)( \ ++ (((u64)(x) & (u64)0x00000000000000ffULL) << 56) | \ ++ (((u64)(x) & (u64)0x000000000000ff00ULL) << 40) | \ ++ (((u64)(x) & (u64)0x0000000000ff0000ULL) << 24) | \ ++ (((u64)(x) & (u64)0x00000000ff000000ULL) << 8) | \ ++ (((u64)(x) & (u64)0x000000ff00000000ULL) >> 8) | \ ++ (((u64)(x) & (u64)0x0000ff0000000000ULL) >> 24) | \ ++ (((u64)(x) & (u64)0x00ff000000000000ULL) >> 40) | \ ++ (((u64)(x) & (u64)0xff00000000000000ULL) >> 56))) ++ ++#define le64_to_be64(x) ((u64)( \ ++ (((u64)(x) & (u64)0x00000000000000ffULL) << 56) | \ ++ (((u64)(x) & (u64)0x000000000000ff00ULL) << 40) | \ ++ (((u64)(x) & (u64)0x0000000000ff0000ULL) << 24) | \ ++ (((u64)(x) & (u64)0x00000000ff000000ULL) << 8) | \ ++ (((u64)(x) & (u64)0x000000ff00000000ULL) >> 8) | \ ++ (((u64)(x) & (u64)0x0000ff0000000000ULL) >> 24) | \ ++ (((u64)(x) & (u64)0x00ff000000000000ULL) >> 40) | \ ++ (((u64)(x) & (u64)0xff00000000000000ULL) >> 56))) ++ ++static inline u16 __get_unaligned_le16(const u8 *p) ++{ ++ return p[0] | p[1] << 8; ++} ++ ++static inline u32 __get_unaligned_le32(const u8 *p) ++{ ++ return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; ++} ++ ++static inline u64 __get_unaligned_le64(const u8 *p) ++{ ++ return (u64)__get_unaligned_le32(p + 4) << 32 ++ | __get_unaligned_le32(p); ++} ++ ++static inline void __put_unaligned_le16(u16 val, u8 *p) ++{ ++ *p++ = val; ++ *p++ = val >> 8; ++} ++ ++static inline void __put_unaligned_le32(u32 val, u8 *p) ++{ ++ __put_unaligned_le16(val >> 16, p + 2); ++ __put_unaligned_le16(val, p); ++} ++ ++static inline void __put_unaligned_le64(u64 val, u8 *p) ++{ ++ __put_unaligned_le32(val >> 32, p + 4); ++ __put_unaligned_le32(val, p); ++} ++ ++static inline u16 get_unaligned_le16(const void *p) ++{ ++ return __get_unaligned_le16((const u8 *)p); ++} ++ ++static inline u32 get_unaligned_le32(const void *p) ++{ ++ return __get_unaligned_le32((const u8 *)p); ++} ++ ++static inline u64 get_unaligned_le64(const void *p) ++{ ++ return __get_unaligned_le64((const u8 *)p); ++} ++ ++static inline void put_unaligned_le16(u16 val, void *p) ++{ ++ __put_unaligned_le16(val, p); ++} ++ ++static inline void put_unaligned_le32(u32 val, void *p) ++{ ++ __put_unaligned_le32(val, p); ++} ++ ++static inline void put_unaligned_le64(u64 val, void *p) ++{ ++ __put_unaligned_le64(val, p); ++} ++ ++static inline u16 __get_unaligned_be16(const u8 *p) ++{ ++ return p[0] << 8 | p[1]; ++} ++ ++static inline u32 __get_unaligned_be32(const u8 *p) ++{ ++ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; ++} ++ ++static inline u64 __get_unaligned_be64(const u8 *p) ++{ ++ return (u64)__get_unaligned_be32(p) << 32 ++ | __get_unaligned_be32(p + 4); ++} ++ ++static inline void __put_unaligned_be16(u16 val, u8 *p) ++{ 
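++	/* big-endian store: the most-significant byte goes to the lower address */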
++ *p++ = val >> 8; ++ *p++ = val; ++} ++ ++static inline void __put_unaligned_be32(u32 val, u8 *p) ++{ ++ __put_unaligned_be16(val >> 16, p); ++ __put_unaligned_be16(val, p + 2); ++} ++ ++static inline void __put_unaligned_be64(u64 val, u8 *p) ++{ ++ __put_unaligned_be32(val >> 32, p); ++ __put_unaligned_be32(val, p + 4); ++} ++ ++static inline u16 get_unaligned_be16(const void *p) ++{ ++ return __get_unaligned_be16((const u8 *)p); ++} ++ ++static inline u32 get_unaligned_be32(const void *p) ++{ ++ return __get_unaligned_be32((const u8 *)p); ++} ++ ++static inline u64 get_unaligned_be64(const void *p) ++{ ++ return __get_unaligned_be64((const u8 *)p); ++} ++ ++static inline void put_unaligned_be16(u16 val, void *p) ++{ ++ __put_unaligned_be16(val, p); ++} ++ ++static inline void put_unaligned_be32(u32 val, void *p) ++{ ++ __put_unaligned_be32(val, p); ++} ++ ++static inline void put_unaligned_be64(u64 val, void *p) ++{ ++ __put_unaligned_be64(val, p); ++} ++ ++#endif +diff --git a/include/sm/ipi.h b/include/sm/ipi.h +new file mode 100644 +index 0000000..e8316cd +--- /dev/null ++++ b/include/sm/ipi.h +@@ -0,0 +1,26 @@ ++#ifndef _IPI_H ++#define _IPI_H ++ ++#include "sbi/riscv_atomic.h" ++#include "sbi/riscv_locks.h" ++#include "sbi/sbi_types.h" ++ ++#define IPI_PMP_SYNC 0x1 ++#define IPI_STOP_ENCLAVE 0x2 ++#define IPI_DESTROY_ENCLAVE 0x3 ++ ++struct ipi_mail_t ++{ ++ uintptr_t event; ++ char data[40]; ++}; ++ ++extern int ipi_mail_pending[]; ++ ++void send_ipi_mail(uintptr_t dest_hart, uintptr_t need_sync); ++ ++void wait_pending_ipi(uintptr_t mask); ++ ++void handle_ipi_mail(uintptr_t *regs); ++ ++#endif /* _IPI_H */ +diff --git a/include/sm/platform/pt_area/platform.h b/include/sm/platform/pt_area/platform.h +new file mode 100644 +index 0000000..0a20068 +--- /dev/null ++++ b/include/sm/platform/pt_area/platform.h +@@ -0,0 +1,9 @@ ++#ifndef _PLATFORM_H ++#define _PLATFORM_H ++ ++#include "sm/enclave_mm.h" ++#include "sm/platform/pt_area/platform_thread.h" ++ ++int platform_init(); ++ ++#endif /* _PLATFORM_H */ +diff --git a/include/sm/platform/pt_area/platform_thread.h b/include/sm/platform/pt_area/platform_thread.h +new file mode 100644 +index 0000000..0a71c0b +--- /dev/null ++++ b/include/sm/platform/pt_area/platform_thread.h +@@ -0,0 +1,18 @@ ++#ifndef _PLATFORM_THREAD_H ++#define _PLATFORM_THREAD_H ++ ++#include "sm/thread.h" ++ ++void platform_enter_enclave_world(); ++ ++void platform_exit_enclave_world(); ++ ++int platform_check_in_enclave_world(); ++ ++int platform_check_enclave_authentication(); ++ ++void platform_switch_to_enclave_ptbr(struct thread_state_t* thread, uintptr_t ptbr); ++ ++void platform_switch_to_host_ptbr(struct thread_state_t* thread, uintptr_t ptbr); ++ ++#endif /* _PLATFORM_THREAD_H */ +diff --git a/include/sm/pmp.h b/include/sm/pmp.h +new file mode 100644 +index 0000000..ca963c7 +--- /dev/null ++++ b/include/sm/pmp.h +@@ -0,0 +1,66 @@ ++#ifndef _PMP_H ++#define _PMP_H ++ ++#include "sbi/sbi_types.h" ++#include "sbi/riscv_encoding.h" ++#include "sbi/sbi_hartmask.h" ++ ++#define NPMP 8 ++ ++#define PMP_OFF 0x00 ++#define PMP_NO_PERM 0 ++ ++#define PMPCFG_BIT_NUM 8 ++#define PMPCFG_BITS 0xFF ++#define PMP_PER_CFG_REG (sizeof(uintptr_t) * 8 / PMPCFG_BIT_NUM) ++ ++#define PMP_SET(num, cfg_index, pmpaddr, pmpcfg) do { \ ++ uintptr_t oldcfg = csr_read(CSR_PMPCFG##cfg_index); \ ++ pmpcfg |= (oldcfg & ~((uintptr_t)PMPCFG_BITS << (uintptr_t)PMPCFG_BIT_NUM*(num%PMP_PER_CFG_REG))); \ ++ asm volatile ("la t0, 1f\n\t" \ ++ "csrrw t0, mtvec, t0\n\t" \ ++ "csrw pmpaddr"#num", 
%0\n\t" \
++ "csrw pmpcfg"#cfg_index", %1\n\t" \
++ "sfence.vma\n\t"\
++ ".align 2\n\t" \
++ "1: csrw mtvec, t0 \n\t" \
++ : : "r" (pmpaddr), "r" (pmpcfg) : "t0"); \
++} while(0)
++
++#define PMP_READ(num, cfg_index, pmpaddr, pmpcfg) do { \
++ asm volatile("csrr %0, pmpaddr"#num : "=r"(pmpaddr) :); \
++ asm volatile("csrr %0, pmpcfg"#cfg_index : "=r"(pmpcfg) :); \
++} while(0)
++
++struct pmp_config_t
++{
++ uintptr_t paddr;
++ unsigned long size;
++ uintptr_t perm;
++ uintptr_t mode;
++};
++
++struct pmp_data_t
++{
++ struct pmp_config_t pmp_config_arg;
++ int pmp_idx_arg;
++ struct sbi_hartmask smask;
++};
++
++#define SBI_PMP_DATA_INIT(__ptr, __pmp_config_arg, __pmp_idx_arg, __src) \
++do { \
++ (__ptr)->pmp_config_arg = (__pmp_config_arg); \
++ (__ptr)->pmp_idx_arg = (__pmp_idx_arg); \
++ SBI_HARTMASK_INIT_EXCEPT(&(__ptr)->smask, (__src)); \
++} while (0)
++
++void set_pmp_and_sync(int pmp_idx, struct pmp_config_t);
++void clear_pmp_and_sync(int pmp_idx);
++void set_pmp(int pmp_idx, struct pmp_config_t);
++void clear_pmp(int pmp_idx);
++struct pmp_config_t get_pmp(int pmp_idx);
++int region_overlap(uintptr_t pa0, uintptr_t size0, uintptr_t pa1, uintptr_t size1);
++int region_contain(uintptr_t pa0, uintptr_t size0, uintptr_t pa1, uintptr_t size1);
++int illegal_pmp_addr(uintptr_t addr, uintptr_t size);
++
++#endif /* _PMP_H */
+diff --git a/include/sm/relay_page.h b/include/sm/relay_page.h
+new file mode 100644
+index 0000000..92af036
+--- /dev/null
++++ b/include/sm/relay_page.h
+@@ -0,0 +1,12 @@
++#ifndef _RELAY_PAGE_H
++#define _RELAY_PAGE_H
++
++#include "sm/enclave.h"
++#include "sm/enclave_args.h"
++
++
++uintptr_t asyn_enclave_call(uintptr_t *regs, uintptr_t enclave_name, uintptr_t arg);
++uintptr_t split_mem_region(uintptr_t *regs, uintptr_t mem_addr, uintptr_t mem_size, uintptr_t split_addr);
++int free_all_relay_page(unsigned long *mm_arg_paddr, unsigned long *mm_arg_size);
++
++#endif /* _RELAY_PAGE_H */
+diff --git a/include/sm/server_enclave.h b/include/sm/server_enclave.h
+new file mode 100644
+index 0000000..a87c704
+--- /dev/null
++++ b/include/sm/server_enclave.h
+@@ -0,0 +1,21 @@
++#ifndef _SERVER_ENCLAVE_H
++#define _SERVER_ENCLAVE_H
++
++#include "sm/enclave.h"
++#include "sm/enclave_args.h"
++
++struct server_enclave_t
++{
++ //FIXME: enclave has its own name now, so we need not assign a separate server name to a server enclave
++ char server_name[NAME_LEN];
++ struct enclave_t* entity;
++};
++
++#define SERVERS_PER_METADATA_REGION 100
++
++uintptr_t create_server_enclave(enclave_create_param_t create_args);
++uintptr_t destroy_server_enclave(uintptr_t* regs, unsigned int eid);
++uintptr_t acquire_server_enclave(uintptr_t *regs, char *server_name);
++uintptr_t get_caller_id(uintptr_t* regs);
++
++#endif /* _SERVER_ENCLAVE_H */
+diff --git a/include/sm/sm.h b/include/sm/sm.h
+new file mode 100644
+index 0000000..2e7a638
+--- /dev/null
++++ b/include/sm/sm.h
+@@ -0,0 +1,144 @@
++#ifndef _SM_H
++#define _SM_H
++
++#include "sbi/sbi_types.h"
++#include "sm/enclave_args.h"
++#include "sm/ipi.h"
++
++#define SM_BASE 0x80000000UL
++#define SM_SIZE 0x200000UL
++
++//SBI_CALL NUMBERS
++#define SBI_SET_PTE 101
++#define SBI_SET_PTE_ONE 1
++#define SBI_PTE_MEMSET 2
++#define SBI_PTE_MEMCPY 3
++#define SBI_SM_INIT 100
++#define SBI_CREATE_ENCLAVE 99
++#define SBI_ATTEST_ENCLAVE 98
++#define SBI_RUN_ENCLAVE 97
++#define SBI_STOP_ENCLAVE 96
++#define SBI_RESUME_ENCLAVE 95
++#define SBI_DESTROY_ENCLAVE 94
++#define SBI_MEMORY_EXTEND 92
++#define SBI_MEMORY_RECLAIM 91
++#define 
SBI_CREATE_SERVER_ENCLAVE 90 ++#define SBI_DESTROY_SERVER_ENCLAVE 89 ++ ++#define SBI_SM_DEBUG_PRINT 88 ++#define SBI_RUN_SHADOW_ENCLAVE 87 ++#define SBI_CREATE_SHADOW_ENCLAVE 86 ++ ++#define SBI_SCHRODINGER_INIT 85 ++#define SBI_SM_PT_AREA_SEPARATION 83 ++#define SBI_SM_SPLIT_HUGE_PAGE 82 ++#define SBI_SM_MAP_PTE 81 ++#define SBI_ATTEST_SHADOW_ENCLAVE 80 ++ ++//Error code of SBI_CREATE_ENCLAVE ++#define ENCLAVE_ERROR -1 ++#define ENCLAVE_NO_MEM -2 ++#define ENCLAVE_ATTESTATION -3 ++ ++//The enclave return result ++#define ENCLAVE_SUCCESS 0 ++#define ENCLAVE_TIMER_IRQ 1 ++#define ENCLAVE_OCALL 2 ++#define ENCLAVE_YIELD 3 ++ ++//The function id of the resume reason ++#define RESUME_FROM_TIMER_IRQ 0 ++#define RESUME_FROM_STOP 1 ++#define RESUME_FROM_OCALL 2 ++ ++ ++#define SBI_LEGAL_MAX 100UL ++//ENCLAVE_CALL NUMBERS ++#define SBI_EXIT_ENCLAVE 1 ++#define SBI_ENCLAVE_OCALL 2 ++#define SBI_ACQUIRE_SERVER 3 ++#define SBI_CALL_ENCLAVE 4 ++#define SBI_ENCLAVE_RETURN 5 ++#define SBI_ASYN_ENCLAVE_CALL 6 ++#define SBI_SPLIT_MEM_REGION 7 ++#define SBI_GET_CALLER_ID 8 ++#define SBI_YIELD 10 //reserve space for other enclave call operation ++ ++//ENCLAVE OCALL NUMBERS ++#define OCALL_MMAP 1 ++#define OCALL_UNMAP 2 ++#define OCALL_SYS_WRITE 3 ++#define OCALL_SBRK 4 ++#define OCALL_READ_SECT 5 ++#define OCALL_WRITE_SECT 6 ++#define OCALL_RETURN_RELAY_PAGE 7 ++ ++typedef int page_meta; ++#define NORMAL_PAGE ((page_meta)0x7FFFFFFF) ++#define ZERO_MAP_PAGE ((page_meta)0x7FFFFFFE) ++#define PRIVATE_PAGE ((page_meta)0x80000000) ++#define IS_PRIVATE_PAGE(meta) (((page_meta)meta) & PRIVATE_PAGE) ++#define IS_PUBLIC_PAGE(meta) (!IS_PRIVATE_PAGE(meta)) ++#define IS_ZERO_MAP_PAGE(meta) (((page_meta)meta & NORMAL_PAGE) == ZERO_MAP_PAGE) ++#define IS_SCHRODINGER_PAGE(meta) (((page_meta)meta & NORMAL_PAGE) != NORMAL_PAGE) ++#define MAKE_PRIVATE_PAGE(meta) ((page_meta)meta | PRIVATE_PAGE) ++#define MAKE_PUBLIC_PAGE(meta) ((page_meta)meta & NORMAL_PAGE) ++#define MAKE_ZERO_MAP_PAGE(meta) (((page_meta)meta & PRIVATE_PAGE) | ZERO_MAP_PAGE) ++#define MAKE_SCHRODINGER_PAGE(pri, pos) (pri ? \ ++ (PRIVATE_PAGE | ((page_meta)pos & NORMAL_PAGE)) \ ++ : ((page_meta)pos & NORMAL_PAGE)) ++#define SCHRODINGER_PTE_POS(meta) (IS_ZERO_MAP_PAGE(meta) ? 
-1 : ((int)meta & (int)0x7FFFFFFF)) ++ ++void sm_init(); ++ ++int enable_enclave(); ++//remember to acquire mbitmap_lock before using these functions ++int contain_private_range(uintptr_t pfn, uintptr_t pagenum); ++int test_public_range(uintptr_t pfn, uintptr_t pagenum); ++int set_private_range(uintptr_t pfn, uintptr_t pagenum); ++int set_public_range(uintptr_t pfn, uintptr_t pagenum); ++int unmap_mm_region(unsigned long paddr, unsigned long size); ++int remap_mm_region(unsigned long paddr, unsigned long size); ++ ++int check_in_enclave_world(); ++ ++//called by host ++uintptr_t sm_sm_init(uintptr_t pt_area_base, uintptr_t pt_area_size, uintptr_t mbitmap_base, uintptr_t mbitmap_size); ++uintptr_t sm_pt_area_separation(uintptr_t pgd_order, uintptr_t pmd_order); ++uintptr_t sm_set_pte(uintptr_t flag, uintptr_t* pte_addr, uintptr_t pte_src, uintptr_t size); ++uintptr_t sm_split_huge_page(unsigned long paddr, unsigned long size, uintptr_t split_pte); ++uintptr_t sm_map_pte(uintptr_t* pte, uintptr_t* new_pte_addr); ++uintptr_t sm_mm_init(uintptr_t paddr, uintptr_t size); ++uintptr_t sm_mm_extend(uintptr_t paddr, uintptr_t size); ++uintptr_t sm_create_enclave(uintptr_t enclave_create_args); ++uintptr_t sm_attest_enclave(uintptr_t enclave_id, uintptr_t report, uintptr_t nonce); ++uintptr_t sm_attest_shadow_enclave(uintptr_t enclave_id, uintptr_t report, uintptr_t nonce); ++uintptr_t sm_run_enclave(uintptr_t *regs, uintptr_t enclave_id, uintptr_t addr, uintptr_t size); ++uintptr_t sm_stop_enclave(uintptr_t *regs, uintptr_t enclave_id); ++uintptr_t sm_resume_enclave(uintptr_t *regs, uintptr_t enclave_id); ++uintptr_t sm_destroy_enclave(uintptr_t *regs, uintptr_t enclave_id); ++uintptr_t sm_create_server_enclave(uintptr_t enclave_create_args); ++uintptr_t sm_destroy_server_enclave(uintptr_t *regs, uintptr_t enclave_id); ++ ++uintptr_t sm_run_shadow_enclave(uintptr_t *regs, uintptr_t enclave_id, uintptr_t shadow_enclave_run_args, uintptr_t addr, uintptr_t size); ++uintptr_t sm_create_shadow_enclave(uintptr_t enclave_create_args); ++ ++//called by enclave ++uintptr_t sm_enclave_ocall(uintptr_t *regs, uintptr_t ocall_func_id, uintptr_t arg0, uintptr_t arg1); ++uintptr_t sm_exit_enclave(uintptr_t *regs, uintptr_t retval); ++uintptr_t sm_server_enclave_acquire(uintptr_t *regs, uintptr_t server_name); ++uintptr_t sm_get_caller_id(uintptr_t *regs); ++uintptr_t sm_call_enclave(uintptr_t *regs, uintptr_t enclave_id, uintptr_t arg); ++uintptr_t sm_enclave_return(uintptr_t *regs, uintptr_t arg); ++uintptr_t sm_asyn_enclave_call(uintptr_t *regs, uintptr_t enclave_name, uintptr_t arg); ++uintptr_t sm_split_mem_region(uintptr_t *regs, uintptr_t mem_addr, uintptr_t mem_size, uintptr_t split_addr); ++ ++//called when timer irq ++uintptr_t sm_do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc); ++uintptr_t sm_handle_yield(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc); ++ ++uintptr_t sm_schrodinger_init(uintptr_t paddr, uintptr_t size); ++ ++uintptr_t sm_print(uintptr_t paddr, uintptr_t size); ++ ++#endif /* _SM_H */ +diff --git a/include/sm/thread.h b/include/sm/thread.h +new file mode 100644 +index 0000000..9a42d56 +--- /dev/null ++++ b/include/sm/thread.h +@@ -0,0 +1,66 @@ ++#ifndef __THREAD_H__ ++#define __THREAD_H__ ++ ++#include "sbi/sbi_types.h" ++ ++/// \brief define the number of general registers ++#define N_GENERAL_REGISTERS 32 ++ ++struct general_registers_t ++{ ++ uintptr_t slot; ++ uintptr_t ra; ++ uintptr_t sp; ++ uintptr_t gp; ++ uintptr_t tp; ++ uintptr_t t0; ++ uintptr_t t1; ++ 
uintptr_t t2; ++ uintptr_t s0; ++ uintptr_t s1; ++ uintptr_t a0; ++ uintptr_t a1; ++ uintptr_t a2; ++ uintptr_t a3; ++ uintptr_t a4; ++ uintptr_t a5; ++ uintptr_t a6; ++ uintptr_t a7; ++ uintptr_t s2; ++ uintptr_t s3; ++ uintptr_t s4; ++ uintptr_t s5; ++ uintptr_t s6; ++ uintptr_t s7; ++ uintptr_t s8; ++ uintptr_t s9; ++ uintptr_t s10; ++ uintptr_t s11; ++ uintptr_t t3; ++ uintptr_t t4; ++ uintptr_t t5; ++ uintptr_t t6; ++}; ++ ++/* enclave thread state */ ++struct thread_state_t ++{ ++ uintptr_t encl_ptbr; ++ uintptr_t prev_stvec; ++ uintptr_t prev_mie; ++ uintptr_t prev_mideleg; ++ uintptr_t prev_medeleg; ++ uintptr_t prev_mepc; ++ uintptr_t prev_cache_binding; ++ struct general_registers_t prev_state; ++}; ++ ++/* swap previous and current thread states */ ++void swap_prev_state(struct thread_state_t* state, uintptr_t* regs); ++void swap_prev_mepc(struct thread_state_t* state, uintptr_t mepc); ++void swap_prev_stvec(struct thread_state_t* state, uintptr_t stvec); ++void swap_prev_cache_binding(struct thread_state_t* state, uintptr_t cache_binding); ++void swap_prev_mie(struct thread_state_t* state, uintptr_t mie); ++void swap_prev_mideleg(struct thread_state_t* state, uintptr_t mideleg); ++void swap_prev_medeleg(struct thread_state_t* state, uintptr_t medeleg); ++#endif /* thread */ +diff --git a/include/sm/vm.h b/include/sm/vm.h +new file mode 100644 +index 0000000..bc87a20 +--- /dev/null ++++ b/include/sm/vm.h +@@ -0,0 +1,36 @@ ++#ifndef _VM_H ++#define _VM_H ++ ++#include "sbi/riscv_encoding.h" ++#include "sbi/sbi_bitops.h" ++#include "sbi/sbi_types.h" ++ ++#define MEGAPAGE_SIZE ((uintptr_t)(RISCV_PGSIZE << RISCV_PGLEVEL_BITS)) ++#if __riscv_xlen == 64 ++# define SATP_MODE_CHOICE INSERT_FIELD(0, SATP64_MODE, SATP_MODE_SV39) ++# define VA_BITS 39 ++# define GIGAPAGE_SIZE (MEGAPAGE_SIZE << RISCV_PGLEVEL_BITS) ++#else ++# define SATP_MODE_CHOICE INSERT_FIELD(0, SATP32_MODE, SATP_MODE_SV32) ++# define VA_BITS 32 ++#endif ++ ++typedef uintptr_t pte_t; ++extern pte_t* root_page_table; ++ ++static inline void flush_tlb() ++{ ++ asm volatile ("sfence.vma"); ++} ++ ++static inline pte_t pte_create(uintptr_t ppn, int type) ++{ ++ return (ppn << PTE_PPN_SHIFT) | PTE_V | type; ++} ++ ++static inline pte_t ptd_create(uintptr_t ppn) ++{ ++ return pte_create(ppn, PTE_V); ++} ++ ++#endif +diff --git a/lib/sbi/objects.mk b/lib/sbi/objects.mk +index fa808a0..ecf7ba9 100644 +--- a/lib/sbi/objects.mk ++++ b/lib/sbi/objects.mk +@@ -21,6 +21,7 @@ libsbi-objs-y += sbi_ecall_hsm.o + libsbi-objs-y += sbi_ecall_legacy.o + libsbi-objs-y += sbi_ecall_replace.o + libsbi-objs-y += sbi_ecall_vendor.o ++libsbi-objs-y += sbi_ecall_penglai.o + libsbi-objs-y += sbi_emulate_csr.o + libsbi-objs-y += sbi_fifo.o + libsbi-objs-y += sbi_hart.o +@@ -40,3 +41,22 @@ libsbi-objs-y += sbi_tlb.o + libsbi-objs-y += sbi_trap.o + libsbi-objs-y += sbi_unpriv.o + libsbi-objs-y += sbi_expected_trap.o ++libsbi-objs-y += sbi_pmp.o ++libsbi-objs-y += sbi_tvm.o ++libsbi-objs-y += sbi_ipi_destroy_enclave.o ++libsbi-objs-y += sm/enclave_mm.o ++libsbi-objs-y += sm/platform/pt_area/platform.o ++libsbi-objs-y += sm/platform/pt_area/platform_thread.o ++libsbi-objs-y += sm/enclave_vm.o ++libsbi-objs-y += sm/enclave.o ++libsbi-objs-y += sm/pmp.o ++libsbi-objs-y += sm/relay_page.o ++libsbi-objs-y += sm/server_enclave.o ++libsbi-objs-y += sm/sm.o ++libsbi-objs-y += sm/thread.o ++libsbi-objs-y += sm/attest.o ++libsbi-objs-y += sm/gm/big.o ++libsbi-objs-y += sm/gm/ecc.o ++libsbi-objs-y += sm/gm/random.o ++libsbi-objs-y += sm/gm/sm3.o ++libsbi-objs-y 
+= sm/gm/sm2.o +\ No newline at end of file +diff --git a/lib/sbi/sbi_ecall.c b/lib/sbi/sbi_ecall.c +index 64c9933..7ccfe59 100644 +--- a/lib/sbi/sbi_ecall.c ++++ b/lib/sbi/sbi_ecall.c +@@ -12,6 +12,8 @@ + #include + #include + #include ++#include "sm/enclave.h" ++#include "sm/sm.h" + + u16 sbi_ecall_version_major(void) + { +@@ -92,6 +94,64 @@ void sbi_ecall_unregister_extension(struct sbi_ecall_extension *ext) + sbi_list_del_init(&ext->head); + } + ++int enclave_call_trap(struct sbi_trap_regs* regs) ++{ ++ unsigned long retval; ++ if(check_in_enclave_world() < 0){ ++ retval = SBI_ERR_FAILED; ++ regs->mepc += 4; ++ regs->a0 = retval; ++ sbi_printf("M mode: %s check in enclave world is failed \n", __func__); ++ return 0; ++ } ++ ++ uintptr_t n = regs->a7; ++ csr_write(CSR_MEPC, regs->mepc + 4); ++ uintptr_t arg0 = regs->a0, arg1 = regs->a1, arg2 = regs->a2; ++ switch (n) ++ { ++ case SBI_EXIT_ENCLAVE: ++ retval = sm_exit_enclave((uintptr_t*)regs, arg0); ++ break; ++ case SBI_ENCLAVE_OCALL: ++ retval = sm_enclave_ocall((uintptr_t*)regs, arg0, arg1, arg2); ++ break; ++ case SBI_ACQUIRE_SERVER: ++ retval = sm_server_enclave_acquire((uintptr_t*)regs, arg0); ++ break; ++ case SBI_GET_CALLER_ID: ++ retval = sm_get_caller_id((uintptr_t*)regs); ++ break; ++ case SBI_CALL_ENCLAVE: ++ retval = sm_call_enclave((uintptr_t*)regs, arg0, arg1); ++ break; ++ case SBI_ENCLAVE_RETURN: ++ retval = sm_enclave_return((uintptr_t*)regs, arg0); ++ break; ++ case SBI_ASYN_ENCLAVE_CALL: ++ retval = sm_asyn_enclave_call((uintptr_t*)regs, arg0, arg1); ++ break; ++ case SBI_SPLIT_MEM_REGION: ++ retval = sm_split_mem_region((uintptr_t*)regs, arg0, arg1, arg2); ++ break; ++ default: ++ retval = SBI_ERR_FAILED; ++ break; ++ } ++ regs->a0 = retval; ++ if (!cpu_in_enclave(csr_read(CSR_MHARTID))) ++ { ++ if ((retval >= 0UL) && (retval <= SBI_LEGAL_MAX)) ++ { ++ regs->a0 = SBI_OK; ++ regs->a1 = retval; ++ } ++ } ++ regs->mepc = csr_read(CSR_MEPC); ++ regs->mstatus = csr_read(CSR_MSTATUS); ++ return 0; ++} ++ + int sbi_ecall_handler(struct sbi_trap_regs *regs) + { + int ret = 0; +@@ -109,23 +169,32 @@ int sbi_ecall_handler(struct sbi_trap_regs *regs) + args[3] = regs->a3; + args[4] = regs->a4; + args[5] = regs->a5; +- ++ // sbi_printf("SBI ECALL extension_id is %lx func_id is %lx\n", extension_id, func_id); + ext = sbi_ecall_find_extension(extension_id); +- if (ext && ext->handle) { ++ if (extension_id != SBI_EXT_PENGLAI) ++ { ++ if (ext && ext->handle) { ++ ret = ext->handle(extension_id, func_id, ++ args, &out_val, &trap); ++ if (extension_id >= SBI_EXT_0_1_SET_TIMER && ++ extension_id <= SBI_EXT_0_1_SHUTDOWN) ++ is_0_1_spec = 1; ++ } else { ++ ret = SBI_ENOTSUPP; ++ } ++ } ++ else ++ { + ret = ext->handle(extension_id, func_id, +- args, &out_val, &trap); +- if (extension_id >= SBI_EXT_0_1_SET_TIMER && +- extension_id <= SBI_EXT_0_1_SHUTDOWN) +- is_0_1_spec = 1; +- } else { +- ret = SBI_ENOTSUPP; ++ (unsigned long *)regs, &out_val, &trap); + } ++ + +- if (ret == SBI_ETRAP) { ++ if ((ret == SBI_ETRAP) && (extension_id != SBI_EXT_PENGLAI)) { + trap.epc = regs->mepc; + sbi_trap_redirect(regs, &trap); + } else { +- if (ret < SBI_LAST_ERR) { ++ if ((ret < SBI_LAST_ERR) && (extension_id != SBI_EXT_PENGLAI)) { + sbi_printf("%s: Invalid error %d for ext=0x%lx " + "func=0x%lx\n", __func__, ret, + extension_id, func_id); +@@ -140,10 +209,25 @@ int sbi_ecall_handler(struct sbi_trap_regs *regs) + * accordingly for now. Once fatal errors are defined, that + * case should be handled differently. 
+ */ +- regs->mepc += 4; +- regs->a0 = ret; +- if (!is_0_1_spec) +- regs->a1 = out_val; ++ if (extension_id != SBI_EXT_PENGLAI) ++ { ++ regs->mepc += 4; ++ regs->a0 = ret; ++ if (!is_0_1_spec) ++ regs->a1 = out_val; ++ } ++ else ++ { ++ regs->a0 = out_val; ++ if (!cpu_in_enclave(csr_read(CSR_MHARTID))) ++ { ++ if ((out_val >= 0UL) && (out_val <= SBI_LEGAL_MAX)) ++ { ++ regs->a0 = SBI_OK; ++ regs->a1 = out_val; ++ } ++ } ++ } + } + + return 0; +@@ -173,6 +257,9 @@ int sbi_ecall_init(void) + if (ret) + return ret; + ret = sbi_ecall_register_extension(&ecall_vendor); ++ if (ret) ++ return ret; ++ ret = sbi_ecall_register_extension(&ecall_pengali); + if (ret) + return ret; + +diff --git a/lib/sbi/sbi_ecall_penglai.c b/lib/sbi/sbi_ecall_penglai.c +new file mode 100644 +index 0000000..9f97cfa +--- /dev/null ++++ b/lib/sbi/sbi_ecall_penglai.c +@@ -0,0 +1,98 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2020 Western Digital Corporation or its affiliates. ++ * ++ * Authors: ++ * Anup Patel ++ * Atish Patra ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "sm/sm.h" ++ ++ ++static int sbi_ecall_penglai_handler(unsigned long extid, unsigned long funcid, ++ unsigned long *args, unsigned long *out_val, ++ struct sbi_trap_info *out_trap) ++{ ++ uintptr_t arg0 = args[10], arg1 = args[11], arg2 = args[12], arg3 = args[13], retval; ++ csr_write(CSR_MEPC, args[32] + 4); ++ switch (funcid) { ++ case SBI_SET_PTE: ++ retval = sm_set_pte(arg0, (uintptr_t*)arg1, arg2, arg3); ++ break; ++ case SBI_SM_INIT: ++ retval = sm_sm_init(arg0, arg1, arg2, arg3); ++ break; ++ case SBI_SM_PT_AREA_SEPARATION: ++ retval = sm_pt_area_separation(arg0, arg1); ++ break; ++ case SBI_SM_SPLIT_HUGE_PAGE: ++ retval = sm_split_huge_page(arg0, arg1, arg2); ++ break; ++ case SBI_SM_MAP_PTE: ++ retval = sm_map_pte((uintptr_t *)arg0, (uintptr_t *)arg1); ++ break; ++ case SBI_MEMORY_EXTEND: ++ retval = sm_mm_extend(arg0, arg1); ++ break; ++ case SBI_CREATE_ENCLAVE: ++ retval = sm_create_enclave(arg0); ++ break; ++ case SBI_RUN_ENCLAVE: ++ retval = sm_run_enclave(args, arg0, arg1, arg2); ++ break; ++ case SBI_STOP_ENCLAVE: ++ retval = sm_stop_enclave(args, arg0); ++ break; ++ case SBI_RESUME_ENCLAVE: ++ retval = sm_resume_enclave(args, arg0); ++ break; ++ case SBI_DESTROY_ENCLAVE: ++ retval = sm_destroy_enclave(args, arg0); ++ break; ++ case SBI_ATTEST_ENCLAVE: ++ retval = sm_attest_enclave(arg0, arg1, arg2); ++ break; ++ case SBI_CREATE_SERVER_ENCLAVE: ++ retval = sm_create_server_enclave(arg0); ++ break; ++ case SBI_CREATE_SHADOW_ENCLAVE: ++ retval = sm_create_shadow_enclave(arg0); ++ break; ++ case SBI_RUN_SHADOW_ENCLAVE: ++ retval = sm_run_shadow_enclave(args, arg0, arg1, arg2, arg3); ++ break; ++ case SBI_ATTEST_SHADOW_ENCLAVE: ++ retval = sm_attest_shadow_enclave(arg0, arg1, arg2); ++ break; ++ case SBI_DESTROY_SERVER_ENCLAVE: ++ retval = sm_destroy_server_enclave(args, arg0); ++ break; ++ case SBI_SCHRODINGER_INIT: ++ retval = sm_schrodinger_init(arg0, arg1); ++ break; ++ case 84: ++ retval = sm_print(arg0, arg1); ++ break; ++ ++ default: ++ retval = SBI_ENOTSUPP; ++ } ++ args[32] = csr_read(CSR_MEPC); ++ args[33] = csr_read(CSR_MSTATUS); ++ *out_val = retval; ++ return retval; ++} ++ ++struct sbi_ecall_extension ecall_pengali = { ++ .extid_start = SBI_EXT_PENGLAI, ++ .extid_end = SBI_EXT_PENGLAI, ++ .handle = sbi_ecall_penglai_handler, ++}; +diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c +index fa20bd2..af93fd6 100644 +--- a/lib/sbi/sbi_hart.c ++++ 
b/lib/sbi/sbi_hart.c
+@@ -19,6 +19,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ extern void __sbi_expected_trap(void);
+ extern void __sbi_expected_trap_hext(void);
+@@ -440,7 +441,9 @@ void __attribute__((noreturn)) sbi_hart_hang(void)
+ 	wfi();
+ 	__builtin_unreachable();
+ }
+-
++/*
++ * Switch from M mode into S mode
++ */
+ void __attribute__((noreturn))
+ sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
+ 		     unsigned long next_addr, unsigned long next_mode,
+@@ -500,7 +503,9 @@ sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
+ 		csr_write(CSR_USCRATCH, 0);
+ 		csr_write(CSR_UIE, 0);
+ 	}
+-
++	//TODO:
++	//Set the PMP registers to protect the monitor itself
++	sm_init();
+ 	register unsigned long a0 asm("a0") = arg0;
+ 	register unsigned long a1 asm("a1") = arg1;
+ 	__asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
+diff --git a/lib/sbi/sbi_illegal_insn.c b/lib/sbi/sbi_illegal_insn.c
+index 0e5523f..2a8f810 100644
+--- a/lib/sbi/sbi_illegal_insn.c
++++ b/lib/sbi/sbi_illegal_insn.c
+@@ -15,6 +15,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ typedef int (*illegal_insn_func)(ulong insn, struct sbi_trap_regs *regs);
+ 
+@@ -114,9 +115,67 @@ static illegal_insn_func illegal_insn_table[32] = {
+ 	truly_illegal_insn /* 31 */
+ };
+ 
++
++extern uintptr_t pt_area_base;
++extern uintptr_t pt_area_size;
++extern uintptr_t mbitmap_base;
++extern uintptr_t mbitmap_size;
++extern uintptr_t pgd_order;
++extern uintptr_t pmd_order;
++
+ int sbi_illegal_insn_handler(ulong insn, struct sbi_trap_regs *regs)
+ {
+ 	struct sbi_trap_info uptrap;
++	struct sbi_trap_info uptrap2;
++	ulong inst;
++	if (insn == 0)
++		inst = sbi_get_insn(regs->mepc, &uptrap2);
++	else
++		inst = insn;
++
++	// Emulate the instructions trapped by TVM
++	unsigned long mepc = regs->mepc;
++	/* Case 1: a write to satp trapped by TVM */
++	if ((((inst>>20) & 0xfff) == 0x180)
++		&& ((inst & 0x7f) == 0b1110011)
++		&& (((inst>>12) & 0x3) == 0b001))
++	{
++		// printm("here0 %d\r\n",((inst>>15) & 0x1f));
++		signed long val = *((unsigned long *)regs + ((inst>>15) & 0x1f));
++		unsigned long pa = (val & 0x3fffff)<<12;
++		//only allow the write when the new root page table lies in the PT area
++		if((pt_area_base < pa) && ((pt_area_base + pt_area_size) > pa))
++		{
++			asm volatile ("csrrw x0, sptbr, %0":: "rK"(val));
++			csr_write(CSR_MEPC, mepc + 4);
++			regs->mepc = csr_read(CSR_MEPC);
++			return 0;
++		}
++	}
++	/* Case 2: a read of satp trapped by TVM */
++	if((((inst>>20) & 0xfff) == 0x180)
++		&& ((inst & 0x7f) == 0b1110011)
++		&& (((inst>>12) & 0x3) == 0b010))
++	{
++		// printm("here3 %d\r\n",((inst>>7) & 0x1f));
++		int idx = ((inst>>7) & 0x1f);
++		unsigned long __tmp;
++		asm volatile ("csrrs %0, sptbr, x0":"=r"(__tmp));
++		csr_write(CSR_MEPC, mepc + 4);
++		*((unsigned long *)regs + idx) = __tmp;
++		regs->mepc = csr_read(CSR_MEPC);
++		return 0;
++	}
++	/* Case 3: sfence.vma trapped by TVM */
++	if((((inst>>25) & 0x7f) == 0b0001001)
++		&& ((inst & 0x7fff) == 0b1110011))
++	{
++		// printm("here5 %d\r\n",((inst>>7) & 0x1f));
++		asm volatile ("sfence.vma");
++		csr_write(CSR_MEPC, mepc + 4);
++		regs->mepc = csr_read(CSR_MEPC);
++		return 0;
++	}
++	// End of the TVM trap handler
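++	/*
++	 * Illustrative sketch (not compiled): the three cases above match the
++	 * RISC-V SYSTEM opcode (0b1110011) by hand. Under that assumption,
++	 * the decoded fields are:
++	 */
++#if 0
++	unsigned long csr_num = (inst >> 20) & 0xfff; /* 0x180 = satp/sptbr */
++	unsigned long funct3  = (inst >> 12) & 0x7;   /* 001 = csrrw, 010 = csrrs */
++	unsigned long rs1_idx = (inst >> 15) & 0x1f;  /* GPR holding the new satp value */
++	unsigned long rd_idx  = (inst >> 7)  & 0x1f;  /* GPR receiving the old satp value */
++#endif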
+ 
+ 	if (unlikely((insn & 3) != 3)) {
+ 		if (insn == 0) {
+diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c
+index a7fb848..3dab863 100644
+--- a/lib/sbi/sbi_init.c
++++ b/lib/sbi/sbi_init.c
+@@ -21,6 +21,9 @@
+ #include
+ #include
+ #include
++#include
++#include
++#include
+ #include
+ 
+ #define BANNER \
+@@ -194,6 +197,18 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
+ 	rc = sbi_tlb_init(scratch, TRUE);
+ 	if (rc)
+ 		sbi_hart_hang();
++
++	rc = sbi_pmp_init(scratch, TRUE);
++	if (rc)
++		sbi_hart_hang();
++
++	rc = sbi_tvm_init(scratch, TRUE);
++	if (rc)
++		sbi_hart_hang();
++
++	rc = sbi_ipi_destroy_enclave_init(scratch, TRUE);
++	if (rc)
++		sbi_hart_hang();
+ 
+ 	rc = sbi_timer_init(scratch, TRUE);
+ 	if (rc)
+@@ -254,6 +269,18 @@ static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
+ 	rc = sbi_tlb_init(scratch, FALSE);
+ 	if (rc)
+ 		sbi_hart_hang();
++
++	rc = sbi_pmp_init(scratch, FALSE);
++	if (rc)
++		sbi_hart_hang();
++
++	rc = sbi_tvm_init(scratch, FALSE);
++	if (rc)
++		sbi_hart_hang();
++
++	rc = sbi_ipi_destroy_enclave_init(scratch, FALSE);
++	if (rc)
++		sbi_hart_hang();
+ 
+ 	rc = sbi_timer_init(scratch, FALSE);
+ 	if (rc)
+diff --git a/lib/sbi/sbi_ipi.c b/lib/sbi/sbi_ipi.c
+index a27dea0..c9acc6d 100644
+--- a/lib/sbi/sbi_ipi.c
++++ b/lib/sbi/sbi_ipi.c
+@@ -18,6 +18,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ struct sbi_ipi_data {
+ 	unsigned long ipi_type;
+@@ -199,6 +200,35 @@ skip:
+ 	};
+ }
+ 
++void sbi_ipi_process_in_enclave(struct sbi_trap_regs* regs)
++{
++	unsigned long ipi_type;
++	unsigned int ipi_event;
++	const struct sbi_ipi_event_ops *ipi_ops;
++	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
++	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
++	struct sbi_ipi_data *ipi_data =
++		sbi_scratch_offset_ptr(scratch, ipi_data_off);
++
++	u32 hartid = current_hartid();
++	sbi_platform_ipi_clear(plat, hartid);
++
++	ipi_type = atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0);
++	ipi_event = 0;
++	while (ipi_type) {
++		if (!(ipi_type & 1UL))
++			goto skip;
++
++		ipi_ops = ipi_ops_array[ipi_event];
++		if (ipi_ops && ipi_ops->e_process)
++			ipi_ops->e_process(scratch, regs);
++
++skip:
++		ipi_type = ipi_type >> 1;
++		ipi_event++;
++	};
++}
++
+ int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
+ {
+ 	int ret;
+diff --git a/lib/sbi/sbi_ipi_destroy_enclave.c b/lib/sbi/sbi_ipi_destroy_enclave.c
+new file mode 100644
+index 0000000..8a748b3
+--- /dev/null
++++ b/lib/sbi/sbi_ipi_destroy_enclave.c
+@@ -0,0 +1,142 @@
++#include "sbi/sbi_ipi_destroy_enclave.h"
++#include "sm/ipi.h"
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++static unsigned long ipi_destroy_enclave_data_offset;
++static unsigned long ipi_destroy_enclave_sync_offset;
++
++#define SBI_IPI_DESTROY_ENCLAVE_DATA_INIT(__p, __host_ptbr, __enclave_id, __src) \
++do { \
++	(__p)->host_ptbr = (__host_ptbr); \
++	(__p)->enclave_id = (__enclave_id); \
++	SBI_HARTMASK_INIT_EXCEPT(&(__p)->smask, (__src)); \
++} while (0)
++
++void set_ipi_destroy_enclave_and_sync(u32 remote_hart, ulong host_ptbr, int enclave_id)
++{
++	struct ipi_destroy_enclave_data_t ipi_destroy_enclave_data;
++	u32 source_hart = current_hartid();
++
++	//sync all other harts
++	SBI_IPI_DESTROY_ENCLAVE_DATA_INIT(&ipi_destroy_enclave_data, host_ptbr, enclave_id, source_hart);
++	sbi_send_ipi_destroy_enclave((0x1<<remote_hart), 0, &ipi_destroy_enclave_data);
++}
++
++static void sbi_eprocess_ipi_destroy_enclave(struct sbi_scratch *scratch, struct sbi_trap_regs *regs)
++{
++	struct ipi_destroy_enclave_data_t *data =
++		sbi_scratch_offset_ptr(scratch, ipi_destroy_enclave_data_offset);
++	struct sbi_scratch *rscratch = NULL;
++	u32 rhartid;
++	unsigned long *ipi_destroy_enclave_sync = NULL;
++
++	//destroy the enclave running on this hart
++	ipi_destroy_enclave((uintptr_t *)regs, data->host_ptbr, data->enclave_id);
++	//sync
++	sbi_hartmask_for_each_hart(rhartid, &data->smask) {
++		rscratch = sbi_hartid_to_scratch(rhartid);
++		if (!rscratch)
++			continue;
++		ipi_destroy_enclave_sync = sbi_scratch_offset_ptr(rscratch, ipi_destroy_enclave_sync_offset);
++		while (atomic_raw_xchg_ulong(ipi_destroy_enclave_sync, 1));
++	}
++}
++
++static void sbi_process_ipi_destroy_enclave(struct sbi_scratch *scratch)
++{
++	sbi_bug("M mode: sbi_process_ipi_destroy_enclave error\n");
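++	/*
++	 * This branch should be unreachable: a destroy-enclave IPI is handled
++	 * by .e_process (sbi_eprocess_ipi_destroy_enclave above) on harts
++	 * running inside an enclave. A minimal sender-side usage sketch,
++	 * assuming the caller knows the victim hart and enclave id:
++	 */
++#if 0
++	set_ipi_destroy_enclave_and_sync(remote_hart, csr_read(CSR_SATP), eid);
++	/* returns once every targeted hart has raised its sync word */
++#endif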
++ return; ++} ++ ++static int sbi_update_ipi_destroy_enclave(struct sbi_scratch *scratch, ++ struct sbi_scratch *remote_scratch, ++ u32 remote_hartid, void *data) ++{ ++ struct ipi_destroy_enclave_data_t *ipi_destroy_enclave_data = NULL; ++ u32 curr_hartid = current_hartid(); ++ ++ if (remote_hartid == curr_hartid) { ++ sbi_bug("M mode: sbi_update_ipi_destroy_enclave: remote_hartid is current hartid\n"); ++ return -1; ++ } ++ ++ ipi_destroy_enclave_data = sbi_scratch_offset_ptr(remote_scratch, ipi_destroy_enclave_data_offset); ++ //update the remote hart ipi_destroy_enclave data ++ sbi_memcpy(ipi_destroy_enclave_data, data, sizeof(struct ipi_destroy_enclave_data_t)); ++ ++ return 0; ++} ++ ++static void sbi_ipi_destroy_enclave_sync(struct sbi_scratch *scratch) ++{ ++ unsigned long *ipi_destroy_enclave_sync = ++ sbi_scratch_offset_ptr(scratch, ipi_destroy_enclave_sync_offset); ++ //wait the remote hart process the ipi_destroy_enclave signal ++ while (!atomic_raw_xchg_ulong(ipi_destroy_enclave_sync, 0)); ++ return; ++} ++ ++static struct sbi_ipi_event_ops ipi_destroy_enclave_ops = { ++ .name = "IPI_DESTROY_ENCLAVE", ++ .update = sbi_update_ipi_destroy_enclave, ++ .sync = sbi_ipi_destroy_enclave_sync, ++ .process = sbi_process_ipi_destroy_enclave, ++ .e_process = sbi_eprocess_ipi_destroy_enclave, ++}; ++ ++static u32 ipi_destroy_enclave_event = SBI_IPI_EVENT_MAX; ++ ++int sbi_send_ipi_destroy_enclave(ulong hmask, ulong hbase, struct ipi_destroy_enclave_data_t* ipi_destroy_enclave_data) ++{ ++ return sbi_ipi_send_many(hmask, hbase, ipi_destroy_enclave_event, ipi_destroy_enclave_data); ++} ++ ++int sbi_ipi_destroy_enclave_init(struct sbi_scratch *scratch, bool cold_boot) ++{ ++ int ret; ++ struct ipi_destroy_enclave_data_t *ipi_destroy_enclave_data; ++ unsigned long *ipi_destroy_enclave_sync; ++ if (cold_boot) { ++ // Define the ipi_destroy_enclave data offset in the scratch ++ ipi_destroy_enclave_data_offset = sbi_scratch_alloc_offset(sizeof(*ipi_destroy_enclave_data), ++ "IPI_DESTROY_ENCLAVE_DATA"); ++ if (!ipi_destroy_enclave_data_offset) ++ return SBI_ENOMEM; ++ ++ ipi_destroy_enclave_sync_offset = sbi_scratch_alloc_offset(sizeof(*ipi_destroy_enclave_sync), ++ "IPI_DESTROY_ENCLAVE_SYNC"); ++ if (!ipi_destroy_enclave_sync_offset) ++ return SBI_ENOMEM; ++ ++ ipi_destroy_enclave_data = sbi_scratch_offset_ptr(scratch, ++ ipi_destroy_enclave_data_offset); ++ ++ ipi_destroy_enclave_sync = sbi_scratch_offset_ptr(scratch, ++ ipi_destroy_enclave_sync_offset); ++ ++ *ipi_destroy_enclave_sync = 0; ++ ++ ret = sbi_ipi_event_create(&ipi_destroy_enclave_ops); ++ if (ret < 0) { ++ sbi_bug("M mode: sbi_ipi_destroy_enclave_init: init5\n"); ++ sbi_scratch_free_offset(ipi_destroy_enclave_data_offset); ++ return ret; ++ } ++ ipi_destroy_enclave_event = ret; ++ } else { ++ } ++ return 0; ++} +\ No newline at end of file +diff --git a/lib/sbi/sbi_pmp.c b/lib/sbi/sbi_pmp.c +new file mode 100644 +index 0000000..e121d0e +--- /dev/null ++++ b/lib/sbi/sbi_pmp.c +@@ -0,0 +1,123 @@ ++#include "sbi/sbi_pmp.h" ++#include "sm/ipi.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static unsigned long pmp_data_offset; ++static unsigned long pmp_sync_offset; ++ ++static void sbi_process_pmp(struct sbi_scratch *scratch) ++{ ++ struct pmp_data_t *data = sbi_scratch_offset_ptr(scratch, pmp_data_offset); ++ struct pmp_config_t pmp_config = *(struct pmp_config_t*)(data); ++ struct sbi_scratch *rscratch = NULL; ++ u32 
rhartid;
++	unsigned long *pmp_sync = NULL;
++	int pmp_idx = data->pmp_idx_arg;
++	set_pmp(pmp_idx, pmp_config);
++
++	//sync
++	sbi_hartmask_for_each_hart(rhartid, &data->smask) {
++		rscratch = sbi_hartid_to_scratch(rhartid);
++		if (!rscratch)
++			continue;
++		pmp_sync = sbi_scratch_offset_ptr(rscratch, pmp_sync_offset);
++		while (atomic_raw_xchg_ulong(pmp_sync, 1));
++	}
++}
++
++static int sbi_update_pmp(struct sbi_scratch *scratch,
++			  struct sbi_scratch *remote_scratch,
++			  u32 remote_hartid, void *data)
++{
++	struct pmp_data_t *pmp_data = NULL;
++	int pmp_idx = 0;
++	u32 curr_hartid = current_hartid();
++
++	if (remote_hartid == curr_hartid) {
++		//update the pmp register locally; read the index from the
++		//sender's data, since pmp_data has not been set up here
++		struct pmp_config_t pmp_config = *(struct pmp_config_t*)(data);
++		pmp_idx = ((struct pmp_data_t*)data)->pmp_idx_arg;
++		set_pmp(pmp_idx, pmp_config);
++		return -1;
++	}
++
++	pmp_data = sbi_scratch_offset_ptr(remote_scratch, pmp_data_offset);
++	//update the remote hart pmp data
++	sbi_memcpy(pmp_data, data, sizeof(struct pmp_data_t));
++
++	return 0;
++}
++
++static void sbi_pmp_sync(struct sbi_scratch *scratch)
++{
++	unsigned long *pmp_sync =
++		sbi_scratch_offset_ptr(scratch, pmp_sync_offset);
++	//wait until the remote harts have processed the pmp signal
++	while (!atomic_raw_xchg_ulong(pmp_sync, 0));
++	return;
++}
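++/*
++ * The callbacks below form a blocking broadcast: sbi_update_pmp copies the
++ * sender's pmp_data_t into each remote hart's scratch area, sbi_process_pmp
++ * programs the PMP on the receiving hart and then raises the per-hart sync
++ * words, and sbi_pmp_sync spins until this hart's word is raised. A usage
++ * sketch, assuming the caller filled pmp_data (config, index, hart mask):
++ */
++#if 0
++	sbi_send_pmp(0xFFFFFFFF, 0, &pmp_data); /* returns once all harts applied it */
++#endif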
++static struct sbi_ipi_event_ops pmp_ops = {
++	.name = "IPI_PMP",
++	.update = sbi_update_pmp,
++	.sync = sbi_pmp_sync,
++	.process = sbi_process_pmp,
++};
++
++static u32 pmp_event = SBI_IPI_EVENT_MAX;
++
++int sbi_send_pmp(ulong hmask, ulong hbase, struct pmp_data_t* pmp_data)
++{
++	return sbi_ipi_send_many(hmask, hbase, pmp_event, pmp_data);
++}
++
++int sbi_pmp_init(struct sbi_scratch *scratch, bool cold_boot)
++{
++	int ret;
++	struct pmp_data_t *pmpdata;
++	unsigned long *pmp_sync;
++
++	if (cold_boot) {
++		//Define the pmp data offset in the scratch
++		pmp_data_offset = sbi_scratch_alloc_offset(sizeof(*pmpdata),
++							   "PMP_DATA");
++		if (!pmp_data_offset)
++			return SBI_ENOMEM;
++
++		pmp_sync_offset = sbi_scratch_alloc_offset(sizeof(*pmp_sync),
++							   "PMP_SYNC");
++		if (!pmp_sync_offset)
++			return SBI_ENOMEM;
++
++		pmpdata = sbi_scratch_offset_ptr(scratch,
++						 pmp_data_offset);
++
++		pmp_sync = sbi_scratch_offset_ptr(scratch,
++						  pmp_sync_offset);
++
++		*pmp_sync = 0;
++
++		ret = sbi_ipi_event_create(&pmp_ops);
++		if (ret < 0) {
++			sbi_scratch_free_offset(pmp_data_offset);
++			return ret;
++		}
++		pmp_event = ret;
++	}
++
++	return 0;
++}
+\ No newline at end of file
+diff --git a/lib/sbi/sbi_trap.c b/lib/sbi/sbi_trap.c
+index 930119d..838e537 100644
+--- a/lib/sbi/sbi_trap.c
++++ b/lib/sbi/sbi_trap.c
+@@ -18,8 +18,13 @@
+ #include
+ #include
+ #include
++#include
++#include
++#define read_csr(reg) ({ unsigned long __tmp; \
++  asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
++  __tmp; })
+ 
+-static void __noreturn sbi_trap_error(const char *msg, int rc,
++static void sbi_trap_error(const char *msg, int rc,
+ 			   ulong mcause, ulong mtval, ulong mtval2,
+ 			   ulong mtinst, struct sbi_trap_regs *regs)
+ {
+@@ -68,7 +73,15 @@ static void __noreturn sbi_trap_error(const char *msg, int rc,
+ 	sbi_printf("%s: hart%d: %s=0x%" PRILX "\n", __func__, hartid, "t6",
+ 		   regs->t6);
+ 
++	if(check_in_enclave_world() == 0)
++	{
++		destroy_enclave((uintptr_t *)regs, get_curr_enclave_id());
++		regs->mepc = csr_read(CSR_MEPC);
++		regs->mstatus = csr_read(CSR_MSTATUS);
++		return;
++	}
+ 	sbi_hart_hang();
++	return;
+ }
+ 
+ /**
+@@ -82,6 +95,10 @@ static void __noreturn sbi_trap_error(const char *msg, int rc,
+ int sbi_trap_redirect(struct sbi_trap_regs *regs,
+ 		      struct sbi_trap_info *trap)
+ {
++	//report the trap and fail, so that sbi_trap_error handles it
++	sbi_printf("SBI: sbi_trap_redirect cause %lx\n", trap->cause);
++	return 1;
++
++
+ 	ulong hstatus, vsstatus, prev_mode;
+ #if __riscv_xlen == 32
+ 	bool prev_virt = (regs->mstatusH & MSTATUSH_MPV) ? TRUE : FALSE;
+@@ -193,6 +210,18 @@ int sbi_trap_redirect(struct sbi_trap_regs *regs,
+ 	return 0;
+ }
+ 
++void handle_timer_irq(struct sbi_trap_regs *regs, uintptr_t mcause, uintptr_t mepc)
++{
++
++	if(check_in_enclave_world() < 0)
++	{
++		csr_read_clear(CSR_MIE, MIP_MTIP);
++		csr_read_set(CSR_MIP, MIP_STIP);
++		return;
++	}
++	sm_do_timer_irq((uintptr_t *)regs, mcause, mepc);
++}
++
+ /**
+  * Handle trap/interrupt
+  *
+@@ -214,9 +243,10 @@ void sbi_trap_handler(struct sbi_trap_regs *regs)
+ 	int rc = SBI_ENOTSUPP;
+ 	const char *msg = "trap handler failed";
+ 	ulong mcause = csr_read(CSR_MCAUSE);
++	ulong mepc = csr_read(CSR_MEPC);
+ 	ulong mtval = csr_read(CSR_MTVAL), mtval2 = 0, mtinst = 0;
+ 	struct sbi_trap_info trap;
+-
++
+ 	if (misa_extension('H')) {
+ 		mtval2 = csr_read(CSR_MTVAL2);
+ 		mtinst = csr_read(CSR_MTINST);
+@@ -226,10 +256,27 @@
+ 		mcause &= ~(1UL << (__riscv_xlen - 1));
+ 		switch (mcause) {
+ 		case IRQ_M_TIMER:
+-			sbi_timer_process();
++			if (check_in_enclave_world() == 0)
++			{
++				handle_timer_irq(regs, mcause, mepc);
++				regs->mepc = csr_read(CSR_MEPC);
++				regs->mstatus = csr_read(CSR_MSTATUS);
++			}
++			else
++				sbi_timer_process();
+ 			break;
+ 		case IRQ_M_SOFT:
+-			sbi_ipi_process();
++			if(check_in_enclave_world() < 0)
++			{
++				sbi_ipi_process();
++			}
++			else
++			{
++				//TODO: only the IPI for destroying the enclave is considered here
++				sbi_ipi_process_in_enclave(regs);
++				regs->mepc = csr_read(CSR_MEPC);
++				regs->mstatus = csr_read(CSR_MSTATUS);
++			}
+ 			break;
+ 		default:
+ 			msg = "unhandled external interrupt";
+@@ -251,10 +298,17 @@
+ 		rc = sbi_misaligned_store_handler(mtval, mtval2, mtinst, regs);
+ 		msg = "misaligned store handler failed";
+ 		break;
++	case CAUSE_USER_ECALL:
++		rc = enclave_call_trap(regs);
++		msg = "enclave call trap failed";
++		break;
+ 	case CAUSE_SUPERVISOR_ECALL:
+ 	case CAUSE_HYPERVISOR_ECALL:
+ 		rc = sbi_ecall_handler(regs);
+ 		msg = "ecall handler failed";
+ 		break;
+ 	default:
+ 		/* If the trap came from S or U mode, redirect it there */
+@@ -266,7 +320,7 @@
+ 		rc = sbi_trap_redirect(regs, &trap);
+ 		break;
+ 	};
+-
++
+ trap_error:
+ 	if (rc)
+ 		sbi_trap_error(msg, rc, mcause, mtval, mtval2, mtinst, regs);
+diff --git a/lib/sbi/sbi_tvm.c b/lib/sbi/sbi_tvm.c
+new file mode 100644
+index 0000000..41ef614
+--- /dev/null
++++ b/lib/sbi/sbi_tvm.c
+@@ -0,0 +1,141 @@
++#include "sbi/sbi_tvm.h"
++#include "sm/ipi.h"
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++static unsigned long tvm_data_offset;
++static unsigned long tvm_sync_offset;
++
++#define SBI_TVM_DATA_INIT(__p, __src) \
++do { \
++	SBI_HARTMASK_INIT_EXCEPT(&(__p)->smask, (__src)); \
++} while (0)
++
++void set_tvm_and_sync()
++{
++	struct tvm_data_t tvm_data;
++	u32 source_hart = current_hartid();
++
++	//sync all other harts
++	SBI_TVM_DATA_INIT(&tvm_data, source_hart);
++	sbi_send_tvm(0xFFFFFFFF&(~(1<<source_hart)), 0, &tvm_data);
++}
++
++static void sbi_process_tvm(struct sbi_scratch *scratch)
++{
++	struct tvm_data_t *data = sbi_scratch_offset_ptr(scratch, tvm_data_offset);
++	struct sbi_scratch *rscratch = NULL;
++	u32 rhartid;
++	unsigned long *tvm_sync = NULL;
++
++	/* Enable TVM on this hart */
++	uintptr_t mstatus = csr_read(CSR_MSTATUS);
++	mstatus = INSERT_FIELD(mstatus, MSTATUS_TVM, 1);
++	csr_write(CSR_MSTATUS, mstatus);
++
++	//sync
++	sbi_hartmask_for_each_hart(rhartid, &data->smask) {
++		rscratch = sbi_hartid_to_scratch(rhartid);
++		if (!rscratch)
++			continue;
++		tvm_sync =
sbi_scratch_offset_ptr(rscratch, tvm_sync_offset); ++ while (atomic_raw_xchg_ulong(tvm_sync, 1)); ++ } ++} ++ ++static int sbi_update_tvm(struct sbi_scratch *scratch, ++ struct sbi_scratch *remote_scratch, ++ u32 remote_hartid, void *data) ++{ ++ struct tvm_data_t *tvm_data = NULL; ++ u32 curr_hartid = current_hartid(); ++ ++ if (remote_hartid == curr_hartid) { ++ // update the tvm register locally ++ uintptr_t mstatus = csr_read(CSR_MSTATUS); ++ /* Enable TVM here */ ++ mstatus = INSERT_FIELD(mstatus, MSTATUS_TVM, 1); ++ csr_write(CSR_MSTATUS, mstatus); ++ return -1; ++ } ++ ++ tvm_data = sbi_scratch_offset_ptr(remote_scratch, tvm_data_offset); ++ //update the remote hart tvm data ++ sbi_memcpy(tvm_data, data, sizeof(struct tvm_data_t)); ++ ++ return 0; ++} ++ ++static void sbi_tvm_sync(struct sbi_scratch *scratch) ++{ ++ unsigned long *tvm_sync = ++ sbi_scratch_offset_ptr(scratch, tvm_sync_offset); ++ //wait the remote hart process the tvm signal ++ while (!atomic_raw_xchg_ulong(tvm_sync, 0)); ++ return; ++} ++ ++static struct sbi_ipi_event_ops tvm_ops = { ++ .name = "IPI_TVM", ++ .update = sbi_update_tvm, ++ .sync = sbi_tvm_sync, ++ .process = sbi_process_tvm, ++}; ++ ++static u32 tvm_event = SBI_IPI_EVENT_MAX; ++ ++int sbi_send_tvm(ulong hmask, ulong hbase, struct tvm_data_t* tvm_data) ++{ ++ return sbi_ipi_send_many(hmask, hbase, tvm_event, tvm_data); ++} ++ ++int sbi_tvm_init(struct sbi_scratch *scratch, bool cold_boot) ++{ ++ int ret; ++ struct tvm_data_t *tvmdata; ++ unsigned long *tvm_sync; ++ ++ if (cold_boot) { ++ //Define the tvm data offset in the scratch ++ tvm_data_offset = sbi_scratch_alloc_offset(sizeof(*tvmdata), ++ "TVM_DATA"); ++ if (!tvm_data_offset) ++ return SBI_ENOMEM; ++ ++ tvm_sync_offset = sbi_scratch_alloc_offset(sizeof(*tvm_sync), ++ "TVM_SYNC"); ++ if (!tvm_sync_offset) ++ return SBI_ENOMEM; ++ ++ tvmdata = sbi_scratch_offset_ptr(scratch, ++ tvm_data_offset); ++ ++ tvm_sync = sbi_scratch_offset_ptr(scratch, ++ tvm_sync_offset); ++ ++ *tvm_sync = 0; ++ ++ ret = sbi_ipi_event_create(&tvm_ops); ++ if (ret < 0) { ++ sbi_scratch_free_offset(tvm_data_offset); ++ return ret; ++ } ++ tvm_event = ret; ++ } else { ++ } ++ ++ return 0; ++} +\ No newline at end of file +diff --git a/lib/sbi/sm/.gitignore b/lib/sbi/sm/.gitignore +new file mode 100644 +index 0000000..751553b +--- /dev/null ++++ b/lib/sbi/sm/.gitignore +@@ -0,0 +1 @@ ++*.bak +diff --git a/lib/sbi/sm/attest.c b/lib/sbi/sm/attest.c +new file mode 100644 +index 0000000..6dbe703 +--- /dev/null ++++ b/lib/sbi/sm/attest.c +@@ -0,0 +1,124 @@ ++#include "sm/attest.h" ++#include "sm/gm/sm3.h" ++#include "sm/gm/sm2.h" ++#include "sbi/riscv_encoding.h" ++ ++static int hash_enclave_mem(struct sm3_context *hash_ctx, pte_t* ptes, int level, uintptr_t va, int hash_va) ++{ ++ uintptr_t pte_per_page = RISCV_PGSIZE/sizeof(pte_t); ++ pte_t *pte; ++ uintptr_t i = 0; ++ int hash_curr_va = hash_va; ++ ++ //should never happen ++ if(level <= 0) ++ return 1; ++ ++ for(pte = ptes, i = 0; i < pte_per_page; pte += 1, i += 1) ++ { ++ if(!(*pte & PTE_V)) ++ { ++ hash_curr_va = 1; ++ continue; ++ } ++ ++ uintptr_t curr_va = 0; ++ if(level == ((VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS)) ++ curr_va = (uintptr_t)(-1UL << VA_BITS) + (i << (VA_BITS - RISCV_PGLEVEL_BITS)); ++ else ++ curr_va = va + (i << ((level-1) * RISCV_PGLEVEL_BITS + RISCV_PGSHIFT)); ++ uintptr_t pa = (*pte >> PTE_PPN_SHIFT) << RISCV_PGSHIFT; ++ ++ //found leaf pte ++ if((*pte & PTE_R) || (*pte & PTE_X)) ++ { ++ if(hash_curr_va) ++ { ++ sm3_update(hash_ctx, (unsigned 
char*)&curr_va, sizeof(uintptr_t)); ++ //update hash with page attribution ++ sm3_update(hash_ctx, (unsigned char*)pte+7, 1); ++ hash_curr_va = 0; ++ } ++ ++ //4K page ++ if(level == 1) ++ { ++ sm3_update(hash_ctx, (void*)pa, 1 << RISCV_PGSHIFT); ++ } ++ //2M page ++ else if(level == 2) ++ { ++ sm3_update(hash_ctx, (void*)pa, 1 << (RISCV_PGSHIFT + RISCV_PGLEVEL_BITS)); ++ } ++ } ++ else ++ { ++ hash_curr_va = hash_enclave_mem(hash_ctx, (pte_t*)pa, level - 1, curr_va, hash_curr_va); ++ } ++ } ++ ++ return hash_curr_va; ++} ++ ++void hash_enclave(struct enclave_t *enclave, void* hash, uintptr_t nonce_arg) ++{ ++ struct sm3_context hash_ctx; ++ uintptr_t nonce = nonce_arg; ++ ++ sm3_init(&hash_ctx); ++ ++ sm3_update(&hash_ctx, (unsigned char*)(&(enclave->entry_point)), sizeof(unsigned long)); ++ ++ hash_enclave_mem(&hash_ctx, (pte_t*)(enclave->thread_context.encl_ptbr << RISCV_PGSHIFT), ++ (VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS, 0, 1); ++ ++ sm3_update(&hash_ctx, (unsigned char*)(&nonce), sizeof(uintptr_t)); ++ ++ sm3_final(&hash_ctx, hash); ++} ++ ++void hash_shadow_enclave(struct shadow_enclave_t *enclave, void* hash, uintptr_t nonce_arg) ++{ ++ struct sm3_context hash_ctx; ++ uintptr_t nonce = nonce_arg; ++ ++ sm3_init(&hash_ctx); ++ ++ sm3_update(&hash_ctx, (unsigned char*)(&(enclave->entry_point)), sizeof(unsigned long)); ++ ++ hash_enclave_mem(&hash_ctx, (pte_t*)(enclave->thread_context.encl_ptbr << RISCV_PGSHIFT), ++ (VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS, 0, 1); ++ ++ sm3_update(&hash_ctx, (unsigned char*)(&nonce), sizeof(uintptr_t)); ++ ++ sm3_final(&hash_ctx, hash); ++} ++ ++void update_hash_shadow_enclave(struct shadow_enclave_t *enclave, void* hash, uintptr_t nonce_arg) ++{ ++ struct sm3_context hash_ctx; ++ uintptr_t nonce = nonce_arg; ++ ++ sm3_init(&hash_ctx); ++ ++ sm3_update(&hash_ctx, (unsigned char*)(hash), HASH_SIZE); ++ ++ sm3_update(&hash_ctx, (unsigned char*)(&nonce), sizeof(uintptr_t)); ++ ++ sm3_final(&hash_ctx, hash); ++} ++void sign_enclave(void* signature_arg, void* hash) ++{ ++ struct signature_t *signature = (struct signature_t*)signature_arg; ++ sm2_sign((void*)(signature->r), (void*)(signature->s), (void*)SM_PRI_KEY, hash); ++} ++ ++int verify_enclave(void* signature_arg, void* hash) ++{ ++ int ret = 0; ++ struct signature_t *signature = (struct signature_t*)signature_arg; ++ ++ ret = sm2_verify((void*)SM_PUB_KEY, hash, (void*)(signature->r), (void*)(signature->s)); ++ ++ return ret; ++} +diff --git a/lib/sbi/sm/enclave.c b/lib/sbi/sm/enclave.c +new file mode 100644 +index 0000000..01940e0 +--- /dev/null ++++ b/lib/sbi/sm/enclave.c +@@ -0,0 +1,2725 @@ ++#include "sbi/riscv_encoding.h" ++#include "sbi/sbi_math.h" ++#include "sbi/riscv_locks.h" ++#include "sbi/sbi_bitops.h" ++#include "sbi/sbi_ipi_destroy_enclave.h" ++#include "sbi/sbi_console.h" ++#include "sm/enclave.h" ++#include "sm/enclave_vm.h" ++#include "sm/enclave_mm.h" ++#include "sm/sm.h" ++#include "sm/platform/pt_area/platform_thread.h" ++#include "sm/ipi.h" ++#include "sm/relay_page.h" ++#include "sm/attest.h" ++#include ++ ++int eapp_args = 0; ++ ++static struct cpu_state_t cpus[MAX_HARTS] = {{0,}, }; ++ ++//whether cpu is in enclave-mode ++int cpu_in_enclave(int i) ++{ ++ return cpus[i].in_enclave; ++} ++ ++//the eid of current cpu (if it is in enclave mode) ++int cpu_eid(int i) ++{ ++ return cpus[i].eid; ++} ++ ++//spinlock ++static spinlock_t enclave_metadata_lock = SPINLOCK_INIT; ++void acquire_enclave_metadata_lock() ++{ ++ spin_lock(&enclave_metadata_lock); ++} ++void 
release_enclave_metadata_lock()
++{
++	spin_unlock(&enclave_metadata_lock);
++}
++
++//enclave metadata
++struct link_mem_t* enclave_metadata_head = NULL;
++struct link_mem_t* enclave_metadata_tail = NULL;
++
++struct link_mem_t* shadow_enclave_metadata_head = NULL;
++struct link_mem_t* shadow_enclave_metadata_tail = NULL;
++
++struct link_mem_t* relay_page_head = NULL;
++struct link_mem_t* relay_page_tail = NULL;
++
++/**
++ * \brief Compare two enclave names.
++ *
++ * \param name1 The first enclave name.
++ * \param name2 The second enclave name.
++ */
++static int enclave_name_cmp(char* name1, char* name2)
++{
++	for(int i = 0; i < NAME_LEN; ++i)
++	{
++		if(name1[i] != name2[i])
++			return 1;
++		if(name1[i] == 0)
++			return 0;
++	}
++	return 0;
++}
++
++/**
++ * \brief Initialize a link memory region, which stores the metadata slabs.
++ *
++ * \param mem_size The requested size of the link memory.
++ * \param slab_size The slab size in the link memory.
++ */
++struct link_mem_t* init_mem_link(unsigned long mem_size, unsigned long slab_size)
++{
++	struct link_mem_t* head;
++	unsigned long resp_size = 0;
++
++	head = (struct link_mem_t*)mm_alloc(mem_size, &resp_size);
++	if(head == NULL)
++		return NULL;
++	else
++		sbi_memset((void*)head, 0, resp_size);
++
++	head->mem_size = resp_size;
++	head->slab_size = slab_size;
++	head->slab_num = (resp_size - sizeof(struct link_mem_t)) / slab_size;
++	void* align_addr = (char*)head + sizeof(struct link_mem_t);
++	head->addr = (char*)size_up_align((unsigned long)align_addr, slab_size);
++	head->next_link_mem = NULL;
++
++	return head;
++}
++
++/**
++ * \brief Create a new link_mem_t entry and append it to the tail.
++ *
++ * \param tail Return value, the tail of the link memory.
++ */
++struct link_mem_t* add_link_mem(struct link_mem_t** tail)
++{
++	struct link_mem_t* new_link_mem;
++	unsigned long resp_size = 0;
++
++	new_link_mem = (struct link_mem_t*)mm_alloc((*tail)->mem_size, &resp_size);
++
++	if (new_link_mem == NULL)
++		return NULL;
++	else
++		sbi_memset((void*)new_link_mem, 0, resp_size);
++
++	if(resp_size <= sizeof(struct link_mem_t) + (*tail)->slab_size)
++	{
++		//the allocated region cannot hold even a single slab
++		mm_free(new_link_mem, resp_size);
++		return NULL;
++	}
++
++	(*tail)->next_link_mem = new_link_mem;
++	new_link_mem->mem_size = resp_size;
++	new_link_mem->slab_num = (resp_size - sizeof(struct link_mem_t)) / (*tail)->slab_size;
++	new_link_mem->slab_size = (*tail)->slab_size;
++	void* align_addr = (char*)new_link_mem + sizeof(struct link_mem_t);
++	new_link_mem->addr = (char*)size_up_align((unsigned long)align_addr, (*tail)->slab_size);
++	new_link_mem->next_link_mem = NULL;
++
++	*tail = new_link_mem;
++
++	return new_link_mem;
++}
++
++/**
++ * \brief Remove the entry (indicated by ptr) from the head's list.
++ * \param head Head of the link memory.
++ * \param ptr The link memory entry to remove.
++ */
++int remove_link_mem(struct link_mem_t** head, struct link_mem_t* ptr)
++{
++	struct link_mem_t *cur_link_mem, *tmp_link_mem;
++	int retval = 0;
++
++	cur_link_mem = *head;
++	if (cur_link_mem == ptr)
++	{
++		*head = cur_link_mem->next_link_mem;
++		mm_free(cur_link_mem, cur_link_mem->mem_size);
++		return retval;
++	}
++
++	for(; cur_link_mem != NULL; cur_link_mem = cur_link_mem->next_link_mem)
++	{
++		if (cur_link_mem->next_link_mem == ptr)
++		{
++			tmp_link_mem = cur_link_mem->next_link_mem;
++			cur_link_mem->next_link_mem = cur_link_mem->next_link_mem->next_link_mem;
++			mm_free(tmp_link_mem, tmp_link_mem->mem_size);
++			return retval;
++		}
++	}
++
++	return retval;
++}
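++/*
++ * Lookup sketch (illustrative): metadata records live in fixed-size slabs
++ * chained through link_mem_t segments, so a logical index such as an eid
++ * resolves by walking the chain and discounting each segment's slab_num,
++ * which is exactly what __get_enclave below does:
++ */
++#if 0
++static void* link_mem_lookup(struct link_mem_t* head, int index, unsigned long slab_size)
++{
++	int count = 0;
++	for(struct link_mem_t* cur = head; cur != NULL; cur = cur->next_link_mem)
++	{
++		if(index < count + cur->slab_num)
++			return (void*)(cur->addr + (index - count) * slab_size);
++		count += cur->slab_num;
++	}
++	return NULL;
++}
++#endif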
++/**
++ * \brief Alloc an enclave_t structure from enclave_metadata_head.
++ * The eid represents the location in the list.
++ */
++struct enclave_t* __alloc_enclave()
++{
++	struct link_mem_t *cur, *next;
++	struct enclave_t* enclave = NULL;
++	int i = 0, found = 0, eid = 0;
++
++	//the enclave metadata list hasn't been initialized yet
++	if(enclave_metadata_head == NULL)
++	{
++		enclave_metadata_head = init_mem_link(ENCLAVE_METADATA_REGION_SIZE, sizeof(struct enclave_t));
++		if(!enclave_metadata_head)
++		{
++			sbi_printf("M mode: __alloc_enclave: don't have enough memory\n");
++			goto alloc_eid_out;
++		}
++		enclave_metadata_tail = enclave_metadata_head;
++	}
++
++	for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		for(i = 0; i < (cur->slab_num); i++)
++		{
++			enclave = (struct enclave_t*)(cur->addr) + i;
++			if(enclave->state == INVALID)
++			{
++				sbi_memset((void*)enclave, 0, sizeof(struct enclave_t));
++				enclave->state = FRESH;
++				enclave->eid = eid;
++				found = 1;
++				break;
++			}
++			eid++;
++		}
++		if(found)
++			break;
++	}
++
++	//don't have enough enclave metadata
++	if(!found)
++	{
++		next = add_link_mem(&enclave_metadata_tail);
++		if(next == NULL)
++		{
++			sbi_bug("M mode: __alloc_enclave: failed to add a new link memory\n");
++			enclave = NULL;
++			goto alloc_eid_out;
++		}
++		enclave = (struct enclave_t*)(next->addr);
++		sbi_memset((void*)enclave, 0, sizeof(struct enclave_t));
++		enclave->state = FRESH;
++		enclave->eid = eid;
++	}
++
++alloc_eid_out:
++	return enclave;
++}
++
++/**
++ * \brief Free the enclave with the given eid in the enclave list.
++ *
++ * \param eid The enclave id, which represents the location in the list.
++ */
++int __free_enclave(int eid)
++{
++	struct link_mem_t *cur;
++	struct enclave_t *enclave = NULL;
++	int found = 0, count = 0, ret_val = 0;
++
++	for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		if(eid < (count + cur->slab_num))
++		{
++			enclave = (struct enclave_t*)(cur->addr) + (eid - count);
++			sbi_memset((void*)enclave, 0, sizeof(struct enclave_t));
++			enclave->state = INVALID;
++			found = 1;
++			ret_val = 0;
++			break;
++		}
++		count += cur->slab_num;
++	}
++
++	//this eid has never been allocated
++	if(!found)
++	{
++		sbi_bug("M mode: __free_enclave: this eid has never been allocated\n");
++		ret_val = -1;
++	}
++
++	return ret_val;
++}
++
++/**
++ * \brief Get the enclave with the given eid.
++ *
++ * \param eid The enclave id, which represents the location in the list.
++ */
++struct enclave_t* __get_enclave(int eid)
++{
++	struct link_mem_t *cur;
++	struct enclave_t *enclave;
++	int found = 0, count = 0;
++
++	for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		if(eid < (count + cur->slab_num))
++		{
++			enclave = (struct enclave_t*)(cur->addr) + (eid - count);
++			found = 1;
++			break;
++		}
++
++		count += cur->slab_num;
++	}
++
++	//this eid has never been allocated
++	if(!found)
++	{
++		sbi_bug("M mode: __get_enclave: this enclave has never been allocated\n");
++		enclave = NULL;
++	}
++
++	return enclave;
++}
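++/*
++ * Locking note: the __alloc/__get/__free helpers above do not take the
++ * metadata lock themselves; callers such as create_enclave and run_enclave
++ * are expected to wrap them, e.g. (illustrative sketch):
++ */
++#if 0
++	acquire_enclave_metadata_lock();
++	struct enclave_t* enclave = __get_enclave(eid);
++	if(enclave)
++	{
++		/* inspect or update the metadata */
++	}
++	release_enclave_metadata_lock();
++#endif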
++/**
++ * \brief Check whether the enclave name is duplicated.
++ * Return 0 if the enclave name is unique, otherwise
++ * return -1.
++ *
++ * \param enclave_name The enclave name to check.
++ * \param target_eid The target enclave id.
++ */
++int check_enclave_name(char *enclave_name, int target_eid)
++{
++	struct link_mem_t *cur;
++	struct enclave_t* enclave = NULL;
++	int i = 0, eid = 0;
++	for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		for(i = 0; i < (cur->slab_num); i++)
++		{
++			enclave = (struct enclave_t*)(cur->addr) + i;
++			if((enclave->state > INVALID) && (enclave_name_cmp(enclave_name, enclave->enclave_name)==0) && (target_eid != eid))
++			{
++				sbi_bug("M mode: check_enclave_name: enclave name already exists, enclave name is %s\n", enclave_name);
++				return -1;
++			}
++			eid++;
++		}
++	}
++	return 0;
++}
++
++/**
++ * \brief Alloc a shadow enclave (seid) in the shadow enclave list.
++ */
++static struct shadow_enclave_t* __alloc_shadow_enclave()
++{
++	struct link_mem_t *cur, *next;
++	struct shadow_enclave_t* shadow_enclave = NULL;
++	int i = 0, found = 0, eid = 0;
++
++	//the shadow enclave metadata list hasn't been initialized yet
++	if(shadow_enclave_metadata_head == NULL)
++	{
++		shadow_enclave_metadata_head = init_mem_link(SHADOW_ENCLAVE_METADATA_REGION_SIZE, sizeof(struct shadow_enclave_t));
++		if(!shadow_enclave_metadata_head)
++		{
++			sbi_printf("M mode: __alloc_shadow_enclave: don't have enough memory\n");
++			goto alloc_eid_out;
++		}
++		shadow_enclave_metadata_tail = shadow_enclave_metadata_head;
++	}
++
++	for(cur = shadow_enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		for(i = 0; i < (cur->slab_num); i++)
++		{
++			shadow_enclave = (struct shadow_enclave_t*)(cur->addr) + i;
++			if(shadow_enclave->state == INVALID)
++			{
++				sbi_memset((void*)shadow_enclave, 0, sizeof(struct shadow_enclave_t));
++				shadow_enclave->state = FRESH;
++				shadow_enclave->eid = eid;
++				found = 1;
++				break;
++			}
++			eid++;
++		}
++		if(found)
++			break;
++	}
++
++	// don't have enough enclave metadata
++	if(!found)
++	{
++		next = add_link_mem(&shadow_enclave_metadata_tail);
++		if(next == NULL)
++		{
++			sbi_printf("M mode: __alloc_shadow_enclave: don't have enough memory\n");
++			shadow_enclave = NULL;
++			goto alloc_eid_out;
++		}
++		shadow_enclave = (struct shadow_enclave_t*)(next->addr);
++		sbi_memset((void*)shadow_enclave, 0, sizeof(struct shadow_enclave_t));
++		shadow_enclave->state = FRESH;
++		shadow_enclave->eid = eid;
++	}
++
++alloc_eid_out:
++	return shadow_enclave;
++}
++
++/**
++ * \brief Get the shadow enclave structure with the given eid.
++ *
++ * \param eid The shadow enclave id.
++ */
++static struct shadow_enclave_t* __get_shadow_enclave(int eid)
++{
++	struct link_mem_t *cur;
++	struct shadow_enclave_t *shadow_enclave;
++	int found = 0, count = 0;
++
++	for(cur = shadow_enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		if(eid < (count + cur->slab_num))
++		{
++			shadow_enclave = (struct shadow_enclave_t*)(cur->addr) + (eid - count);
++			found = 1;
++			break;
++		}
++
++		count += cur->slab_num;
++	}
++
++	//this eid has never been allocated
++	if(!found)
++	{
++		sbi_bug("M mode: __get_shadow_enclave: this shadow enclave has never been allocated\n");
++		shadow_enclave = NULL;
++	}
++
++	return shadow_enclave;
++}
++/**
++ * \brief This function is used to handle IPC between enclaves:
++ * it returns the last enclave in the calling chain, which
++ * identifies the enclave that is really executing.
++ *
++ * \param eid The enclave id.
++ */
++struct enclave_t* __get_real_enclave(int eid)
++{
++	struct enclave_t* enclave = __get_enclave(eid);
++	if(!enclave)
++		return NULL;
++
++	struct enclave_t* real_enclave = NULL;
++	if(enclave->cur_callee_eid == -1)
++		real_enclave = enclave;
++	else
++		real_enclave = __get_enclave(enclave->cur_callee_eid);
++
++	return real_enclave;
++}
++
++
++/********************************************/
++/*                Relay Page                */
++/********************************************/
++
++/*
++	Allocate a new entry in the link memory; if the link head is NULL, we initialize
++	the link memory first. When the ownership of a relay page changes, we first
++	destroy the old relay page entry, which records the out-of-date ownership,
++	and then allocate a new relay page entry with the new ownership.
++
++	Return value:
++	relay_page_entry @ the relay page was allocated successfully
++	NULL @ allocating the relay page failed
++ */
++
++/**
++ * \brief Alloc a relay page entry in the relay page list.
++ *
++ * \param enclave_name The enclave name (specified by the user).
++ * \param relay_page_addr The relay page address for the given enclave (enclave_name).
++ * \param relay_page_size The relay page size for the given enclave (enclave_name).
++ */
++struct relay_page_entry_t* __alloc_relay_page_entry(char *enclave_name, unsigned long relay_page_addr, unsigned long relay_page_size)
++{
++	struct link_mem_t *cur, *next;
++	struct relay_page_entry_t* relay_page_entry = NULL;
++	int found = 0, link_mem_index = 0;
++
++	//the relay_page_entry metadata list hasn't been initialized yet
++	if(relay_page_head == NULL)
++	{
++		relay_page_head = init_mem_link(sizeof(struct relay_page_entry_t)*ENTRY_PER_RELAY_PAGE_REGION, sizeof(struct relay_page_entry_t));
++
++		if(!relay_page_head)
++			goto failed;
++
++		relay_page_tail = relay_page_head;
++	}
++
++	//check whether the relay page is owned by another enclave
++	for(cur = relay_page_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		for(int i = 0; i < (cur->slab_num); i++)
++		{
++			relay_page_entry = (struct relay_page_entry_t*)(cur->addr) + i;
++			if(relay_page_entry->addr == relay_page_addr)
++			{
++				sbi_bug("M mode: __alloc_relay_page_entry: the relay page is owned by another enclave\n");
++				relay_page_entry = (void*)(-1UL);
++				goto failed;
++			}
++		}
++	}
++	//traverse the link memory and check whether there is an empty entry in the link memory
++	for(cur = relay_page_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		for(int i = 0; i < (cur->slab_num); i++)
++		{
++			relay_page_entry = (struct relay_page_entry_t*)(cur->addr) + i;
++			//an address of zero in the relay page entry means the entry is unused
++			if(relay_page_entry->addr == 0)
++			{
++				sbi_memcpy(relay_page_entry->enclave_name, enclave_name, NAME_LEN);
++				relay_page_entry->addr = relay_page_addr;
++				relay_page_entry->size = relay_page_size;
++				found = 1;
++				break;
++			}
++		}
++		if(found)
++			break;
++		link_mem_index = link_mem_index + 1;
++	}
++
++	//not enough memory for a new entry in the current link memory, so allocate a new link memory
++	if(!found)
++	{
++		next = add_link_mem(&relay_page_tail);
++		if(next == NULL)
++		{
++			sbi_bug("M mode: __alloc_relay_page_entry: don't have enough memory\n");
++			relay_page_entry = NULL;
++			goto failed;
++		}
++		relay_page_entry = (struct relay_page_entry_t*)(next->addr);
++		sbi_memcpy(relay_page_entry->enclave_name, enclave_name, NAME_LEN);
++		relay_page_entry->addr = relay_page_addr;
++		relay_page_entry->size = relay_page_size;
++	}
++
++	return relay_page_entry;
++
++failed:
++	if(relay_page_entry)
++		sbi_memset((void*)relay_page_entry, 0, sizeof(struct relay_page_entry_t));
++
++	return NULL;
++}
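++/*
++ * Ownership sketch (illustrative): a relay page is keyed by its physical
++ * range and owned by exactly one enclave name at a time, so handing the
++ * range to another enclave is "free the old entry, allocate a new one",
++ * which change_relay_page_ownership below performs:
++ */
++#if 0
++	/* transfer [addr, addr + size) to the enclave named receiver_name */
++	if(change_relay_page_ownership(addr, size, receiver_name) != 0)
++		sbi_bug("M mode: relay page ownership transfer failed\n");
++#endif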
++/**
++ * \brief Free the relay page indexed by the given address range.
++ * For now we just set the address in the relay page entry to zero,
++ * which means the relay page entry is unused.
++ *
++ * Return value:
++ * 0 : the relay page was freed successfully
++ * -1 : the corresponding relay page cannot be found
++ *
++ * \param relay_page_addr The relay page address.
++ * \param relay_page_size The relay page size.
++ */
++int __free_relay_page_entry(unsigned long relay_page_addr, unsigned long relay_page_size)
++{
++	struct link_mem_t *cur;
++	struct relay_page_entry_t *relay_page_entry = NULL;
++	int found = 0, ret_val = 0;
++
++	for(cur = relay_page_head; cur != NULL; cur = cur->next_link_mem)
++	{
++		for(int i = 0; i < (cur->slab_num); i++)
++		{
++			relay_page_entry = (struct relay_page_entry_t*)(cur->addr) + i;
++			//find the corresponding relay page entry by the given address and size
++			if((relay_page_entry->addr >= relay_page_addr) && ((relay_page_entry->addr + relay_page_entry->size) <= (relay_page_addr + relay_page_size)))
++			{
++				found = 1;
++				sbi_memset(relay_page_entry->enclave_name, 0, NAME_LEN);
++				relay_page_entry->addr = 0;
++				relay_page_entry->size = 0;
++			}
++		}
++	}
++	//this relay page has never been allocated
++	if(!found)
++	{
++		sbi_bug("M mode: __free_relay_page_entry: relay page [%lx : %lx + %lx] does not exist\n", relay_page_addr, relay_page_addr, relay_page_size);
++		ret_val = -1;
++	}
++
++	return ret_val;
++}
++
++/**
++ * \brief Retrieve the relay page entry for the given enclave name.
++ *
++ * \param enclave_name: Get the relay page entry with the given enclave name.
++ * \param slab_index: On success, holds the slab index of the entry in the link memory.
++ * \param link_mem_index: On success, holds the link mem index of the entry in the link memory.
++ */
++struct relay_page_entry_t* __get_relay_page_by_name(char* enclave_name, int *slab_index, int *link_mem_index)
++{
++	struct link_mem_t *cur;
++	struct relay_page_entry_t *relay_page_entry = NULL;
++	int i, k, found = 0;
++
++	cur = relay_page_head;
++	for (k = 0; k < (*link_mem_index); k++)
++		cur = cur->next_link_mem;
++
++	i = *slab_index;
++	for(; cur != NULL; cur = cur->next_link_mem)
++	{
++		for(; i < (cur->slab_num); ++i)
++		{
++			relay_page_entry = (struct relay_page_entry_t*)(cur->addr) + i;
++			if((relay_page_entry->addr != 0) && enclave_name_cmp(relay_page_entry->enclave_name, enclave_name)==0)
++			{
++				found = 1;
++				*slab_index = i+1;
++				//check whether slab_index overflows
++				if ((i+1) >= (cur->slab_num))
++				{
++					*slab_index = 0;
++					*link_mem_index = (*link_mem_index) + 1;
++				}
++				break;
++			}
++		}
++		if(found)
++			break;
++		*link_mem_index = (*link_mem_index) + 1;
++		i = 0;
++	}
++
++	//no relay page is registered under this enclave name
++	if(!found)
++	{
++		sbi_bug("M mode: __get_relay_page_by_name: no relay page for enclave:%s\n", enclave_name);
++		return NULL;
++	}
++
++	return relay_page_entry;
++}
++/**
++ * \brief Change the relay page ownership: delete the old relay page entry
++ * in the link memory and add an entry with the new ownership.
++ * If the relay page does not exist, return an error.
++ *
++ * \param relay_page_addr: Relay page address.
++ * \param relay_page_size: Relay page size.
++ * \param enclave_name: The new owner (specified by the enclave name) of the relay page.
++ */
++uintptr_t change_relay_page_ownership(unsigned long relay_page_addr, unsigned long relay_page_size, char *enclave_name)
++{
++	uintptr_t ret_val = 0;
++	if ( __free_relay_page_entry( relay_page_addr, relay_page_size) < 0)
++	{
++		sbi_bug("M mode: change_relay_page_ownership: cannot free the relay page whose ownership needs to be transferred\n");
++		ret_val = -1;
++		return ret_val;
++	}
++
++	// This relay page entry allocation cannot fail
++	if (__alloc_relay_page_entry(enclave_name, relay_page_addr, relay_page_size) == NULL)
++	{
++		sbi_bug("M mode: change_relay_page_ownership: cannot alloc the relay page entry, addr is %lx\n", relay_page_addr);
++	}
++
++	return ret_val;
++}
++
++/**
++ * \brief Swap states from the host to an enclave, e.g., satp, stvec, etc.
++ * It is used when we run/resume enclaves and shadow enclaves.
++ *
++ * \param host_regs The host regs ptr.
++ * \param enclave The given enclave.
++ */
++static int swap_from_host_to_enclave(uintptr_t* host_regs, struct enclave_t* enclave)
++{
++	//grant the enclave access to its memory
++	if(grant_enclave_access(enclave) < 0)
++		return -1;
++
++	//save host context
++	swap_prev_state(&(enclave->thread_context), host_regs);
++
++	//different platforms have different ptbr switch methods
++	switch_to_enclave_ptbr(&(enclave->thread_context), enclave->thread_context.encl_ptbr);
++
++	//save host trap vector
++	swap_prev_stvec(&(enclave->thread_context), csr_read(CSR_STVEC));
++
++	//TODO: save host cache binding
++	//swap_prev_cache_binding(&enclave -> threads[0], csr_read(0x356));
++
++	//disable interrupts
++	swap_prev_mie(&(enclave->thread_context), csr_read(CSR_MIE));
++	csr_read_clear(CSR_MIP, MIP_MTIP);
++	csr_read_clear(CSR_MIP, MIP_STIP);
++	csr_read_clear(CSR_MIP, MIP_SSIP);
++	csr_read_clear(CSR_MIP, MIP_SEIP);
++
++	//disable interrupts/exceptions delegation
++	swap_prev_mideleg(&(enclave->thread_context), csr_read(CSR_MIDELEG));
++	swap_prev_medeleg(&(enclave->thread_context), csr_read(CSR_MEDELEG));
++
++	//swap the mepc to transfer control to the enclave
++	swap_prev_mepc(&(enclave->thread_context), csr_read(CSR_MEPC));
++
++	//set mstatus to transfer control to u mode
++	uintptr_t mstatus = csr_read(CSR_MSTATUS);
++	mstatus = INSERT_FIELD(mstatus, MSTATUS_MPP, PRV_U);
++	csr_write(CSR_MSTATUS, mstatus);
++
++	//mark that the cpu is in the enclave world now
++	enter_enclave_world(enclave->eid);
++
++	__asm__ __volatile__ ("sfence.vma" : : : "memory");
++
++	return 0;
++}
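++/*
++ * Swap semantics (illustrative): each swap_prev_* helper exchanges the live
++ * value with the copy saved in thread_state_t, assuming the thread.c
++ * implementation writes the saved value back into the CSR. Calling the same
++ * sequence again from swap_from_enclave_to_host therefore restores the host:
++ */
++#if 0
++	/* entering: prev_mepc := host mepc, CSR_MEPC := saved enclave mepc */
++	swap_prev_mepc(&(enclave->thread_context), csr_read(CSR_MEPC));
++	/* leaving: the mirrored call puts the host mepc back into CSR_MEPC */
++#endif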
++/**
++ * \brief Similar to swap_from_host_to_enclave, but in the reverse direction.
++ *
++ * \param regs The host regs ptr.
++ * \param enclave The given enclave.
++ */
++static int swap_from_enclave_to_host(uintptr_t* regs, struct enclave_t* enclave)
++{
++	//retrieve enclave access to memory
++	retrieve_enclave_access(enclave);
++
++	//restore host context
++	swap_prev_state(&(enclave->thread_context), regs);
++
++	//restore host's ptbr
++	switch_to_host_ptbr(&(enclave->thread_context), enclave->host_ptbr);
++
++	//restore host stvec
++	swap_prev_stvec(&(enclave->thread_context), csr_read(CSR_STVEC));
++
++	//TODO: restore host cache binding
++	//swap_prev_cache_binding(&(enclave->thread_context), );
++
++	//restore interrupts
++	swap_prev_mie(&(enclave->thread_context), csr_read(CSR_MIE));
++
++	//restore interrupts/exceptions delegation
++	swap_prev_mideleg(&(enclave->thread_context), csr_read(CSR_MIDELEG));
++	swap_prev_medeleg(&(enclave->thread_context), csr_read(CSR_MEDELEG));
++
++	//transfer control back to the kernel
++	swap_prev_mepc(&(enclave->thread_context), csr_read(CSR_MEPC));
++
++	//restore mstatus
++	uintptr_t mstatus = csr_read(CSR_MSTATUS);
++	mstatus = INSERT_FIELD(mstatus, MSTATUS_MPP, PRV_S);
++	csr_write(CSR_MSTATUS, mstatus);
++
++	//mark that the cpu is out of the enclave world now
++	exit_enclave_world();
++
++	__asm__ __volatile__ ("sfence.vma" : : : "memory");
++
++	return 0;
++}
++
++static inline int tlb_remote_sfence()
++{
++	int ret;
++	struct sbi_tlb_info tlb_info;
++	u32 source_hart = current_hartid();
++	SBI_TLB_INFO_INIT(&tlb_info, 0, 0, 0, 0,
++			  SBI_TLB_FLUSH_VMA, source_hart);
++	ret = sbi_tlb_request(0xFFFFFFFF&(~(1<<source_hart)), 0, &tlb_info);
++	return ret;
++}
++
++/**
++ * \brief The auxiliary function for the enclave call.
++ *
++ * \param regs The reg argument.
++ * \param top_caller_enclave The topmost enclave in the enclave calling stack.
++ * \param caller_enclave The caller enclave.
++ * \param callee_enclave The callee enclave.
++ */
++static int __enclave_call(uintptr_t* regs, struct enclave_t* top_caller_enclave, struct enclave_t* caller_enclave, struct enclave_t* callee_enclave)
++{
++	//copy the caller's thread context into the callee, keeping the callee's own page table
++	uintptr_t encl_ptbr = callee_enclave->thread_context.encl_ptbr;
++	sbi_memcpy((void*)(&(callee_enclave->thread_context)), (void*)(&(caller_enclave->thread_context)), sizeof(struct thread_state_t));
++	callee_enclave->thread_context.encl_ptbr = encl_ptbr;
++	callee_enclave->host_ptbr = caller_enclave->host_ptbr;
++	callee_enclave->ocall_func_id = caller_enclave->ocall_func_id;
++	callee_enclave->ocall_arg0 = caller_enclave->ocall_arg0;
++	callee_enclave->ocall_arg1 = caller_enclave->ocall_arg1;
++	callee_enclave->ocall_syscall_num = caller_enclave->ocall_syscall_num;
++
++	//save caller's enclave context in its prev_state
++	swap_prev_state(&(caller_enclave->thread_context), regs);
++	caller_enclave->thread_context.prev_stvec = csr_read(CSR_STVEC);
++	caller_enclave->thread_context.prev_mie = csr_read(CSR_MIE);
++	caller_enclave->thread_context.prev_mideleg = csr_read(CSR_MIDELEG);
++	caller_enclave->thread_context.prev_medeleg = csr_read(CSR_MEDELEG);
++	caller_enclave->thread_context.prev_mepc = csr_read(CSR_MEPC);
++
++	//clear callee's enclave context
++	sbi_memset((void*)regs, 0, sizeof(struct general_registers_t));
++
++	//different platforms have different ptbr switch methods
++	switch_to_enclave_ptbr(&(callee_enclave->thread_context), callee_enclave->thread_context.encl_ptbr);
++
++	//callee uses caller's stvec
++
++	//callee uses caller's cache binding
++
++	//callee uses caller's mie/mip
++	csr_read_clear(CSR_MIP, MIP_MTIP);
++	csr_read_clear(CSR_MIP, MIP_STIP);
++	csr_read_clear(CSR_MIP, MIP_SSIP);
++	csr_read_clear(CSR_MIP, MIP_SEIP);
++
++	//callee uses caller's interrupts/exceptions delegation
++
++	//transfer control to the callee enclave
++	csr_write(CSR_MEPC, callee_enclave->entry_point);
++
++	//callee uses caller's mstatus
++
++	//mark that the cpu is in the callee enclave world now
++	enter_enclave_world(callee_enclave->eid);
++
++	top_caller_enclave->cur_callee_eid = callee_enclave->eid;
++	caller_enclave->cur_callee_eid = callee_enclave->eid;
++	callee_enclave->caller_eid = caller_enclave->eid;
++	callee_enclave->top_caller_eid = top_caller_enclave->eid;
++
++	__asm__ __volatile__ ("sfence.vma" : : : "memory");
++
++	return 0;
++}
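++/*
++ * Call-chain bookkeeping sketch (illustrative): after __enclave_call the
++ * eids form a chain that __get_real_enclave follows to find the enclave
++ * actually executing on the hart:
++ */
++#if 0
++	__enclave_call(regs, top_caller_enclave, caller_enclave, callee_enclave);
++	/* now: top_caller_enclave->cur_callee_eid == callee_enclave->eid */
++	/*      callee_enclave->caller_eid == caller_enclave->eid        */
++	/* so __get_real_enclave(top_caller_enclave->eid) yields the callee */
++#endif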
++/**
++ * \brief The auxiliary function for the enclave return.
++ *
++ * \param regs The reg argument.
++ * \param top_caller_enclave The topmost enclave in the enclave calling stack.
++ * \param caller_enclave The caller enclave.
++ * \param callee_enclave The callee enclave.
++ */
++static int __enclave_return(uintptr_t* regs, struct enclave_t* callee_enclave, struct enclave_t* caller_enclave, struct enclave_t* top_caller_enclave)
++{
++	//restore caller's context
++	sbi_memcpy((void*)regs, (void*)(&(caller_enclave->thread_context.prev_state)), sizeof(struct general_registers_t));
++	swap_prev_stvec(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_stvec);
++	swap_prev_mie(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_mie);
++	swap_prev_mideleg(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_mideleg);
++	swap_prev_medeleg(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_medeleg);
++	swap_prev_mepc(&(caller_enclave->thread_context), callee_enclave->thread_context.prev_mepc);
++
++	//restore caller's host context
++	sbi_memcpy((void*)(&(caller_enclave->thread_context.prev_state)), (void*)(&(callee_enclave->thread_context.prev_state)), sizeof(struct general_registers_t));
++
++	//clear callee's enclave context
++	uintptr_t encl_ptbr = callee_enclave->thread_context.encl_ptbr;
++	sbi_memset((void*)(&(callee_enclave->thread_context)), 0, sizeof(struct thread_state_t));
++	callee_enclave->thread_context.encl_ptbr = encl_ptbr;
++	callee_enclave->host_ptbr = 0;
++	callee_enclave->ocall_func_id = NULL;
++	callee_enclave->ocall_arg0 = NULL;
++	callee_enclave->ocall_arg1 = NULL;
++	callee_enclave->ocall_syscall_num = NULL;
++
++	//different platforms have different ptbr switch methods
++	switch_to_enclave_ptbr(&(caller_enclave->thread_context), caller_enclave->thread_context.encl_ptbr);
++
++	csr_read_clear(CSR_MIP, MIP_MTIP);
++	csr_read_clear(CSR_MIP, MIP_STIP);
++	csr_read_clear(CSR_MIP, MIP_SSIP);
++	csr_read_clear(CSR_MIP, MIP_SEIP);
++
++	//mark that the cpu is in the caller enclave world now
++	enter_enclave_world(caller_enclave->eid);
++	top_caller_enclave->cur_callee_eid = caller_enclave->eid;
++	caller_enclave->cur_callee_eid = -1;
++	callee_enclave->caller_eid = -1;
++	callee_enclave->top_caller_eid = -1;
++
++	__asm__ __volatile__ ("sfence.vma" : : : "memory");
++
++	return 0;
++}
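++/*
++ * Return-path sketch (illustrative): __enclave_return is the inverse of
++ * __enclave_call and is reached when the callee issues the
++ * SBI_ENCLAVE_RETURN ecall, dispatched through enclave_call_trap to
++ * sm_enclave_return. From the callee's U-mode code this looks like:
++ */
++#if 0
++	register unsigned long a0 asm("a0") = return_value;
++	register unsigned long a7 asm("a7") = SBI_ENCLAVE_RETURN;
++	asm volatile("ecall" : "+r"(a0) : "r"(a7) : "memory");
++#endif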
++/**
++ * \brief Free a list of memory indicated by pm_area_struct;
++ * the pages are zeroed and returned to the host.
++ *
++ * \param pma The pma structure of the memory to free.
++ */
++void free_enclave_memory(struct pm_area_struct *pma)
++{
++	uintptr_t paddr = 0;
++	uintptr_t size = 0;
++
++	extern spinlock_t mbitmap_lock;
++	spin_lock(&mbitmap_lock);
++
++	while(pma)
++	{
++		paddr = pma->paddr;
++		size = pma->size;
++		pma = pma->pm_next;
++		//we cannot clear the first page, as the host uses it to free the memory
++		sbi_memset((void*)(paddr + RISCV_PGSIZE), 0, size - RISCV_PGSIZE);
++		__free_secure_memory(paddr, size);
++	}
++
++	spin_unlock(&mbitmap_lock);
++}
++
++void initilze_va_struct(struct pm_area_struct* pma, struct vm_area_struct* vma, struct enclave_t* enclave)
++{
++	pma->pm_next = NULL;
++	enclave->pma_list = pma;
++	traverse_vmas(enclave->root_page_table, vma);
++	//FIXME: here we assume there are exactly one text vma (covering text/data/bss) and one stack vma
++	while(vma)
++	{
++		if(vma->va_start == ENCLAVE_DEFAULT_TEXT_BASE)
++		{
++			enclave->text_vma = vma;
++		}
++		if(vma->va_end == ENCLAVE_DEFAULT_STACK_BASE)
++		{
++			enclave->stack_vma = vma;
++			enclave->_stack_top = enclave->stack_vma->va_start;
++		}
++		vma->pma = pma;
++		vma = vma->vm_next;
++	}
++	if(enclave->text_vma)
++		enclave->text_vma->vm_next = NULL;
++	if(enclave->stack_vma)
++		enclave->stack_vma->vm_next = NULL;
++	enclave->_heap_top = ENCLAVE_DEFAULT_HEAP_BASE;
++	enclave->heap_vma = NULL;
++	enclave->mmap_vma = NULL;
++}
++
++/**************************************************************/
++/*                       called by host                       */
++/**************************************************************/
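++/*
++ * Host-side flow sketch (illustrative): the untrusted host carves out the
++ * enclave's physical memory, then drives the functions below through the
++ * Penglai ecalls. Assumed call order, not a verbatim kernel API:
++ */
++#if 0
++	enclave_create_param_t args = { /* paddr, size, entry_point, eid_ptr, ... */ };
++	create_enclave(args);           /* measures the image, returns the eid via eid_ptr */
++	run_enclave(regs, eid, mm_arg_addr, mm_arg_size); /* enters U mode at entry_point */
++#endif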
++/**
++ * \brief Create a new enclave with the create_args.
++ *
++ * \param create_args The arguments for creating a new enclave.
++ */
++uintptr_t create_enclave(enclave_create_param_t create_args)
++{
++	struct enclave_t* enclave = NULL;
++	struct pm_area_struct* pma = NULL;
++	struct vm_area_struct* vma = NULL;
++	uintptr_t ret = 0, free_mem = 0;
++	int need_free_secure_memory = 0;
++
++	acquire_enclave_metadata_lock();
++
++	if(!enable_enclave())
++	{
++		ret = ENCLAVE_ERROR;
++		sbi_bug("M mode: %s: cannot enable enclave\n", __func__);
++		goto failed;
++	}
++
++	//check and mark the enclave memory as secure
++	if(check_and_set_secure_memory(create_args.paddr, create_args.size) != 0)
++	{
++		ret = ENCLAVE_ERROR;
++		sbi_bug("M mode: %s: check and set secure memory failed\n", __func__);
++		goto failed;
++	}
++	need_free_secure_memory = 1;
++
++	//check the enclave memory layout
++	if(check_enclave_layout(create_args.paddr + RISCV_PGSIZE, 0, -1UL, create_args.paddr, create_args.paddr + create_args.size) != 0)
++	{
++		ret = ENCLAVE_ERROR;
++		sbi_bug("M mode: %s: check memory layout failed\n", __func__);
++		goto failed;
++	}
++
++	//Sync and flush the remote TLB entries.
++	tlb_remote_sfence();
++
++	enclave = __alloc_enclave();
++	if(!enclave)
++	{
++		ret = ENCLAVE_NO_MEM;
++		sbi_printf("M mode: %s: alloc enclave failed\n", __func__);
++		goto failed;
++	}
++
++	SET_ENCLAVE_METADATA(create_args.entry_point, enclave, &create_args, enclave_create_param_t *, paddr);
++
++	//traverse vmas
++	pma = (struct pm_area_struct*)(create_args.paddr);
++	vma = (struct vm_area_struct*)(create_args.paddr + sizeof(struct pm_area_struct));
++	pma->paddr = create_args.paddr;
++	pma->size = create_args.size;
++	pma->free_mem = create_args.free_mem;
++	if(pma->free_mem < pma->paddr || pma->free_mem >= pma->paddr+pma->size
++		|| (pma->free_mem & ((1<<RISCV_PGSHIFT) - 1)))
++	{
++		ret = ENCLAVE_ERROR;
++		goto failed;
++	}
++	initilze_va_struct(pma, vma, enclave);
++
++	enclave->free_pages = NULL;
++	enclave->free_pages_num = 0;
++	free_mem = create_args.paddr + create_args.size - RISCV_PGSIZE;
++
++	// Reserve the first two entries for the free memory page
++	while(free_mem >= create_args.free_mem)
++	{
++		struct page_t *page = (struct page_t*)free_mem;
++		page->paddr = free_mem;
++		page->next = enclave->free_pages;
++		enclave->free_pages = page;
++		enclave->free_pages_num += 1;
++		free_mem -= RISCV_PGSIZE;
++	}
++	//check kbuffer
++	if(create_args.kbuffer_size < RISCV_PGSIZE || create_args.kbuffer & (RISCV_PGSIZE-1) || create_args.kbuffer_size & (RISCV_PGSIZE-1))
++	{
++		ret = ENCLAVE_ERROR;
++		sbi_bug("M mode: %s: kbuffer check failed\n", __func__);
++		goto failed;
++	}
++	mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_KBUFFER, create_args.kbuffer, create_args.kbuffer_size);
++
++	//check shm
++	if(create_args.shm_paddr && create_args.shm_size &&
++		!(create_args.shm_paddr & (RISCV_PGSIZE-1)) && !(create_args.shm_size & (RISCV_PGSIZE-1)))
++	{
++		mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_SHM_BASE, create_args.shm_paddr, create_args.shm_size);
++		enclave->shm_paddr = create_args.shm_paddr;
++		enclave->shm_size = create_args.shm_size;
++	}
++	else
++	{
++		enclave->shm_paddr = 0;
++		enclave->shm_size = 0;
++	}
++
++	hash_enclave(enclave, (void*)(enclave->hash), 0);
++	copy_word_to_host((unsigned int*)create_args.eid_ptr, enclave->eid);
++	release_enclave_metadata_lock();
++	return ret;
++
++failed:
++	if(need_free_secure_memory)
++	{
++		free_secure_memory(create_args.paddr, create_args.size);
++	}
++	if(enclave)
++	{
++		__free_enclave(enclave->eid);
++	}
++	release_enclave_metadata_lock();
++	return ret;
++}
++
++/**
++ * \brief Create a new shadow enclave with the create_args.
++ *
++ * \param create_args The arguments for creating a new shadow enclave.
++ */
++uintptr_t create_shadow_enclave(enclave_create_param_t create_args)
++{
++	uintptr_t ret = 0;
++	int need_free_secure_memory = 0;
++	acquire_enclave_metadata_lock();
++	eapp_args = 0;
++	if(!enable_enclave())
++	{
++		ret = ENCLAVE_ERROR;
++		goto failed;
++	}
++
++	//check and mark the enclave memory as secure
++	if(check_and_set_secure_memory(create_args.paddr, create_args.size) != 0)
++	{
++		ret = ENCLAVE_ERROR;
++		goto failed;
++	}
++
++	//Sync and flush the remote TLB entries.
++
++/**
++ * \brief Create a new shadow enclave with the create_args.
++ *
++ * \param create_args The arguments for creating a new shadow enclave.
++ */
++uintptr_t create_shadow_enclave(enclave_create_param_t create_args)
++{
++  uintptr_t ret = 0;
++  int need_free_secure_memory = 0;
++  acquire_enclave_metadata_lock();
++  eapp_args = 0;
++  if(!enable_enclave())
++  {
++    ret = ENCLAVE_ERROR;
++    goto failed;
++  }
++
++  //check and set the enclave secure memory
++  if(check_and_set_secure_memory(create_args.paddr, create_args.size) != 0)
++  {
++    ret = ENCLAVE_ERROR;
++    goto failed;
++  }
++
++  //Sync and flush the remote TLB entries.
++  tlb_remote_sfence();
++
++  need_free_secure_memory = 1;
++  //check enclave memory layout
++  if(check_enclave_layout(create_args.paddr + RISCV_PGSIZE, 0, -1UL, create_args.paddr, create_args.paddr + create_args.size) != 0)
++  {
++    ret = ENCLAVE_ERROR;
++    goto failed;
++  }
++  struct shadow_enclave_t* shadow_enclave;
++  shadow_enclave = __alloc_shadow_enclave();
++  if(!shadow_enclave)
++  {
++    sbi_bug("M mode: create_shadow_enclave: not enough memory to alloc a shadow enclave\n");
++    ret = ENCLAVE_NO_MEM;
++    goto failed;
++  }
++  shadow_enclave->entry_point = create_args.entry_point;
++  //the first page is reserved for the page-link list
++  shadow_enclave->root_page_table = create_args.paddr + RISCV_PGSIZE;
++  shadow_enclave->thread_context.encl_ptbr = ((create_args.paddr+RISCV_PGSIZE) >> RISCV_PGSHIFT) | SATP_MODE_CHOICE;
++
++  hash_shadow_enclave(shadow_enclave, (void*)(shadow_enclave->hash), 0);
++  copy_word_to_host((unsigned int*)create_args.eid_ptr, shadow_enclave->eid);
++  release_enclave_metadata_lock();
++  return ret;
++
++failed:
++  if(need_free_secure_memory)
++  {
++    free_secure_memory(create_args.paddr, create_args.size);
++  }
++  release_enclave_metadata_lock();
++  return ret;
++}
++
++uintptr_t map_relay_page(unsigned int eid, uintptr_t mm_arg_addr, uintptr_t mm_arg_size, uintptr_t* mmap_offset, struct enclave_t* enclave, struct relay_page_entry_t* relay_page_entry)
++{
++  uintptr_t retval = 0;
++  // If mm_arg_size is zero but mm_arg_addr is not, the relay page is transferred from another enclave
++  if(mm_arg_addr && !mm_arg_size)
++  {
++    int slab_index = 0, link_mem_index = 0, kk = 0;
++    if(check_enclave_name(enclave->enclave_name, eid) < 0)
++    {
++      sbi_bug("M mode: map_relay_page: check enclave name failed\n");
++      retval = -1UL;
++      return retval;
++    }
++    while((relay_page_entry = __get_relay_page_by_name(enclave->enclave_name, &slab_index, &link_mem_index)) != NULL)
++    {
++      mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_MM_ARG_BASE + *mmap_offset, relay_page_entry->addr, relay_page_entry->size);
++      *mmap_offset = *mmap_offset + relay_page_entry->size;
++      enclave->mm_arg_paddr[kk] = relay_page_entry->addr;
++      enclave->mm_arg_size[kk] = relay_page_entry->size;
++      kk = kk + 1;
++    }
++    if ((relay_page_entry == NULL) && (enclave->mm_arg_paddr[0] == 0))
++    {
++      sbi_bug("M mode: map_relay_page: get relay page by name failed\n");
++      retval = -1UL;
++      return retval;
++    }
++  }
++  else if(mm_arg_addr && mm_arg_size)
++  {
++    //check whether the enclave name is duplicated
++    if (check_enclave_name(enclave->enclave_name, eid) < 0)
++    {
++      sbi_bug("M mode: map_relay_page: check enclave name failed\n");
++      retval = -1UL;
++      return retval;
++    }
++    if (__alloc_relay_page_entry(enclave->enclave_name, mm_arg_addr, mm_arg_size) == NULL)
++    {
++      sbi_printf("M mode: map_relay_page: insufficient secure memory for the relay page entries\n");
++      retval = ENCLAVE_NO_MEM;
++      return retval;
++    }
++    //check that the relay page is not mapped in another enclave, and unmap the relay page for the host
++    if(check_and_set_secure_memory(mm_arg_addr, mm_arg_size) != 0)
++    {
++      sbi_bug("M mode: map_relay_page: check_and_set_secure_memory failed\n");
++      retval = -1UL;
++      return retval;
++    }
++    enclave->mm_arg_paddr[0] = mm_arg_addr;
++    enclave->mm_arg_size[0] = mm_arg_size;
++    *mmap_offset = mm_arg_size;
++    mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_MM_ARG_BASE, mm_arg_addr, mm_arg_size);
++
++    //Sync and flush the remote TLB entries.
++    tlb_remote_sfence();
++    return 0;
++  }
++
++  return retval;
++}
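++
++/*
++ * A relay page is a physical buffer whose ownership moves between enclaves
++ * instead of being shared. The sketch below (hypothetical wrapper, not part
++ * of this patch) shows the two ways the function above is reached:
++ * registering a fresh buffer (addr and size both non-zero) versus
++ * re-attaching pages that were transferred to this enclave by name
++ * (size == 0).
++ */
++#if 0
++static void relay_page_modes_sketch(struct enclave_t *enclave,
++                                    uintptr_t buf_paddr, uintptr_t buf_size)
++{
++  uintptr_t off = 0;
++  /* mode 1: register buf as a new relay page, mapped at ENCLAVE_DEFAULT_MM_ARG_BASE */
++  map_relay_page(enclave->eid, buf_paddr, buf_size, &off, enclave, NULL);
++  /* mode 2: size == 0, look up relay pages recorded under this enclave's name */
++  map_relay_page(enclave->eid, buf_paddr, 0, &off, enclave, NULL);
++}
++#endif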
++
++/**
++ * \brief Run the enclave with the given eid.
++ *
++ * \param regs The host registers that need to be saved.
++ * \param eid The given enclave id.
++ * \param mm_arg_addr The relay page address for this enclave, mapped before the enclave runs.
++ * \param mm_arg_size The relay page size for this enclave, mapped before the enclave runs.
++ */
++uintptr_t run_enclave(uintptr_t* regs, unsigned int eid, uintptr_t mm_arg_addr, uintptr_t mm_arg_size)
++{
++  struct enclave_t* enclave;
++  uintptr_t retval = 0, mmap_offset = 0;
++  struct relay_page_entry_t* relay_page_entry = NULL;
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || enclave->state != FRESH || enclave->type == SERVER_ENCLAVE)
++  {
++    sbi_bug("M mode: run_enclave: enclave%d cannot be accessed!\n", eid);
++    retval = -1UL;
++    goto run_enclave_out;
++  }
++
++  /** We bind a host process (host_ptbr) during run_enclave, which will be checked during resume */
++  enclave->host_ptbr = csr_read(CSR_SATP);
++
++  //map the relay page (if any); it may also have been transferred from another enclave
++  if((retval = map_relay_page(eid, mm_arg_addr, mm_arg_size, &mmap_offset, enclave, relay_page_entry)) < 0)
++    goto run_enclave_out;
++
++  if(swap_from_host_to_enclave(regs, enclave) < 0)
++  {
++    sbi_bug("M mode: run_enclave: enclave cannot be run\n");
++    retval = -1UL;
++    goto run_enclave_out;
++  }
++
++  //set return address to enclave
++  csr_write(CSR_MEPC, (uintptr_t)(enclave->entry_point));
++
++  //enable timer interrupt
++  csr_read_set(CSR_MIE, MIP_MTIP);
++  csr_read_set(CSR_MIE, MIP_MSIP);
++
++  //set default stack
++  regs[2] = ENCLAVE_DEFAULT_STACK_BASE;
++
++  //pass parameters
++  if(enclave->shm_paddr)
++    regs[10] = ENCLAVE_DEFAULT_SHM_BASE;
++  else
++    regs[10] = 0;
++  retval = regs[10];
++  regs[11] = enclave->shm_size;
++  regs[12] = 1;
++  if(enclave->mm_arg_paddr[0])
++    regs[13] = ENCLAVE_DEFAULT_MM_ARG_BASE;
++  else
++    regs[13] = 0;
++  regs[14] = mmap_offset;
++  eapp_args = eapp_args+1;
++
++  enclave->state = RUNNING;
++run_enclave_out:
++  release_enclave_metadata_lock();
++  return retval;
++}
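++
++/*
++ * The register block above is the de-facto ABI an eapp sees on entry:
++ * a0 = shared-memory base (or 0), a1 = shared-memory size, a2 = an argument
++ * counter, a3 = relay-page base (or 0), a4 = total relay-page size mapped.
++ * A matching eapp entry prototype (hypothetical name, sketch only):
++ */
++#if 0
++int eapp_entry_sketch(void *shm, unsigned long shm_size, unsigned long argc,
++                      void *relay_page, unsigned long relay_size);
++#endif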
++
++/**
++ * \brief Run a shadow enclave with the given eid.
++ *
++ * \param regs The host registers that need to be saved.
++ * \param eid The given shadow enclave id.
++ * \param enclave_run_param The parameters for running a shadow enclave.
++ * \param mm_arg_addr The relay page address for this enclave, mapped before the enclave runs.
++ * \param mm_arg_size The relay page size for this enclave, mapped before the enclave runs.
++ */
++uintptr_t run_shadow_enclave(uintptr_t* regs, unsigned int eid, shadow_enclave_run_param_t enclave_run_param, uintptr_t mm_arg_addr, uintptr_t mm_arg_size)
++{
++  struct enclave_t* enclave = NULL;
++  struct shadow_enclave_t* shadow_enclave = NULL;
++  struct relay_page_entry_t* relay_page_entry = NULL;
++  struct pm_area_struct* pma = NULL;
++  struct vm_area_struct* vma = NULL;
++  uintptr_t retval = 0, mmap_offset = 0, free_mem = 0;
++  int need_free_secure_memory = 0, copy_page_table_ret = 0;
++
++  acquire_enclave_metadata_lock();
++
++  shadow_enclave = __get_shadow_enclave(eid);
++  enclave = __alloc_enclave();
++
++  if(!shadow_enclave || !enclave)
++  {
++    sbi_bug("M mode: run_shadow_enclave: creating an enclave from the shadow enclave failed\n");
++    retval = ENCLAVE_NO_MEM;
++    goto run_enclave_out;
++  }
++
++  if(check_and_set_secure_memory(enclave_run_param.free_page, enclave_run_param.size) != 0)
++  {
++    retval = ENCLAVE_ERROR;
++    goto run_enclave_out;
++  }
++  need_free_secure_memory = 1;
++
++  enclave->free_pages = NULL;
++  enclave->free_pages_num = 0;
++  free_mem = enclave_run_param.free_page + enclave_run_param.size - 2*RISCV_PGSIZE;
++
++  // Reserve the first two pages (pma/vma bookkeeping and the copied root page table)
++  while(free_mem >= enclave_run_param.free_page + 2*RISCV_PGSIZE)
++  {
++    struct page_t *page = (struct page_t*)free_mem;
++    page->paddr = free_mem;
++    page->next = enclave->free_pages;
++    enclave->free_pages = page;
++    enclave->free_pages_num += 1;
++    free_mem -= RISCV_PGSIZE;
++  }
++
++  copy_page_table_ret = __copy_page_table((pte_t*) (shadow_enclave->root_page_table), &(enclave->free_pages), 2, (pte_t*)(enclave_run_param.free_page + RISCV_PGSIZE));
++  if (copy_page_table_ret < 0)
++  {
++    sbi_bug("M mode: run_shadow_enclave: copy_page_table failed\n");
++    retval = ENCLAVE_ERROR;
++    goto run_enclave_out;
++  }
++
++  copy_page_table_ret = map_empty_page((uintptr_t*)(enclave_run_param.free_page + RISCV_PGSIZE), &(enclave->free_pages), ENCLAVE_DEFAULT_STACK_BASE-ENCLAVE_DEFAULT_STACK_SIZE, ENCLAVE_DEFAULT_STACK_SIZE);
++  if (copy_page_table_ret < 0)
++  {
++    sbi_bug("M mode: run_shadow_enclave: failed to allocate the stack; ENCLAVE_DEFAULT_STACK_SIZE is larger than the free memory size\n");
++    retval = ENCLAVE_ERROR;
++    goto run_enclave_out;
++  }
++
++  SET_ENCLAVE_METADATA(shadow_enclave->entry_point, enclave, &enclave_run_param, shadow_enclave_run_param_t *, free_page);
++
++  //traverse vmas
++  pma = (struct pm_area_struct*)(enclave_run_param.free_page);
++  vma = (struct vm_area_struct*)(enclave_run_param.free_page + sizeof(struct pm_area_struct));
++  pma->paddr = enclave_run_param.free_page;
++  pma->size = enclave_run_param.size;
++  pma->free_mem = enclave_run_param.free_page + 2*RISCV_PGSIZE;
++  initilze_va_struct(pma, vma, enclave);
++
++  //check kbuffer
++  if(enclave_run_param.kbuffer_size < RISCV_PGSIZE || enclave_run_param.kbuffer & (RISCV_PGSIZE-1) || enclave_run_param.kbuffer_size & (RISCV_PGSIZE-1))
++  {
++    retval = ENCLAVE_ERROR;
++    goto run_enclave_out;
++  }
++  mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_KBUFFER, enclave_run_param.kbuffer, enclave_run_param.kbuffer_size);
++
++  //check shm
++  if(enclave_run_param.shm_paddr && enclave_run_param.shm_size &&
++     !(enclave_run_param.shm_paddr & (RISCV_PGSIZE-1)) && !(enclave_run_param.shm_size & (RISCV_PGSIZE-1)))
++  {
++    mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_SHM_BASE, enclave_run_param.shm_paddr, enclave_run_param.shm_size);
++    enclave->shm_paddr = enclave_run_param.shm_paddr;
++    enclave->shm_size = enclave_run_param.shm_size;
++  }
++  else
++  {
++    enclave->shm_paddr = 0;
++    enclave->shm_size = 0;
++  }
++
++  copy_word_to_host((unsigned int*)enclave_run_param.eid_ptr, enclave->eid);
++
++  //map the relay page
++  if((retval = map_relay_page(eid, mm_arg_addr, mm_arg_size, &mmap_offset, enclave, relay_page_entry)) < 0)
++  {
++    if (retval == ENCLAVE_NO_MEM)
++      goto failed;
++    else
++      goto run_enclave_out;
++  }
++
++  if(swap_from_host_to_enclave(regs, enclave) < 0)
++  {
++    sbi_bug("M mode: run_shadow_enclave: enclave cannot be run\n");
++    retval = -1UL;
++    goto run_enclave_out;
++  }
++
++  //set return address to enclave
++  csr_write(CSR_MEPC, (uintptr_t)(enclave->entry_point));
++
++  //enable timer interrupt
++  csr_read_set(CSR_MIE, MIP_MTIP);
++  csr_read_set(CSR_MIE, MIP_MSIP);
++
++  //set default stack
++  regs[2] = ENCLAVE_DEFAULT_STACK_BASE;
++
++  //pass parameters
++  if(enclave->shm_paddr)
++    regs[10] = ENCLAVE_DEFAULT_SHM_BASE;
++  else
++    regs[10] = 0;
++  retval = regs[10];
++  regs[11] = enclave->shm_size;
++  regs[12] = (eapp_args) % 5;
++  if(enclave->mm_arg_paddr[0])
++    regs[13] = ENCLAVE_DEFAULT_MM_ARG_BASE;
++  else
++    regs[13] = 0;
++  regs[14] = mmap_offset;
++  eapp_args = eapp_args+1;
++
++  enclave->state = RUNNING;
++  sbi_printf("M mode: run shadow enclave...\n");
++
++run_enclave_out:
++  release_enclave_metadata_lock();
++  return retval;
++
++failed:
++  if(need_free_secure_memory)
++  {
++    free_secure_memory(enclave_run_param.free_page, enclave_run_param.size);
++    sbi_memset((void *)enclave_run_param.free_page, 0, enclave_run_param.size);
++  }
++
++  if(enclave)
++    __free_enclave(enclave->eid);
++
++  release_enclave_metadata_lock();
++  return retval;
++}
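++
++/*
++ * A shadow enclave is a measured template: each call above instantiates a
++ * fresh enclave from it by copying the template page table into the
++ * caller-supplied free pages and then mapping a brand-new stack. Condensed
++ * shape of that instantiation (the same calls as above, sketch only; "run"
++ * stands for the enclave_run_param argument):
++ */
++#if 0
++  /* run.free_page/run.size: the new enclave's physical region; page 0 holds
++   * pma/vma bookkeeping, page 1 receives the copied root page table */
++  __copy_page_table((pte_t*)shadow_enclave->root_page_table, &enclave->free_pages,
++                    2, (pte_t*)(run.free_page + RISCV_PGSIZE));
++  map_empty_page((uintptr_t*)(run.free_page + RISCV_PGSIZE), &enclave->free_pages,
++                 ENCLAVE_DEFAULT_STACK_BASE - ENCLAVE_DEFAULT_STACK_SIZE,
++                 ENCLAVE_DEFAULT_STACK_SIZE);
++#endif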
++
++uintptr_t attest_enclave(uintptr_t eid, uintptr_t report_ptr, uintptr_t nonce)
++{
++  struct enclave_t* enclave = NULL;
++  int attestable = 1;
++  struct report_t report;
++  enclave_state_t old_state = INVALID;
++  acquire_enclave_metadata_lock();
++  enclave = __get_enclave(eid);
++  if(!enclave || (enclave->state != FRESH && enclave->state != STOPPED)
++     || enclave->host_ptbr != csr_read(CSR_SATP))
++    attestable = 0;
++  else
++  {
++    old_state = enclave->state;
++    enclave->state = ATTESTING;
++  }
++  release_enclave_metadata_lock();
++
++  if(!attestable)
++  {
++    sbi_printf("M mode: attest_enclave: enclave%ld is not attestable\r\n", eid);
++    return -1UL;
++  }
++
++  sbi_memcpy((void*)(report.dev_pub_key), (void*)DEV_PUB_KEY, PUBLIC_KEY_SIZE);
++  sbi_memcpy((void*)(report.sm.hash), (void*)SM_HASH, HASH_SIZE);
++  sbi_memcpy((void*)(report.sm.sm_pub_key), (void*)SM_PUB_KEY, PUBLIC_KEY_SIZE);
++  sbi_memcpy((void*)(report.sm.signature), (void*)SM_SIGNATURE, SIGNATURE_SIZE);
++
++  hash_enclave(enclave, (void*)(report.enclave.hash), nonce);
++  sign_enclave((void*)(report.enclave.signature), (void*)(report.enclave.hash));
++  report.enclave.nonce = nonce;
++
++  //printHex((unsigned char*)(report.enclave.signature), 64);
++
++  copy_to_host((void*)report_ptr, (void*)(&report), sizeof(struct report_t));
++
++  acquire_enclave_metadata_lock();
++  enclave->state = old_state;
++  release_enclave_metadata_lock();
++  return 0;
++}
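++
++/*
++ * What a remote verifier does with the report produced above (sketch only;
++ * verify_sm2_signature() is a hypothetical verifier-side primitive with an
++ * assumed (pub_key, message, signature) order, while struct report_t is the
++ * layout from this patch): first check the SM measurement against the
++ * device key, then the enclave hash+nonce against the SM key.
++ */
++#if 0
++static int verify_report_sketch(struct report_t *r, uintptr_t expected_nonce)
++{
++  if(r->enclave.nonce != expected_nonce)
++    return -1; /* replayed report */
++  if(verify_sm2_signature(r->dev_pub_key, r->sm.hash, r->sm.signature) != 0)
++    return -1; /* security monitor not endorsed by the device key */
++  if(verify_sm2_signature(r->sm.sm_pub_key, r->enclave.hash, r->enclave.signature) != 0)
++    return -1; /* enclave hash not endorsed by the SM key */
++  return 0;
++}
++#endif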
++
++uintptr_t attest_shadow_enclave(uintptr_t eid, uintptr_t report_ptr, uintptr_t nonce)
++{
++  struct shadow_enclave_t* shadow_enclave = NULL;
++  int attestable = 1;
++  struct report_t report;
++  acquire_enclave_metadata_lock();
++  shadow_enclave = __get_shadow_enclave(eid);
++  if(!shadow_enclave)
++    attestable = 0;
++  release_enclave_metadata_lock();
++
++  if(!attestable)
++  {
++    sbi_printf("M mode: attest_shadow_enclave: enclave%ld is not attestable\r\n", eid);
++    return -1UL;
++  }
++
++  update_hash_shadow_enclave(shadow_enclave, (char *)shadow_enclave->hash, nonce);
++  sbi_memcpy((char *)(report.enclave.hash), (char *)shadow_enclave->hash, HASH_SIZE);
++  sbi_memcpy((void*)(report.dev_pub_key), (void*)DEV_PUB_KEY, PUBLIC_KEY_SIZE);
++  sbi_memcpy((void*)(report.sm.hash), (void*)SM_HASH, HASH_SIZE);
++  sbi_memcpy((void*)(report.sm.sm_pub_key), (void*)SM_PUB_KEY, PUBLIC_KEY_SIZE);
++  sbi_memcpy((void*)(report.sm.signature), (void*)SM_SIGNATURE, SIGNATURE_SIZE);
++  sign_enclave((void*)(report.enclave.signature), (void*)(report.enclave.hash));
++  report.enclave.nonce = nonce;
++
++  copy_to_host((void*)report_ptr, (void*)(&report), sizeof(struct report_t));
++
++  return 0;
++}
++
++/**
++ * \brief The host uses this function to wake a stopped enclave.
++ *
++ * \param regs The host registers that need to be saved.
++ * \param eid The given enclave id.
++ */
++uintptr_t wake_enclave(uintptr_t* regs, unsigned int eid)
++{
++  uintptr_t retval = 0;
++  struct enclave_t* enclave = NULL;
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_real_enclave(eid);
++  if(!enclave || enclave->state != STOPPED || enclave->host_ptbr != csr_read(CSR_SATP))
++  {
++    sbi_bug("M mode: wake_enclave: enclave%d cannot be accessed!\n", eid);
++    retval = -1UL;
++    goto wake_enclave_out;
++  }
++
++  enclave->state = RUNNABLE;
++
++wake_enclave_out:
++  release_enclave_metadata_lock();
++  return retval;
++}
++
++/**
++ * \brief Resume the enclave from its previous state.
++ *
++ * \param regs The host registers that need to be saved.
++ * \param eid The given enclave id.
++ */
++uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid)
++{
++  uintptr_t retval = 0;
++  struct enclave_t* enclave = NULL;
++
++  acquire_enclave_metadata_lock();
++  enclave = __get_real_enclave(eid);
++  if(!enclave || enclave->state <= FRESH || enclave->host_ptbr != csr_read(CSR_SATP))
++  {
++    sbi_bug("M mode: resume_enclave: enclave%d cannot be accessed\n", eid);
++    retval = -1UL;
++    goto resume_enclave_out;
++  }
++
++  if(enclave->state == STOPPED)
++  {
++    sbi_bug("M mode: resume_enclave: enclave%d is stopped\n", eid);
++    retval = ENCLAVE_TIMER_IRQ;
++    goto resume_enclave_out;
++  }
++  if(enclave->state != RUNNABLE)
++  {
++    sbi_bug("M mode: resume_enclave: enclave%d is not runnable\n", eid);
++    retval = -1UL;
++    goto resume_enclave_out;
++  }
++
++  if(swap_from_host_to_enclave(regs, enclave) < 0)
++  {
++    sbi_bug("M mode: resume_enclave: enclave cannot be resumed\n");
++    retval = -1UL;
++    goto resume_enclave_out;
++  }
++  enclave->state = RUNNING;
++  // regs[10] will be set to retval when mcall_trap returns, so we have to
++  // set retval to regs[10] here to successfully restore the context
++  retval = regs[10];
++resume_enclave_out:
++  release_enclave_metadata_lock();
++  return retval;
++}
++
++/**
++ * \brief Map the memory for the ocall return.
++ *
++ * \param enclave The enclave structure.
++ * \param paddr The mapped physical address.
++ * \param size The mapped memory size.
++ */
++uintptr_t mmap_after_resume(struct enclave_t *enclave, uintptr_t paddr, uintptr_t size)
++{
++  uintptr_t retval = 0;
++  uintptr_t vaddr = enclave->thread_context.prev_state.a1;
++  if(!vaddr)
++    vaddr = ENCLAVE_DEFAULT_MMAP_BASE - (size - RISCV_PGSIZE);
++  if(check_and_set_secure_memory(paddr, size) < 0)
++  {
++    sbi_bug("M mode: mmap_after_resume: check_and_set_secure_memory(0x%lx, 0x%lx) failed\n", paddr, size);
++    retval = -1UL;
++    return retval;
++  }
++
++  //Sync and flush the remote TLB entries.
++  tlb_remote_sfence();
++
++  struct pm_area_struct *pma = (struct pm_area_struct*)paddr;
++  struct vm_area_struct *vma = (struct vm_area_struct*)(paddr + sizeof(struct pm_area_struct));
++  pma->paddr = paddr;
++  pma->size = size;
++  pma->pm_next = NULL;
++  vma->va_start = vaddr;
++  vma->va_end = vaddr + size - RISCV_PGSIZE;
++  vma->vm_next = NULL;
++  vma->pma = pma;
++  if(insert_vma(&(enclave->mmap_vma), vma, ENCLAVE_DEFAULT_MMAP_BASE) < 0)
++  {
++    vma->va_end = enclave->mmap_vma->va_start;
++    vma->va_start = vma->va_end - (size - RISCV_PGSIZE);
++    vma->vm_next = enclave->mmap_vma;
++    enclave->mmap_vma = vma;
++  }
++  insert_pma(&(enclave->pma_list), pma);
++  mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), vma->va_start, paddr+RISCV_PGSIZE, size-RISCV_PGSIZE);
++  retval = vma->va_start;
++
++  return retval;
++}
++
++/**
++ * \brief Map the sbrk memory for the ocall return.
++ *
++ * \param enclave The enclave structure.
++ * \param paddr The mapped physical address.
++ * \param size The mapped memory size.
++ */
++uintptr_t sbrk_after_resume(struct enclave_t *enclave, uintptr_t paddr, uintptr_t size)
++{
++  uintptr_t retval = 0;
++  intptr_t req_size = (intptr_t)(enclave->thread_context.prev_state.a1);
++  if(req_size <= 0)
++  {
++    return enclave->_heap_top;
++  }
++  if(check_and_set_secure_memory(paddr, size) < 0)
++  {
++    retval = -1UL;
++    sbi_bug("M mode: sbrk_after_resume: check_and_set_secure_memory failed\n");
++    return retval;
++  }
++
++  //Sync and flush the remote TLB entries.
++  tlb_remote_sfence();
++
++  struct pm_area_struct *pma = (struct pm_area_struct*)paddr;
++  struct vm_area_struct *vma = (struct vm_area_struct*)(paddr + sizeof(struct pm_area_struct));
++  pma->paddr = paddr;
++  pma->size = size;
++  pma->pm_next = NULL;
++  vma->va_start = enclave->_heap_top;
++  vma->va_end = vma->va_start + size - RISCV_PGSIZE;
++  vma->pma = pma;
++  vma->vm_next = enclave->heap_vma;
++  enclave->heap_vma = vma;
++  enclave->_heap_top = vma->va_end;
++  insert_pma(&(enclave->pma_list), pma);
++  mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), vma->va_start, paddr+RISCV_PGSIZE, size-RISCV_PGSIZE);
++  retval = enclave->_heap_top;
++
++  return retval;
++}
++
++/**
++ * \brief Map the relay page for the ocall return.
++ *
++ * \param enclave The enclave structure.
++ * \param mm_arg_addr Relay page address.
++ * \param mm_arg_size Relay page size.
++ */
++uintptr_t return_relay_page_after_resume(struct enclave_t *enclave, uintptr_t mm_arg_addr, uintptr_t mm_arg_size)
++{
++  uintptr_t retval = 0, mmap_offset = 0;
++  retval = map_relay_page(enclave->eid, mm_arg_addr, mm_arg_size, &mmap_offset, enclave, NULL);
++  return retval;
++}
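++
++/*
++ * Ocall round-trip at a glance: the enclave traps into the monitor, which
++ * stashes the ocall id and arguments for the host, flips the enclave state
++ * to OCALLING, and swaps to the host context; resume_from_ocall() below then
++ * replays the per-ocall fixup (mmap/sbrk/relay-page) before swapping back.
++ * A hypothetical host-driver loop (run_or_resume_ecall, read_ocall_id and
++ * resume_from_ocall_ecall stand in for the real SBI plumbing, sketch only):
++ */
++#if 0
++static void host_ocall_loop_sketch(unsigned int eid)
++{
++  uintptr_t ret = run_or_resume_ecall(eid);
++  while(ret == ENCLAVE_OCALL)
++  {
++    switch(read_ocall_id(eid))
++    {
++      case OCALL_MMAP:      /* allocate pages; return paddr/size in regs[13..14] */ break;
++      case OCALL_SBRK:      /* grow or shrink the heap backing store */ break;
++      case OCALL_SYS_WRITE: /* drain the kbuffer to the console */ break;
++      default: break;
++    }
++    ret = resume_from_ocall_ecall(eid);
++  }
++}
++#endif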
++
++/**
++ * \brief The host uses this function to re-enter the enclave world after an ocall.
++ *
++ * \param regs The host register context.
++ * \param eid The enclave id to resume.
++ */
++uintptr_t resume_from_ocall(uintptr_t* regs, unsigned int eid)
++{
++  uintptr_t retval = 0;
++  uintptr_t ocall_func_id = regs[12];
++  struct enclave_t* enclave = NULL;
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_real_enclave(eid);
++  if(!enclave || enclave->state != OCALLING || enclave->host_ptbr != csr_read(CSR_SATP))
++  {
++    retval = -1UL;
++    goto out;
++  }
++
++  switch(ocall_func_id)
++  {
++    case OCALL_MMAP:
++      retval = mmap_after_resume(enclave, regs[13], regs[14]);
++      if(retval == -1UL)
++        goto out;
++      break;
++    case OCALL_UNMAP:
++      retval = 0;
++      break;
++    case OCALL_SYS_WRITE:
++      retval = enclave->thread_context.prev_state.a0;
++      break;
++    case OCALL_SBRK:
++      retval = sbrk_after_resume(enclave, regs[13], regs[14]);
++      if(retval == -1UL)
++        goto out;
++      break;
++    case OCALL_READ_SECT:
++      retval = regs[13];
++      break;
++    case OCALL_WRITE_SECT:
++      retval = regs[13];
++      break;
++    case OCALL_RETURN_RELAY_PAGE:
++      retval = return_relay_page_after_resume(enclave, regs[13], regs[14]);
++      if(retval == -1UL)
++        goto out;
++      break;
++    default:
++      retval = 0;
++      break;
++  }
++
++  if(swap_from_host_to_enclave(regs, enclave) < 0)
++  {
++    retval = -1UL;
++    goto out;
++  }
++  enclave->state = RUNNING;
++
++out:
++  release_enclave_metadata_lock();
++  return retval;
++}
++
++/**
++ * \brief The host calls this function to destroy an existing enclave.
++ *
++ * \param regs The host register context.
++ * \param eid The enclave id to destroy.
++ */
++uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid)
++{
++  uintptr_t retval = 0;
++  struct enclave_t *enclave = NULL;
++  uintptr_t dest_hart = 0;
++  struct pm_area_struct* pma = NULL;
++  int need_free_enclave_memory = 0;
++  unsigned long mm_arg_paddr[RELAY_PAGE_NUM];
++  unsigned long mm_arg_size[RELAY_PAGE_NUM];
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || enclave->state < FRESH || enclave->type == SERVER_ENCLAVE)
++  {
++    sbi_bug("M mode: destroy_enclave: enclave%d cannot be accessed\r\n", eid);
++    retval = -1UL;
++    goto destroy_enclave_out;
++  }
++  //snapshot the relay pages only after the enclave pointer has been validated
++  for(int kk = 0; kk < RELAY_PAGE_NUM; kk++)
++  {
++    mm_arg_paddr[kk] = enclave->mm_arg_paddr[kk];
++    mm_arg_size[kk] = enclave->mm_arg_size[kk];
++  }
++
++  if(enclave->state != RUNNING)
++  {
++    pma = enclave->pma_list;
++    need_free_enclave_memory = 1;
++    __free_enclave(eid);
++  }
++  else
++  {
++    //cpus' state will be protected by enclave_metadata_lock
++    for(int i = 0; i < MAX_HARTS; ++i)
++    {
++      if(cpus[i].in_enclave && cpus[i].eid == eid)
++        dest_hart = i;
++    }
++    if (dest_hart == csr_read(CSR_MHARTID))
++      ipi_destroy_enclave(regs, csr_read(CSR_SATP), eid);
++    else
++      set_ipi_destroy_enclave_and_sync(dest_hart, csr_read(CSR_SATP), eid);
++  }
++
++destroy_enclave_out:
++  release_enclave_metadata_lock();
++
++  //should wait until after releasing enclave_metadata_lock to avoid deadlock
++  if(need_free_enclave_memory)
++  {
++    free_enclave_memory(pma);
++    free_all_relay_page(mm_arg_paddr, mm_arg_size);
++  }
++
++  return retval;
++}
++
++/**************************************************************/
++/* called by enclave */
++/**************************************************************/
++/**
++ * \brief Exit from the enclave.
++ *
++ * \param regs The host register context.
++ * \param enclave_retval The enclave return value.
++ */
++uintptr_t exit_enclave(uintptr_t* regs, unsigned long enclave_retval)
++{
++  struct enclave_t *enclave = NULL;
++  int eid = 0;
++  uintptr_t ret = 0;
++  struct pm_area_struct *pma = NULL;
++  int need_free_enclave_memory = 0;
++  if(check_in_enclave_world() < 0)
++  {
++    sbi_bug("M mode: exit_enclave: cpu is not in the enclave world now\n");
++    return -1UL;
++  }
++
++  acquire_enclave_metadata_lock();
++
++  eid = get_curr_enclave_id();
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->type == SERVER_ENCLAVE)
++  {
++    sbi_bug("M mode: exit_enclave: enclave%d cannot be accessed!\n", eid);
++    ret = -1UL;
++    goto exit_enclave_out;
++  }
++  swap_from_enclave_to_host(regs, enclave);
++
++  pma = enclave->pma_list;
++  need_free_enclave_memory = 1;
++  unsigned long mm_arg_paddr[RELAY_PAGE_NUM];
++  unsigned long mm_arg_size[RELAY_PAGE_NUM];
++  for(int kk = 0; kk < RELAY_PAGE_NUM; kk++)
++  {
++    mm_arg_paddr[kk] = enclave->mm_arg_paddr[kk];
++    mm_arg_size[kk] = enclave->mm_arg_size[kk];
++  }
++  __free_enclave(eid);
++
++exit_enclave_out:
++
++  if(need_free_enclave_memory)
++  {
++    free_enclave_memory(pma);
++    free_all_relay_page(mm_arg_paddr, mm_arg_size);
++  }
++  release_enclave_metadata_lock();
++  return ret;
++}
++
++/**
++ * \brief The enclave needs to map a new mmap region; ocall to the host to handle it.
++ *
++ * \param regs The enclave register context.
++ * \param vaddr The mmap virtual address.
++ * \param size The mmap virtual memory size.
++ */
++uintptr_t enclave_mmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size)
++{
++  uintptr_t ret = 0;
++  int eid = get_curr_enclave_id();
++  struct enclave_t* enclave = NULL;
++  if(check_in_enclave_world() < 0)
++    return -1;
++  if(vaddr)
++  {
++    if(vaddr & (RISCV_PGSIZE-1) || size < RISCV_PGSIZE || size & (RISCV_PGSIZE-1))
++      return -1;
++  }
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING)
++  {
++    ret = -1UL;
++    goto out;
++  }
++
++  copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_MMAP);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg1, size + RISCV_PGSIZE);
++
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = OCALLING;
++  ret = ENCLAVE_OCALL;
++
++out:
++  release_enclave_metadata_lock();
++  return ret;
++}
++
++/**
++ * \brief The enclave needs to unmap a mmap region; ocall to the host to handle it.
++ *
++ * \param regs The enclave register context.
++ * \param vaddr The unmap virtual address.
++ * \param size The unmap virtual memory size.
++ */
++uintptr_t enclave_unmap(uintptr_t* regs, uintptr_t vaddr, uintptr_t size)
++{
++  uintptr_t ret = 0;
++  int eid = get_curr_enclave_id();
++  struct enclave_t* enclave = NULL;
++  struct vm_area_struct *vma = NULL;
++  struct pm_area_struct *pma = NULL;
++  int need_free_secure_memory = 0;
++  if(check_in_enclave_world() < 0)
++    return -1;
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING)
++  {
++    ret = -1UL;
++    goto out;
++  }
++
++  vma = find_vma(enclave->mmap_vma, vaddr, size);
++  if(!vma)
++  {
++    ret = -1UL;
++    goto out;
++  }
++  pma = vma->pma;
++  delete_vma(&(enclave->mmap_vma), vma);
++  delete_pma(&(enclave->pma_list), pma);
++  vma->vm_next = NULL;
++  pma->pm_next = NULL;
++  unmap((uintptr_t*)(enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start);
++  need_free_secure_memory = 1;
++
++  copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_UNMAP);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg0, pma->paddr);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg1, pma->size);
++
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = OCALLING;
++  ret = ENCLAVE_OCALL;
++
++out:
++  release_enclave_metadata_lock();
++  if(need_free_secure_memory)
++  {
++    free_enclave_memory(pma);
++  }
++  return ret;
++}
++
++/**
++ * \brief The enclave calls sbrk() in the runtime; ocall to the host to handle it.
++ *
++ * \param regs The enclave register context.
++ * \param size The heap adjustment size (positive to grow, negative to shrink).
++ */
++uintptr_t enclave_sbrk(uintptr_t* regs, intptr_t size)
++{
++  uintptr_t ret = 0;
++  uintptr_t abs_size = 0;
++  int eid = get_curr_enclave_id();
++  struct enclave_t* enclave = NULL;
++  struct pm_area_struct *pma = NULL;
++  struct vm_area_struct *vma = NULL;
++  if(check_in_enclave_world() < 0)
++    return -1;
++  if(size < 0)
++  {
++    abs_size = 0 - size;
++  }
++  else
++  {
++    abs_size = size;
++  }
++  if(abs_size & (RISCV_PGSIZE-1))
++    return -1;
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING)
++  {
++    ret = -1UL;
++    goto out;
++  }
++
++  if(size == 0)
++  {
++    ret = enclave->_heap_top;
++    goto out;
++  }
++  if(size < 0)
++  {
++    uintptr_t dest_va = enclave->_heap_top - abs_size;
++    vma = enclave->heap_vma;
++    while(vma && vma->va_start >= dest_va)
++    {
++      struct pm_area_struct *cur_pma = vma->pma;
++      delete_pma(&(enclave->pma_list), cur_pma);
++      cur_pma->pm_next = pma;
++      pma = cur_pma;
++      unmap((uintptr_t*)(enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start);
++      enclave->heap_vma = vma->vm_next;
++      vma = vma->vm_next;
++    }
++    if(enclave->heap_vma)
++      enclave->_heap_top = enclave->heap_vma->va_end;
++    else
++      enclave->_heap_top = ENCLAVE_DEFAULT_HEAP_BASE;
++  }
++  copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_SBRK);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg0, (uintptr_t)pma);
++  if(size > 0)
++    copy_dword_to_host((uintptr_t*)enclave->ocall_arg1, size + RISCV_PGSIZE);
++  else
++    copy_dword_to_host((uintptr_t*)enclave->ocall_arg1, size);
++
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = OCALLING;
++  ret = ENCLAVE_OCALL;
++
++out:
++  release_enclave_metadata_lock();
++  if(pma)
++  {
++    free_enclave_memory(pma);
++  }
++  return ret;
++}
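++
++/*
++ * Heap protocol as seen from the eapp side (sketch; enclave_sbrk_syscall()
++ * is a hypothetical runtime wrapper around the trap into the code above):
++ * growth is page-granular and round-trips through an OCALL_SBRK to the host,
++ * while shrinking is handled entirely inside the monitor by unmapping heap
++ * vmas above the new top.
++ */
++#if 0
++static void eapp_heap_sketch(void)
++{
++  uintptr_t heap_top = enclave_sbrk_syscall(0);                  /* query the current top */
++  heap_top = enclave_sbrk_syscall(4*RISCV_PGSIZE);               /* grow: ocall to the host */
++  heap_top = enclave_sbrk_syscall(-(intptr_t)(4*RISCV_PGSIZE));  /* shrink: monitor-local */
++  (void)heap_top;
++}
++#endif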
++
++/**
++ * \brief The enclave calls print() in the runtime; ocall to the host to handle it.
++ *
++ * \param regs The enclave register context.
++ */
++uintptr_t enclave_sys_write(uintptr_t* regs)
++{
++  uintptr_t ret = 0;
++  int eid = get_curr_enclave_id();
++  struct enclave_t* enclave = NULL;
++  if(check_in_enclave_world() < 0)
++  {
++    sbi_bug("M mode: %s: check enclave world failed\n", __func__);
++    return -1;
++  }
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING)
++  {
++    ret = -1UL;
++    sbi_bug("M mode: %s: check enclave authentication failed\n", __func__);
++    goto out;
++  }
++  copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_SYS_WRITE);
++
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = OCALLING;
++  ret = ENCLAVE_OCALL;
++out:
++  release_enclave_metadata_lock();
++  return ret;
++}
++
++/**
++ * \brief Call the server enclave, and transfer the relay page ownership.
++ *
++ * \param regs The enclave register context.
++ * \param callee_eid The callee enclave id.
++ * \param arg The passed arguments.
++ */
++uintptr_t call_enclave(uintptr_t* regs, unsigned int callee_eid, uintptr_t arg)
++{
++  struct enclave_t* top_caller_enclave = NULL;
++  struct enclave_t* caller_enclave = NULL;
++  struct enclave_t* callee_enclave = NULL;
++  struct vm_area_struct* vma = NULL;
++  struct pm_area_struct* pma = NULL;
++  uintptr_t retval = 0;
++  int caller_eid = get_curr_enclave_id();
++  if(check_in_enclave_world() < 0)
++    return -1;
++
++  acquire_enclave_metadata_lock();
++  caller_enclave = __get_enclave(caller_eid);
++  if(!caller_enclave || caller_enclave->state != RUNNING || check_enclave_authentication(caller_enclave) != 0)
++  {
++    sbi_bug("M mode: call_enclave: enclave%d cannot execute call_enclave!\n", caller_eid);
++    retval = -1UL;
++    goto out;
++  }
++  if(caller_enclave->caller_eid != -1)
++    top_caller_enclave = __get_enclave(caller_enclave->top_caller_eid);
++  else
++    top_caller_enclave = caller_enclave;
++  if(!top_caller_enclave || top_caller_enclave->state != RUNNING)
++  {
++    sbi_bug("M mode: call_enclave: enclave%d cannot execute call_enclave!\n", caller_eid);
++    retval = -1UL;
++    goto out;
++  }
++  callee_enclave = __get_enclave(callee_eid);
++  if(!callee_enclave || callee_enclave->type != SERVER_ENCLAVE || callee_enclave->caller_eid != -1 || callee_enclave->state != RUNNABLE)
++  {
++    sbi_bug("M mode: call_enclave: enclave%d cannot be accessed!\n", callee_eid);
++    retval = -1UL;
++    goto out;
++  }
++
++  struct call_enclave_arg_t call_arg;
++  struct call_enclave_arg_t* call_arg0 = va_to_pa((uintptr_t*)(caller_enclave->root_page_table), (void*)arg);
++  if(!call_arg0)
++  {
++    sbi_bug("M mode: call_enclave: call_arg0 does not exist\n");
++    retval = -1UL;
++    goto out;
++  }
++  copy_from_host(&call_arg, call_arg0, sizeof(struct call_enclave_arg_t));
++  if(call_arg.req_vaddr != 0)
++  {
++    if(call_arg.req_vaddr & (RISCV_PGSIZE-1) || call_arg.req_size < RISCV_PGSIZE || call_arg.req_size & (RISCV_PGSIZE-1))
++    {
++      sbi_bug("M mode: call_enclave: vaddr and size are not aligned\n");
++      retval = -1UL;
++      goto out;
++    }
++
++    if(call_arg.req_vaddr == ENCLAVE_DEFAULT_MM_ARG_BASE)
++    {
++      callee_enclave->mm_arg_paddr[0] = caller_enclave->mm_arg_paddr[0];
++      callee_enclave->mm_arg_size[0] = caller_enclave->mm_arg_size[0];
++      caller_enclave->mm_arg_paddr[0] = 0;
++      caller_enclave->mm_arg_size[0] = 0;
++      unmap((uintptr_t*)(caller_enclave->root_page_table), call_arg.req_vaddr, call_arg.req_size);
++      mmap((uintptr_t*)(callee_enclave->root_page_table), &(callee_enclave->free_pages), ENCLAVE_DEFAULT_MM_ARG_BASE, callee_enclave->mm_arg_paddr[0], call_arg.req_size);
++    }
++    else
++    {
++      //Unmap for the caller enclave
++      vma = find_vma(caller_enclave->mmap_vma, call_arg.req_vaddr, call_arg.req_size);
++      if(!vma)
++      {
++        sbi_bug("M mode: call_enclave: vma does not exist\n");
++        retval = -1UL;
++        goto out;
++      }
++      pma = vma->pma;
++      delete_vma(&(caller_enclave->mmap_vma), vma);
++      delete_pma(&(caller_enclave->pma_list), pma);
++      vma->vm_next = NULL;
++      pma->pm_next = NULL;
++      unmap((uintptr_t*)(caller_enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start);
++      //Map for the callee enclave
++      if(insert_vma(&(callee_enclave->mmap_vma), vma, ENCLAVE_DEFAULT_MMAP_BASE) < 0)
++      {
++        vma->va_end = callee_enclave->mmap_vma->va_start;
++        vma->va_start = vma->va_end - (pma->size - RISCV_PGSIZE);
++        vma->vm_next = callee_enclave->mmap_vma;
++        callee_enclave->mmap_vma = vma;
++      }
++      insert_pma(&(callee_enclave->pma_list), pma);
++      mmap((uintptr_t*)(callee_enclave->root_page_table), &(callee_enclave->free_pages), vma->va_start, pma->paddr + RISCV_PGSIZE, pma->size - RISCV_PGSIZE);
++    }
++  }
++  if(__enclave_call(regs, top_caller_enclave, caller_enclave, callee_enclave) < 0)
++  {
++    sbi_bug("M mode: call_enclave: enclave cannot be run\n");
++    retval = -1UL;
++    goto out;
++  }
++  //set return address to enclave
++  csr_write(CSR_MEPC, (uintptr_t)(callee_enclave->entry_point));
++
++  //enable timer interrupt
++  csr_read_set(CSR_MIE, MIP_MTIP);
++  csr_read_set(CSR_MIE, MIP_MSIP);
++
++  //set default stack
++  regs[2] = ENCLAVE_DEFAULT_STACK_BASE;
++
++  //map the kbuffer
++  mmap((uintptr_t*)(callee_enclave->root_page_table), &(callee_enclave->free_pages), ENCLAVE_DEFAULT_KBUFFER, top_caller_enclave->kbuffer, top_caller_enclave->kbuffer_size);
++
++  //pass parameters
++  regs[10] = call_arg.req_arg;
++  if(call_arg.req_vaddr == ENCLAVE_DEFAULT_MM_ARG_BASE)
++    regs[11] = ENCLAVE_DEFAULT_MM_ARG_BASE;
++  else if(call_arg.req_vaddr)
++    regs[11] = vma->va_start;
++  else
++    regs[11] = 0;
++  regs[12] = call_arg.req_size;
++  if(callee_enclave->shm_paddr)
++    regs[13] = ENCLAVE_DEFAULT_SHM_BASE;
++  else
++    regs[13] = 0;
++  regs[14] = callee_enclave->shm_size;
++  retval = call_arg.req_arg;
++
++  callee_enclave->state = RUNNING;
++out:
++  release_enclave_metadata_lock();
++  return retval;
++}
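++
++/*
++ * Caller-side shape of a server call (sketch; call_enclave_syscall() is a
++ * hypothetical runtime wrapper around the trap into call_enclave(), while
++ * struct call_enclave_arg_t is the layout from this patch): req_vaddr/
++ * req_size name a page-aligned mmap region whose ownership moves into the
++ * callee for the duration of the call, and resp_* travel back the same way
++ * on enclave_return().
++ */
++#if 0
++static unsigned long call_server_sketch(unsigned int server_eid, uintptr_t buf, uintptr_t buf_size)
++{
++  struct call_enclave_arg_t arg = {0};
++  arg.req_arg = 42;        /* scalar argument, lands in the callee's a0 */
++  arg.req_vaddr = buf;     /* page-aligned region to hand over (may be 0) */
++  arg.req_size = buf_size;
++  call_enclave_syscall(server_eid, &arg);
++  return arg.resp_val;     /* server's return value, restored on return */
++}
++#endif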
++
++/**
++ * \brief Server enclave return, transferring the relay page ownership back.
++ *
++ * \param regs The enclave register context.
++ * \param arg The return arguments.
++ */
++uintptr_t enclave_return(uintptr_t* regs, uintptr_t arg)
++{
++  struct enclave_t *enclave = NULL;
++  struct enclave_t *caller_enclave = NULL;
++  struct enclave_t *top_caller_enclave = NULL;
++  int eid = 0;
++  uintptr_t ret = 0;
++  struct vm_area_struct* vma = NULL;
++  struct pm_area_struct *pma = NULL;
++
++  if(check_in_enclave_world() < 0)
++  {
++    sbi_bug("M mode: enclave_return: cpu is not in the enclave world now\n");
++    return -1UL;
++  }
++
++  acquire_enclave_metadata_lock();
++
++  eid = get_curr_enclave_id();
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->type != SERVER_ENCLAVE)
++  {
++    sbi_bug("M mode: enclave_return: enclave%d cannot return!\n", eid);
++    ret = -1UL;
++    goto out;
++  }
++  struct call_enclave_arg_t ret_arg;
++  struct call_enclave_arg_t* ret_arg0 = va_to_pa((uintptr_t*)(enclave->root_page_table), (void*)arg);
++  if(!ret_arg0)
++  {
++    sbi_bug("M mode: enclave_return: ret_arg0 is invalid\n");
++    ret = -1UL;
++    goto out;
++  }
++  copy_from_host(&ret_arg, ret_arg0, sizeof(struct call_enclave_arg_t));
++
++  caller_enclave = __get_enclave(enclave->caller_eid);
++  top_caller_enclave = __get_enclave(enclave->top_caller_eid);
++  __enclave_return(regs, enclave, caller_enclave, top_caller_enclave);
++  unmap((uintptr_t*)(enclave->root_page_table), ENCLAVE_DEFAULT_KBUFFER, top_caller_enclave->kbuffer_size);
++
++  //restore the caller enclave's req arg
++  //there is no need to check call_arg's validity again, as it was already checked in call_enclave()
++  struct call_enclave_arg_t *call_arg = va_to_pa((uintptr_t*)(caller_enclave->root_page_table), (void*)(regs[11]));
++
++  //restore req_vaddr
++  if(!call_arg->req_vaddr || !ret_arg.req_vaddr || ret_arg.req_vaddr & (RISCV_PGSIZE-1)
++     || ret_arg.req_size < call_arg->req_size || ret_arg.req_size & (RISCV_PGSIZE-1))
++  {
++    call_arg->req_vaddr = 0;
++    sbi_printf("M mode: enclave_return: the ret argument is inconsistent with the caller argument\n");
++    sbi_bug("M mode: enclave_return: call_arg->req_vaddr %lx call_arg->req_size %lx ret_arg.req_vaddr %lx ret_arg.req_size %lx\n", call_arg->req_vaddr, call_arg->req_size, ret_arg.req_vaddr, ret_arg.req_size);
++    goto restore_resp_addr;
++  }
++  //Remap for the caller enclave
++  if(call_arg->req_vaddr == ENCLAVE_DEFAULT_MM_ARG_BASE)
++  {
++    caller_enclave->mm_arg_paddr[0] = enclave->mm_arg_paddr[0];
++    caller_enclave->mm_arg_size[0] = enclave->mm_arg_size[0];
++    enclave->mm_arg_paddr[0] = 0;
++    enclave->mm_arg_size[0] = 0;
++    unmap((uintptr_t*)(enclave->root_page_table), call_arg->req_vaddr, call_arg->req_size);
++    mmap((uintptr_t*)(caller_enclave->root_page_table), &(caller_enclave->free_pages), call_arg->req_vaddr, caller_enclave->mm_arg_paddr[0], call_arg->req_size);
++  }
++  else
++  {
++    vma = find_vma(enclave->mmap_vma, ret_arg.req_vaddr, ret_arg.req_size);
++    if(!vma)
++    {
++      //the enclave returns even when the shared mem return failed
++      call_arg->req_vaddr = 0;
++      sbi_bug("M mode: enclave_return: cannot find the corresponding vma for the callee enclave\n");
++      goto restore_resp_addr;
++    }
++    pma = vma->pma;
++    delete_vma(&(enclave->mmap_vma), vma);
++    delete_pma(&(enclave->pma_list), pma);
++    unmap((uintptr_t*)(enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start);
++    vma->va_start = call_arg->req_vaddr;
++    vma->va_end = vma->va_start + pma->size - RISCV_PGSIZE;
++    vma->vm_next = NULL;
++    pma->pm_next = NULL;
++    if(insert_vma(&(caller_enclave->mmap_vma), vma, ENCLAVE_DEFAULT_MMAP_BASE) < 0)
++    {
++      vma->va_end = caller_enclave->mmap_vma->va_start;
++      vma->va_start = vma->va_end - (pma->size - RISCV_PGSIZE);
++      vma->vm_next = caller_enclave->mmap_vma;
++      caller_enclave->mmap_vma = vma;
++    }
++    insert_pma(&(caller_enclave->pma_list), pma);
++    mmap((uintptr_t*)(caller_enclave->root_page_table), &(caller_enclave->free_pages), vma->va_start, pma->paddr + RISCV_PGSIZE, pma->size - RISCV_PGSIZE);
++    call_arg->req_vaddr = vma->va_start;
++  }
++
++restore_resp_addr:
++  if(!ret_arg.resp_vaddr || ret_arg.resp_vaddr & (RISCV_PGSIZE-1)
++     || ret_arg.resp_size < RISCV_PGSIZE || ret_arg.resp_size & (RISCV_PGSIZE-1))
++  {
++    call_arg->resp_vaddr = 0;
++    call_arg->resp_size = 0;
++    goto restore_return_val;
++  }
++  vma = find_vma(enclave->mmap_vma, ret_arg.resp_vaddr, ret_arg.resp_size);
++  if(!vma)
++  {
++    //the enclave returns even when the shared mem return failed
++    call_arg->resp_vaddr = 0;
++    call_arg->resp_size = 0;
++    goto restore_return_val;
++  }
++  pma = vma->pma;
++  delete_vma(&(enclave->mmap_vma), vma);
++  delete_pma(&(enclave->pma_list), pma);
++  unmap((uintptr_t*)(enclave->root_page_table), vma->va_start, vma->va_end - vma->va_start);
++  vma->vm_next = NULL;
++  pma->pm_next = NULL;
++  if(caller_enclave->mmap_vma)
++    vma->va_end = caller_enclave->mmap_vma->va_start;
++  else
++    vma->va_end = ENCLAVE_DEFAULT_MMAP_BASE;
++  vma->va_start = vma->va_end - (pma->size - RISCV_PGSIZE);
++  vma->vm_next = caller_enclave->mmap_vma;
++  caller_enclave->mmap_vma = vma;
++  insert_pma(&(caller_enclave->pma_list), pma);
++  mmap((uintptr_t*)(caller_enclave->root_page_table), &(caller_enclave->free_pages), vma->va_start, pma->paddr + RISCV_PGSIZE, pma->size - RISCV_PGSIZE);
++  call_arg->resp_vaddr = vma->va_start;
++  call_arg->resp_size = ret_arg.resp_size;
++
++  //pass the return value of the server
++restore_return_val:
++  call_arg->resp_val = ret_arg.resp_val;
++  enclave->state = RUNNABLE;
++  ret = 0;
++out:
++  release_enclave_metadata_lock();
++  return ret;
++}
++
++/**************************************************************/
++/* called when irq */
++/**************************************************************/
++/**
++ * \brief Handle the timer interrupt for the enclave.
++ *
++ * \param regs The enclave register context.
++ * \param mcause CSR register mcause.
++ * \param mepc CSR register mepc.
++ */
++uintptr_t do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc)
++{
++  uintptr_t retval = 0;
++  unsigned int eid = get_curr_enclave_id();
++  struct enclave_t *enclave = NULL;
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || enclave->state != RUNNING)
++  {
++    sbi_bug("M mode: do_timer_irq: something is wrong with enclave%d, state: %d\n", eid, enclave ? enclave->state : INVALID);
++    retval = -1UL;
++    goto timer_irq_out;
++  }
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = RUNNABLE;
++
++timer_irq_out:
++  release_enclave_metadata_lock();
++
++  csr_read_clear(CSR_MIE, MIP_MTIP);
++  csr_read_set(CSR_MIP, MIP_STIP);
++  regs[10] = ENCLAVE_TIMER_IRQ;
++  retval = ENCLAVE_TIMER_IRQ;
++  return retval;
++}
++
++/**
++ * \brief Handle a yield from the enclave.
++ *
++ * \param regs The enclave register context.
++ * \param mcause CSR register mcause.
++ * \param mepc CSR register mepc.
++ */
++uintptr_t do_yield(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc)
++{
++  uintptr_t retval = 0;
++  unsigned int eid = get_curr_enclave_id();
++  struct enclave_t *enclave = NULL;
++
++  acquire_enclave_metadata_lock();
++
++  enclave = __get_enclave(eid);
++  if(!enclave || enclave->state != RUNNING)
++  {
++    sbi_bug("M mode: do_yield: something is wrong with enclave%d\n", eid);
++    retval = -1UL;
++    goto timer_irq_out;
++  }
++
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = RUNNABLE;
++
++timer_irq_out:
++  release_enclave_metadata_lock();
++  retval = ENCLAVE_YIELD;
++  return retval;
++}
++
++/**
++ * \brief IPI notification to destroy an enclave.
++ *
++ * \param regs The enclave register context.
++ * \param host_ptbr The host ptbr register.
++ * \param eid The enclave id.
++ */
++uintptr_t ipi_destroy_enclave(uintptr_t *regs, uintptr_t host_ptbr, int eid)
++{
++  uintptr_t ret = 0;
++  struct enclave_t* enclave = NULL;
++  struct pm_area_struct* pma = NULL;
++  int need_free_enclave_memory = 0;
++  unsigned long mm_arg_paddr[RELAY_PAGE_NUM];
++  unsigned long mm_arg_size[RELAY_PAGE_NUM];
++
++  // TODO: acquire the enclave metadata lock
++  // acquire_enclave_metadata_lock();
++  // printm("M mode: ipi_destroy_enclave %d\r\n", eid);
++
++  enclave = __get_enclave(eid);
++
++  //the enclave may have exited, or even been assigned to another host,
++  //after the ipi sender released the enclave_metadata_lock
++  if(!enclave || enclave->state < FRESH)
++  {
++    ret = -1;
++    sbi_bug("M mode: ipi_destroy_enclave: enclave does not exist!\r\n");
++    goto ipi_stop_enclave_out;
++  }
++  //snapshot the relay pages only after the enclave pointer has been validated
++  for(int kk = 0; kk < RELAY_PAGE_NUM; kk++)
++  {
++    mm_arg_paddr[kk] = enclave->mm_arg_paddr[kk];
++    mm_arg_size[kk] = enclave->mm_arg_size[kk];
++  }
++
++  //this situation should never happen
++  if(enclave->state == RUNNING
++     && (check_in_enclave_world() < 0 || cpus[csr_read(CSR_MHARTID)].eid != eid))
++  {
++    sbi_bug("M mode: ipi_destroy_enclave: this situation should never happen!\r\n");
++    ret = -1;
++    goto ipi_stop_enclave_out;
++  }
++
++  if(enclave->state == RUNNING)
++  {
++    swap_from_enclave_to_host(regs, enclave);
++    //regs[10] = ENCLAVE_DESTROYED;
++    regs[10] = 0;
++  }
++  pma = enclave->pma_list;
++  need_free_enclave_memory = 1;
++  __free_enclave(eid);
++
++ipi_stop_enclave_out:
++  // release_enclave_metadata_lock();
++
++  if(need_free_enclave_memory)
++  {
++    free_enclave_memory(pma);
++    free_all_relay_page(mm_arg_paddr, mm_arg_size);
++  }
++  regs[10] = 0;
++  regs[11] = 0;
++  return ret;
++}
++
++/**
++ * \brief The enclave calls read() in the runtime; ocall to the host to handle it.
++ *
++ * \param regs The enclave register context.
++ */
++uintptr_t enclave_read_sec(uintptr_t *regs, uintptr_t sec)
++{
++  uintptr_t ret = 0;
++  int eid = get_curr_enclave_id();
++  struct enclave_t *enclave = NULL;
++  if(check_in_enclave_world() < 0)
++  {
++    return -1;
++  }
++  acquire_enclave_metadata_lock();
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->state != RUNNING)
++  {
++    ret = -1;
++    goto out;
++  }
++  copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_READ_SECT);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg0, sec);
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = OCALLING;
++  ret = ENCLAVE_OCALL;
++
++out:
++  release_enclave_metadata_lock();
++  return ret;
++}
++
++/**
++ * \brief The enclave calls write() in the runtime; ocall to the host to handle it.
++ *
++ * \param regs The enclave register context.
++ */
++uintptr_t enclave_write_sec(uintptr_t *regs, uintptr_t sec)
++{
++  uintptr_t ret = 0;
++  int eid = get_curr_enclave_id();
++  struct enclave_t *enclave = NULL;
++  if(check_in_enclave_world() < 0)
++  {
++    return -1;
++  }
++  acquire_enclave_metadata_lock();
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->state != RUNNING)
++  {
++    ret = -1;
++    goto out;
++  }
++  copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_WRITE_SECT);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg0, sec);
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = OCALLING;
++  ret = ENCLAVE_OCALL;
++
++out:
++  release_enclave_metadata_lock();
++  return ret;
++}
++
++/**
++ * \brief The enclave returns the relay page in the runtime; ocall to the host to handle it.
++ *
++ * \param regs The enclave register context.
++ */
++uintptr_t enclave_return_relay_page(uintptr_t *regs)
++{
++  uintptr_t ret = 0;
++  int eid = get_curr_enclave_id();
++  struct enclave_t *enclave = NULL;
++  if(check_in_enclave_world() < 0)
++  {
++    return -1;
++  }
++  acquire_enclave_metadata_lock();
++  enclave = __get_enclave(eid);
++  if(!enclave || check_enclave_authentication(enclave) != 0 || enclave->state != RUNNING)
++  {
++    ret = -1;
++    sbi_bug("M mode: enclave_return_relay_page: check enclave failed\n");
++    goto out;
++  }
++
++  copy_dword_to_host((uintptr_t*)enclave->ocall_func_id, OCALL_RETURN_RELAY_PAGE);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg0, enclave->mm_arg_paddr[0]);
++  copy_dword_to_host((uintptr_t*)enclave->ocall_arg1, enclave->mm_arg_size[0]);
++  //remap the relay page for the host
++  for(int kk = 0; kk < RELAY_PAGE_NUM; kk++)
++  {
++    if (enclave->mm_arg_paddr[kk])
++    {
++      __free_secure_memory(enclave->mm_arg_paddr[kk], enclave->mm_arg_size[kk]);
++      __free_relay_page_entry(enclave->mm_arg_paddr[kk], enclave->mm_arg_size[kk]);
++      unmap((uintptr_t*)(enclave->root_page_table), ENCLAVE_DEFAULT_MM_ARG_BASE, enclave->mm_arg_size[kk]);
++    }
++  }
++  swap_from_enclave_to_host(regs, enclave);
++  enclave->state = OCALLING;
++  ret = ENCLAVE_OCALL;
++
++out:
++  release_enclave_metadata_lock();
++  return ret;
++}
+diff --git a/lib/sbi/sm/enclave_mm.c b/lib/sbi/sm/enclave_mm.c
+new file mode 100644
+index 0000000..801fb70
+--- /dev/null
++++ b/lib/sbi/sm/enclave_mm.c
+@@ -0,0 +1,195 @@
++#include "sm/sm.h"
++#include "sm/enclave.h"
++#include "sm/enclave_vm.h"
++#include "sm/enclave_mm.h"
++#include "sbi/riscv_atomic.h"
++#include "sbi/sbi_math.h"
++
++// mm_region_list maintains the free secure pages in the monitor
++static struct mm_region_list_t *mm_region_list;
++static spinlock_t mm_regions_lock = SPINLOCK_INIT;
++extern spinlock_t mbitmap_lock;
++
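++/*
++ * mm_region_list is an intrusive singly-linked free list: each node lives in
++ * the first bytes of the free region it describes, so the allocator needs no
++ * metadata pool of its own. Shape of the node as used below (the actual
++ * declaration lives in sm/enclave_mm.h; field order here is an assumption):
++ *
++ *   struct mm_region_list_t {
++ *     uintptr_t paddr;                // base of this free region
++ *     unsigned long size;             // size of this free region
++ *     struct mm_region_list_t *next;  // next free region
++ *   };
++ */
++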
++
++/**
++ * \brief This function turns a set of untrusted pages into secure pages.
++ * First, it validates that the range is valid.
++ * Then, it ensures the pages are untrusted/public now.
++ * After that, it updates the metadata of the pages to secure (or private).
++ * Last, it unmaps the pages from the host PTEs.
++ *
++ * FIXME: we should re-consider the order of the last two steps.
++ *
++ * \param paddr the physical address to check.
++ * \param size the size of the region to check.
++ */
++int check_and_set_secure_memory(unsigned long paddr, unsigned long size)
++{
++  int ret = 0;
++  if(paddr & (RISCV_PGSIZE-1) || size < RISCV_PGSIZE || size & (RISCV_PGSIZE-1))
++  {
++    ret = -1;
++    return ret;
++  }
++
++  spin_lock(&mbitmap_lock);
++
++  if(test_public_range(PADDR_TO_PFN(paddr), size >> RISCV_PGSHIFT) != 0)
++  {
++    ret = -1;
++    goto out;
++  }
++  set_private_range(PADDR_TO_PFN(paddr), size >> RISCV_PGSHIFT);
++  unmap_mm_region(paddr, size);
++
++out:
++  spin_unlock(&mbitmap_lock);
++  return ret;
++}
++
++/**
++ * \brief Free a set of secure pages.
++ * It turns the secure pages into unsecure (or public) pages
++ * and remaps all the pages back into the host's PTEs.
++ * The caller must hold mbitmap_lock.
++ *
++ * \param paddr The freed physical address.
++ * \param size The freed memory size.
++ */
++int __free_secure_memory(unsigned long paddr, unsigned long size)
++{
++  int ret = 0;
++
++  set_public_range(PADDR_TO_PFN(paddr), size >> RISCV_PGSHIFT);
++  remap_mm_region(paddr, size);
++  return ret;
++}
++
++/**
++ * \brief Free a set of secure pages.
++ * It turns the secure pages into unsecure (or public) pages
++ * and remaps all the pages back into the host's PTEs.
++ *
++ * \param paddr The freed physical address.
++ * \param size The freed memory size.
++ */
++int free_secure_memory(unsigned long paddr, unsigned long size)
++{
++  int ret = 0;
++  spin_lock(&mbitmap_lock);
++
++  set_public_range(PADDR_TO_PFN(paddr), size >> RISCV_PGSHIFT);
++  remap_mm_region(paddr, size);
++
++  spin_unlock(&mbitmap_lock);
++  return ret;
++}
++
++/**
++ * \brief mm_init adds a new range into mm_region_list for the monitor/enclaves to use.
++ *
++ * \param paddr The init physical address.
++ * \param size The init memory size.
++ */
++uintptr_t mm_init(uintptr_t paddr, unsigned long size)
++{
++  uintptr_t ret = 0;
++  spin_lock(&mm_regions_lock);
++
++  if(size < RISCV_PGSIZE || (paddr & (RISCV_PGSIZE-1)) || (size & (RISCV_PGSIZE-1)))
++  {
++    ret = -1;
++    goto out;
++  }
++
++  if(check_and_set_secure_memory(paddr, size) != 0)
++  {
++    ret = -1;
++    goto out;
++  }
++
++  struct mm_region_list_t* list = (struct mm_region_list_t*)paddr;
++  list->paddr = paddr;
++  list->size = size;
++  list->next = mm_region_list;
++  mm_region_list = list;
++
++out:
++  spin_unlock(&mm_regions_lock);
++  return ret;
++}
++
++/**
++ * \brief mm_alloc returns a memory region.
++ * The size of the returned region is written to resp_size; its address is the return value.
++ *
++ * \param req_size The requested memory size.
++ * \param resp_size The responded memory size.
++ */
++void* mm_alloc(unsigned long req_size, unsigned long *resp_size)
++{
++  void* ret = NULL;
++  spin_lock(&mm_regions_lock);
++
++  if(!mm_region_list)
++  {
++    ret = NULL;
++    goto out;
++  }
++
++  ret = (void*)(mm_region_list->paddr);
++  *resp_size = mm_region_list->size;
++  mm_region_list = mm_region_list->next;
++
++out:
++  spin_unlock(&mm_regions_lock);
++  return ret;
++}
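++
++/*
++ * Note that mm_alloc() hands out whole regions from the head of
++ * mm_region_list rather than splitting them, so callers must accept a region
++ * at least as large as requested and give all of it back with mm_free().
++ * A minimal caller sketch (hypothetical helper, not part of this patch):
++ */
++#if 0
++static void mm_alloc_usage_sketch(void)
++{
++  unsigned long got = 0;
++  void *region = mm_alloc(RISCV_PGSIZE, &got);  /* got receives the real size */
++  if(region)
++  {
++    /* ... use [region, region+got) as secure memory ... */
++    mm_free(region, got);                       /* return the whole region */
++  }
++}
++#endif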
++
++/**
++ * \brief mm_free frees a memory region back to mm_region_list.
++ *
++ * \param paddr The physical address to be reclaimed.
++ * \param size The reclaimed memory size.
++ */
++int mm_free(void* paddr, unsigned long size)
++{
++  int ret = 0;
++  spin_lock(&mm_regions_lock);
++
++  if(size < RISCV_PGSIZE || ((uintptr_t)paddr & (RISCV_PGSIZE-1)) != 0)
++  {
++    ret = -1;
++    goto out;
++  }
++
++  struct mm_region_list_t* list = (struct mm_region_list_t*)paddr;
++  list->paddr = (uintptr_t)paddr;
++  list->size = size;
++  list->next = mm_region_list;
++  mm_region_list = list;
++
++out:
++  spin_unlock(&mm_regions_lock);
++  return ret;
++}
++
++/**
++ * \brief Grant the enclave access to the enclave's memory; it is an empty function now.
++ *
++ * \param enclave The given enclave.
++ */
++int grant_enclave_access(struct enclave_t* enclave)
++{
++  return 0;
++}
++
++/**
++ * \brief It is an empty function now.
++ *
++ * \param enclave The current enclave.
++ */
++int retrieve_enclave_access(struct enclave_t *enclave)
++{
++  return 0;
++}
+diff --git a/lib/sbi/sm/enclave_vm.c b/lib/sbi/sm/enclave_vm.c
+new file mode 100644
+index 0000000..8069af0
+--- /dev/null
++++ b/lib/sbi/sm/enclave_vm.c
+@@ -0,0 +1,596 @@
++#include "sm/vm.h"
++// #include "mtrap.h"
++#include "sm/enclave_vm.h"
++#include "sbi/riscv_encoding.h"
++#include "sbi/sbi_console.h"
++
++//get the ppn from a pte entry
++static uintptr_t pte_ppn(pte_t pte)
++{
++  return pte >> PTE_PPN_SHIFT;
++}
++
++/**
++ * \brief The internal function of check_enclave_layout; it recursively checks the region.
++ *
++ * \param page_table is a PT page (physical addr); it can be a non-root PT page
++ * \param vaddr is the start virtual addr of the PTE in page_table
++ * \param level is the PT page level of page_table
++ */
++static int __check_enclave_layout(uintptr_t page_table, uintptr_t va_start, uintptr_t va_end, uintptr_t pa_start, uintptr_t pa_end, uintptr_t vaddr, int level)
++{
++  if(level < 0)
++  {
++    return -1;
++  }
++
++  uintptr_t* pte = (uintptr_t*)page_table;
++  uintptr_t region_size = RISCV_PGSIZE * (1 << (level*RISCV_PGLEVEL_BITS));
++  for(int i = 0; i < (RISCV_PGSIZE/sizeof(uintptr_t)); ++i)
++  {
++    uintptr_t addr0 = vaddr + i*region_size;
++    uintptr_t addr1 = addr0 + region_size;
++    if(addr1 <= va_start || addr0 >= va_end)
++    {
++      continue;
++    }
++
++    if(PTE_VALID(pte[i]))
++    {
++      if(PTE_ILLEGAL(pte[i]))
++      {
++        return -1;
++      }
++
++      addr0 = PTE_TO_PFN(pte[i]) << RISCV_PGSHIFT;
++      addr1 = addr0 + region_size;
++      if(IS_LEAF_PTE(pte[i]))
++      {
++        if(!(addr0 >= pa_start && addr1 <= pa_end))
++        {
++          return -1;
++        }
++      }
++      else if(__check_enclave_layout(PTE_TO_PFN(pte[i]) << RISCV_PGSHIFT, va_start, va_end,
++                                     pa_start, pa_end, addr0, level-1))
++      {
++        return -1;
++      }
++    }
++  }
++  return 0;
++}
++
++/**
++ * \brief Check whether a VM region is mapped (only) to a PM region.
++ *
++ * \param root_page_table is the root of the page table
++ * \param va_start is the start of the VM region
++ * \param va_end is the end of the VM region
++ * \param pa_start is the start of the PM region
++ * \param pa_end is the end of the PM region
++ */
++int check_enclave_layout(uintptr_t root_page_table, uintptr_t va_start, uintptr_t va_end, uintptr_t pa_start, uintptr_t pa_end)
++{
++  return __check_enclave_layout(root_page_table, va_start, va_end, pa_start, pa_end, 0, RISCV_PGLEVELS-1);
++}
++
++/**
++ * \brief The auxiliary function for traverse_vmas().
++ *
++ * \param page_table The traversed page table.
++ * \param vma_list The vma list for the enclave.
++ * \param vma_num Pointer, the vma number in the vma list.
++ * \param va_start Pointer, the given / returned start of the virtual address range.
++ * \param va_end Pointer, the given / returned end of the virtual address range.
++ * \param vaddr The temporary / initial virtual address.
++ * \param level The page table level.
++ */
++static void __traverse_vmas(uintptr_t page_table, struct vm_area_struct *vma_list, int *vma_num, uintptr_t *va_start, uintptr_t *va_end, uintptr_t vaddr, int level)
++{
++  if(level < 0)
++  {
++    return;
++  }
++
++  uintptr_t *pte = (uintptr_t*)page_table;
++  uintptr_t region_size = RISCV_PGSIZE * (1 << (level*RISCV_PGLEVEL_BITS));
++  for(int i = 0; i < (RISCV_PGSIZE / sizeof(uintptr_t)); ++i)
++  {
++    if(!PTE_VALID(pte[i]))
++    {
++      if((*va_start) && (*va_end))
++      {
++        vma_list[*vma_num].va_start = *va_start;
++        vma_list[*vma_num].va_end = *va_end;
++        vma_list[*vma_num].vm_next = (struct vm_area_struct*)(&vma_list[*vma_num + 1]);
++        *va_start = 0;
++        *va_end = 0;
++        *vma_num += 1;
++      }
++      continue;
++    }
++
++    if(IS_LEAF_PTE(pte[i]))
++    {
++      if(!(*va_start))
++      {
++        *va_start = vaddr + i*region_size;
++      }
++      *va_end = vaddr + (i+1)*region_size;
++    }
++    else
++    {
++      __traverse_vmas(PTE_TO_PFN(pte[i]) << RISCV_PGSHIFT, vma_list, vma_num,
++                      va_start, va_end, vaddr + i*region_size, level-1);
++    }
++  }
++
++  if(level == (RISCV_PGLEVELS-1) && (*va_start) && (*va_end))
++  {
++    vma_list[*vma_num].va_start = *va_start;
++    vma_list[*vma_num].va_end = *va_end;
++    vma_list[*vma_num].vm_next = 0;
++    *va_start = 0;
++    *va_end = 0;
++    *vma_num += 1;
++  }
++  else if(level == (RISCV_PGLEVELS-1) && *vma_num)
++  {
++    vma_list[*vma_num - 1].vm_next = 0;
++  }
++}
++
++/**
++ * \brief This traverses the page table and rebuilds the vma list. It should only be
++ * called during create_enclave, as two vmas may be mistakenly regarded as one
++ * after the monitor maps new pages for the enclave.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param vma_list The vma list for the enclave.
++ */
++void traverse_vmas(uintptr_t root_page_table, struct vm_area_struct *vma_list)
++{
++  uintptr_t va_start = 0;
++  uintptr_t va_end = 0;
++  int vma_num = 0;
++  __traverse_vmas(root_page_table, vma_list, &vma_num, &va_start, &va_end, 0, RISCV_PGLEVELS-1);
++}
++
++/**
++ * \brief The auxiliary function for va_to_pa().
++ *
++ * \param page_table The traversed page table.
++ * \param va Pointer, the given virtual address.
++ * \param level The page table level.
++ */
++void* __va_to_pa(uintptr_t* page_table, uintptr_t *va, int level)
++{
++  if(!page_table || level<0)
++    return NULL;
++
++  uintptr_t page_size_bits = RISCV_PGSHIFT + level*RISCV_PGLEVEL_BITS;
++  uintptr_t pos = (((uintptr_t)va) >> page_size_bits) & ((1<<RISCV_PGLEVEL_BITS)-1);
++  if(!PTE_VALID(page_table[pos]))
++    return NULL;
++  if(IS_LEAF_PTE(page_table[pos]))
++    return (void*)((PTE_TO_PFN(page_table[pos]) << RISCV_PGSHIFT) + (((uintptr_t)va) & ((1UL << page_size_bits) - 1)));
++  return __va_to_pa((uintptr_t*)(PTE_TO_PFN(page_table[pos]) << RISCV_PGSHIFT), va, level-1);
++}
++
++/**
++ * \brief Translate an enclave virtual address into a physical address.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param va The given virtual address.
++ */
++void* va_to_pa(uintptr_t* root_page_table, void* va)
++{
++  return __va_to_pa(root_page_table, (uintptr_t*)va, RISCV_PGLEVELS-1);
++}
++
++/**
++ * \brief Insert a vma structure into the vma list, keeping the list sorted by address.
++ *
++ * \param vma_list Pointer of the vma_list (pointer), the enclave vma list.
++ * \param vma Pointer, the given vma structure.
++ * \param up_bound The upper bound of the vma region.
++ */
++int insert_vma(struct vm_area_struct **vma_list, struct vm_area_struct *vma, uintptr_t up_bound)
++{
++  if(vma->va_end > up_bound)
++    return -1;
++
++  struct vm_area_struct* first_vma = *vma_list;
++  if(!first_vma || (first_vma->va_start >= vma->va_end))
++  {
++    vma->vm_next = first_vma;
++    *vma_list = vma;
++    return 0;
++  }
++
++  int found = 0;
++  struct vm_area_struct* second_vma = first_vma->vm_next;
++  while(second_vma)
++  {
++    if((first_vma->va_end <= vma->va_start) && (second_vma->va_start >= vma->va_end))
++    {
++      vma->vm_next = second_vma;
++      first_vma->vm_next = vma;
++      found = 1;
++      break;
++    }
++    first_vma = second_vma;
++    second_vma = second_vma->vm_next;
++  }
++  if(!found)
++  {
++    if(first_vma && (first_vma->va_end <= vma->va_start))
++    {
++      first_vma->vm_next = vma;
++      vma->vm_next = NULL;
++      return 0;
++    }
++    return -1;
++  }
++
++  return 0;
++}
++
++/**
++ * \brief Delete a vma structure from the vma list.
++ *
++ * \param vma_list Pointer of the vma_list (pointer), the enclave vma list.
++ * \param vma Pointer, the given vma structure.
++ */
++int delete_vma(struct vm_area_struct **vma_list, struct vm_area_struct *vma)
++{
++  struct vm_area_struct *last_vma = (struct vm_area_struct*)(*vma_list);
++  if(!last_vma) // guard against an empty list
++    return -1;
++  if(last_vma->va_start <= vma->va_start && last_vma->va_end >= vma->va_end)
++  {
++    *vma_list = last_vma->vm_next;
++    vma->vm_next = NULL;
++    last_vma->vm_next = NULL;
++    return 0;
++  }
++
++  struct vm_area_struct *cur_vma = last_vma->vm_next;
++  while(cur_vma)
++  {
++    if(cur_vma->va_start <= vma->va_start && cur_vma->va_end >= vma->va_end)
++    {
++      last_vma->vm_next = cur_vma->vm_next;
++      vma->vm_next = NULL;
++      cur_vma->vm_next = NULL;
++      return 0;
++    }
++    last_vma = cur_vma;
++    cur_vma = cur_vma->vm_next;
++  }
++
++  return -1;
++}
++
++/**
++ * \brief Find a vma structure in the vma list.
++ *
++ * \param vma_list Pointer, the enclave vma list.
++ * \param vaddr The given virtual address.
++ * \param size The vma size.
++ */
++struct vm_area_struct* find_vma(struct vm_area_struct *vma_list, uintptr_t vaddr, uintptr_t size)
++{
++  uintptr_t va_start = vaddr;
++  uintptr_t va_end = vaddr + size;
++  struct vm_area_struct *vma = vma_list;
++  while(vma)
++  {
++    if(vma->va_start <= va_start && vma->va_end >= va_end)
++    {
++      return vma;
++    }
++    vma = vma->vm_next;
++  }
++  return NULL;
++}
++
++/**
++ * \brief Insert a pma structure into the pma list.
++ *
++ * \param pma_list Pointer of the pma_list (pointer), the enclave pma list.
++ * \param pma The given pma structure.
++ */
++int insert_pma(struct pm_area_struct **pma_list, struct pm_area_struct *pma)
++{
++  pma->pm_next = *pma_list;
++  *pma_list = pma;
++  return 0;
++}
++
++/**
++ * \brief Delete a pma structure from the pma list.
++ *
++ * \param pma_list Pointer of the pma_list (pointer), the enclave pma list.
++ * \param pma The given pma structure.
++ */
++int delete_pma(struct pm_area_struct **pma_list, struct pm_area_struct *pma)
++{
++  struct pm_area_struct *last_pma = *pma_list;
++  if(!last_pma) // guard against an empty list
++    return -1;
++  if(last_pma->paddr == pma->paddr && last_pma->size == pma->size)
++  {
++    *pma_list = last_pma->pm_next;
++    pma->pm_next = NULL;
++    last_pma->pm_next = NULL;
++    return 0;
++  }
++
++  struct pm_area_struct *cur_pma = last_pma->pm_next;
++  while(cur_pma)
++  {
++    if(cur_pma->paddr == pma->paddr && cur_pma->size == pma->size)
++    {
++      last_pma->pm_next = cur_pma->pm_next;
++      pma->pm_next = NULL;
++      cur_pma->pm_next = NULL;
++      return 0;
++    }
++    last_pma = cur_pma;
++    cur_pma = cur_pma->pm_next;
++  }
++
++  return -1;
++}
++
++/**
++ * \brief The auxiliary function of pte_walk_create.
++ *
++ * \param page_table The given page table.
++ * \param free_pages Pointer of the free page structure (pointer).
++ * \param va The given virtual address.
++ * \param level The page table level.
++ */
++static uintptr_t *__pte_walk_create(uintptr_t *page_table, struct page_t **free_pages, uintptr_t va, int level)
++{
++  uintptr_t pos = (va >> (RISCV_PGSHIFT + level*RISCV_PGLEVEL_BITS)) & ((1<<RISCV_PGLEVEL_BITS)-1);
++  if(level == 0)
++    return &page_table[pos];
++
++  if(!(page_table[pos] & PTE_V))
++  {
++    if(!(*free_pages))
++      return NULL;
++    uintptr_t paddr = (*free_pages)->paddr;
++    *free_pages = (*free_pages)->next;
++    page_table[pos] = pte_create(paddr>>RISCV_PGSHIFT, PTE_V);
++  }
++  return __pte_walk_create((uintptr_t*)(PTE_TO_PFN(page_table[pos]) << RISCV_PGSHIFT),
++      free_pages, va, level-1);
++}
++
++/**
++ * \brief Walk the page table and create the pte entry on demand.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param free_pages Pointer of the free page structure (pointer).
++ * \param va The given virtual address.
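++ *
++ * A minimal usage sketch (hypothetical caller; map_one_page below does the same):
++ *   uintptr_t *pte = pte_walk_create(root_pt, &free_pages, va);
++ *   if(pte) *pte = pte_create(pa >> RISCV_PGSHIFT, PTE_R | PTE_W | PTE_V);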
++ */
++static uintptr_t *pte_walk_create(uintptr_t *root_page_table, struct page_t **free_pages, uintptr_t va)
++{
++  return __pte_walk_create(root_page_table, free_pages, va, RISCV_PGLEVELS-1);
++}
++
++/**
++ * \brief The auxiliary function of pte_walk.
++ *
++ * \param page_table The given page table.
++ * \param va The given virtual address.
++ * \param level The page table level.
++ */
++static uintptr_t *__pte_walk(uintptr_t *page_table, uintptr_t va, int level)
++{
++  uintptr_t pos = (va >> (RISCV_PGSHIFT + level*RISCV_PGLEVEL_BITS)) & ((1<<RISCV_PGLEVEL_BITS)-1);
++  if(level == 0)
++    return &page_table[pos];
++
++  if(!(page_table[pos] & PTE_V))
++    return NULL;
++
++  return __pte_walk((uintptr_t*)(PTE_TO_PFN(page_table[pos]) << RISCV_PGSHIFT), va, level-1);
++}
++
++/**
++ * \brief Walk the page table and return the pte entry for a virtual address.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param va The given virtual address.
++ */
++static uintptr_t *pte_walk(uintptr_t *root_page_table, uintptr_t va)
++{
++  return __pte_walk(root_page_table, va, RISCV_PGLEVELS-1);
++}
++
++/**
++ * \brief Map one page for the enclave.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param free_pages Pointer of the free page structure (pointer).
++ * \param va The mapped virtual address.
++ * \param pa The mapped physical address.
++ * \param flag The page table entry flags.
++ */
++static int map_one_page(uintptr_t *root_page_table, struct page_t **free_pages, uintptr_t va, uintptr_t pa, uintptr_t flag)
++{
++  uintptr_t *pte = pte_walk_create(root_page_table, free_pages, va);
++  if(!pte)
++    return -1;
++  *pte = pte_create(pa>>RISCV_PGSHIFT, flag | PTE_V);
++  return 0;
++}
++
++/**
++ * \brief Unmap one page for the enclave.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param va The mapped virtual address.
++ */
++static int unmap_one_page(uintptr_t *root_page_table, uintptr_t va)
++{
++  uintptr_t *pte = pte_walk(root_page_table, va);
++  if(!pte)
++    return -1;
++  *pte = 0;
++  return 0;
++}
++
++/**
++ * \brief Map a range of virtual addresses to the corresponding physical addresses.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param free_pages Pointer of the free page structure (pointer).
++ * \param vaddr The mapped virtual address.
++ * \param paddr The mapped physical address.
++ * \param size The mapped range size.
++ */
++int mmap(uintptr_t* root_page_table, struct page_t **free_pages, uintptr_t vaddr, uintptr_t paddr, uintptr_t size)
++{
++  uintptr_t va = vaddr;
++  uintptr_t pa = paddr;
++  uintptr_t va_end = vaddr + size;
++  while(va < va_end)
++  {
++    if(map_one_page(root_page_table, free_pages, va, pa, PTE_D | PTE_A | PTE_R | PTE_W | PTE_U | PTE_V) != 0)
++    {
++      sbi_bug("M mode: mmap: failed to map one page\n");
++      return -1;
++    }
++    va += RISCV_PGSIZE;
++    pa += RISCV_PGSIZE;
++  }
++  return 0;
++}
++
++/**
++ * \brief Unmap a range of virtual addresses from the corresponding physical addresses.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param vaddr The unmapped virtual address.
++ * \param size The unmapped range size.
++ */
++int unmap(uintptr_t* root_page_table, uintptr_t vaddr, uintptr_t size)
++{
++  uintptr_t va = vaddr;
++  uintptr_t va_end = vaddr + size;
++  while(va < va_end)
++  {
++    unmap_one_page(root_page_table, va);
++    va += RISCV_PGSIZE;
++  }
++  return 0;
++}
++
++/**
++ * \brief Recursively copy a page table.
++ *
++ * \param page_table The given page table.
++ * \param free_page Pointer of the free page structure (pointer).
++ * \param level The page table level.
++ * \param copy_page The copied page table page.
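++ *
++ * Copy semantics (as implemented below): non-leaf entries and writable leaf
++ * pages are deep-copied into pages drawn from free_page, while valid read-only
++ * leaf entries are shared by aliasing the original PTE, keeping the copy cheap
++ * for text/rodata mappings.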
++ */
++int __copy_page_table(pte_t* page_table, struct page_t ** free_page, int level, pte_t* copy_page)
++{
++  pte_t* t = page_table;
++  pte_t* c_t = copy_page;
++  int i, ret;
++  if(level >= 0)
++  {
++    for (i = 0; i < (1<<RISCV_PGLEVEL_BITS); i++)
++    {
++      if (((level > 0) && (t[i] & PTE_V)) ||
++          ((level == 0) && (t[i] & PTE_V) && (t[i] & PTE_W)))
++      {
++        pte_t* next_copy_page_table;
++        pte_t* next_page_table;
++        if ((*free_page) == NULL)
++          return -1;
++        uintptr_t free_ppn = ((*free_page)->paddr) >> RISCV_PGSHIFT;
++        *free_page = (*free_page)->next;
++        c_t[i] = ptd_create(free_ppn);
++        c_t[i] = c_t[i] | (t[i] & 0x3ff);
++        next_copy_page_table = (pte_t*) (pte_ppn(c_t[i]) << RISCV_PGSHIFT);
++        next_page_table = (pte_t*) (pte_ppn(t[i]) << RISCV_PGSHIFT);
++        ret = __copy_page_table(next_page_table, free_page, level-1, next_copy_page_table);
++        if (ret < 0)
++          return -1;
++      }
++      else if((level == 0) && (t[i] & PTE_V) && (!(t[i] & PTE_W)))
++      {
++        c_t[i] = t[i];
++      }
++    }
++  }
++  else
++  {
++    sbi_memcpy(c_t, t, RISCV_PGSIZE);
++  }
++  return 0;
++}
++
++/**
++ * \brief Map a range of empty pages for the enclave.
++ *
++ * \param root_page_table The enclave root page table.
++ * \param free_pages Pointer of the free page structure (pointer).
++ * \param vaddr The virtual address for the empty pages.
++ * \param size The size of the empty region.
++ */
++int map_empty_page(uintptr_t* root_page_table, struct page_t **free_pages, uintptr_t vaddr, uintptr_t size)
++{
++  uintptr_t va = vaddr;
++  uintptr_t va_end = vaddr + size;
++  while(va < va_end)
++  {
++    if ((*free_pages) == NULL)
++      return -1;
++    uintptr_t free_paddr = (*free_pages)->paddr;
++    *free_pages = (*free_pages)->next;
++    map_one_page(root_page_table, free_pages, va, free_paddr, PTE_R | PTE_W | PTE_U | PTE_V);
++    va += RISCV_PGSIZE;
++  }
++  return 0;
++}
+diff --git a/lib/sbi/sm/gm/big.c b/lib/sbi/sm/gm/big.c
+new file mode 100644
+index 0000000..b542ffe
+--- /dev/null
++++ b/lib/sbi/sm/gm/big.c
+@@ -0,0 +1,853 @@
++#include "sm/gm/big.h"
++
++typedef struct
++{
++  u64 m_low;
++  u64 m_high;
++} uint128_t;
++
++void vli_clear(u64 *vli, u8 ndigits)
++{
++  int i;
++
++  for(i = 0; i < ndigits; ++i){
++    vli[i] = 0;
++  }
++}
++
++/* Returns true if vli == 0, false otherwise. */
++int vli_is_zero(u64 *vli, u8 ndigits)
++{
++  int i;
++
++  for(i = 0; i < ndigits; ++i){
++    if (vli[i])
++      return 0;
++  }
++
++  return 1;
++}
++
++/* Returns nonzero if bit 'bit' of vli is set. */
++u64 vli_test_bit(u64 *vli, u8 bit, u8 ndigits)
++{
++  return (vli[bit/64] & ((u64)1 << (bit % 64)));
++}
++
++/* Counts the number of 64-bit "digits" in vli. */
++u32 vli_num_digits(u64 *vli, u8 ndigits)
++{
++  int i;
++  /* Search from the end until we find a non-zero digit.
++   * We do it in reverse because we expect that most digits will
++   * be nonzero.
++   */
++  for(i = ndigits - 1; i >= 0 && vli[i] == 0; --i);
++
++  return (i + 1);
++}
++
++/* Counts the number of bits required for vli. */
++u32 vli_num_bits(u64 *vli, u8 ndigits)
++{
++  u32 i, num_digits;
++  u64 digit;
++
++  num_digits = vli_num_digits(vli, ndigits);
++  if(num_digits == 0)
++    return 0;
++
++  digit = vli[num_digits - 1];
++  for(i = 0; digit; ++i)
++    digit >>= 1;
++
++  return ((num_digits - 1) * 64 + i);
++}
++
++/* Sets dest = src. */
++void vli_set(u64 *dest, u64 *src, u8 ndigits)
++{
++  u32 i;
++
++  for(i = 0; i < ndigits; ++i)
++    dest[i] = src[i];
++}
++
++/* Returns sign of left - right.
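++ * Digits are compared from the most-significant end; e.g. with (hypothetical)
++ * 256-bit values held as four u64 digits, vli_cmp(a, b, 4) returns 1, 0 or -1.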
++ */
++int vli_cmp(u64 *left, u64 *right, u8 ndigits)
++{
++  int i;
++
++  for(i = ndigits - 1; i >= 0; --i){
++    if(left[i] > right[i])
++      return 1;
++    else if (left[i] < right[i])
++      return -1;
++  }
++  return 0;
++}
++
++/* Computes result = in << c, returning carry. Can modify in place
++ * (if result == in). 0 < shift < 64.
++ */
++u64 vli_lshift(u64 *result, u64 *in, u32 shift, u8 ndigits)
++{
++  u64 carry = 0;
++  int i;
++
++  for(i = 0; i < ndigits; ++i){
++    u64 temp = in[i];
++    result[i] = (temp << shift) | carry;
++    carry = shift ? temp >> (64 - shift) : 0;
++  }
++
++  return carry;
++}
++
++/* Computes result = in >> c, returning carry. Can modify in place
++ * (if result == in). 0 < shift < 64.
++ */
++u64 vli_rshift(u64 *result, u64 *in, u32 shift, u8 ndigits)
++{
++  u64 carry = 0;
++  int i;
++
++  for(i = ndigits -1; i >= 0; --i){
++    u64 temp = in[i];
++    result[i] = (temp >> shift) | carry;
++    carry = shift ? temp << (64 - shift) : 0;
++  }
++
++  return carry;
++}
++
++/* Computes result = left + right, returning carry. Can modify in place. */
++u64 vli_add(u64 *result, u64 *left, u64 *right, u8 ndigits)
++{
++  u64 carry = 0;
++  u32 i;
++
++  for(i = 0; i < ndigits; ++i){
++    u64 sum;
++
++    sum = left[i] + right[i] + carry;
++    if(sum != left[i]){
++      carry = (sum < left[i]);
++    }
++    result[i] = sum;
++  }
++
++  return carry;
++}
++
++/* Computes result = left - right, returning borrow. Can modify in place. */
++u64 vli_sub(u64 *result, u64 *left, u64 *right, u8 ndigits)
++{
++  u64 borrow = 0;
++  int i;
++
++  for(i = 0; i < ndigits; ++i){
++    u64 diff;
++
++    diff = left[i] - right[i] - borrow;
++    if (diff != left[i])
++      borrow = (diff > left[i]);
++
++    result[i] = diff;
++  }
++
++  return borrow;
++}
++
++static uint128_t mul_64_64(u64 left, u64 right)
++{
++  u64 a0 = left & 0xffffffffull;
++  u64 a1 = left >> 32;
++  u64 b0 = right & 0xffffffffull;
++  u64 b1 = right >> 32;
++  u64 m0 = a0 * b0;
++  u64 m1 = a0 * b1;
++  u64 m2 = a1 * b0;
++  u64 m3 = a1 * b1;
++  uint128_t result;
++
++  m2 += (m0 >> 32);
++  m2 += m1;
++
++  /* Overflow */
++  if (m2 < m1)
++    m3 += 0x100000000ull;
++
++  result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
++  result.m_high = m3 + (m2 >> 32);
++
++  return result;
++}
++
++static uint128_t add_128_128(uint128_t a, uint128_t b)
++{
++  uint128_t result;
++
++  result.m_low = a.m_low + b.m_low;
++  result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
++
++  return result;
++}
++
++static u64 vli_add_digit_mul(u64 *result, u64 *b, u64 c, u64 *d, u8 digits)
++{
++  uint128_t mul;
++  u64 carry;
++  u32 i;
++
++  if(c == 0)
++    return 0;
++
++  carry = 0;
++  for (i = 0; i < digits; i++) {
++    mul = mul_64_64(c, d[i]);
++    if((result[i] = b[i] + carry) < carry){
++      carry = 1;
++    }
++    else{
++      carry = 0;
++    }
++    if((result[i] += mul.m_low) < mul.m_low){
++      carry++;
++    }
++    carry += mul.m_high;
++  }
++
++  return carry;
++}
++
++void bn_mult(u64 *result, u64 *left, u64 *right, u8 ndigits)
++{
++  u64 t[2*ndigits];
++  u32 bdigits, cdigits, i;
++
++  vli_clear(t, 2*ndigits);
++
++  bdigits = vli_num_digits(left, ndigits);
++  cdigits = vli_num_digits(right, ndigits);
++
++  for(i = 0; i < bdigits; i++){
++    t[i+cdigits] += vli_add_digit_mul(&t[i], &t[i], left[i], right, cdigits);
++  }
++
++  vli_set(result, t, 2*ndigits);
++}
++
++static u32 vli_sub_digit_mult(u32 *a, u32 *b, u32 c, u32 *d, u32 digits)
++{
++  u64 result;
++  u32 borrow, rh, rl;
++  u32 i;
++
++  if(c == 0)
++    return 0;
++
++  borrow = 0;
++  for(i = 0; i < digits; i++){
++    result = (u64)c * d[i];
++    rl = result & BN_MAX_DIGIT;
++    rh = (result >> BN_DIGIT_BITS) & BN_MAX_DIGIT;
++    if((a[i] = b[i] - borrow) > (BN_MAX_DIGIT - borrow)){
++      borrow = 1;
++    }else{
++      borrow = 0;
++    }
++    if((a[i] -= rl) > (BN_MAX_DIGIT - rl)){
++      borrow++;
++    }
++    borrow += rh;
++  }
++
++  return borrow;
++}
++
++static u32 bn_digit_bits(u32 a)
++{
++  u32 i;
++
++  for(i = 0; i < sizeof(a) * 8; i++){
++    if(a == 0)
++      break;
++    a >>= 1;
++  }
++
++  return i;
++}
++
++void
bn_div(u32 *a, u32 *b, u32 *c, u32 cdigits, u32 *d, u32 ddigits) ++{ ++ u32 ai, t, cc[cdigits+1], dd[cdigits/2]; ++ u32 dddigits, shift; ++ u64 tmp; ++ int i; ++ ++ dddigits = ddigits; ++ ++ shift = BN_DIGIT_BITS - bn_digit_bits(d[dddigits-1]); ++ vli_clear((u64*)cc, dddigits/2); ++ cc[cdigits] = vli_lshift((u64*)cc, (u64*)c, shift, cdigits/2); ++ vli_lshift((u64*)dd, (u64*)d, shift, dddigits/2); ++ t = dd[dddigits-1]; ++ ++ vli_clear((u64*)a, cdigits/2); ++ i = cdigits - dddigits; ++ for(; i>=0; i--){ ++ if(t == BN_MAX_DIGIT){ ++ ai = cc[i+dddigits]; ++ }else{ ++ tmp = cc[i+dddigits-1]; ++ tmp += (u64)cc[i+dddigits] << BN_DIGIT_BITS; ++ ai = tmp / (t + 1); ++ } ++ ++ cc[i+dddigits] -= vli_sub_digit_mult(&cc[i], &cc[i], ai, dd, dddigits); ++ while(cc[i+dddigits] || (vli_cmp((u64*)&cc[i], (u64*)dd, dddigits/2) >= 0)){ ++ ai++; ++ cc[i+dddigits] -= vli_sub((u64*)&cc[i], (u64*)&cc[i], (u64*)dd, dddigits/2); ++ } ++ a[i] = ai; ++ } ++ ++ vli_rshift((u64*)b, (u64*)cc, shift, dddigits/2); ++} ++ ++void vli_div(u64 *result, u64 *remainder, u64 *left, u64 cdigits, u64 *right, u8 ddigits) ++{ ++ bn_div((u32*)result, (u32*)remainder, (u32*)left, cdigits*2, (u32*)right, ddigits*2); ++} ++ ++void bn_mod(u64 *result, u64 *left, u64 *right, u8 ndigits) ++{ ++ u64 t[2*ndigits]; ++ ++ vli_div(t, result, left, ndigits*2, right, ndigits); ++} ++ ++void _vli_mult(u64 *result, u64 *left, u64 *right, u8 ndigits) ++{ ++ uint128_t r01 = { 0, 0 }; ++ u64 r2 = 0; ++ unsigned int i, k; ++ ++ /* Compute each digit of result in sequence, maintaining the ++ * carries. ++ */ ++ for(k = 0; k < ndigits * 2 - 1; k++){ ++ unsigned int min; ++ ++ if(k < ndigits) ++ min = 0; ++ else ++ min = (k + 1) - ndigits; ++ ++ for(i = min; i <= k && i < ndigits; i++){ ++ uint128_t product; ++ ++ product = mul_64_64(left[i], right[k - i]); ++ ++ r01 = add_128_128(r01, product); ++ r2 += (r01.m_high < product.m_high); ++ } ++ ++ result[k] = r01.m_low; ++ r01.m_low = r01.m_high; ++ r01.m_high = r2; ++ r2 = 0; ++ } ++ ++ result[ndigits * 2 - 1] = r01.m_low; ++} ++ ++void vli_mult(u64 *result, u64 *left, u64 *right, u8 ndigits) ++{ ++#if 1 ++ bn_mult(result, left, right, ndigits); ++#else ++ _vli_mult(result, left, right, ndigits); ++#endif ++} ++ ++void vli_square(u64 *result, u64 *left, u8 ndigits) ++{ ++ uint128_t r01 = { 0, 0 }; ++ u64 r2 = 0; ++ int i, k; ++ ++ for(k = 0; k < ndigits * 2 - 1; k++){ ++ unsigned int min; ++ ++ if(k < ndigits) ++ min = 0; ++ else ++ min = (k + 1) - ndigits; ++ ++ for(i = min; i <= k && i <= k - i; i++){ ++ uint128_t product; ++ ++ product = mul_64_64(left[i], left[k - i]); ++ ++ if(i < k - i){ ++ r2 += product.m_high >> 63; ++ product.m_high = (product.m_high << 1) | ++ (product.m_low >> 63); ++ product.m_low <<= 1; ++ } ++ ++ r01 = add_128_128(r01, product); ++ r2 += (r01.m_high < product.m_high); ++ } ++ ++ result[k] = r01.m_low; ++ r01.m_low = r01.m_high; ++ r01.m_high = r2; ++ r2 = 0; ++ } ++ ++ result[ndigits * 2 - 1] = r01.m_low; ++} ++ ++/* Computes result = (left + right) % mod. ++ Assumes that left < mod and right < mod, result != mod. */ ++void vli_mod_add(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits) ++{ ++ u64 carry; ++ ++ carry = vli_add(result, left, right, ndigits); ++ /* result > mod (result = mod + remainder), so subtract mod to ++ * get remainder. ++ */ ++ ++ if(carry || vli_cmp(result, mod, ndigits) >= 0){ ++ /* result > mod (result = mod + remainder), so subtract mod to get remainder. 
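++     * Since both operands are already < mod, the sum is < 2*mod, so a single
++     * conditional subtraction suffices.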
*/ ++ vli_sub(result, result, mod, ndigits); ++ } ++} ++ ++/* Computes result = (left - right) % mod. ++ * Assumes that left < mod and right < mod, result != mod. ++ */ ++void vli_mod_sub(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits) ++{ ++ u64 borrow; ++ ++ borrow = vli_sub(result, left, right, ndigits); ++ /* In this case, result == -diff == (max int) - diff. ++ * Since -x % d == d - x, we can get the correct result from ++ * result + mod (with overflow). ++ */ ++ if(borrow) ++ vli_add(result, result, mod, ndigits); ++} ++ ++/* Computes result = product % curve_prime ++ * from http://www.nsa.gov/ia/_files/nist-routines.pdf ++ */ ++void vli_mmod_fast_nist_256(u64 *result, u64 *product, u64 *curve_prime, u8 ndigits) ++{ ++ u64 tmp[2 * ndigits]; ++ int carry; ++ ++ /* t */ ++ vli_set(result, product, ndigits); ++ ++ /* s1 */ ++ tmp[0] = 0; ++ tmp[1] = product[5] & 0xffffffff00000000ull; ++ tmp[2] = product[6]; ++ tmp[3] = product[7]; ++ carry = vli_lshift(tmp, tmp, 1, ndigits); ++ carry += vli_add(result, result, tmp, ndigits); ++ ++ /* s2 */ ++ tmp[1] = product[6] << 32; ++ tmp[2] = (product[6] >> 32) | (product[7] << 32); ++ tmp[3] = product[7] >> 32; ++ carry += vli_lshift(tmp, tmp, 1, ndigits); ++ carry += vli_add(result, result, tmp, ndigits); ++ ++ /* s3 */ ++ tmp[0] = product[4]; ++ tmp[1] = product[5] & 0xffffffff; ++ tmp[2] = 0; ++ tmp[3] = product[7]; ++ carry += vli_add(result, result, tmp, ndigits); ++ ++ /* s4 */ ++ tmp[0] = (product[4] >> 32) | (product[5] << 32); ++ tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull); ++ tmp[2] = product[7]; ++ tmp[3] = (product[6] >> 32) | (product[4] << 32); ++ carry += vli_add(result, result, tmp, ndigits); ++ ++ /* d1 */ ++ tmp[0] = (product[5] >> 32) | (product[6] << 32); ++ tmp[1] = (product[6] >> 32); ++ tmp[2] = 0; ++ tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32); ++ carry -= vli_sub(result, result, tmp, ndigits); ++ ++ /* d2 */ ++ tmp[0] = product[6]; ++ tmp[1] = product[7]; ++ tmp[2] = 0; ++ tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull); ++ carry -= vli_sub(result, result, tmp, ndigits); ++ ++ /* d3 */ ++ tmp[0] = (product[6] >> 32) | (product[7] << 32); ++ tmp[1] = (product[7] >> 32) | (product[4] << 32); ++ tmp[2] = (product[4] >> 32) | (product[5] << 32); ++ tmp[3] = (product[6] << 32); ++ carry -= vli_sub(result, result, tmp, ndigits); ++ ++ /* d4 */ ++ tmp[0] = product[7]; ++ tmp[1] = product[4] & 0xffffffff00000000ull; ++ tmp[2] = product[5]; ++ tmp[3] = product[6] & 0xffffffff00000000ull; ++ carry -= vli_sub(result, result, tmp, ndigits); ++ ++ if (carry < 0) { ++ do{ ++ carry += vli_add(result, result, curve_prime, ndigits); ++ }while(carry < 0); ++ } ++ else{ ++ while(carry || vli_cmp(curve_prime, result, ndigits) != 1){ ++ carry -= vli_sub(result, result, curve_prime, ndigits); ++ } ++ } ++} ++ ++void vli_mmod_fast_sm2_256(u64 *result, u64 *_product, u64 *mod, u8 ndigits) ++{ ++ u32 tmp1[8]; ++ u32 tmp2[8]; ++ u32 tmp3[8]; ++ u32 *product = (u32 *)_product; ++ int carry = 0; ++ ++ vli_set(result, (u64 *)product, ndigits); ++ vli_clear((u64 *)tmp1, ndigits); ++ vli_clear((u64 *)tmp2, ndigits); ++ vli_clear((u64 *)tmp3, ndigits); ++ ++ /* Y0 */ ++ tmp1[0] = tmp1[3] = tmp1[7] = product[8]; ++ tmp2[2] = product[8]; ++ carry += vli_add(result, result, (u64 *)tmp1, ndigits); ++ carry -= vli_sub(result, result, (u64 *)tmp2, ndigits); ++ ++ /* Y1 */ ++ tmp1[0] = tmp1[1] = tmp1[4] = tmp1[7] = product[9]; ++ tmp1[3] = 0; ++ tmp2[2] = product[9]; ++ carry += vli_add(result, 
result, (u64 *)tmp1, ndigits); ++ carry -= vli_sub(result, result, (u64 *)tmp2, ndigits); ++ ++ /* Y2 */ ++ tmp1[0] = tmp1[1] = tmp1[5] = tmp1[7] = product[10]; ++ tmp1[4] = 0; ++ carry += vli_add(result, result, (u64 *)tmp1, ndigits); ++ ++ /* Y3 */ ++ tmp1[0] = tmp1[1] = tmp1[3] = tmp1[6] = tmp1[7] = product[11]; ++ tmp1[5] = 0; ++ carry += vli_add(result, result, (u64 *)tmp1, ndigits); ++ ++ /* Y4 */ ++ tmp1[0] = tmp1[1] = tmp1[3] = tmp1[4] = tmp1[7] = tmp3[7] = product[12]; ++ tmp1[6] = 0; ++ carry += vli_add(result, result, (u64 *)tmp1, ndigits); ++ carry += vli_add(result, result, (u64 *)tmp3, ndigits); ++ ++ /* Y5 */ ++ tmp1[0] = tmp1[1] = tmp1[3] = tmp1[4] = tmp1[5] = tmp1[7] = product[13]; ++ tmp2[2] = product[13]; ++ tmp3[0] = tmp3[3] = tmp3[7] = product[13]; ++ carry += vli_add(result, result, (u64 *)tmp1, ndigits); ++ carry += vli_add(result, result, (u64 *)tmp3, ndigits); ++ carry -= vli_sub(result, result, (u64 *)tmp2, ndigits); ++ ++ /* Y6 */ ++ tmp1[0] = tmp1[1] = tmp1[3] = tmp1[4] = tmp1[5] = tmp1[6] = tmp1[7] = product[14]; ++ tmp2[2] = product[14]; ++ tmp3[0] = tmp3[1] = tmp3[4] = tmp3[7] = product[14]; ++ tmp3[3] = 0; ++ carry += vli_add(result, result, (u64 *)tmp1, ndigits); ++ carry += vli_add(result, result, (u64 *)tmp3, ndigits); ++ carry -= vli_sub(result, result, (u64 *)tmp2, ndigits); ++ ++ /* Y7 */ ++ tmp1[0] = tmp1[1] = tmp1[3] = tmp1[4] = tmp1[5] = tmp1[6] = tmp1[7] = product[15]; ++ tmp3[0] = tmp3[1] = tmp3[5] = product[15]; ++ tmp3[4] = 0; ++ tmp3[7] = 0; ++ tmp2[7] = product[15]; ++ tmp2[2] = 0; ++ carry += vli_lshift((u64 *)tmp2, (u64 *)tmp2, 1, ndigits); ++ carry += vli_add(result, result, (u64 *)tmp1, ndigits); ++ carry += vli_add(result, result, (u64 *)tmp3, ndigits); ++ carry += vli_add(result, result, (u64 *)tmp2, ndigits); ++ if(carry < 0){ ++ do{ ++ carry += vli_add(result, result, mod, ndigits); ++ }while(carry < 0); ++ } ++ else{ ++ while(carry || vli_cmp(mod, result, ndigits) != 1) ++ { ++ carry -= vli_sub(result, result, mod, ndigits); ++ } ++ } ++} ++ ++/* Computes result = (product) % mod. */ ++void _vli_mod(u64 *result, u64 *product, u64 *mod, u8 ndigits) ++{ ++ u64 modMultiple[2 * ndigits]; ++ uint digitShift, bitShift; ++ uint productBits; ++ uint modBits = vli_num_bits(mod, ndigits); ++ ++ productBits = vli_num_bits(product + ndigits, ndigits); ++ if(productBits){ ++ productBits += ndigits * 64; ++ } ++ else{ ++ productBits = vli_num_bits(product, ndigits); ++ } ++ ++ if(productBits < modBits){ ++ /* product < mod. */ ++ vli_set(result, product, ndigits); ++ return; ++ } ++ ++ /* Shift mod by (leftBits - modBits). This multiplies mod by the largest ++ power of two possible while still resulting in a number less than left. */ ++ vli_clear(modMultiple, ndigits); ++ vli_clear(modMultiple + ndigits, ndigits); ++ digitShift = (productBits - modBits) / 64; ++ bitShift = (productBits - modBits) % 64; ++ if(bitShift){ ++ modMultiple[digitShift + ndigits] = vli_lshift(modMultiple + digitShift, mod, bitShift, ndigits); ++ } ++ else{ ++ vli_set(modMultiple + digitShift, mod, ndigits); ++ } ++ ++ /* Subtract all multiples of mod to get the remainder. 
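++   * This is plain shift-and-subtract long division: modMultiple starts at
++   * mod << (productBits - modBits) and is halved each iteration, being
++   * subtracted from product whenever it still fits.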
*/ ++ vli_clear(result, ndigits); ++ result[0] = 1; /* Use result as a temp var to store 1 (for subtraction) */ ++ while(productBits > ndigits * 64 || vli_cmp(modMultiple, mod, ndigits) >= 0) ++ { ++ int cmp = vli_cmp(modMultiple + ndigits, product + ndigits, ndigits); ++ if(cmp < 0 || (cmp == 0 && vli_cmp(modMultiple, product, ndigits) <= 0)){ ++ if (vli_sub(product, product, modMultiple, ndigits)) ++ { ++ /* borrow */ ++ vli_sub(product + ndigits, product + ndigits, result, ndigits); ++ } ++ vli_sub(product + ndigits, product + ndigits, modMultiple + ndigits, ndigits); ++ } ++ u64 carry = (modMultiple[ndigits] & 0x01) << 63; ++ vli_rshift(modMultiple + ndigits, modMultiple + ndigits, 1, ndigits); ++ vli_rshift(modMultiple, modMultiple, 1, ndigits); ++ modMultiple[ndigits-1] |= carry; ++ ++ --productBits; ++ } ++ vli_set(result, product, ndigits); ++} ++ ++/* Computes result = (product) % mod. */ ++void vli_mod(u64 *result, u64 *product, u64 *mod, u8 ndigits) ++{ ++#if 1 ++ bn_mod(result, product, mod, ndigits); ++#else ++ _vli_mod(result, product, mod, ndigits); ++#endif ++} ++ ++/* Computes result = (left * right) % curve->p. */ ++void vli_mod_mult_fast(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits) ++{ ++ u64 product[2 * ndigits]; ++ ++ vli_mult(product, left, right, ndigits); ++#if 1 ++ vli_mod(result, product, mod, ndigits); ++#else ++ if ( mod[1] == 0xFFFFFFFF00000000ull) ++ vli_mmod_fast_sm2_256(result, product, mod, ndigits); ++ else ++ vli_mmod_fast_nist_256(result, product, mod, ndigits); ++#endif ++} ++ ++/* Computes result = left^2 % curve->p. */ ++void vli_mod_square_fast(u64 *result, u64 *left, u64 *mod, u8 ndigits) ++{ ++ u64 product[2 * ndigits]; ++ ++ vli_square(product, left, ndigits); ++#if 1 ++ vli_mod(result, product, mod, ndigits); ++ ++#else ++ if ( mod[1] == 0xFFFFFFFF00000000ull) ++ vli_mmod_fast_sm2_256(result, product, mod, ndigits); ++ else ++ vli_mmod_fast_nist_256(result, product, mod, ndigits); ++#endif ++} ++ ++/* Computes result = (left * right) % mod. */ ++void vli_mod_mult(u64 *result, u64 *left, u64 *right, u64 *mod, u8 ndigits) ++{ ++ u64 product[2 * ndigits]; ++ ++ vli_mult(product, left, right, ndigits); ++ vli_mod(result, product, mod, ndigits); ++} ++ ++/* Computes result = left^2 % mod. */ ++void vli_mod_square(u64 *result, u64 *left, u64 *mod, u8 ndigits) ++{ ++ u64 product[2 * ndigits]; ++ ++ vli_square(product, left, ndigits); ++ vli_mod(result, product, mod, ndigits); ++} ++ ++#define DIGIT_2MSB(x) (u64)(((x) >> (VLI_DIGIT_BITS - 2)) & 0x03) ++/* Computes result = left^p % mod. */ ++void vli_mod_exp(u64 *result, u64 *left, u64 *p, u64 *mod, u8 ndigits) ++{ ++ u64 bpower[3][ndigits], t[ndigits]; ++ u64 ci_bits, ci; ++ u32 j, s; ++ u32 digits; ++ int i; ++ ++ vli_set(bpower[0], left, ndigits); ++ vli_mod_mult(bpower[1], bpower[0], left, mod, ndigits); ++ vli_mod_mult(bpower[2], bpower[1], left, mod, ndigits); ++ vli_clear(t, ndigits); ++ t[0] = 1; ++ ++ digits = vli_num_digits(p , ndigits); ++ ++ i = digits - 1; ++ for( ; i >= 0; i--){ ++ ci = p[i]; ++ ci_bits = VLI_DIGIT_BITS; ++ ++ if(i == (digits - 1)){ ++ while(!DIGIT_2MSB(ci)){ ++ ci <<= 2; ++ ci_bits -= 2; ++ } ++ } ++ ++ for( j = 0; j < ci_bits; j += 2) { ++ vli_mod_mult(t, t, t, mod, ndigits); ++ vli_mod_mult(t, t, t, mod, ndigits); ++ if((s = DIGIT_2MSB(ci)) != 0){ ++ vli_mod_mult(t, t, bpower[s-1], mod, ndigits); ++ } ++ ci <<= 2; ++ } ++ } ++ ++ vli_set(result, t, ndigits); ++} ++ ++#define EVEN(vli) (!(vli[0] & 1)) ++/* Computes result = (1 / p_input) % mod. 
All VLIs are the same size. ++ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide" ++ * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf ++ */ ++void vli_mod_inv(u64 *result, u64 *input, u64 *mod, u8 ndigits) ++{ ++ u64 a[ndigits], b[ndigits]; ++ u64 u[ndigits], v[ndigits]; ++ u64 carry; ++ int cmp_result; ++ ++ if(vli_is_zero(input, ndigits)){ ++ vli_clear(result, ndigits); ++ return; ++ } ++ ++ vli_set(a, input, ndigits); ++ vli_set(b, mod, ndigits); ++ vli_clear(u, ndigits); ++ u[0] = 1; ++ vli_clear(v, ndigits); ++ ++ while((cmp_result = vli_cmp(a, b, ndigits)) != 0){ ++ carry = 0; ++ ++ if(EVEN(a)){ ++ vli_rshift(a, a, 1, ndigits); ++ ++ if(!EVEN(u)) ++ carry = vli_add(u, u, mod, ndigits); ++ ++ vli_rshift(u, u, 1, ndigits); ++ if (carry) ++ u[ndigits - 1] |= 0x8000000000000000ull; ++ } ++ else if(EVEN(b)){ ++ vli_rshift(b, b, 1, ndigits); ++ ++ if(!EVEN(v)) ++ carry = vli_add(v, v, mod, ndigits); ++ ++ vli_rshift(v, v, 1, ndigits); ++ if(carry) ++ v[ndigits - 1] |= 0x8000000000000000ull; ++ }else if(cmp_result > 0){ ++ vli_sub(a, a, b, ndigits); ++ vli_rshift(a, a, 1, ndigits); ++ ++ if(vli_cmp(u, v, ndigits) < 0) ++ vli_add(u, u, mod, ndigits); ++ ++ vli_sub(u, u, v, ndigits); ++ if(!EVEN(u)) ++ carry = vli_add(u, u, mod, ndigits); ++ ++ vli_rshift(u, u, 1, ndigits); ++ if(carry) ++ u[ndigits - 1] |= 0x8000000000000000ull; ++ } ++ else{ ++ vli_sub(b, b, a, ndigits); ++ vli_rshift(b, b, 1, ndigits); ++ ++ if(vli_cmp(v, u, ndigits) < 0) ++ vli_add(v, v, mod, ndigits); ++ ++ vli_sub(v, v, u, ndigits); ++ if(!EVEN(v)) ++ carry = vli_add(v, v, mod, ndigits); ++ ++ vli_rshift(v, v, 1, ndigits); ++ if(carry) ++ v[ndigits - 1] |= 0x8000000000000000ull; ++ } ++ } ++ ++ vli_set(result, u, ndigits); ++} +diff --git a/lib/sbi/sm/gm/ecc.c b/lib/sbi/sm/gm/ecc.c +new file mode 100644 +index 0000000..d34e7b9 +--- /dev/null ++++ b/lib/sbi/sm/gm/ecc.c +@@ -0,0 +1,356 @@ ++#include "sm/gm/ecc.h" ++#include "sm/gm/big.h" ++ ++/* Returns 1 if point is the point at infinity, 0 otherwise. 
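++ * The point at infinity is represented here as (x, y) == (0, 0), which is never
++ * an affine point of the curves used (their b parameter is nonzero).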
*/ ++int ecc_point_is_zero(struct ecc_curve *curve, ecc_point *point) ++{ ++ return (vli_is_zero(point->x, curve->ndigits) ++ && vli_is_zero(point->y, curve->ndigits)); ++} ++ ++/* Double in place */ ++void ecc_point_double_jacobian(struct ecc_curve *curve, u64 *X1, u64 *Y1, u64 *Z1) ++{ ++ /* t1 = X, t2 = Y, t3 = Z */ ++ u64 t4[ECC_MAX_DIGITS]; ++ u64 t5[ECC_MAX_DIGITS]; ++ ++ if(vli_is_zero(Z1, curve->ndigits)) ++ return; ++ ++ vli_mod_square_fast(t4, Y1, curve->p, curve->ndigits); /* t4 = y1^2 */ ++ vli_mod_mult_fast(t5, X1, t4, curve->p, curve->ndigits); /* t5 = x1*y1^2 = A */ ++ vli_mod_square_fast(t4, t4, curve->p, curve->ndigits); /* t4 = y1^4 */ ++ vli_mod_mult_fast(Y1, Y1, Z1, curve->p, curve->ndigits); /* t2 = y1*z1 = z3 */ ++ vli_mod_square_fast(Z1, Z1, curve->p, curve->ndigits); /* t3 = z1^2 */ ++ ++ vli_mod_add(X1, X1, Z1, curve->p, curve->ndigits); /* t1 = x1 + z1^2 */ ++ vli_mod_add(Z1, Z1, Z1, curve->p, curve->ndigits); /* t3 = 2*z1^2 */ ++ vli_mod_sub(Z1, X1, Z1, curve->p, curve->ndigits); /* t3 = x1 - z1^2 */ ++ vli_mod_mult_fast(X1, X1, Z1, curve->p, curve->ndigits); /* t1 = x1^2 - z1^4 */ ++ ++ vli_mod_add(Z1, X1, X1, curve->p, curve->ndigits); /* t3 = 2*(x1^2 - z1^4) */ ++ vli_mod_add(X1, X1, Z1, curve->p, curve->ndigits); /* t1 = 3*(x1^2 - z1^4) */ ++ if(vli_test_bit(X1, 0, curve->ndigits)){ ++ u64 carry = vli_add(X1, X1, curve->p, curve->ndigits); ++ vli_rshift(X1, X1, 1, curve->ndigits); ++ X1[ECC_MAX_DIGITS-1] |= carry << 63; ++ } ++ else{ ++ vli_rshift(X1, X1, 1, curve->ndigits); ++ } ++ ++ /* t1 = 3/2*(x1^2 - z1^4) = B */ ++ vli_mod_square_fast(Z1, X1, curve->p, curve->ndigits); /* t3 = B^2 */ ++ vli_mod_sub(Z1, Z1, t5, curve->p, curve->ndigits); /* t3 = B^2 - A */ ++ vli_mod_sub(Z1, Z1, t5, curve->p, curve->ndigits); /* t3 = B^2 - 2A = x3 */ ++ vli_mod_sub(t5, t5, Z1, curve->p, curve->ndigits); /* t5 = A - x3 */ ++ vli_mod_mult_fast(X1, X1, t5, curve->p, curve->ndigits); /* t1 = B * (A - x3) */ ++ vli_mod_sub(t4, X1, t4, curve->p, curve->ndigits); /* t4 = B * (A - x3) - y1^4 = y3 */ ++ ++ vli_set(X1, Z1, curve->ndigits); ++ vli_set(Z1, Y1, curve->ndigits); ++ vli_set(Y1, t4, curve->ndigits); ++} ++ ++/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */ ++void apply_z(struct ecc_curve *curve, u64 *X1, u64 *Y1, u64 *Z) ++{ ++ u64 t1[ECC_MAX_DIGITS]; ++ ++ vli_mod_square_fast(t1, Z, curve->p, curve->ndigits); /* z^2 */ ++ vli_mod_mult_fast(X1, X1, t1, curve->p, curve->ndigits); /* x1 * z^2 */ ++ vli_mod_mult_fast(t1, t1, Z, curve->p, curve->ndigits); /* z^3 */ ++ vli_mod_mult_fast(Y1, Y1, t1, curve->p, curve->ndigits); /* y1 * z^3 */ ++} ++ ++/* P = (x1, y1) => 2P, (x2, y2) => P' */ ++void XYcZ_initial_double(struct ecc_curve *curve, u64 *X1, u64 *Y1, u64 *X2, u64 *Y2, u64 *initialZ) ++{ ++ u64 z[ECC_MAX_DIGITS]; ++ ++ vli_set(X2, X1, curve->ndigits); ++ vli_set(Y2, Y1, curve->ndigits); ++ ++ if(initialZ){ ++ vli_set(z, initialZ, curve->ndigits); ++ } ++ else{ ++ vli_clear(z, curve->ndigits); ++ z[0] = 1; ++ } ++ apply_z(curve, X1, Y1, z); ++ ++ ecc_point_double_jacobian(curve, X1, Y1, z); ++ ++ apply_z(curve, X2, Y2, z); ++} ++ ++/* Input P = (x1, y1, Z), Q = (x2, y2, Z) ++ Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3) ++ or P => P', Q => P + Q ++ */ ++void XYcZ_add(struct ecc_curve *curve, u64 *X1, u64 *Y1, u64 *X2, u64 *Y2) ++{ ++ /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ ++ u64 t5[ECC_MAX_DIGITS]; ++ ++ vli_mod_sub(t5, X2, X1, curve->p, curve->ndigits); /* t5 = x2 - x1 */ ++ vli_mod_square_fast(t5, t5, curve->p, curve->ndigits); /* t5 = (x2 - x1)^2 = A */ ++ 
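++  /* Co-Z note: both operands share one implicit Z coordinate, so the addition
++   * below needs only field multiplications and subtractions, no inversion;
++   * the shared Z is recovered once, at the end of the ladder in
++   * ecc_point_mult(). */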
++  vli_mod_mult_fast(X1, X1, t5, curve->p, curve->ndigits); /* t1 = x1*A = B */
++  vli_mod_mult_fast(X2, X2, t5, curve->p, curve->ndigits); /* t3 = x2*A = C */
++  vli_mod_sub(Y2, Y2, Y1, curve->p, curve->ndigits); /* t4 = y2 - y1 */
++  vli_mod_square_fast(t5, Y2, curve->p, curve->ndigits); /* t5 = (y2 - y1)^2 = D */
++
++  vli_mod_sub(t5, t5, X1, curve->p, curve->ndigits); /* t5 = D - B */
++  vli_mod_sub(t5, t5, X2, curve->p, curve->ndigits); /* t5 = D - B - C = x3 */
++  vli_mod_sub(X2, X2, X1, curve->p, curve->ndigits); /* t3 = C - B */
++  vli_mod_mult_fast(Y1, Y1, X2, curve->p, curve->ndigits); /* t2 = y1*(C - B) */
++  vli_mod_sub(X2, X1, t5, curve->p, curve->ndigits); /* t3 = B - x3 */
++  vli_mod_mult_fast(Y2, Y2, X2, curve->p, curve->ndigits); /* t4 = (y2 - y1)*(B - x3) */
++  vli_mod_sub(Y2, Y2, Y1, curve->p, curve->ndigits); /* t4 = y3 */
++
++  vli_set(X2, t5, curve->ndigits);
++}
++
++/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
++ * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
++ * or P => P - Q, Q => P + Q
++ */
++void XYcZ_addC(struct ecc_curve *curve, u64 *X1, u64 *Y1, u64 *X2, u64 *Y2)
++{
++  /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
++  u64 t5[ECC_MAX_DIGITS];
++  u64 t6[ECC_MAX_DIGITS];
++  u64 t7[ECC_MAX_DIGITS];
++
++  vli_mod_sub(t5, X2, X1, curve->p, curve->ndigits); /* t5 = x2 - x1 */
++  vli_mod_square_fast(t5, t5, curve->p, curve->ndigits); /* t5 = (x2 - x1)^2 = A */
++  vli_mod_mult_fast(X1, X1, t5, curve->p, curve->ndigits); /* t1 = x1*A = B */
++  vli_mod_mult_fast(X2, X2, t5, curve->p, curve->ndigits); /* t3 = x2*A = C */
++  vli_mod_add(t5, Y2, Y1, curve->p, curve->ndigits); /* t5 = y2 + y1 */
++  vli_mod_sub(Y2, Y2, Y1, curve->p, curve->ndigits); /* t4 = y2 - y1 */
++
++  vli_mod_sub(t6, X2, X1, curve->p, curve->ndigits); /* t6 = C - B */
++  vli_mod_mult_fast(Y1, Y1, t6, curve->p, curve->ndigits); /* t2 = y1 * (C - B) */
++  vli_mod_add(t6, X1, X2, curve->p, curve->ndigits); /* t6 = B + C */
++  vli_mod_square_fast(X2, Y2, curve->p, curve->ndigits); /* t3 = (y2 - y1)^2 */
++  vli_mod_sub(X2, X2, t6, curve->p, curve->ndigits); /* t3 = x3 */
++
++  vli_mod_sub(t7, X1, X2, curve->p, curve->ndigits); /* t7 = B - x3 */
++  vli_mod_mult_fast(Y2, Y2, t7, curve->p, curve->ndigits); /* t4 = (y2 - y1)*(B - x3) */
++  vli_mod_sub(Y2, Y2, Y1, curve->p, curve->ndigits); /* t4 = y3 */
++
++  vli_mod_square_fast(t7, t5, curve->p, curve->ndigits); /* t7 = (y2 + y1)^2 = F */
++  vli_mod_sub(t7, t7, t6, curve->p, curve->ndigits); /* t7 = x3' */
++  vli_mod_sub(t6, t7, X1, curve->p, curve->ndigits); /* t6 = x3' - B */
++  vli_mod_mult_fast(t6, t6, t5, curve->p, curve->ndigits); /* t6 = (y2 + y1)*(x3' - B) */
++  vli_mod_sub(Y1, t6, Y1, curve->p, curve->ndigits); /* t2 = y3' */
++
++  vli_set(X1, t7, curve->ndigits);
++}
++
++void ecc_point_mult(struct ecc_curve *curve, ecc_point *result, ecc_point *point, u64 *scalar, u64 *initialZ)
++{
++  /* R0 and R1 */
++  u64 Rx[2][ECC_MAX_DIGITS];
++  u64 Ry[2][ECC_MAX_DIGITS];
++  u64 z[ECC_MAX_DIGITS];
++  int i, nb;
++
++  vli_set(Rx[1], point->x, curve->ndigits);
++  vli_set(Ry[1], point->y, curve->ndigits);
++
++  XYcZ_initial_double(curve, Rx[1], Ry[1], Rx[0], Ry[0], initialZ);
++
++  for(i = vli_num_bits(scalar, curve->ndigits) - 2; i > 0; --i){
++    nb = !vli_test_bit(scalar, i, curve->ndigits);
++    XYcZ_addC(curve, Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
++    XYcZ_add(curve, Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
++  }
++
++  nb = !vli_test_bit(scalar, 0, curve->ndigits);
++  XYcZ_addC(curve, Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
++
++  /* Find final 1/Z value.
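++   * The ladder tracks only X and Y; the shared Z is reconstructed from the
++   * known affine base point as z = yP * Xb / (xP * Yb * (X1 - X0)), which the
++   * field operations below compute step by step.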
*/ ++ vli_mod_sub(z, Rx[1], Rx[0], curve->p, curve->ndigits); /* X1 - X0 */ ++ vli_mod_mult_fast(z, z, Ry[1-nb], curve->p, curve->ndigits); /* Yb * (X1 - X0) */ ++ vli_mod_mult_fast(z, z, point->x, curve->p, curve->ndigits); /* xP * Yb * (X1 - X0) */ ++ vli_mod_inv(z, z, curve->p, curve->ndigits); /* 1 / (xP * Yb * (X1 - X0)) */ ++ vli_mod_mult_fast(z, z, point->y, curve->p, curve->ndigits); /* yP / (xP * Yb * (X1 - X0)) */ ++ vli_mod_mult_fast(z, z, Rx[1-nb], curve->p, curve->ndigits); /* Xb * yP / (xP * Yb * (X1 - X0)) */ ++ /* End 1/Z calculation */ ++ ++ XYcZ_add(curve, Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]); ++ ++ apply_z(curve, Rx[0], Ry[0], z); ++ ++ vli_set(result->x, Rx[0], curve->ndigits); ++ vli_set(result->y, Ry[0], curve->ndigits); ++} ++ ++static u32 max(u32 a, u32 b) ++{ ++ return (a > b ? a : b); ++} ++ ++void ecc_point_mult2(struct ecc_curve *curve, ecc_point *result, ecc_point *g, ecc_point *p, u64 *s, u64 *t) ++{ ++ u64 tx[ECC_MAX_DIGITS]; ++ u64 ty[ECC_MAX_DIGITS]; ++ u64 tz[ECC_MAX_DIGITS]; ++ u64 z[ECC_MAX_DIGITS]; ++ ecc_point sum; ++ u64 *rx; ++ u64 *ry; ++ int i; ++ ++ rx = result->x; ++ ry = result->y; ++ ++ /* Calculate sum = G + Q. */ ++ vli_set(sum.x, p->x, curve->ndigits); ++ vli_set(sum.y, p->y, curve->ndigits); ++ vli_set(tx, g->x, curve->ndigits); ++ vli_set(ty, g->y, curve->ndigits); ++ ++ vli_mod_sub(z, sum.x, tx, curve->p, curve->ndigits); /* Z = x2 - x1 */ ++ XYcZ_add(curve, tx, ty, sum.x, sum.y); ++ vli_mod_inv(z, z, curve->p, curve->ndigits); /* Z = 1/Z */ ++ apply_z(curve, sum.x, sum.y, z); ++ ++ /* Use Shamir's trick to calculate u1*G + u2*Q */ ++ ecc_point *points[4] = {NULL, g, p, &sum}; ++ u32 numBits = max(vli_num_bits(s, curve->ndigits), vli_num_bits(t, curve->ndigits)); ++ ++ ecc_point *point = points[(!!vli_test_bit(s, numBits-1, curve->ndigits)) ++ | ((!!vli_test_bit(t, numBits-1, curve->ndigits)) << 1)]; ++ vli_set(rx, point->x, curve->ndigits); ++ vli_set(ry, point->y, curve->ndigits); ++ vli_clear(z, curve->ndigits); ++ z[0] = 1; ++ ++ for(i = numBits - 2; i >= 0; --i){ ++ ecc_point_double_jacobian(curve, rx, ry, z); ++ ++ int index = (!!vli_test_bit(s, i, curve->ndigits)) ++ | ((!!vli_test_bit(t, i, curve->ndigits)) << 1); ++ ecc_point *point = points[index]; ++ if(point){ ++ vli_set(tx, point->x, curve->ndigits); ++ vli_set(ty, point->y, curve->ndigits); ++ apply_z(curve, tx, ty, z); ++ vli_mod_sub(tz, rx, tx, curve->p, curve->ndigits); /* Z = x2 - x1 */ ++ XYcZ_add(curve, tx, ty, rx, ry); ++ vli_mod_mult_fast(z, z, tz, curve->p, curve->ndigits); ++ } ++ } ++ ++ vli_mod_inv(z, z, curve->p, curve->ndigits); /* Z = 1/Z */ ++ apply_z(curve, rx, ry, z); ++} ++ ++void ecc_point_add(struct ecc_curve *curve, ecc_point *result, ecc_point *left, ecc_point *right) ++{ ++ u64 x1[ECC_MAX_DIGITS]; ++ u64 y1[ECC_MAX_DIGITS]; ++ u64 x2[ECC_MAX_DIGITS]; ++ u64 y2[ECC_MAX_DIGITS]; ++ u64 z[ECC_MAX_DIGITS]; ++ ++ vli_set(x1, left->x, curve->ndigits); ++ vli_set(y1, left->y, curve->ndigits); ++ vli_set(x2, right->x, curve->ndigits); ++ vli_set(y2, right->y, curve->ndigits); ++ ++ vli_mod_sub(z, x2, x1, curve->p, curve->ndigits); /* Z = x2 - x1 */ ++ ++ XYcZ_add(curve, x1, y1, x2, y2); ++ vli_mod_inv(z, z, curve->p, curve->ndigits); /* Z = 1/Z */ ++ apply_z(curve, x2,y2, z); ++ ++ vli_set(result->x, x2, curve->ndigits); ++ vli_set(result->y, y2, curve->ndigits); ++} ++ ++void ecc_bytes2native(u64 *native, void *bytes, u8 ndigits) ++{ ++ u64 *_bytes = (u64*)bytes; ++ unsigned int i; ++ unsigned int le_int = 1; ++ unsigned char* le_ch = (unsigned 
char*)(&le_int);
++
++  //little endian
++  if(*le_ch)
++  {
++    for(i = 0; i < ndigits/2; ++i){
++      if(native == _bytes){
++        u64 temp;
++        temp = be64_to_le64(native[i]);
++        native[i] = be64_to_le64(_bytes[ndigits - i - 1]);
++        _bytes[ndigits - i - 1] = temp;
++      }
++      else{
++        native[i] = be64_to_le64(_bytes[ndigits - i - 1]);
++        native[ndigits - i - 1] = be64_to_le64(_bytes[i]);
++      }
++    }
++  }
++  //big endian
++  else
++  {
++    for(i = 0; i < ndigits/2; ++i){
++      if(native == _bytes){
++        u64 temp;
++        temp = native[i];
++        native[i] = _bytes[ndigits - i - 1];
++        _bytes[ndigits - i - 1] = temp;
++      }
++      else{
++        native[i] = _bytes[ndigits - i - 1];
++        native[ndigits - i - 1] = _bytes[i];
++      }
++    }
++  }
++}
++
++void ecc_native2bytes(void *bytes, u64 *native, u8 ndigits)
++{
++  u64 *_bytes = (u64*)bytes;
++  unsigned int i;
++  unsigned int le_int = 1;
++  unsigned char* le_ch = (unsigned char*)(&le_int);
++
++  //little endian
++  if(*le_ch)
++  {
++    for(i = 0; i < ndigits/2; ++i){
++      if(_bytes == native){
++        u64 temp;
++        temp = le64_to_be64(_bytes[ndigits - i - 1]);
++        _bytes[ndigits - i - 1] = le64_to_be64(native[i]);
++        native[i] = temp;
++      }
++      else{
++        _bytes[i] = le64_to_be64(native[ndigits - i - 1]);
++        _bytes[ndigits - i - 1] = le64_to_be64(native[i]);
++      }
++    }
++  }
++  //big endian
++  else
++  {
++    for(i = 0; i < ndigits/2; ++i){
++      if(_bytes == native){
++        u64 temp;
++        temp = _bytes[ndigits - i - 1];
++        _bytes[ndigits - i - 1] = native[i];
++        native[i] = temp;
++      }
++      else{
++        _bytes[i] = native[ndigits - i - 1];
++        _bytes[ndigits - i - 1] = native[i];
++      }
++    }
++  }
++}
+diff --git a/lib/sbi/sm/gm/random.c b/lib/sbi/sm/gm/random.c
+new file mode 100644
+index 0000000..1e48eeb
+--- /dev/null
++++ b/lib/sbi/sm/gm/random.c
+@@ -0,0 +1,18 @@
++#include "sm/gm/random.h"
++
++int vli_get_random(u8 *data, u32 len)
++{
++  int ret = 0;
++
++  //TODO: replace this stub with a real hardware entropy source
++  /*srand(0x11223344);
++  int i=0;
++  for(i=0; i < sizeof(u32)/sizeof(u8); ++i)
++  {
++    *data = (u8)rand();
++    data += 1;
++  }*/
++  *(u32*)data = 0x11223344;
++
++  return ret;
++}
+diff --git a/lib/sbi/sm/gm/sm2.c b/lib/sbi/sm/gm/sm2.c
+new file mode 100644
+index 0000000..5a22dd7
+--- /dev/null
++++ b/lib/sbi/sm/gm/sm2.c
+@@ -0,0 +1,603 @@
++#include "sm/gm/random.h"
++#include "sm/gm/big.h"
++#include "sm/gm/ecc.h"
++#include "sm/gm/sm2.h"
++#include "sm/gm/sm3.h"
++#include "sbi/sbi_string.h"
++
++/* The compiler may emit library calls to memset; route them to sbi_memset. */
++void *memset(void *s, int c, size_t count)
++{
++  return sbi_memset(s, c, count);
++}
++
++static int mem_cmp(char* s1, char* s2, int count)
++{
++  int i = 0;
++
++  if(!s1 || !s2)
++    return -1;
++
++  for(; i < count; ++i)
++  {
++    if(*(s1 + i) != *(s2 + i))
++      return -1;
++  }
++
++  return 0;
++}
++
++struct ecc_curve sm2_curve = {
++  .ndigits = ECC_MAX_DIGITS,
++  .g = {
++    .x = {
++      0x715A4589334C74C7ull, 0x8FE30BBFF2660BE1ull,
++      0x5F9904466A39C994ull, 0x32C4AE2C1F198119ull
++    },
++    .y = {
++      0x02DF32E52139F0A0ull, 0xD0A9877CC62A4740ull,
++      0x59BDCEE36B692153ull, 0xBC3736A2F4F6779Cull
++    },
++  },
++  .p = {
++    0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull,
++    0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFEFFFFFFFFull
++  },
++  .n = {
++    0x53BBF40939D54123ull, 0x7203DF6B21C6052Bull,
++    0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFEFFFFFFFFull
++  },
++  .h = {
++    0x0000000000000001ull, 0x0000000000000000ull,
++    0x0000000000000000ull, 0x0000000000000000ull,
++  },
++  .a = {
++    0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFF00000000ull,
++    0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFEFFFFFFFFull
++  },
++  .b = {
++    0xDDBCBD414D940E93ull, 0xF39789F515AB8F92ull,
++    0x4D5A9E4BCF6509A7ull,
++    0x28E9FA9E9D9F5E34ull
++  },
++};
++
++/* x2_ = 2^w + (x2 & (2^w - 1)) */
++void sm2_w(u64 *result, u64 *x)
++{
++  result[0] = x[0];
++  result[1] = x[1];
++  result[1] |= 0x80;
++  result[2] = 0;
++  result[3] = 0;
++}
++
++void sm3_kdf(u8 *Z, u32 zlen, u8 *K, u32 klen)
++{
++  u32 ct = 0x00000001;
++  u8 ct_char[32];
++  u8 *hash = K;
++  u32 i, t;
++  struct sm3_context md[1];
++
++  t = klen/ECC_NUMWORD;
++  //s4: K=Ha1||Ha2||...
++  for(i = 0; i < t; i++){
++    //s2: Hai=Hv(Z||ct)
++    sm3_init(md);
++    sm3_update(md, Z, zlen);
++    put_unaligned_be32(ct, ct_char);
++    sm3_update(md, ct_char, 4);
++    sm3_final(md, hash);
++    hash += 32;
++    ct++;
++  }
++
++  t = klen % ECC_NUMWORD; /* bytes remaining in the last partial block */
++  if(t){
++    sm3_init(md);
++    sm3_update(md, Z, zlen);
++    put_unaligned_be32(ct, ct_char);
++    sm3_update(md, ct_char, 4);
++    sm3_final(md, ct_char);
++    sbi_memcpy(hash, ct_char, t);
++  }
++}
++
++void sm3_z(u8 *id, u32 idlen, ecc_point *pub, u8 *hash)
++{
++  u8 a[ECC_NUMWORD];
++  u8 b[ECC_NUMWORD];
++  u8 x[ECC_NUMWORD];
++  u8 y[ECC_NUMWORD];
++  u8 idlen_char[2];
++  struct sm3_context md[1];
++
++  put_unaligned_be16(idlen<<3, idlen_char);
++
++  ecc_bytes2native((u64*)a, sm2_curve.a, sm2_curve.ndigits);
++  ecc_bytes2native((u64*)b, sm2_curve.b, sm2_curve.ndigits);
++  ecc_bytes2native((u64*)x, sm2_curve.g.x, sm2_curve.ndigits);
++  ecc_bytes2native((u64*)y, sm2_curve.g.y, sm2_curve.ndigits);
++
++  sm3_init(md);
++  sm3_update(md, idlen_char, 2);
++  sm3_update(md, id, idlen);
++  sm3_update(md, a, ECC_NUMWORD);
++  sm3_update(md, b, ECC_NUMWORD);
++  sm3_update(md, x, ECC_NUMWORD);
++  sm3_update(md, y, ECC_NUMWORD);
++  sm3_update(md, (u8*)pub->x, ECC_NUMWORD);
++  sm3_update(md, (u8*)pub->y, ECC_NUMWORD);
++  sm3_final(md, hash);
++
++  return;
++}
++
++int sm2_valid_public_key(ecc_point *publicKey)
++{
++  u64 na[ECC_MAX_DIGITS] = {3}; /* -a mod p = 3, since a = p - 3 */
++  u64 tmp1[ECC_MAX_DIGITS];
++  u64 tmp2[ECC_MAX_DIGITS];
++
++  if(ecc_point_is_zero(&sm2_curve, publicKey))
++    return 1;
++
++  if(vli_cmp(sm2_curve.p, publicKey->x, sm2_curve.ndigits) != 1
++      || vli_cmp(sm2_curve.p, publicKey->y, sm2_curve.ndigits) != 1)
++    return 1;
++
++  /* tmp1 = y^2 */
++  vli_mod_square_fast(tmp1, publicKey->y, sm2_curve.p, sm2_curve.ndigits);
++  /* tmp2 = x^2 */
++  vli_mod_square_fast(tmp2, publicKey->x, sm2_curve.p, sm2_curve.ndigits);
++  /* tmp2 = x^2 + a = x^2 - 3 */
++  vli_mod_sub(tmp2, tmp2, na, sm2_curve.p, sm2_curve.ndigits);
++  /* tmp2 = x^3 + ax */
++  vli_mod_mult_fast(tmp2, tmp2, publicKey->x, sm2_curve.p, sm2_curve.ndigits);
++  /* tmp2 = x^3 + ax + b */
++  vli_mod_add(tmp2, tmp2, sm2_curve.b, sm2_curve.p, sm2_curve.ndigits);
++
++  /* Make sure that y^2 == x^3 + ax + b */
++  if(vli_cmp(tmp1, tmp2, sm2_curve.ndigits) != 0)
++    return 1;
++
++  return 0;
++}
++
++int sm2_make_prikey(u8 *prikey)
++{
++  u64 pri[ECC_MAX_DIGITS];
++  int i = 10;
++
++  do{
++    vli_get_random((u8*)pri, ECC_NUMWORD);
++    if(vli_cmp(sm2_curve.n, pri, sm2_curve.ndigits) != 1){
++      vli_sub(pri, pri, sm2_curve.n, sm2_curve.ndigits);
++    }
++
++    /* The private key cannot be 0 (mod p).
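++     * Note: vli_get_random() is currently a fixed-constant stub (see
++     * gm/random.c), so keys produced here are predictable until a real
++     * entropy source is wired in.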
*/ ++ if(!vli_is_zero(pri, sm2_curve.ndigits)){ ++ ecc_native2bytes(prikey, pri, sm2_curve.ndigits); ++ return 0; ++ } ++ }while(i--); ++ ++ return -1; ++} ++ ++int sm2_make_pubkey(u8 *prikey, ecc_point *pubkey) ++{ ++ ecc_point pub[1]; ++ u64 pri[ECC_MAX_DIGITS]; ++ ++ ecc_bytes2native(pri, prikey, sm2_curve.ndigits); ++ ecc_point_mult(&sm2_curve, pub, &sm2_curve.g, pri, NULL); ++ ecc_native2bytes(pubkey->x, pub->x, sm2_curve.ndigits); ++ ecc_native2bytes(pubkey->y, pub->y, sm2_curve.ndigits); ++ ++ return 0; ++} ++ ++int sm2_make_keypair(u8 *prikey, ecc_point *pubkey) ++{ ++ sm2_make_prikey(prikey); ++ sm2_make_pubkey(prikey, pubkey); ++ return 0; ++} ++ ++int sm2_point_mult(ecc_point *G, u8 *k, ecc_point *P) ++{ ++ int rc = 0; ++ ++ ecc_point G_[1]; ++ ecc_point P_[1]; ++ u64 k_[ECC_MAX_DIGITS]; ++ ++ ecc_bytes2native(k_, k, sm2_curve.ndigits); ++ ecc_bytes2native(G_->x, G->x, sm2_curve.ndigits); ++ ecc_bytes2native(G_->y, G->y, sm2_curve.ndigits); ++ ++ ecc_point_mult(&sm2_curve, P_, G_, k_, NULL); ++ ++ ecc_native2bytes(P->x, P_->x, sm2_curve.ndigits); ++ ecc_native2bytes(P->y, P_->y, sm2_curve.ndigits); ++ ++ return rc; ++} ++ ++int sm2_sign(u8 *r_, u8 *s_, u8 *prikey, u8 *hash_) ++{ ++ u64 k[ECC_MAX_DIGITS]; ++ u64 one[ECC_MAX_DIGITS] = {1}; ++ u64 random[ECC_MAX_DIGITS]; ++ u64 pri[ECC_MAX_DIGITS]; ++ u64 hash[ECC_MAX_DIGITS]; ++ u64 r[ECC_MAX_DIGITS]; ++ u64 s[ECC_MAX_DIGITS]; ++ ++ ecc_point p; ++ ++ ecc_bytes2native(pri, prikey, sm2_curve.ndigits); ++ ecc_bytes2native(hash, hash_, sm2_curve.ndigits); ++ ++ vli_get_random((u8*)random, ECC_NUMWORD); ++ if(vli_is_zero(random, sm2_curve.ndigits)){ ++ /* The random number must not be 0. */ ++ return 0; ++ } ++ ++ vli_set(k, random, sm2_curve.ndigits); ++ if(vli_cmp(sm2_curve.n, k, sm2_curve.ndigits) != 1){ ++ vli_sub(k, k, sm2_curve.n, sm2_curve.ndigits); ++ } ++ ++ /* tmp = k * G */ ++ ecc_point_mult(&sm2_curve, &p, &sm2_curve.g, k, NULL); ++ ++ /* r = x1 + e (mod n) */ ++ vli_mod_add(r, p.x, hash, sm2_curve.n, sm2_curve.ndigits); ++ if(vli_cmp(sm2_curve.n, r, sm2_curve.ndigits) != 1){ ++ vli_sub(r, r, sm2_curve.n, sm2_curve.ndigits); ++ } ++ ++ if(vli_is_zero(r, sm2_curve.ndigits)){ ++ /* If r == 0, fail (need a different random number). */ ++ return 0; ++ } ++ ++ /* s = r*d */ ++ vli_mod_mult(s, r, pri, sm2_curve.n, sm2_curve.ndigits); ++ /* k-r*d */ ++ vli_mod_sub(s, k, s, sm2_curve.n, sm2_curve.ndigits); ++ /* 1+d */ ++ vli_mod_add(pri, pri, one, sm2_curve.n, sm2_curve.ndigits); ++ /* (1+d)' */ ++ vli_mod_inv(pri, pri, sm2_curve.n, sm2_curve.ndigits); ++ /* (1+d)'*(k-r*d) */ ++ vli_mod_mult(s, pri, s, sm2_curve.n, sm2_curve.ndigits); ++ ++ ecc_native2bytes(r_, r, sm2_curve.ndigits); ++ ecc_native2bytes(s_, s, sm2_curve.ndigits); ++ ++ return 1; ++} ++ ++int sm2_verify(ecc_point *pubkey, u8 *hash_, u8 *r_, u8 *s_) ++{ ++ ecc_point result; ++ ecc_point pub[1]; ++ u64 t[ECC_MAX_DIGITS]; ++ u64 r[ECC_MAX_DIGITS]; ++ u64 s[ECC_MAX_DIGITS]; ++ u64 hash[ECC_MAX_DIGITS]; ++ ++ ecc_bytes2native(pub->x, pubkey->x, sm2_curve.ndigits); ++ ecc_bytes2native(pub->y, pubkey->y, sm2_curve.ndigits); ++ ecc_bytes2native(r, r_, sm2_curve.ndigits); ++ ecc_bytes2native(s, s_, sm2_curve.ndigits); ++ ecc_bytes2native(hash, hash_, sm2_curve.ndigits); ++ ++ if(vli_is_zero(r, sm2_curve.ndigits) || vli_is_zero(s, sm2_curve.ndigits)){ ++ /* r, s must not be 0. */ ++ return -1; ++ } ++ ++ if(vli_cmp(sm2_curve.n, r, sm2_curve.ndigits) != 1 ++ || vli_cmp(sm2_curve.n, s, sm2_curve.ndigits) != 1){ ++ /* r, s must be < n. 
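++   * A well-formed signer reduces r and s mod n, so any larger value marks a
++   * malformed signature and is rejected before the scalar multiplications.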
++ */
++    return -1;
++  }
++
++  vli_mod_add(t, r, s, sm2_curve.n, sm2_curve.ndigits); /* t = (r + s) mod n */
++  if(vli_is_zero(t, sm2_curve.ndigits)) /* t must not be 0 */
++    return -1;
++
++  ecc_point_mult2(&sm2_curve, &result, &sm2_curve.g, pub, s, t);
++
++  /* v = x1 + e (mod n) */
++  vli_mod_add(result.x, result.x, hash, sm2_curve.n, sm2_curve.ndigits);
++
++  if(vli_cmp(sm2_curve.n, result.x, sm2_curve.ndigits) != 1){
++    vli_sub(result.x, result.x, sm2_curve.n, sm2_curve.ndigits);
++  }
++
++  /* Accept only if v == r. */
++  return vli_cmp(result.x, r, sm2_curve.ndigits);
++}
++
++int sm2_encrypt(ecc_point *pubKey, u8 *M, u32 Mlen, u8 *C, u32 *Clen)
++{
++  u64 k[ECC_MAX_DIGITS];
++  u8 t[SM3_DATA_LEN]; /* NOTE: assumes Mlen <= SM3_DATA_LEN, since t holds the KDF output */
++  ecc_point pub[1];
++  ecc_point *C1 = (ecc_point *)C;
++  u8 *C2 = C + ECC_NUMWORD*2;
++  u8 *C3 = C + ECC_NUMWORD*2 + Mlen;
++
++  ecc_point kP;
++  u8 *x2 = (u8*)kP.x;
++  u8 *y2 = (u8*)kP.y;
++  u8 *x2y2 = (u8*)kP.x;
++  struct sm3_context md[1];
++  int i=0;
++
++  ecc_bytes2native(pub->x, pubKey->x, sm2_curve.ndigits);
++  ecc_bytes2native(pub->y, pubKey->y, sm2_curve.ndigits);
++
++  vli_get_random((u8*)k, ECC_NUMWORD);
++
++  /* C1 = k * G */
++  ecc_point_mult(&sm2_curve, C1, &sm2_curve.g, k, NULL);
++  ecc_native2bytes(C1->x, C1->x, sm2_curve.ndigits);
++  ecc_native2bytes(C1->y, C1->y, sm2_curve.ndigits);
++
++  /* S = h * Pb */
++  ecc_point S;
++  ecc_point_mult(&sm2_curve, &S, pub, sm2_curve.h, NULL);
++  if(sm2_valid_public_key(&S) != 0)
++    return -1;
++
++  /* kP = k * Pb */
++  ecc_point_mult(&sm2_curve, &kP, pub, k, NULL);
++  if(vli_is_zero(kP.x, sm2_curve.ndigits)
++      | vli_is_zero(kP.y, sm2_curve.ndigits)){
++    return 0;
++  }
++  ecc_native2bytes(kP.x, kP.x, sm2_curve.ndigits);
++  ecc_native2bytes(kP.y, kP.y, sm2_curve.ndigits);
++
++  /* t = KDF(x2 || y2, klen) */
++  sm3_kdf(x2y2, ECC_NUMWORD*2, t, Mlen);
++
++  /* C2 = M xor t */
++  for(i = 0; i < Mlen; i++){
++    C2[i] = M[i]^t[i];
++  }
++
++  /* C3 = Hash(x2 || M || y2) */
++  sm3_init(md);
++  sm3_update(md, x2, ECC_NUMWORD);
++  sm3_update(md, M, Mlen);
++  sm3_update(md, y2, ECC_NUMWORD);
++  sm3_final(md, C3);
++
++  if(Clen)
++    *Clen = Mlen + ECC_NUMWORD*2 + SM3_DATA_LEN;
++
++  return 0;
++}
++
++int sm2_decrypt(u8 *prikey, u8 *C, u32 Clen, u8 *M, u32 *Mlen)
++{
++  u8 hash[SM3_DATA_LEN];
++  u64 pri[ECC_MAX_DIGITS];
++  ecc_point *C1 = (ecc_point *)C;
++  u8 *C2 = C + ECC_NUMWORD*2;
++  u8 *C3 = C + Clen - SM3_DATA_LEN;
++  ecc_point dB;
++  u64 *x2 = dB.x;
++  u64 *y2 = dB.y;
++  u64 *x2y2 = x2;
++  struct sm3_context md[1];
++  int outlen = Clen - ECC_NUMWORD*2 - SM3_DATA_LEN;
++  int i=0;
++
++  ecc_bytes2native(pri, prikey, sm2_curve.ndigits);
++  ecc_bytes2native(C1->x, C1->x, sm2_curve.ndigits);
++  ecc_bytes2native(C1->y, C1->y, sm2_curve.ndigits);
++
++  if(sm2_valid_public_key(C1) != 0)
++    return -1;
++
++  ecc_point S;
++  ecc_point_mult(&sm2_curve, &S, C1, sm2_curve.h, NULL);
++  if(sm2_valid_public_key(&S) != 0)
++    return -1;
++
++  ecc_point_mult(&sm2_curve, &dB, C1, pri, NULL);
++  ecc_native2bytes(x2, x2, sm2_curve.ndigits);
++  ecc_native2bytes(y2, y2, sm2_curve.ndigits);
++
++  sm3_kdf((u8*)x2y2, ECC_NUMWORD*2, M, outlen);
++  if(vli_is_zero(x2, sm2_curve.ndigits)
++      | vli_is_zero(y2, sm2_curve.ndigits)){
++    return 0;
++  }
++
++  for(i = 0; i < outlen; i++)
++    M[i] = M[i]^C2[i];
++
++  sm3_init(md);
++  sm3_update(md, (u8*)x2, ECC_NUMWORD);
++  sm3_update(md, M, outlen);
++  sm3_update(md, (u8*)y2, ECC_NUMWORD);
++  sm3_final(md, hash);
++
++  *Mlen = outlen;
++  if(mem_cmp((void*)hash, (void*)C3, SM3_DATA_LEN) != 0)
++    return -1;
++  else
++    return 0;
++}
++
++int sm2_shared_point(u8* selfPriKey, u8* selfTempPriKey, ecc_point*
selfTempPubKey, ++ ecc_point *otherPubKey, ecc_point* otherTempPubKey, ecc_point *key) ++{ ++ ecc_point selfTempPub; ++ ecc_point otherTempPub; ++ ecc_point otherPub; ++ ecc_point U[1]; ++ ++ u64 selfTempPri[ECC_MAX_DIGITS]; ++ u64 selfPri[ECC_MAX_DIGITS]; ++ u64 temp1[ECC_MAX_DIGITS]; ++ u64 temp2[ECC_MAX_DIGITS]; ++ u64 tA[ECC_MAX_DIGITS]; ++ ++ ecc_bytes2native(selfTempPri, selfTempPriKey, sm2_curve.ndigits); ++ ecc_bytes2native(selfPri, selfPriKey, sm2_curve.ndigits); ++ ecc_bytes2native(selfTempPub.x, selfTempPubKey->x, sm2_curve.ndigits); ++ ecc_bytes2native(selfTempPub.y, selfTempPubKey->y, sm2_curve.ndigits); ++ ecc_bytes2native(otherTempPub.x, otherTempPubKey->x, sm2_curve.ndigits); ++ ecc_bytes2native(otherTempPub.y, otherTempPubKey->y, sm2_curve.ndigits); ++ ecc_bytes2native(otherPub.x, otherPubKey->x, sm2_curve.ndigits); ++ ecc_bytes2native(otherPub.y, otherPubKey->y, sm2_curve.ndigits); ++ ++ /***********x1_=2^w+x2 & (2^w-1)*************/ ++ sm2_w(temp1, selfTempPub.x); ++ /***********tA=(dA+x1_*rA)mod n *************/ ++ vli_mod_mult(temp1, selfTempPri, temp1, sm2_curve.n, sm2_curve.ndigits); ++ vli_mod_add(tA, selfPri, temp1, sm2_curve.n, sm2_curve.ndigits); ++ /***********x2_=2^w+x2 & (2^w-1)*************/ ++ if(sm2_valid_public_key(&otherTempPub) != 0) ++ return -1; ++ sm2_w(temp2, otherTempPub.x); ++ /**************U=[h*tA](PB+[x2_]RB)**********/ ++ /* U=[x2_]RB */ ++ ecc_point_mult(&sm2_curve, U, &otherTempPub, temp2, NULL); ++ /*U=PB+U*/ ++ ecc_point_add(&sm2_curve, U, &otherPub, U); ++ /*tA=tA*h */ ++ vli_mod_mult(tA, tA, sm2_curve.h, sm2_curve.n, sm2_curve.ndigits); ++ ecc_point_mult(&sm2_curve, U, U,tA, NULL); ++ ++ ecc_native2bytes(key->x, U->x, sm2_curve.ndigits); ++ ecc_native2bytes(key->y, U->y, sm2_curve.ndigits); ++ ++ return 0; ++} ++ ++int sm2_shared_key(ecc_point *point, u8 *ZA, u8 *ZB, u32 keyLen, u8 *key) ++{ ++ u8 Z[ECC_NUMWORD*4]; ++ sbi_memcpy(Z, point->x, ECC_NUMWORD); ++ sbi_memcpy(Z + ECC_NUMWORD, point->y, ECC_NUMWORD); ++ sbi_memcpy(Z + ECC_NUMWORD*2, ZA, ECC_NUMWORD); ++ sbi_memcpy(Z + ECC_NUMWORD*3, ZB, ECC_NUMWORD); ++ sm3_kdf(Z, ECC_NUMWORD*4, key, keyLen); ++ ++ return 0; ++} ++ ++/****hash = Hash(Ux||ZA||ZB||x1||y1||x2||y2)****/ ++int ECC_Key_ex_hash1(u8* x, ecc_point *RA, ecc_point* RB, u8 ZA[],u8 ZB[],u8 *hash) ++{ ++ struct sm3_context md[1]; ++ ++ sm3_init(md); ++ sm3_update(md, x, ECC_NUMWORD); ++ sm3_update(md, ZA, ECC_NUMWORD); ++ sm3_update(md, ZB, ECC_NUMWORD); ++ sm3_update(md, (u8*)RA->x, ECC_NUMWORD); ++ sm3_update(md, (u8*)RA->y, ECC_NUMWORD); ++ sm3_update(md, (u8*)RB->x, ECC_NUMWORD); ++ sm3_update(md, (u8*)RB->y, ECC_NUMWORD); ++ sm3_final(md, (u8*)hash); ++ ++ return 0; ++} ++ ++/****SA = Hash(temp||Uy||Hash)****/ ++int ECC_Key_ex_hash2(u8 temp, u8* y,u8 *hash, u8* SA) ++{ ++ struct sm3_context md[1]; ++ ++ sm3_init(md); ++ sm3_update(md, &temp,1); ++ sm3_update(md, y,ECC_NUMWORD); ++ sm3_update(md, hash,ECC_NUMWORD); ++ sm3_final(md, SA); ++ ++ return 0; ++} ++ ++int ECC_KeyEx_Init_I(u8 *pri, ecc_point *pub) ++{ ++ return sm2_make_pubkey(pri, pub); ++} ++ ++int ECC_KeyEx_Re_I(u8 *rb, u8 *dB, ecc_point *RA, ecc_point *PA, u8* ZA, u8 *ZB, u8 *K, u32 klen, ecc_point *RB, ecc_point *V, u8* SB) ++{ ++ u8 Z[ECC_NUMWORD*2 + ECC_NUMBITS/4]={0}; ++ u8 hash[ECC_NUMWORD]; ++ u8 temp=0x02; ++ ++ //--------B2: RB=[rb]G=(x2,y2)-------- ++ sm2_make_pubkey(rb, RB); ++ /********************************************/ ++ sm2_shared_point(dB, rb, RB, PA, RA, V); ++ //------------B7:KB=KDF(VX,VY,ZA,ZB,KLEN)---------- ++ sbi_memcpy(Z, V->x, 
ECC_NUMWORD); ++ sbi_memcpy(Z+ECC_NUMWORD, (u8*)V->y, ECC_NUMWORD); ++ sbi_memcpy(Z+ECC_NUMWORD*2, ZA,ECC_NUMWORD); ++ sbi_memcpy(Z+ECC_NUMWORD*3, ZB,ECC_NUMWORD); ++ sm3_kdf(Z,ECC_NUMWORD*4, K, klen); ++ //---------------B8:(optional) SB=hash(0x02||Vy||HASH(Vx||ZA||ZB||x1||y1||x2||y2)------------- ++ ECC_Key_ex_hash1((u8*)V->x, RA, RB, ZA, ZB, hash); ++ ECC_Key_ex_hash2(temp, (u8*)V->y, hash, SB); ++ ++ return 0; ++} ++ ++int ECC_KeyEx_Init_II(u8* ra, u8* dA, ecc_point* RA, ecc_point* RB, ecc_point* PB, u8 ++ ZA[],u8 ZB[],u8 SB[],u8 K[], u32 klen,u8 SA[]) ++{ ++ u8 Z[ECC_NUMWORD*2 + ECC_NUMWORD*2]={0}; ++ u8 hash[ECC_NUMWORD],S1[ECC_NUMWORD]; ++ u8 temp[2]={0x02,0x03}; ++ ecc_point U[1]; ++ ++ /********************************************/ ++ sm2_shared_point(dA, ra, RA, PB, RB, U); ++ /************KA=KDF(UX,UY,ZA,ZB,KLEN)**********/ ++ sbi_memcpy(Z, U->x,ECC_NUMWORD); ++ sbi_memcpy(Z+ECC_NUMWORD, U->y,ECC_NUMWORD); ++ sbi_memcpy(Z+ECC_NUMWORD*2,ZA,ECC_NUMWORD); ++ sbi_memcpy(Z+ECC_NUMWORD*2 +ECC_NUMWORD ,ZB,ECC_NUMWORD); ++ sm3_kdf(Z,ECC_NUMWORD*2+ECC_NUMWORD*2, K, klen); ++ /****S1 = Hash(0x02||Uy||Hash(Ux||ZA||ZB||x1||y1||x2||y2))****/ ++ ECC_Key_ex_hash1((u8*)U->x, RA, RB, ZA, ZB, hash); ++ ECC_Key_ex_hash2(temp[0], (u8*)U->y, hash, S1); ++ /*test S1=SB?*/ ++ if(mem_cmp((void*)S1, (void*)SB, ECC_NUMWORD)!=0) ++ return -1; ++ /*SA = Hash(0x03||yU||Hash(xU||ZA||ZB||x1||y1||x2||y2)) */ ++ ECC_Key_ex_hash2(temp[1], (u8*)U->y, hash, SA); ++ ++ return 0; ++} ++ ++int ECC_KeyEx_Re_II(ecc_point *V, ecc_point *RA, ecc_point *RB, u8 ZA[], u8 ZB[], u8 SA[]) ++{ ++ u8 hash[ECC_NUMWORD]; ++ u8 S2[ECC_NUMWORD]; ++ u8 temp=0x03; ++ ++ /*S2 = Hash(0x03||Vy||Hash(Vx||ZA||ZB||x1||y1||x2||y2))*/ ++ ECC_Key_ex_hash1((u8*)V->x, RA, RB, ZA, ZB, hash); ++ ECC_Key_ex_hash2(temp, (u8*)V->y, hash, S2); ++ ++ if(mem_cmp((void*)S2, (void*)SA, ECC_NUMWORD)!=0) ++ return -1; ++ ++ return 0; ++} +diff --git a/lib/sbi/sm/gm/sm3.c b/lib/sbi/sm/gm/sm3.c +new file mode 100644 +index 0000000..f0bb75c +--- /dev/null ++++ b/lib/sbi/sm/gm/sm3.c +@@ -0,0 +1,325 @@ ++#include "sm/gm/sm3.h" ++#include "sbi/sbi_string.h" ++ ++/* ++ * 32-bit integer manipulation macros (big endian) ++ */ ++#ifndef GET_ULONG_BE ++#define GET_ULONG_BE(n, b, i) \ ++{ \ ++ (n) = ( (unsigned long)(b)[(i)] << 24 ) \ ++ | ( (unsigned long)(b)[(i) + 1] << 16 ) \ ++ | ( (unsigned long)(b)[(i) + 2] << 8 ) \ ++ | ( (unsigned long)(b)[(i) + 3] ); \ ++} ++#endif ++ ++#ifndef PUT_ULONG_BE ++#define PUT_ULONG_BE(n, b, i) \ ++{ \ ++ (b)[(i)] = (unsigned char)((n) >> 24); \ ++ (b)[(i) + 1] = (unsigned char)((n) >> 16); \ ++ (b)[(i) + 2] = (unsigned char)((n) >> 8); \ ++ (b)[(i) + 3] = (unsigned char)((n)); \ ++} ++#endif ++ ++/* ++ * SM3 context setup ++ */ ++void sm3_init(struct sm3_context *ctx) ++{ ++ ctx->total[0] = 0; ++ ctx->total[1] = 0; ++ ++ ctx->state[0] = 0x7380166F; ++ ctx->state[1] = 0x4914B2B9; ++ ctx->state[2] = 0x172442D7; ++ ctx->state[3] = 0xDA8A0600; ++ ctx->state[4] = 0xA96F30BC; ++ ctx->state[5] = 0x163138AA; ++ ctx->state[6] = 0xE38DEE4D; ++ ctx->state[7] = 0xB0FB0E4E; ++} ++ ++static void sm3_process(struct sm3_context *ctx, unsigned char data[64]) ++{ ++ unsigned long SS1, SS2, TT1, TT2, W[68], W1[64]; ++ unsigned long A, B, C, D, E, F, G, H; ++ unsigned long T[64]; ++ unsigned long Temp1, Temp2, Temp3, Temp4, Temp5; ++ int j; ++ ++ for(j = 0; j < 16; j++) ++ T[j] = 0x79CC4519; ++ for(j = 16; j < 64; j++) ++ T[j] = 0x7A879D8A; ++ ++ GET_ULONG_BE(W[ 0], data, 0); ++ GET_ULONG_BE(W[ 1], data, 4); ++ GET_ULONG_BE(W[ 2], data, 8); ++ 
GET_ULONG_BE(W[ 3], data, 12); ++ GET_ULONG_BE(W[ 4], data, 16); ++ GET_ULONG_BE(W[ 5], data, 20); ++ GET_ULONG_BE(W[ 6], data, 24); ++ GET_ULONG_BE(W[ 7], data, 28); ++ GET_ULONG_BE(W[ 8], data, 32); ++ GET_ULONG_BE(W[ 9], data, 36); ++ GET_ULONG_BE(W[10], data, 40); ++ GET_ULONG_BE(W[11], data, 44); ++ GET_ULONG_BE(W[12], data, 48); ++ GET_ULONG_BE(W[13], data, 52); ++ GET_ULONG_BE(W[14], data, 56); ++ GET_ULONG_BE(W[15], data, 60); ++ ++#define FF0(x,y,z) ((x) ^ (y) ^ (z)) ++#define FF1(x,y,z) (((x) & (y)) | ( (x) & (z)) | ( (y) & (z))) ++ ++#define GG0(x,y,z) ( (x) ^ (y) ^ (z)) ++#define GG1(x,y,z) (((x) & (y)) | ( (~(x)) & (z)) ) ++ ++#define SHL(x,n) (((x) & 0xFFFFFFFF) << n) ++#define ROTL(x,n) (SHL((x),n) | ((x) >> (32 - n))) ++ ++#define P0(x) ((x) ^ ROTL((x),9) ^ ROTL((x),17)) ++#define P1(x) ((x) ^ ROTL((x),15) ^ ROTL((x),23)) ++ ++ for(j = 16; j < 68; j++ ) ++ { ++ Temp1 = W[j - 16] ^ W[j - 9]; ++ Temp2 = ROTL(W[j - 3], 15); ++ Temp3 = Temp1 ^ Temp2; ++ Temp4 = P1(Temp3); ++ Temp5 = ROTL(W[j - 13], 7 ) ^ W[j - 6]; ++ W[j] = Temp4 ^ Temp5; ++ } ++ ++ for(j = 0; j < 64; j++) ++ { ++ W1[j] = W[j] ^ W[j + 4]; ++ } ++ ++ A = ctx->state[0]; ++ B = ctx->state[1]; ++ C = ctx->state[2]; ++ D = ctx->state[3]; ++ E = ctx->state[4]; ++ F = ctx->state[5]; ++ G = ctx->state[6]; ++ H = ctx->state[7]; ++ ++ for(j = 0; j < 16; j++) ++ { ++ SS1 = ROTL((ROTL(A, 12) + E + ROTL(T[j], j)), 7); ++ SS2 = SS1 ^ ROTL(A, 12); ++ TT1 = FF0(A, B, C) + D + SS2 + W1[j]; ++ TT2 = GG0(E, F, G) + H + SS1 + W[j]; ++ D = C; ++ C = ROTL(B, 9); ++ B = A; ++ A = TT1; ++ H = G; ++ G = ROTL(F, 19); ++ F = E; ++ E = P0(TT2); ++ } ++ ++ for(j = 16; j < 64; j++) ++ { ++ SS1 = ROTL((ROTL(A, 12) + E + ROTL(T[j], j)), 7); ++ SS2 = SS1 ^ ROTL(A, 12); ++ TT1 = FF1(A, B, C) + D + SS2 + W1[j]; ++ TT2 = GG1(E, F, G) + H + SS1 + W[j]; ++ D = C; ++ C = ROTL(B, 9); ++ B = A; ++ A = TT1; ++ H = G; ++ G = ROTL(F, 19); ++ F = E; ++ E = P0(TT2); ++ } ++ ++ ctx->state[0] ^= A; ++ ctx->state[1] ^= B; ++ ctx->state[2] ^= C; ++ ctx->state[3] ^= D; ++ ctx->state[4] ^= E; ++ ctx->state[5] ^= F; ++ ctx->state[6] ^= G; ++ ctx->state[7] ^= H; ++} ++ ++/* ++ * SM3 process buffer ++ */ ++void sm3_update(struct sm3_context *ctx, unsigned char *input, int ilen) ++{ ++ int fill; ++ unsigned long left; ++ ++ if(ilen <= 0) ++ return; ++ ++ left = ctx->total[0] & 0x3F; ++ fill = 64 - left; ++ ++ ctx->total[0] += ilen; ++ ctx->total[0] &= 0xFFFFFFFF; ++ ++ if(ctx->total[0] < (unsigned long)ilen) ++ ctx->total[1]++; ++ ++ if(left && ilen >= fill) ++ { ++ sbi_memcpy((void *)(ctx->buffer + left), ++ (void *)input, fill); ++ sm3_process(ctx, ctx->buffer); ++ input += fill; ++ ilen -= fill; ++ left = 0; ++ } ++ ++ while(ilen >= 64) ++ { ++ sm3_process( ctx, input ); ++ input += 64; ++ ilen -= 64; ++ } ++ ++ if(ilen > 0) ++ { ++ sbi_memcpy((void*)(ctx->buffer + left), ++ (void*)input, ilen); ++ } ++} ++ ++static const unsigned char sm3_padding[64] = ++{ ++ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ++}; ++ ++/* ++ * SM3 final digest ++ */ ++void sm3_final(struct sm3_context *ctx, unsigned char output[32]) ++{ ++ unsigned long last, padn; ++ unsigned long high, low; ++ unsigned char msglen[8]; ++ ++ high = (ctx->total[0] >> 29) ++ | (ctx->total[1] << 3); ++ low = (ctx->total[0] << 3); ++ ++ PUT_ULONG_BE( high, msglen, 0 ); ++ PUT_ULONG_BE( low, msglen, 4 ); ++ ++ last = ctx->total[0] & 0x3F; ++ padn = 
(last < 56) ? (56 - last) : (120 - last);
++
++ sm3_update( ctx, (unsigned char *) sm3_padding, padn );
++ sm3_update( ctx, msglen, 8 );
++
++ PUT_ULONG_BE( ctx->state[0], output, 0 );
++ PUT_ULONG_BE( ctx->state[1], output, 4 );
++ PUT_ULONG_BE( ctx->state[2], output, 8 );
++ PUT_ULONG_BE( ctx->state[3], output, 12 );
++ PUT_ULONG_BE( ctx->state[4], output, 16 );
++ PUT_ULONG_BE( ctx->state[5], output, 20 );
++ PUT_ULONG_BE( ctx->state[6], output, 24 );
++ PUT_ULONG_BE( ctx->state[7], output, 28 );
++}
++
++/*
++ * output = SM3( input buffer )
++ */
++void sm3(unsigned char *input, int ilen,
++ unsigned char output[32])
++{
++ struct sm3_context ctx;
++
++ sm3_init(&ctx);
++ sm3_update(&ctx, input, ilen);
++ sm3_final(&ctx, output);
++
++ sbi_memset(&ctx, 0, sizeof(struct sm3_context));
++}
++
++/*
++ * SM3 HMAC context setup
++ */
++void sm3_hmac_init(struct sm3_context *ctx, unsigned char *key, int keylen)
++{
++ int i;
++ unsigned char sum[32];
++
++ //a key longer than the 64-byte block is first hashed down to 32 bytes
++ if(keylen > 64)
++ {
++ sm3(key, keylen, sum);
++ keylen = 32;
++ key = sum;
++ }
++
++ sbi_memset(ctx->ipad, 0x36, 64);
++ sbi_memset(ctx->opad, 0x5C, 64);
++
++ for(i = 0; i < keylen; i++)
++ {
++ ctx->ipad[i] = (unsigned char)(ctx->ipad[i] ^ key[i]);
++ ctx->opad[i] = (unsigned char)(ctx->opad[i] ^ key[i]);
++ }
++
++ sm3_init(ctx);
++ sm3_update(ctx, ctx->ipad, 64);
++
++ sbi_memset(sum, 0, sizeof(sum));
++}
++
++/*
++ * SM3 HMAC process buffer
++ */
++void sm3_hmac_update(struct sm3_context *ctx, unsigned char *input, int ilen)
++{
++ sm3_update(ctx, input, ilen);
++}
++
++/*
++ * SM3 HMAC final digest
++ */
++void sm3_hmac_final(struct sm3_context *ctx, unsigned char output[32])
++{
++ int hlen;
++ unsigned char tmpbuf[32];
++
++ hlen = 32;
++
++ sm3_final(ctx, tmpbuf);
++ sm3_init(ctx);
++ sm3_update(ctx, ctx->opad, 64);
++ sm3_update(ctx, tmpbuf, hlen);
++ sm3_final(ctx, output);
++
++ sbi_memset(tmpbuf, 0, sizeof(tmpbuf));
++}
++
++/*
++ * output = HMAC-SM3( hmac key, input buffer )
++ */
++void sm3_hmac(unsigned char *key, int keylen,
++ unsigned char *input, int ilen,
++ unsigned char output[32])
++{
++ struct sm3_context ctx;
++
++ sm3_hmac_init(&ctx, key, keylen);
++ sm3_hmac_update(&ctx, input, ilen);
++ sm3_hmac_final(&ctx, output);
++
++ sbi_memset(&ctx, 0, sizeof(struct sm3_context));
++}
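/* A minimal usage sketch (not taken from the patch itself) of the SM3
 * helpers above, assuming only the declarations in "sm/gm/sm3.h":
 * sm3() produces the 32-byte digest in one shot, sm3_hmac() the keyed MAC. */
#include "sm/gm/sm3.h"

static void sm3_usage_sketch(void)
{
	unsigned char msg[3] = { 'a', 'b', 'c' };
	unsigned char key[16] = { 0 };
	unsigned char digest[32], mac[32];

	sm3(msg, sizeof(msg), digest);                     /* one-shot hash */
	sm3_hmac(key, sizeof(key), msg, sizeof(msg), mac); /* HMAC-SM3 */
}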
+diff --git a/lib/sbi/sm/platform/pt_area/platform.c b/lib/sbi/sm/platform/pt_area/platform.c
+new file mode 100644
+index 0000000..a07b0d8
+--- /dev/null
++++ b/lib/sbi/sm/platform/pt_area/platform.c
+@@ -0,0 +1,31 @@
++#include "sm/platform/pt_area/platform_thread.h"
++#include "sm/pmp.h"
++#include "sm/sm.h"
++
++/**
++ * \brief Platform init uses two PMP regions:
++ * region 0 protects the secure monitor's memory, and the last
++ * region allows the host kernel to access all other memory.
++ */
++int platform_init()
++{
++ clear_pmp(0);
++
++ ///config PMP 0 to protect the security monitor
++ struct pmp_config_t pmp_config;
++ pmp_config.paddr = (uintptr_t)SM_BASE;
++ pmp_config.size = (unsigned long)SM_SIZE;
++ pmp_config.mode = PMP_A_NAPOT;
++ pmp_config.perm = PMP_NO_PERM;
++ set_pmp(0, pmp_config);
++
++ ///config the last PMP to allow the kernel to access memory
++ pmp_config.paddr = 0;
++ pmp_config.size = -1UL;
++ pmp_config.mode = PMP_A_NAPOT;
++ pmp_config.perm = PMP_R | PMP_W | PMP_X;
++ set_pmp(NPMP-1, pmp_config);
++
++ return 0;
++}
+diff --git a/lib/sbi/sm/platform/pt_area/platform_thread.c b/lib/sbi/sm/platform/pt_area/platform_thread.c
+new file mode 100644
+index 0000000..43161b3
+--- /dev/null
++++ b/lib/sbi/sm/platform/pt_area/platform_thread.c
+@@ -0,0 +1,56 @@
++#include "sm/platform/pt_area/platform_thread.h"
++#include "sm/enclave.h"
++#include "sbi/riscv_encoding.h"
++
++void platform_enter_enclave_world()
++{
++ ///TODO: add a register to indicate whether we are in the enclave world or not
++ return;
++}
++
++void platform_exit_enclave_world()
++{
++ ///TODO: add a register to indicate whether we are in the enclave world or not
++ return;
++}
++
++int platform_check_in_enclave_world()
++{
++ ///TODO: add a register to indicate whether we are in the enclave world or not
++ return 0;
++}
++
++/**
++ * \brief Compare the current satp with the enclave's ptbr (encl_ptbr);
++ * the two are supposed to be equal.
++ *
++ * \param enclave the enclave to check
++ */
++int platform_check_enclave_authentication(struct enclave_t* enclave)
++{
++ if(enclave->thread_context.encl_ptbr != csr_read(CSR_SATP))
++ return -1;
++ return 0;
++}
++
++/**
++ * \brief Switch to the enclave's ptbr (enclave_ptbr).
++ *
++ * \param thread the current enclave thread.
++ * \param enclave_ptbr the enclave ptbr value.
++ */
++void platform_switch_to_enclave_ptbr(struct thread_state_t* thread, uintptr_t enclave_ptbr)
++{
++ csr_write(CSR_SATP, enclave_ptbr);
++}
++
++/**
++ * \brief Switch to the host's ptbr (host_ptbr).
++ *
++ * \param thread the current enclave thread.
++ * \param host_ptbr the host ptbr value.
++ */
++void platform_switch_to_host_ptbr(struct thread_state_t* thread, uintptr_t host_ptbr)
++{
++ csr_write(CSR_SATP, host_ptbr);
++}
+diff --git a/lib/sbi/sm/pmp.c b/lib/sbi/sm/pmp.c
+new file mode 100644
+index 0000000..66cc02c
+--- /dev/null
++++ b/lib/sbi/sm/pmp.c
+@@ -0,0 +1,240 @@
++#include "sm/pmp.h"
++#include "sm/ipi.h"
++#include "sbi/riscv_asm.h"
++#include "sbi/sbi_pmp.h"
++#include "sbi/sbi_console.h"
++
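/* A standalone sketch (assuming the standard RISC-V PMP NAPOT encoding, as
 * used by set_pmp() below) of how a naturally aligned power-of-two region
 * [paddr, paddr + size) is packed into a pmpaddr register: the address is
 * shifted right by 2 and the trailing one-bits encode log2(size). */
#include <stdint.h>

static uintptr_t napot_encode(uintptr_t paddr, uintptr_t size)
{
	/* precondition: size is a power of two >= 8, paddr is size-aligned */
	return (paddr | ((size >> 1) - 1)) >> 2;
}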
++/**
++ * \brief Set the pmp on the current hart and sync the update to all other harts.
++ *
++ * \param pmp_idx_arg The pmp index.
++ * \param pmp_config_arg The pmp config.
++ */
++void set_pmp_and_sync(int pmp_idx_arg, struct pmp_config_t pmp_config_arg)
++{
++ struct pmp_data_t pmp_data;
++ u32 source_hart = current_hartid();
++
++ //set current hart's pmp
++ set_pmp(pmp_idx_arg, pmp_config_arg);
++ //sync all other harts
++ SBI_PMP_DATA_INIT(&pmp_data, pmp_config_arg, pmp_idx_arg, source_hart);
++ sbi_send_pmp(0xFFFFFFFF&(~(1<<source_hart)), 0, &pmp_data);
++ return;
++}
++
++/**
++ * \brief Program a PMP register with the given configuration.
++ *
++ * \param pmp_idx the index of the target PMP register
++ * \param pmp_cfg_t the mode/permission/address/size to program
++ */
++void set_pmp(int pmp_idx, struct pmp_config_t pmp_cfg_t)
++{
++ uintptr_t pmp_address = 0;
++ uintptr_t pmp_config = ((pmp_cfg_t.mode & PMP_A) | (pmp_cfg_t.perm & (PMP_R|PMP_W|PMP_X)))
++ << ((uintptr_t)PMPCFG_BIT_NUM * (pmp_idx % PMP_PER_CFG_REG));
++
++ switch(pmp_cfg_t.mode)
++ {
++ case PMP_A_TOR:
++ pmp_address = pmp_cfg_t.paddr;
++ break;
++ case PMP_A_NA4:
++ pmp_address = pmp_cfg_t.paddr;
++ break;
++ case PMP_A_NAPOT:
++ pmp_address = (pmp_cfg_t.paddr | ((pmp_cfg_t.size>>1)-1)) >> 2;
++ break;
++ case PMP_OFF:
++ pmp_address = 0;
++ break;
++ default:
++ pmp_address = 0;
++ break;
++ }
++ set_pmp_reg(pmp_idx, &pmp_address, &pmp_config);
++
++ return;
++}
++
++/**
++ * \brief Clear the configuration of a PMP register.
++ *
++ * \param pmp_idx the index of target PMP register
++ */
++void clear_pmp(int pmp_idx)
++{
++ struct pmp_config_t pmp_cfg;
++
++ pmp_cfg.mode = PMP_OFF;
++ pmp_cfg.perm = PMP_NO_PERM;
++ pmp_cfg.paddr = 0;
++ pmp_cfg.size = 0;
++ set_pmp(pmp_idx, pmp_cfg);
++
++ return;
++}
++
++/**
++ * \brief Get the configuration of a pmp register (pmp_idx)
++ *
++ * \param pmp_idx the index of target PMP register
++ */
++struct pmp_config_t get_pmp(int pmp_idx)
++{
++ struct pmp_config_t pmp = {0,};
++ uintptr_t pmp_address = 0;
++ uintptr_t pmp_config = 0;
++ unsigned long order = 0;
++ unsigned long size = 0;
++
++ get_pmp_reg(pmp_idx, &pmp_address, &pmp_config);
++
++ pmp_config >>= (uintptr_t)PMPCFG_BIT_NUM * (pmp_idx % PMP_PER_CFG_REG);
++ pmp_config &= PMPCFG_BITS;
++ switch(pmp_config & PMP_A)
++ {
++ case PMP_A_TOR:
++ break;
++ case PMP_A_NA4:
++ size = 4;
++ break;
++ case PMP_A_NAPOT:
++ while(pmp_address & 1)
++ {
++ order += 1;
++ pmp_address >>= 1;
++ }
++ order += 3;
++ size = 1 << order;
++ pmp_address <<= (order-1);
++ break;
++ case PMP_OFF:
++ pmp_address = 0;
++ size = 0;
++ break;
++ }
++
++ pmp.mode = pmp_config & PMP_A;
++ pmp.perm = pmp_config & (PMP_R | PMP_W | PMP_X);
++ pmp.paddr = pmp_address;
++ pmp.size = size;
++
++ return pmp;
++}
++
++/**
++ * \brief Check whether a range is valid as a PMP region,
++ * e.g., the size must be a power of two.
++ *
++ * \param paddr the start address of the PMP region
++ * \param size the size of the PMP region
++ */
++int illegal_pmp_addr(uintptr_t paddr, uintptr_t size)
++{
++ if(paddr & (size - 1))
++ return -1;
++
++ if((size == 0) || (size & (size - 1)))
++ return -1;
++
++ if(size < RISCV_PGSIZE)
++ return -1;
++
++ return 0;
++}
++
++//check whether two regions overlap
++int region_overlap(uintptr_t pa_0, uintptr_t size_0, uintptr_t pa_1, uintptr_t size_1)
++{
++ return (pa_0 <= pa_1 && (pa_0 + size_0) > pa_1) || (pa_1 <= pa_0 && (pa_1 + size_1) > pa_0);
++}
++
++//check whether one region contains the other
++int region_contain(uintptr_t pa_0, uintptr_t size_0, uintptr_t pa_1, uintptr_t size_1)
++{
++ return (pa_0 <= pa_1 && (pa_0 + size_0) >= (pa_1 + size_1))
++ || (pa_1 <= pa_0 && (pa_1 + size_1) >= (pa_0 + size_0));
++}
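/* A standalone restatement (illustrative sketch, assuming RISCV_PGSIZE ==
 * 4096) of the rules illegal_pmp_addr() above enforces: a PMP region must
 * have a power-of-two size of at least one page and be naturally aligned
 * to that size. */
#include <stdint.h>

static int pmp_region_is_valid(uintptr_t paddr, uintptr_t size)
{
	int pow2 = (size != 0) && ((size & (size - 1)) == 0);
	int aligned = (paddr & (size - 1)) == 0;
	return pow2 && aligned && size >= 4096;
}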
+diff --git a/lib/sbi/sm/relay_page.c b/lib/sbi/sm/relay_page.c
+new file mode 100644
+index 0000000..1791b98
+--- /dev/null
++++ b/lib/sbi/sm/relay_page.c
+@@ -0,0 +1,221 @@
++#include "sbi/sbi_console.h"
++#include "sm/sm.h"
++#include "sm/enclave.h"
++#include "sm/enclave_vm.h"
++#include "sm/server_enclave.h"
++#include "sm/ipi.h"
++#include "sm/relay_page.h"
++#include "sm/enclave_mm.h"
++
++/**************************************************************/
++/* called by enclave */
++/**************************************************************/
++
++/**
++ * \brief The monitor is responsible for changing relay page ownership.
++ * This is done in two phases: first, unmap the relay page from the caller
++ * enclave and map it for the subsequent enclave asynchronously;
++ * second, update the ownership entry in the relay page linked memory.
++ *
++ * Note: relay_page_addr_u is the virtual address of the relay page, whereas
++ * the relay page entry binds the enclave name to the physical address.
++ *
++ * In the current version, the first enclave in a call chain can hold only a
++ * single relay page region, but it can split that region into at most N pieces
++ * and transfer them to different enclaves. Subsequent enclaves can receive
++ * multiple relay page entries.
++ *
++ * \param enclave The enclave structure.
++ * \param relay_page_addr_u The relay page address.
++ * \param relay_page_size The relay page size.
++ * \param enclave_name_u The given enclave name.
++ */
++uintptr_t transfer_relay_page(struct enclave_t *enclave, unsigned long relay_page_addr_u, unsigned long relay_page_size, char *enclave_name_u)
++{
++ uintptr_t ret = 0;
++ char *enclave_name = NULL;
++ unsigned long relay_page_addr = 0;
++
++ enclave_name = va_to_pa((uintptr_t*)(enclave->root_page_table), enclave_name_u);
++ relay_page_addr = (unsigned long)va_to_pa((uintptr_t*)(enclave->root_page_table), (char *)relay_page_addr_u);
++ if(!enclave_name)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++ //unmap the relay page for the caller enclave
++ unmap((uintptr_t*)(enclave->root_page_table), relay_page_addr_u, relay_page_size);
++ for (int kk = 0; kk < 5; kk++)
++ {
++ if(enclave->mm_arg_paddr[kk] == relay_page_addr)
++ {
++ enclave->mm_arg_paddr[kk] = 0;
++ enclave->mm_arg_size[kk] = 0;
++ }
++ }
++
++ //change the relay page ownership
++ if (change_relay_page_ownership((unsigned long)relay_page_addr, relay_page_size, enclave_name) < 0)
++ {
++ ret = -1UL;
++ sbi_bug("M mode: transfer_relay_page: change relay page ownership failed\n");
++ }
++ release_enclave_metadata_lock();
++ return ret;
++failed:
++ release_enclave_metadata_lock();
++ sbi_bug("M MODE: transfer_relay_page: failed\n");
++ return ret;
++}
++
++/**
++ * \brief Handle the asynchronous enclave call: obtain the relay page's
++ * virtual address and size, then invoke transfer_relay_page.
++ *
++ * \param enclave_name The callee enclave name
++ */
++uintptr_t asyn_enclave_call(uintptr_t* regs, uintptr_t enclave_name, uintptr_t arg)
++{
++ uintptr_t ret = 0;
++ struct enclave_t *enclave = NULL;
++ int eid = 0;
++ if(check_in_enclave_world() < 0)
++ {
++ sbi_bug("M mode: asyn_enclave_call: CPU not in the enclave mode\n");
++ return -1UL;
++ }
++
++ acquire_enclave_metadata_lock();
++
++ eid = get_curr_enclave_id();
++ enclave = __get_enclave(eid);
++ if(!enclave)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++ struct call_enclave_arg_t call_arg;
++ struct call_enclave_arg_t* call_arg0 = va_to_pa((uintptr_t*)(enclave->root_page_table), (void*)arg);
++ if(!call_arg0)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++ copy_from_host(&call_arg, call_arg0, sizeof(struct call_enclave_arg_t));
++ if (transfer_relay_page(enclave, call_arg.req_vaddr, call_arg.req_size, (char *)enclave_name) < 0)
++ {
++ sbi_bug("M mode: asyn_enclave_call: transfer_relay_page failed\n");
++ goto failed;
++ }
++ release_enclave_metadata_lock();
++ return ret;
++failed:
++ release_enclave_metadata_lock();
++ sbi_bug("M MODE: asyn_enclave_call: failed\n");
++ return ret;
++}
++
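/* A standalone sketch of the size arithmetic that split_mem_region() below
 * performs when it cuts one relay page region [paddr, paddr + size) at
 * split_addr into [paddr, split_addr) and [split_addr, paddr + size).
 * The struct and function names here are hypothetical. */
#include <stdint.h>

struct region { uintptr_t paddr, size; };

static void split_region(struct region r, uintptr_t split_addr,
                         struct region *lo, struct region *hi)
{
	lo->paddr = r.paddr;
	lo->size = split_addr - r.paddr;           /* first piece keeps the base */
	hi->paddr = split_addr;
	hi->size = r.paddr + r.size - split_addr;  /* remainder after the cut */
}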
++/**
++ * \brief Split a relay page region into two pieces:
++ * update the owning entry in the global linked memory and add a new entry
++ * for the split-off piece. Also update enclave->mm_arg_paddr and
++ * enclave->mm_arg_size. If the number of relay page regions owned by a
++ * single enclave would exceed RELAY_PAGE_NUM, an error is reported.
++ *
++ * \param mem_addr_u The address of the memory region to split.
++ * \param mem_size The size of the memory region.
++ * \param split_addr_u The split point within the memory region.
++ */
++uintptr_t split_mem_region(uintptr_t *regs, uintptr_t mem_addr_u, uintptr_t mem_size, uintptr_t split_addr_u)
++{
++ uintptr_t ret = 0;
++ struct enclave_t *enclave = NULL;
++ uintptr_t mem_addr = 0, split_addr = 0;
++ int eid = 0;
++ if(check_in_enclave_world() < 0)
++ {
++ sbi_bug("M mode: split_mem_region: CPU not in the enclave mode\n");
++ return -1UL;
++ }
++
++ acquire_enclave_metadata_lock();
++
++ eid = get_curr_enclave_id();
++ enclave = __get_enclave(eid);
++ if(!enclave)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++ if((split_addr_u < mem_addr_u) || (split_addr_u > (mem_addr_u + mem_size)))
++ {
++ sbi_bug("M mode: split_mem_region: split address is not in the relay page region\n");
++ ret = -1UL;
++ goto failed;
++ }
++ mem_addr = (unsigned long)va_to_pa((uintptr_t*)(enclave->root_page_table), (char *)mem_addr_u);
++ split_addr = (unsigned long)va_to_pa((uintptr_t*)(enclave->root_page_table), (char *)split_addr_u);
++ int found_corres_entry = 0;
++ for(int kk = 0; kk < RELAY_PAGE_NUM; kk++)
++ {
++ if ((enclave->mm_arg_paddr[kk] == mem_addr) && (enclave->mm_arg_size[kk] == mem_size))
++ {
++ unsigned long split_size = enclave->mm_arg_paddr[kk] + enclave->mm_arg_size[kk] - split_addr;
++ int found_empty_entry = 0;
++ //free the old relay page entry in the global linked memory
++ __free_relay_page_entry(enclave->mm_arg_paddr[kk], enclave->mm_arg_size[kk]);
++ //shrink the relay page region in the enclave metadata
++ enclave->mm_arg_size[kk] = split_addr - enclave->mm_arg_paddr[kk];
++ //add the adjusted relay page entry in the global linked memory
++ __alloc_relay_page_entry(enclave->enclave_name, enclave->mm_arg_paddr[kk], enclave->mm_arg_size[kk]);
++ //find an empty relay page entry for this enclave
++ sbi_printf("M mode: split_mem_region1: split addr %lx split size %lx\n", enclave->mm_arg_paddr[kk], enclave->mm_arg_size[kk]);
++ for(int jj = kk; jj < RELAY_PAGE_NUM; jj++)
++ {
++ if ((enclave->mm_arg_paddr[jj] == 0) && (enclave->mm_arg_size[jj] == 0))
++ {
++ //add the newly split relay page entry in the enclave metadata
++ enclave->mm_arg_paddr[jj] = split_addr;
++ enclave->mm_arg_size[jj] = split_size;
++ sbi_printf("M mode: split_mem_region2: split addr %lx split size %lx\n", enclave->mm_arg_paddr[jj], enclave->mm_arg_size[jj]);
++ __alloc_relay_page_entry(enclave->enclave_name, enclave->mm_arg_paddr[jj], enclave->mm_arg_size[jj]);
++ found_empty_entry = 1;
++ break;
++ }
++ }
++ if (!found_empty_entry)
++ {
++ sbi_bug("M mode: split_mem_region: cannot find an empty entry for the split relay page\n");
++ ret = -1UL;
++ goto failed;
++ }
++ found_corres_entry = 1;
++ break;
++ }
++ }
++ if (!found_corres_entry)
++ {
++ sbi_bug("M mode: split_mem_region: cannot find the corresponding relay page region\n");
++ ret = -1UL;
++ goto failed;
++ }
++ release_enclave_metadata_lock();
++ return ret;
++failed:
++ release_enclave_metadata_lock();
++ sbi_bug("M MODE: split_mem_region: failed\n");
++ return ret;
++}
++
++int free_all_relay_page(unsigned long *mm_arg_paddr, unsigned long *mm_arg_size)
++{
++ int ret = 0;
++ for(int kk = 0; kk < RELAY_PAGE_NUM; kk++)
++ {
++ if (mm_arg_paddr[kk])
++ {
++ ret = __free_secure_memory(mm_arg_paddr[kk],
mm_arg_size[kk]); ++ ret = __free_relay_page_entry(mm_arg_paddr[kk], mm_arg_size[kk]); ++ } ++ } ++ return ret; ++} +\ No newline at end of file +diff --git a/lib/sbi/sm/server_enclave.c b/lib/sbi/sm/server_enclave.c +new file mode 100644 +index 0000000..2107188 +--- /dev/null ++++ b/lib/sbi/sm/server_enclave.c +@@ -0,0 +1,469 @@ ++#include "sm/sm.h" ++#include "sm/enclave.h" ++#include "sm/enclave_vm.h" ++#include "sm/enclave_mm.h" ++#include "sm/server_enclave.h" ++#include "sm/ipi.h" ++#include "sbi/sbi_string.h" ++#include "sbi/sbi_console.h" ++ ++struct link_mem_t* server_enclave_head = NULL; ++struct link_mem_t* server_enclave_tail = NULL; ++ ++// Compare the server enclave name. ++static int server_name_cmp(char* name1, char* name2) ++{ ++ for(int i=0; inext_link_mem) ++ { ++ for(int i = 0; i < (cur->slab_num); i++) ++ { ++ server_enclave = (struct server_enclave_t*)(cur->addr) + i; ++ if(server_enclave->entity && server_name_cmp(server_name, server_enclave->server_name)==0) ++ { ++ sbi_bug("M mode: __alloc_server_enclave: server already existed!\n"); ++ server_enclave = (void*)(-1UL); ++ goto failed; ++ } ++ } ++ } ++ ++ found = 0; ++ for(cur = server_enclave_head; cur != NULL; cur = cur->next_link_mem) ++ { ++ for(int i = 0; i < (cur->slab_num); i++) ++ { ++ server_enclave = (struct server_enclave_t*)(cur->addr) + i; ++ if(!(server_enclave->entity)) ++ { ++ sbi_memcpy(server_enclave->server_name, server_name, NAME_LEN); ++ server_enclave->entity = enclave; ++ found = 1; ++ break; ++ } ++ } ++ if(found) ++ break; ++ } ++ ++ //don't have enough enclave metadata ++ if(!found) ++ { ++ next = add_link_mem(&server_enclave_tail); ++ if(next == NULL) ++ { ++ sbi_bug("M mode: __alloc_server_enclave: don't have enough mem\n"); ++ server_enclave = NULL; ++ goto failed; ++ } ++ server_enclave = (struct server_enclave_t*)(next->addr); ++ sbi_memcpy(server_enclave->server_name, server_name, NAME_LEN); ++ server_enclave->entity = enclave; ++ } ++ ++ return server_enclave; ++ ++failed: ++ if(enclave) ++ __free_enclave(enclave->eid); ++ if(server_enclave == (void *)-1UL) ++ return (void *)-1UL; ++ if(server_enclave) ++ sbi_memset((void*)server_enclave, 0, sizeof(struct server_enclave_t)); ++ ++ return NULL; ++} ++ ++/** ++ * \brief Get the server enclav by eid. ++ * ++ * \param eid The server enclave id. ++ */ ++static struct server_enclave_t* __get_server_enclave(int eid) ++{ ++ struct link_mem_t *cur; ++ struct server_enclave_t *server_enclave; ++ int found; ++ ++ found = 0; ++ for(cur = server_enclave_head; cur != NULL; cur = cur->next_link_mem) ++ { ++ for(int i=0; i < (cur->slab_num); ++i) ++ { ++ server_enclave = (struct server_enclave_t*)(cur->addr) + i; ++ if(server_enclave->entity && server_enclave->entity->eid == eid) ++ { ++ found = 1; ++ break; ++ } ++ } ++ } ++ ++ //haven't alloc this eid ++ if(!found) ++ { ++ sbi_bug("M mode: __get_server_enclave: haven't alloc this enclave:%d\r\n", eid); ++ server_enclave = NULL; ++ } ++ ++ return server_enclave; ++} ++ ++/** ++ * \brief Get the server enclave by the given name. ++ * ++ * \param server_name The given server enclave name. 
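/* A hypothetical standalone sketch of the bounded name comparison that
 * server_name_cmp() above relies on (the names and loop body here are
 * assumptions, not the patch's code): compare at most name_len bytes,
 * stop at a NUL terminator, and return 0 on equality to match the
 * "server_name_cmp(a, b) == 0" call sites below. */
static int name_cmp_sketch(const char *a, const char *b, int name_len)
{
	for (int i = 0; i < name_len; ++i) {
		if (a[i] != b[i])
			return 1;   /* names differ */
		if (a[i] == '\0')
			return 0;   /* both terminated, equal */
	}
	return 0;                   /* equal over the full name_len bytes */
}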
++ */ ++static struct server_enclave_t* __get_server_enclave_by_name(char* server_name) ++{ ++ struct link_mem_t *cur; ++ struct server_enclave_t *server_enclave; ++ int i, found; ++ ++ found = 0; ++ for(cur = server_enclave_head; cur != NULL; cur = cur->next_link_mem) ++ { ++ for(i=0; i < (cur->slab_num); ++i) ++ { ++ server_enclave = (struct server_enclave_t*)(cur->addr) + i; ++ if(server_enclave->entity && server_name_cmp(server_enclave->server_name, server_name)==0) ++ { ++ found = 1; ++ break; ++ } ++ } ++ } ++ ++ //haven't alloc this eid ++ if(!found) ++ { ++ sbi_bug("M mode: __get_server_enclave_by_name: haven't alloc this enclave:%s\n", server_name); ++ server_enclave = NULL; ++ } ++ ++ return server_enclave; ++} ++ ++/**************************************************************/ ++/* called by host */ ++/**************************************************************/ ++/** ++ * \brief Create the server enclave with the given create argument. ++ * ++ * \param create_args The create argument using in the creating server enclave phase. ++ */ ++uintptr_t create_server_enclave(enclave_create_param_t create_args) ++{ ++ struct enclave_t* enclave = NULL; ++ struct server_enclave_t* server_enclave = NULL; ++ uintptr_t ret = 0; ++ int need_free_secure_memory = 0; ++ ++ acquire_enclave_metadata_lock(); ++ ++ if(!enable_enclave()) ++ { ++ ret = ENCLAVE_ERROR; ++ goto failed; ++ } ++ if((create_args.paddr & (RISCV_PGSIZE-1)) || (create_args.size & (RISCV_PGSIZE-1)) || create_args.size < RISCV_PGSIZE) ++ { ++ ret = ENCLAVE_ERROR; ++ goto failed; ++ } ++ ++ //check enclave memory layout ++ if(check_and_set_secure_memory(create_args.paddr, create_args.size) != 0) ++ { ++ ret = ENCLAVE_ERROR; ++ goto failed; ++ } ++ need_free_secure_memory = 1; ++ ++ //check enclave memory layout ++ if(check_enclave_layout(create_args.paddr + RISCV_PGSIZE, 0, -1UL, create_args.paddr, create_args.paddr + create_args.size) != 0) ++ { ++ ret = ENCLAVE_ERROR; ++ goto failed; ++ } ++ ++ server_enclave = __alloc_server_enclave(create_args.name); ++ if(server_enclave == (void*)(-1UL)) ++ { ++ ret = ENCLAVE_ERROR; ++ goto failed; ++ } ++ if(!server_enclave) ++ { ++ ret = ENCLAVE_NO_MEM; ++ goto failed; ++ } ++ ++ enclave = server_enclave->entity; ++ enclave->entry_point = create_args.entry_point; ++ enclave->ocall_func_id = create_args.ecall_arg0; ++ enclave->ocall_arg0 = create_args.ecall_arg1; ++ enclave->ocall_arg1 = create_args.ecall_arg2; ++ enclave->ocall_syscall_num = create_args.ecall_arg3; ++ enclave->host_ptbr = csr_read(CSR_SATP); ++ enclave->root_page_table = create_args.paddr + RISCV_PGSIZE; ++ enclave->thread_context.encl_ptbr = ((create_args.paddr+RISCV_PGSIZE) >> RISCV_PGSHIFT) | SATP_MODE_CHOICE; ++ enclave->type = SERVER_ENCLAVE; ++ //we directly set server_enclave's state as RUNNABLE as it won't be called by run_enclave call ++ enclave->state = RUNNABLE; ++ enclave->caller_eid = -1; ++ enclave->top_caller_eid = -1; ++ enclave->cur_callee_eid = -1; ++ ++ //traverse vmas ++ struct pm_area_struct* pma = (struct pm_area_struct*)(create_args.paddr); ++ struct vm_area_struct* vma = (struct vm_area_struct*)(create_args.paddr + sizeof(struct pm_area_struct)); ++ pma->paddr = create_args.paddr; ++ pma->size = create_args.size; ++ pma->free_mem = create_args.free_mem; ++ pma->pm_next = NULL; ++ enclave->pma_list = pma; ++ traverse_vmas(enclave->root_page_table, vma); ++ ++ //FIXME: here we assume there are exactly text(include text/data/bss) vma and stack vma ++ while(vma) ++ { ++ if(vma->va_start == 
ENCLAVE_DEFAULT_TEXT_BASE) ++ { ++ enclave->text_vma = vma; ++ } ++ if(vma->va_end == ENCLAVE_DEFAULT_STACK_BASE) ++ { ++ enclave->stack_vma = vma; ++ enclave->_stack_top = enclave->stack_vma->va_start; ++ } ++ vma->pma = pma; ++ vma = vma->vm_next; ++ } ++ if(enclave->text_vma) ++ enclave->text_vma->vm_next = NULL; ++ if(enclave->stack_vma) ++ enclave->stack_vma->vm_next = NULL; ++ enclave->_heap_top = ENCLAVE_DEFAULT_HEAP_BASE; ++ enclave->heap_vma = NULL; ++ enclave->mmap_vma = NULL; ++ ++ enclave->free_pages = NULL; ++ enclave->free_pages_num = 0; ++ uintptr_t free_mem = create_args.paddr + create_args.size - RISCV_PGSIZE; ++ while(free_mem >= create_args.free_mem) ++ { ++ struct page_t *page = (struct page_t*)free_mem; ++ page->paddr = free_mem; ++ page->next = enclave->free_pages; ++ enclave->free_pages = page; ++ enclave->free_pages_num += 1; ++ free_mem -= RISCV_PGSIZE; ++ } ++ // check shm ++ if(create_args.shm_paddr && create_args.shm_size && ++ !(create_args.shm_paddr & (RISCV_PGSIZE-1)) && !(create_args.shm_size & (RISCV_PGSIZE-1))) ++ { ++ mmap((uintptr_t*)(enclave->root_page_table), &(enclave->free_pages), ENCLAVE_DEFAULT_SHM_BASE, create_args.shm_paddr, create_args.shm_size); ++ enclave->shm_paddr = create_args.shm_paddr; ++ enclave->shm_size = create_args.shm_size; ++ } ++ else ++ { ++ enclave->shm_paddr = 0; ++ enclave->shm_size = 0; ++ } ++ copy_word_to_host((unsigned int*)create_args.eid_ptr, enclave->eid); ++ release_enclave_metadata_lock(); ++ return ret; ++ ++failed: ++ if(need_free_secure_memory) ++ { ++ free_secure_memory(create_args.paddr, create_args.size); ++ } ++ release_enclave_metadata_lock(); ++ return ret; ++} ++ ++/** ++ * \brief Destroy the server enclave if server enclave is not runnable. ++ * ++ * \param regs The host regs. ++ * \param eid The server enclave id. ++ */ ++uintptr_t destroy_server_enclave(uintptr_t* regs, unsigned int eid) ++{ ++ uintptr_t retval = 0; ++ struct enclave_t *enclave = NULL; ++ struct server_enclave_t *server_enclave = NULL; ++ struct pm_area_struct* pma = NULL; ++ int need_free_enclave_memory = 0; ++ ++ acquire_enclave_metadata_lock(); ++ ++ server_enclave = __get_server_enclave(eid); ++ if(!server_enclave) ++ { ++ sbi_bug("M mode: destroy_server_enclave: server%d is not found\r\n", eid); ++ retval = -1UL; ++ goto out; ++ } ++ enclave = server_enclave->entity; ++ if(!enclave || enclave->state < FRESH) ++ { ++ sbi_bug("M mode: destroy_server_enclave: server%d can not be accessed\r\n", eid); ++ retval = -1UL; ++ goto out; ++ } ++ sbi_memset((void*)server_enclave, 0, sizeof(struct server_enclave_t)); ++ ++ if(enclave->state != RUNNING) ++ { ++ pma = enclave->pma_list; ++ need_free_enclave_memory = 1; ++ __free_enclave(eid); ++ } ++ else ++ { ++ //TODO: use ipi to stop the server enclave ++ sbi_bug("M mode: destroy_server_enclave: server enclave is running, can not destroy\n"); ++ } ++ ++out: ++ release_enclave_metadata_lock(); ++ if(need_free_enclave_memory) ++ { ++ free_enclave_memory(pma); ++ } ++ ++ return retval; ++} ++/**************************************************************/ ++/* called by enclave */ ++/**************************************************************/ ++/** ++ * \brief Acquire the server enclave and retrieve the corresponding server enclave handler. ++ * ++ * \param server_name_u The acquired server enclave name. 
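/* A standalone sketch of the slab-walk pattern the server enclave lookup
 * helpers above share: iterate every link_mem_t block, then every slot in
 * the block, stopping at the first slot matching a predicate. The types and
 * names here are hypothetical simplifications of the patch's link_mem_t. */
struct slab_block { struct slab_block *next; int slab_num; void *addr; };

static void *slab_find(struct slab_block *head, unsigned long slot_size,
                       int (*match)(void *slot))
{
	for (struct slab_block *cur = head; cur; cur = cur->next)
		for (int i = 0; i < cur->slab_num; ++i) {
			void *slot = (char *)cur->addr + i * slot_size;
			if (match(slot))
				return slot;
		}
	return 0; /* no matching slot in any block */
}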
++ */
++uintptr_t acquire_server_enclave(uintptr_t *regs, char* server_name_u)
++{
++ uintptr_t ret = 0;
++ struct enclave_t *enclave = NULL;
++ struct server_enclave_t *server_enclave = NULL;
++ char *server_name = NULL;
++ int eid = 0;
++ if(check_in_enclave_world() < 0)
++ {
++ return -1UL;
++ }
++
++ acquire_enclave_metadata_lock();
++
++ eid = get_curr_enclave_id();
++ enclave = __get_enclave(eid);
++ if(!enclave)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++
++ server_name = va_to_pa((uintptr_t*)(enclave->root_page_table), server_name_u);
++ if(!server_name)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++ server_enclave = __get_server_enclave_by_name(server_name);
++ if(!server_enclave)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++ ret = server_enclave->entity->eid;
++
++ release_enclave_metadata_lock();
++ return ret;
++
++failed:
++ release_enclave_metadata_lock();
++ sbi_bug("M MODE: acquire_server_enclave: acquire enclave failed\n");
++ return ret;
++}
++
++/**
++ * \brief Retrieve the eid of the caller enclave.
++ */
++uintptr_t get_caller_id(uintptr_t* regs)
++{
++ uintptr_t ret = 0;
++ struct enclave_t *enclave = NULL;
++ int eid = 0;
++ if(check_in_enclave_world() < 0)
++ {
++ return -1UL;
++ }
++
++ acquire_enclave_metadata_lock();
++
++ eid = get_curr_enclave_id();
++ enclave = __get_enclave(eid);
++ if(!enclave)
++ {
++ ret = -1UL;
++ goto failed;
++ }
++
++ ret = enclave->caller_eid;
++
++ release_enclave_metadata_lock();
++ return ret;
++
++failed:
++ release_enclave_metadata_lock();
++ sbi_bug("M MODE: get_caller_id: failed\n");
++ return ret;
++}
+diff --git a/lib/sbi/sm/sm.ac b/lib/sbi/sm/sm.ac
+new file mode 100644
+index 0000000..2c62c70
+--- /dev/null
++++ b/lib/sbi/sm/sm.ac
+@@ -0,0 +1,9 @@
++AC_ARG_WITH([target_platform], AS_HELP_STRING([--with-target-platform], [Set a specific platform for the sm to build with]),
++ [AC_SUBST([TARGET_PLATFORM], $with_target_platform, [Set a specific platform for the sm to build with])],
++ [AC_SUBST([TARGET_PLATFORM], default, [Set a specific platform for the sm to build with])])
++AS_IF([test "$TARGET_PLATFORM" == "default"], [
++ AC_DEFINE([pt_area_enabled],,[Define if the Penglai uses PT Area])
++],[
++ AC_DEFINE([${TARGET_PLATFORM}_enabled],,[Define if the Penglai uses other platform])
++])
++AC_DEFINE([SOFTWARE_PT_AREA],,[Define as we always use the SW PT AREA by default])
+diff --git a/lib/sbi/sm/sm.c b/lib/sbi/sm/sm.c
+new file mode 100644
+index 0000000..4572ab5
+--- /dev/null
++++ b/lib/sbi/sm/sm.c
+@@ -0,0 +1,1374 @@
++#include "sbi/riscv_atomic.h"
++#include "sbi/sbi_tvm.h"
++#include "sbi/sbi_console.h"
++#include "sm/sm.h"
++#include "sm/pmp.h"
++#include "sm/enclave.h"
++#include "sm/enclave_vm.h"
++#include "sm/enclave_mm.h"
++#include "sm/server_enclave.h"
++#include "sm/relay_page.h"
++#include "sm/platform/pt_area/platform.h"
++
++/**
++ * Init the secure monitor by invoking platform_init
++ */
++void sm_init()
++{
++ platform_init();
++}
++
++//Init the monitor-organized memory.
++uintptr_t sm_mm_init(uintptr_t paddr, uintptr_t size)
++{
++ uintptr_t retval = 0;
++
++ retval = mm_init(paddr, size);
++
++ return retval;
++}
++
++//Extend the monitor-organized memory.
++uintptr_t sm_mm_extend(uintptr_t paddr, uintptr_t size) ++{ ++ uintptr_t retval = 0; ++ ++ retval = mm_init(paddr, size); ++ ++ return retval; ++} ++ ++uintptr_t pt_area_base = 0; ++uintptr_t pt_area_size = 0; ++uintptr_t mbitmap_base = 0; ++uintptr_t mbitmap_size = 0; ++uintptr_t pgd_order = 0; ++uintptr_t pmd_order = 0; ++spinlock_t mbitmap_lock = SPINLOCK_INIT; ++ ++/** ++ * \brief This function validates whether the enclave environment is ready ++ * It will check the PT_AREA and MBitmap. ++ * If the two regions are properly configured, it means the host OS ++ * has invoked SM_INIT sbi call and everything to run enclave is ready. ++ * ++ */ ++int enable_enclave() ++{ ++ return pt_area_base && pt_area_size && mbitmap_base && mbitmap_size; ++} ++ ++/** ++ * \brief Init the bitmap, set the bitmap memory as the secure memory. ++ * ++ * \param _mbitmap_base The start address of the bitmap. ++ * \param _mbitmap_size The bitmap memory size. ++ */ ++int init_mbitmap(uintptr_t _mbitmap_base, uintptr_t _mbitmap_size) ++{ ++ page_meta* meta = (page_meta*)_mbitmap_base; ++ uintptr_t cur = 0; ++ while(cur < _mbitmap_size) ++ { ++ *meta = MAKE_PUBLIC_PAGE(NORMAL_PAGE); ++ meta += 1; ++ cur += sizeof(page_meta); ++ } ++ ++ return 0; ++} ++ ++/** ++ * \brief Check whether the pfn range contains the secure memory. ++ * ++ * \param pfn The start page frame. ++ * \param pagenum The page number in the pfn range. ++ */ ++int contain_private_range(uintptr_t pfn, uintptr_t pagenum) ++{ ++ if(!enable_enclave()) ++ return 0; ++ ++ if(pfn < ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT)){ ++ sbi_bug("M mode: contain_private_range: pfn is out of the DRAM range\r\n"); ++ return -1; ++ } ++ ++ pfn = pfn - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ page_meta* meta = (page_meta*)mbitmap_base + pfn; ++ if((uintptr_t)(meta + pagenum) > (mbitmap_base + mbitmap_size)){ ++ sbi_bug("M mode: contain_private_range: meta is out of the mbitmap range\r\n"); ++ return -1; ++ } ++ ++ uintptr_t cur = 0; ++ while(cur < pagenum) ++ { ++ if(IS_PRIVATE_PAGE(*meta)) ++ return 1; ++ meta += 1; ++ cur += 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \brief The function checks whether a range of physical memory is untrusted memory (for ++ * Host OS/apps to use) ++ * Return value: ++ * -1: some pages are not public (untrusted) ++ * 0: all pages are public (untrusted). ++ * ++ * \param pfn The start page frame. ++ * \param pagenum The page number in the pfn range. ++ */ ++int test_public_range(uintptr_t pfn, uintptr_t pagenum) ++{ ++ if(!enable_enclave()) ++ return 0; ++ ++ if(pfn < ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT)){ ++ sbi_bug("M mode: test_public_range: pfn is out of DRAM range\r\n"); ++ return -1; ++ } ++ ++ pfn = pfn - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ page_meta* meta = (page_meta*)mbitmap_base + pfn; ++ if((uintptr_t)(meta + pagenum) > (mbitmap_base + mbitmap_size)){ ++ sbi_bug("M mode: test_public_range: meta is out of range\r\n"); ++ return -1; ++ } ++ ++ uintptr_t cur = 0; ++ while(cur < pagenum) ++ { ++ if(!IS_PUBLIC_PAGE(*meta)){ ++ sbi_bug("M mode: test_public_range: IS_PUBLIC_PAGE is failed\r\n"); ++ return -1; ++ } ++ meta += 1; ++ cur += 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \brief This function will set a range of physical pages, [pfn, pfn+pagenum], ++ * to secure pages (or private pages). ++ * This function only updates the metadata of physical pages, but not unmap ++ * them in the host PT pages. ++ * Also, the function will not check whether a page is already secure. 
++ * The caller of the function should be careful to perform the above two tasks. ++ * ++ * \param pfn The start page frame. ++ * \param pagenum The page number in the pfn range. ++ */ ++int set_private_range(uintptr_t pfn, uintptr_t pagenum) ++{ ++ if(!enable_enclave()) ++ return 0; ++ ++ if(pfn < ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT)) ++ return -1; ++ ++ pfn = pfn - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ page_meta* meta = (page_meta*)mbitmap_base + pfn; ++ if((uintptr_t)(meta + pagenum) > (mbitmap_base + mbitmap_size)) ++ return -1; ++ ++ uintptr_t cur = 0; ++ while(cur < pagenum) ++ { ++ *meta = MAKE_PRIVATE_PAGE(*meta); ++ meta += 1; ++ cur += 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \brief Similiar to set_private_pages, but set_public range turns a set of pages ++ * into public (or untrusted). ++ * ++ * \param pfn The start page frame. ++ * \param pagenum The page number in the pfn range. ++ */ ++int set_public_range(uintptr_t pfn, uintptr_t pagenum) ++{ ++ if(!enable_enclave()) ++ return 0; ++ ++ if(pfn < ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT)) ++ return -1; ++ ++ pfn = pfn - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ page_meta* meta = (page_meta*)mbitmap_base + pfn; ++ if((uintptr_t)(meta + pagenum) > (mbitmap_base + mbitmap_size)) ++ return -1; ++ ++ uintptr_t cur = 0; ++ while(cur < pagenum) ++ { ++ *meta = MAKE_PUBLIC_PAGE(*meta); ++ meta += 1; ++ cur += 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \brief Init the schrodinger page. Check whether can mark these pages as schrodinger page. ++ * ++ * \param paddr The start page frame. ++ * \param size The page number in the pfn range. ++ */ ++uintptr_t sm_schrodinger_init(uintptr_t paddr, uintptr_t size) ++{ ++ int ret = 0; ++ if(!enable_enclave()) ++ return 0; ++ ++ if(paddr & (RISCV_PGSIZE-1) || !(paddr >= (uintptr_t)DRAM_BASE ++ /*&& paddr + size <= (uintptr_t)DRAM_BASE + */)) ++ return -1; ++ ++ if(size < RISCV_PGSIZE || size & (RISCV_PGSIZE-1)) ++ return -1; ++ ++ spin_lock(&mbitmap_lock); ++ ++ uintptr_t pagenum = size >> RISCV_PGSHIFT; ++ uintptr_t pfn = PADDR_TO_PFN(paddr); ++ if(test_public_range(pfn, pagenum) != 0) ++ { ++ ret = -1; ++ goto out; ++ } ++ ++ //fast path ++ uintptr_t _pfn = pfn - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ page_meta* meta = (page_meta*)mbitmap_base + _pfn; ++ uintptr_t cur = 0; ++ while(cur < pagenum) ++ { ++ if(!IS_SCHRODINGER_PAGE(*meta)) ++ break; ++ meta += 1; ++ cur += 1; ++ _pfn += 1; ++ } ++ if(cur >= pagenum) ++ { ++ ret = 0; ++ goto out; ++ } ++ ++ //slow path ++ uintptr_t *pte = (uintptr_t*)pt_area_base; ++ uintptr_t pte_pos = 0; ++ uintptr_t *pte_end = (uintptr_t*)(pt_area_base + pt_area_size); ++ uintptr_t pfn_base = PADDR_TO_PFN((uintptr_t)DRAM_BASE) + _pfn; ++ uintptr_t pfn_end = PADDR_TO_PFN(paddr + size); ++ uintptr_t _pfn_base = pfn_base - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ uintptr_t _pfn_end = pfn_end - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ //check whether these page only has one mapping in the kernel ++ //pte @ pt entry address ++ //pfn @ the pfn in the current pte ++ //pte_pos @ the offset begin the pte and pt_area_base ++ while(pte < pte_end) ++ { ++ if(!IS_PGD(*pte) && PTE_VALID(*pte)) ++ { ++ pfn = PTE_TO_PFN(*pte); ++ //huge page entry ++ if( ((unsigned long)pte >= pt_area_base + (1<= pfn_base && (pfn + RISCV_PTENUM) <= pfn_end) ++ { ++ _pfn = pfn - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ //mark the page as schrodinger page, note: a huge page has 512 schrodinger pages ++ for(int i=0; i= pfn_end || (pfn+RISCV_PTENUM )<= pfn_base) ++ { ++ //There 
is no overlap between the pmd region and schrodinger region ++ } ++ else ++ { ++ sbi_bug(" M mode: ERROR: schrodinger_init: non-split page\r\n"); ++ return -1; ++ //map_pt_pte_page(pte); ++ } ++ } ++ else if( ((unsigned long)pte >= pt_area_base + (1<= pfn_base && pfn < pfn_end) ++ { ++ sbi_printf("M mode: schrodinger_init: pfn %lx in pte\r\n", pfn); ++ _pfn = pfn - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ meta = (page_meta*)mbitmap_base + _pfn; ++ //check whether this physical page is already be a schrodinger page, but pt psoition is not current position ++ if(IS_SCHRODINGER_PAGE(*meta) && SCHRODINGER_PTE_POS(*meta) != pte_pos) ++ { ++ sbi_bug("M mode: schrodinger_init: page0x%lx is multi mapped\r\n", pfn); ++ ret = -1; ++ goto failed; ++ } ++ *meta = MAKE_SCHRODINGER_PAGE(0, pte_pos); ++ } ++ } ++ } ++ pte_pos += 1; ++ pte += 1; ++ } ++ while(_pfn_base < _pfn_end) ++ { ++ meta = (page_meta*)mbitmap_base + _pfn_base; ++ if(!IS_SCHRODINGER_PAGE(*meta)) ++ *meta = MAKE_ZERO_MAP_PAGE(*meta); ++ _pfn_base += 1; ++ } ++out: ++ spin_unlock(&mbitmap_lock); ++ return ret; ++ ++failed: ++ _pfn_base = pfn_base - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ _pfn_end = pfn_end - ((uintptr_t)DRAM_BASE >> RISCV_PGSHIFT); ++ while(_pfn_base < _pfn_end) ++ { ++ meta = (page_meta*)mbitmap_base + _pfn_base; ++ *meta = MAKE_PUBLIC_PAGE(NORMAL_PAGE); ++ _pfn_base += 1; ++ } ++ ++ spin_unlock(&mbitmap_lock); ++ return ret; ++} ++ ++int sm_count = 0; ++/** ++ * \brief Auxiliary function for debug. ++ */ ++uintptr_t sm_print(uintptr_t paddr, uintptr_t size) ++{ ++ sm_count++; ++ return 0; ++ int zero_map_num = 0; ++ int single_map_num = 0; ++ int multi_map_num = 0; ++ uintptr_t pfn = PADDR_TO_PFN(paddr); ++ uintptr_t _pfn = pfn - PADDR_TO_PFN((uintptr_t)DRAM_BASE); ++ uintptr_t pagenum = size >> RISCV_PGSHIFT; ++ page_meta* meta = (page_meta*)mbitmap_base + _pfn; ++ uintptr_t i = 0; ++ while(i < pagenum) ++ { ++ if(IS_ZERO_MAP_PAGE(*meta)) ++ zero_map_num+=1; ++ else if(IS_SCHRODINGER_PAGE(*meta)) ++ single_map_num+=1; ++ else ++ multi_map_num+=1; ++ i += 1; ++ meta += 1; ++ } ++ sbi_printf("sm_print: paddr:0x%lx, zeromapnum:0x%x,singleapnum:0x%x,multimapnum:0x%x\r\n", ++ paddr, zero_map_num, single_map_num, multi_map_num); ++ return 0; ++} ++ ++/** ++ * \brief split the pte (a huge page, 2M) into new_pte_addr (4K PT page) ++ * ++ * \param pmd The given huge pmd entry. ++ * \param new_pte_addr The new pte page address. 
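/* A hypothetical sketch of the per-page fill that sm_map_pte() below performs
 * when splitting a 2M huge mapping into a fresh 4K PT page: each of the 512
 * new PTEs (RISCV_PTENUM) maps one 4K slice of the former huge page and keeps
 * the old attribute bits. The function name is an assumption; the shift of 10
 * matches the PTE_PPN_SHIFT used throughout this patch. */
static void fill_split_pte_page(unsigned long *new_pte, unsigned long pfn,
                                unsigned long attr)
{
	for (int i = 0; i < 512; i++)
		new_pte[i] = ((pfn + i) << 10) | attr;
}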
++ */ ++uintptr_t sm_map_pte(uintptr_t* pmd, uintptr_t* new_pte_addr) ++{ ++ unsigned long pte_attribute = PAGE_ATTRIBUTION(*pmd); ++ unsigned long pfn = PTE_TO_PFN(*pmd); ++ *pmd = PA_TO_PTE((uintptr_t)new_pte_addr, PTE_V); ++ for(int i = 0; i */) ++ return -1; ++ uintptr_t _pfn = PADDR_TO_PFN(paddr) - PADDR_TO_PFN((uintptr_t)DRAM_BASE); ++ uintptr_t pfn_base = PADDR_TO_PFN((uintptr_t)DRAM_BASE) + _pfn; ++ uintptr_t pfn_end = PADDR_TO_PFN(paddr + size); ++ uintptr_t *pte = (uintptr_t*)pt_area_base; ++ uintptr_t *pte_end = (uintptr_t*)(pt_area_base + pt_area_size); ++ while(pte < pte_end) ++ { ++ if(!IS_PGD(*pte) && PTE_VALID(*pte)) ++ { ++ uintptr_t pfn = PTE_TO_PFN(*pte); ++ if( ((unsigned long)pte >= pt_area_base + (1<= pfn_end || (pfn+RISCV_PTENUM )<= pfn_base) ++ { ++ //There is no overlap between the pmd region and remap region ++ pte += 1; ++ continue; ++ } ++ else if(pfn_base<=pfn && pfn_end>=(pfn+RISCV_PTENUM)) ++ { ++ pte += 1; ++ continue; ++ } ++ else ++ { ++ split_pmd_local.pte_addr = (unsigned long)pte; ++ split_pmd_local.pte = *pte; ++ break; ++ } ++ } ++ } ++ pte += 1; ++ } ++ retval = copy_to_host((struct pt_entry_t*)split_pmd, ++ &split_pmd_local, ++ sizeof(struct pt_entry_t)); ++ return retval; ++} ++ ++/** ++ * \brief Unmap a memory region, [paddr, paddr + size], from host's all PTEs ++ * We can achieve a fast path unmapping if the unmapped pages are SCHRODINGER PAGEs. ++ * ++ * \param paddr The unmap memory address. ++ * \param size The unmap memory size. ++ */ ++int unmap_mm_region(unsigned long paddr, unsigned long size) ++{ ++ if(!enable_enclave()) ++ return 0; ++ ++ if(paddr < (uintptr_t)DRAM_BASE /*|| (paddr + size) > */){ ++ sbi_bug("M mode: unmap_mm_region: paddr is less than DRAM_BASE\r\n"); ++ return -1; ++ } ++ ++ //fast path ++ uintptr_t _pfn = PADDR_TO_PFN(paddr) - PADDR_TO_PFN((uintptr_t)DRAM_BASE); ++ page_meta* meta = (page_meta*)mbitmap_base + _pfn; ++ uintptr_t pagenum = size >> RISCV_PGSHIFT; ++ uintptr_t cur = 0; ++ while(cur < pagenum) ++ { ++ if(!IS_SCHRODINGER_PAGE(*meta)) ++ break; ++ if(!IS_ZERO_MAP_PAGE(*meta)) ++ { ++ //Get pte addr in the pt_area region ++ uintptr_t *pte = (uintptr_t*)pt_area_base + SCHRODINGER_PTE_POS(*meta); ++ *pte = INVALIDATE_PTE(*pte); ++ } ++ cur += 1; ++ _pfn += 1; ++ meta += 1; ++ } ++ if(cur >= pagenum) ++ return 0; ++ ++ //slow path ++ if(_pfn != (PADDR_TO_PFN(paddr) - PADDR_TO_PFN((uintptr_t)DRAM_BASE))) ++ { ++ sbi_printf("M mode: Error in unmap_mm_region, pfn is conflict, _pfn_old is %lx _pfn_new is %lx\r\n", ++ _pfn, (PADDR_TO_PFN(paddr) - PADDR_TO_PFN((uintptr_t)DRAM_BASE))); ++ } ++ uintptr_t pfn_base = PADDR_TO_PFN((uintptr_t)DRAM_BASE) + _pfn; ++ uintptr_t pfn_end = PADDR_TO_PFN(paddr + size); ++ uintptr_t *pte = (uintptr_t*)pt_area_base; ++ uintptr_t *pte_end = (uintptr_t*)(pt_area_base + pt_area_size); ++ ++ while(pte < pte_end) ++ { ++ if(!IS_PGD(*pte) && PTE_VALID(*pte)) ++ { ++ uintptr_t pfn = PTE_TO_PFN(*pte); ++ if( ((unsigned long)pte >= pt_area_base + (1<= pfn_end || (pfn+RISCV_PTENUM )<= pfn_base) ++ { ++ //There is no overlap between the pmd region and remap region ++ pte += 1; ++ continue; ++ } ++ else if(pfn_base<=pfn && pfn_end>=(pfn+RISCV_PTENUM)) ++ { ++ //This huge page is covered by remap region ++ *pte = INVALIDATE_PTE(*pte); ++ } ++ else ++ { ++ sbi_bug("M mode: ERROR: unmap_mm_region: non-split page\r\n"); ++ return -1; ++ } ++ } ++ else if( ((unsigned long)pte >= pt_area_base + (1<= pfn_base && pfn < pfn_end) ++ { ++ *pte = INVALIDATE_PTE(*pte); ++ } ++ } ++ } ++ pte += 1; ++ } ++ ++ 
return 0; ++} ++ ++/** ++ * \brief Remap a set of pages to host PTEs. ++ * It's usually used when we try to free a set of secure pages. ++ * ++ * \param paddr The mmap memory address. ++ * \param size The mmap memory size. ++ */ ++int remap_mm_region(unsigned long paddr, unsigned long size) ++{ ++ if(!enable_enclave()) ++ return 0; ++ ++ if(paddr < (uintptr_t)DRAM_BASE /*|| (paddr + size) > */) ++ return -1; ++ ++ //Fast path ++ uintptr_t _pfn = PADDR_TO_PFN(paddr) - PADDR_TO_PFN((uintptr_t)DRAM_BASE); ++ page_meta* meta = (page_meta*)mbitmap_base + _pfn; ++ uintptr_t cur = 0; ++ uintptr_t pagenum = size >> RISCV_PGSHIFT; ++ while(cur < pagenum) ++ { ++ if(!IS_SCHRODINGER_PAGE(*meta)) ++ break; ++ if(!IS_ZERO_MAP_PAGE(*meta)) ++ { ++ uintptr_t *pte = (uintptr_t*)pt_area_base + SCHRODINGER_PTE_POS(*meta); ++ *pte = VALIDATE_PTE(*pte); ++ } ++ cur += 1; ++ _pfn += 1; ++ meta += 1; ++ } ++ if(cur >= pagenum) ++ return 0; ++ ++ //Slow path ++ uintptr_t pfn_base = PADDR_TO_PFN((uintptr_t)DRAM_BASE) + _pfn; ++ uintptr_t pfn_end = PADDR_TO_PFN(paddr + size); ++ uintptr_t *pte = (uintptr_t*)pt_area_base; ++ uintptr_t *pte_end = (uintptr_t*)(pt_area_base + pt_area_size); ++ while(pte < pte_end) ++ { ++ if(!IS_PGD(*pte)) ++ { ++ uintptr_t pfn = PTE_TO_PFN(*pte); ++ if( ((unsigned long)pte >= pt_area_base + (1<= pfn_end || (pfn+RISCV_PTENUM )<= pfn_base) ++ { ++ //There is no overlap between the pmd region and remap region ++ pte += 1; ++ continue; ++ } ++ else if(pfn_base<=pfn && pfn_end>=(pfn+RISCV_PTENUM)) ++ { ++ //This huge page is covered by remap region ++ *pte = VALIDATE_PTE(*pte); ++ } ++ else ++ { ++ sbi_bug("M mode: The partial of his huge page is belong to enclave and the rest is belong to untrusted OS\r\n"); ++ return -1; ++ } ++ } ++ else if( ((unsigned long)pte >= pt_area_base + (1<= pfn_base && pfn < pfn_end) ++ { ++ *pte = VALIDATE_PTE(*pte); ++ } ++ } ++ } ++ pte += 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \brief Set a single pte entry. It will be triggled by the untrusted OS when setting the new pte entry value. ++ * ++ * \param pte_dest The location of pt entry in pt area ++ * \param pte_src The content of pt entry ++ */ ++int set_single_pte(uintptr_t *pte_dest, uintptr_t pte_src) ++{ ++ if(!enable_enclave()) ++ { ++ *pte_dest = pte_src; ++ return 0; ++ } ++ ++ uintptr_t pfn = 0; ++ uintptr_t _pfn = 0; ++ page_meta* meta = NULL; ++ int huge_page = 0; ++ //Check whether it is a huge page mapping ++ if( ((unsigned long)pte_dest >= pt_area_base + (1< pte_addr)) ++ { ++ if (((pt_area_base + (1< pa) || ((pt_area_base + ((1< pte_addr)) ++ { ++ if((pte_src & PTE_V) && !(pte_src & PTE_R) && !(pte_src & PTE_W) && !(pte_src & PTE_X)) ++ { ++ if (((pt_area_base + ((1< pa) || ((pt_area_base + pt_area_size) < pa) ) ++ { ++ sbi_printf("pt_area_base %lx pt_area_pte_base %lx pt_area_pte_end %lx pte_addr %lx pa %lx\r\n", pt_area_base, (pt_area_base + ((1<>3; ++ for(i=0; i= 0) && (ret <= SBI_LEGAL_MAX)) ++ { ++ regs[10] = 0; ++ regs[11] = ret; ++ } ++ return ret; ++} ++ ++/** ++ * \brief This transitional function is for handling yield() triggered in the enclave. ++ * ++ * \param regs The enclave reg. ++ * \param mcause CSR mcause value. ++ * \param mepc CSR mepc value. 
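/* A standalone restatement of the non-leaf test used by set_single_pte()
 * above: in the RISC-V Sv39/Sv48 PTE encoding, V=1 with R=W=X=0 marks a
 * pointer to a next-level page table rather than a leaf mapping. The helper
 * name is hypothetical; the bit values match the PTE_* macros in
 * sbi/riscv_encoding.h. */
static int pte_is_table(unsigned long pte)
{
	return (pte & 0x001) &&                   /* PTE_V */
	       !(pte & (0x002 | 0x004 | 0x008));  /* !(PTE_R|PTE_W|PTE_X) */
}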
++ */
++uintptr_t sm_handle_yield(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc)
++{
++ uintptr_t ret = 0;
++
++ ret = do_yield(regs, mcause, mepc);
++
++ return ret;
++}
++
++/**************************************************************/
++/* Interfaces for server enclave */
++/**************************************************************/
++/**
++ * \brief This transitional function creates the server enclave.
++ *
++ * \param enclave_sbi_param The arguments for creating the server enclave.
++ */
++uintptr_t sm_create_server_enclave(uintptr_t enclave_sbi_param)
++{
++ enclave_create_param_t enclave_sbi_param_local;
++ uintptr_t retval = 0;
++ if(test_public_range(PADDR_TO_PFN(enclave_sbi_param),1)<0){
++ return ENCLAVE_ERROR;
++ }
++ retval = copy_from_host(&enclave_sbi_param_local,
++ (enclave_create_param_t*)enclave_sbi_param,
++ sizeof(enclave_create_param_t));
++ if(retval != 0)
++ return ENCLAVE_ERROR;
++
++ retval = create_server_enclave(enclave_sbi_param_local);
++
++ return retval;
++}
++
++/**
++ * \brief This transitional function destroys the server enclave.
++ *
++ * \param regs The host regs.
++ * \param enclave_id The id of the server enclave to destroy.
++ */
++uintptr_t sm_destroy_server_enclave(uintptr_t *regs, uintptr_t enclave_id)
++{
++ //TODO
++ uintptr_t ret = 0;
++
++ ret = destroy_server_enclave(regs, enclave_id);
++
++ return ret;
++}
++
++/**
++ * \brief This transitional function acquires the server enclave handler.
++ *
++ * \param regs The enclave regs.
++ * \param server_name The acquired server enclave name.
++ */
++uintptr_t sm_server_enclave_acquire(uintptr_t *regs, uintptr_t server_name)
++{
++ uintptr_t ret = 0;
++
++ ret = acquire_server_enclave(regs, (char*)server_name);
++
++ return ret;
++}
++
++/**
++ * \brief This transitional function gets the caller enclave id.
++ *
++ * \param regs The enclave regs.
++ */
++uintptr_t sm_get_caller_id(uintptr_t *regs)
++{
++ uintptr_t ret = 0;
++
++ ret = get_caller_id(regs);
++
++ return ret;
++}
++
++/**
++ * \brief This transitional function calls the server enclave.
++ *
++ * \param regs The enclave regs.
++ * \param eid The callee enclave id.
++ * \param arg The calling arguments.
++ */
++uintptr_t sm_call_enclave(uintptr_t* regs, uintptr_t eid, uintptr_t arg)
++{
++ uintptr_t retval = 0;
++
++ retval = call_enclave(regs, (unsigned int)eid, arg);
++
++ return retval;
++}
++
++/**
++ * \brief This transitional function handles the return from a server enclave.
++ *
++ * \param regs The enclave regs.
++ * \param arg The return arguments.
++ */
++uintptr_t sm_enclave_return(uintptr_t* regs, uintptr_t arg)
++{
++ uintptr_t ret = 0;
++
++ ret = enclave_return(regs, arg);
++
++ return ret;
++}
++
++/**
++ * \brief This transitional function is for the asynchronous call to the server enclave.
++ *
++ * \param regs The enclave regs.
++ * \param enclave_name The callee enclave name.
++ * \param arg The calling arguments.
++ */
++uintptr_t sm_asyn_enclave_call(uintptr_t *regs, uintptr_t enclave_name, uintptr_t arg)
++{
++ uintptr_t ret = 0;
++
++ ret = asyn_enclave_call(regs, enclave_name, arg);
++ return ret;
++}
++
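/* A standalone sketch of the single swap pattern shared by the thread.c
 * helpers further below (swap_prev_mepc, swap_prev_stvec, swap_prev_mie,
 * swap_prev_mideleg, swap_prev_medeleg): exchange a saved "previous world"
 * value with the live one, so the same call both enters and leaves an
 * enclave. The helper name is hypothetical. */
static unsigned long swap_saved(unsigned long *saved, unsigned long live)
{
	unsigned long prev = *saved; /* the value to restore into the CSR */
	*saved = live;               /* stash the current world's value */
	return prev;
}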
++/**
++ * \brief This transitional function splits an enclave memory region into two pieces.
++ *
++ * \param regs The enclave regs.
++ * \param mem_addr The address of the memory region to split.
++ * \param mem_size The size of the memory region.
++ * \param split_addr The split point within the region.
++ */
++uintptr_t sm_split_mem_region(uintptr_t *regs, uintptr_t mem_addr, uintptr_t mem_size, uintptr_t split_addr)
++{
++ uintptr_t ret = 0;
++
++ ret = split_mem_region(regs, mem_addr, mem_size, split_addr);
++
++ return ret;
++}
+\ No newline at end of file
+diff --git a/lib/sbi/sm/thread.c b/lib/sbi/sm/thread.c
+new file mode 100644
+index 0000000..4c68478
+--- /dev/null
++++ b/lib/sbi/sm/thread.c
+@@ -0,0 +1,101 @@
++#include "sm/thread.h"
++#include "sbi/riscv_encoding.h"
++#include "sbi/riscv_asm.h"
++/**
++ * \brief Swap the general registers in thread->prev_state with regs.
++ *
++ * \param thread is the thread abstraction in enclaves
++ * \param regs usually is the location where the regs of host/enclaves are saved (before trap)
++ */
++void swap_prev_state(struct thread_state_t* thread, uintptr_t* regs)
++{
++ int i;
++
++ uintptr_t* prev = (uintptr_t*) &thread->prev_state;
++ for(i = 1; i < N_GENERAL_REGISTERS; ++i)
++ {
++ //swap general register
++ uintptr_t tmp = prev[i];
++ prev[i] = regs[i];
++ regs[i] = tmp;
++ }
++
++ return;
++}
++
++/**
++ * \brief Swap the saved mepc with the current one and update the mepc CSR.
++ *
++ * \param thread is the thread abstraction in enclaves
++ * \param current_mepc is the current mepc value
++ */
++void swap_prev_mepc(struct thread_state_t* thread, uintptr_t current_mepc)
++{
++ uintptr_t tmp = thread->prev_mepc;
++ thread->prev_mepc = current_mepc;
++ csr_write(CSR_MEPC, tmp);
++}
++
++/**
++ * \brief Swap the saved stvec with the current one and update the stvec CSR.
++ *
++ * \param thread is the thread abstraction in enclaves
++ * \param current_stvec is the current stvec value
++ */
++void swap_prev_stvec(struct thread_state_t* thread, uintptr_t current_stvec)
++{
++ uintptr_t tmp = thread->prev_stvec;
++ thread->prev_stvec = current_stvec;
++ csr_write(CSR_STVEC, tmp);
++}
++
++/**
++ * \brief Switch the enclave cache binding status.
++ *
++ * \param thread is the thread abstraction in enclaves
++ * \param current_cache_binding is the current cache binding status
++ */
++void swap_prev_cache_binding(struct thread_state_t* thread, uintptr_t current_cache_binding)
++{
++ thread->prev_cache_binding = current_cache_binding;
++ //TODO
++}
++
++/**
++ * \brief Swap the saved mie with the current one and update the mie CSR.
++ *
++ * \param thread is the thread abstraction in enclaves
++ * \param current_mie is the current mie status
++ */
++void swap_prev_mie(struct thread_state_t* thread, uintptr_t current_mie)
++{
++ uintptr_t tmp = thread->prev_mie;
++ thread->prev_mie = current_mie;
++ csr_write(CSR_MIE, tmp);
++}
++
++/**
++ * \brief Swap the saved mideleg with the current one and update the mideleg CSR.
++ *
++ * \param thread is the thread abstraction in enclaves
++ * \param current_mideleg is the current mideleg status
++ */
++void swap_prev_mideleg(struct thread_state_t* thread, uintptr_t current_mideleg)
++{
++ uintptr_t tmp = thread->prev_mideleg;
++ thread->prev_mideleg = current_mideleg;
++ csr_write(CSR_MIDELEG, tmp);
++}
++
++/**
++ * \brief Swap the saved medeleg with the current one and update the medeleg CSR.
++ *
++ * \param thread is the thread abstraction in enclaves
++ * \param current_medeleg is the current medeleg status
++ */
++void swap_prev_medeleg(struct thread_state_t* thread, uintptr_t current_medeleg)
++{
++ uintptr_t tmp = thread->prev_medeleg;
++ thread->prev_medeleg = current_medeleg;
++ csr_write(CSR_MEDELEG, tmp);
++}
+--
+2.17.1
+
diff --git a/include/sbi/sbi_types.h b/include/sbi/sbi_types.h index 0952d5c..b004419 100644 --- a/include/sbi/sbi_types.h +++ b/include/sbi/sbi_types.h @@ -47,6
+47,7 @@ typedef unsigned long long uint64_t; typedef int bool; typedef unsigned long ulong; typedef unsigned long uintptr_t; +typedef long intptr_t; typedef unsigned long size_t; typedef long ssize_t; typedef unsigned long virtual_addr_t; diff --git a/include/sm/enclave.h b/include/sm/enclave.h index 4e80514..39275e5 100644 --- a/include/sm/enclave.h +++ b/include/sm/enclave.h @@ -7,10 +7,9 @@ #include "sbi/riscv_locks.h" #include "sbi/sbi_string.h" #include "sbi/riscv_asm.h" +#include "sbi/sbi_types.h" #include "sm/thread.h" #include "sm/vm.h" -#include -#include diff --git a/include/sm/enclave_mm.h b/include/sm/enclave_mm.h index f80efad..5d7a1d3 100644 --- a/include/sm/enclave_mm.h +++ b/include/sm/enclave_mm.h @@ -1,7 +1,7 @@ #ifndef _ENCLAVE_MM_H #define _ENCLAVE_MM_H -#include +#include "sbi/sbi_types.h" #include "sm/enclave.h" struct mm_region_list_t diff --git a/include/sm/ipi.h b/include/sm/ipi.h index 0cccc02..e8316cd 100644 --- a/include/sm/ipi.h +++ b/include/sm/ipi.h @@ -3,7 +3,7 @@ #include "sbi/riscv_atomic.h" #include "sbi/riscv_locks.h" -#include "stdint.h" +#include "sbi/sbi_types.h" #define IPI_PMP_SYNC 0x1 #define IPI_STOP_ENCLAVE 0x2 diff --git a/include/sm/pmp.h b/include/sm/pmp.h index a8e71ed..ca963c7 100644 --- a/include/sm/pmp.h +++ b/include/sm/pmp.h @@ -1,7 +1,7 @@ #ifndef _PMP_H #define _PMP_H -#include +#include "sbi/sbi_types.h" #include "sbi/riscv_encoding.h" #include "sbi/sbi_hartmask.h" diff --git a/include/sm/sm.h b/include/sm/sm.h index 8195712..2e7a638 100644 --- a/include/sm/sm.h +++ b/include/sm/sm.h @@ -1,7 +1,7 @@ #ifndef _SM_H #define _SM_H -#include +#include "sbi/sbi_types.h" #include "sm/enclave_args.h" #include "sm/ipi.h" diff --git a/include/sm/thread.h b/include/sm/thread.h index c23077e..9a42d56 100644 --- a/include/sm/thread.h +++ b/include/sm/thread.h @@ -1,7 +1,7 @@ #ifndef __THREAD_H__ #define __THREAD_H__ -#include +#include "sbi/sbi_types.h" /// \brief define the number of general registers #define N_GENERAL_REGISTERS 32 diff --git a/include/sm/vm.h b/include/sm/vm.h index d7876c9..bc87a20 100644 --- a/include/sm/vm.h +++ b/include/sm/vm.h @@ -3,7 +3,7 @@ #include "sbi/riscv_encoding.h" #include "sbi/sbi_bitops.h" -#include +#include "sbi/sbi_types.h" #define MEGAPAGE_SIZE ((uintptr_t)(RISCV_PGSIZE << RISCV_PGLEVEL_BITS)) #if __riscv_xlen == 64 diff --git a/lib/sbi/sm/pmp.c b/lib/sbi/sm/pmp.c index 35ed675..66cc02c 100644 --- a/lib/sbi/sm/pmp.c +++ b/lib/sbi/sm/pmp.c @@ -3,7 +3,6 @@ #include "sbi/riscv_asm.h" #include "sbi/sbi_pmp.h" #include "sbi/sbi_console.h" -#include /** * \brief Set pmp and sync all harts.