From 18ad0cce24addd45271edf3172ab9ce873186d7a Mon Sep 17 00:00:00 2001 From: Imre Kis Date: Tue, 18 Apr 2023 16:41:51 +0200 Subject: [PATCH] core: spmc: handle non-secure interrupts Add FFA_INTERRUPT and FFA_RUN support for signaling non-secure interrupts and for resuming to the secure world. If a secure partition is preempted by a non-secure interrupt OP-TEE saves the SP's state and sends an FFA_INTERRUPT to the normal world. After handling the interrupt the normal world should send an FFA_RUN to OP-TEE so it can continue running the SP. If OP-TEE is the active FF-A endpoint (i.e. it is running TAs) the non-secure interrupts are signaled by the existing OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT message instead of FFA_INTERRUPT. Upstream-Status: Submitted [https://github.com/OP-TEE/optee_os/pull/6002] Signed-off-by: Imre Kis Change-Id: I577ebe86d416ee494963216a66a3bfc8206921b4 --- core/arch/arm/include/ffa.h | 2 +- .../arch/arm/include/kernel/spmc_sp_handler.h | 11 +++++++ core/arch/arm/kernel/secure_partition.c | 17 ++++++++++ core/arch/arm/kernel/spmc_sp_handler.c | 26 ++++++++++++++++ core/arch/arm/kernel/thread.c | 7 +++++ core/arch/arm/kernel/thread_spmc.c | 31 ++++++++++++++++++- core/arch/arm/kernel/thread_spmc_a64.S | 30 ++++++++++++++++++ 7 files changed, 122 insertions(+), 2 deletions(-) diff --git a/core/arch/arm/include/ffa.h b/core/arch/arm/include/ffa.h index 5a19fb0c7ff3..b3d1d354735d 100644 --- a/core/arch/arm/include/ffa.h +++ b/core/arch/arm/include/ffa.h @@ -50,7 +50,7 @@ #define FFA_ID_GET U(0x84000069) #define FFA_MSG_WAIT U(0x8400006B) #define FFA_MSG_YIELD U(0x8400006C) -#define FFA_MSG_RUN U(0x8400006D) +#define FFA_RUN U(0x8400006D) #define FFA_MSG_SEND U(0x8400006E) #define FFA_MSG_SEND_DIRECT_REQ_32 U(0x8400006F) #define FFA_MSG_SEND_DIRECT_REQ_64 U(0xC400006F) diff --git a/core/arch/arm/include/kernel/spmc_sp_handler.h b/core/arch/arm/include/kernel/spmc_sp_handler.h index f5bda7bfe7d0..30c1e4691273 100644 --- 
a/core/arch/arm/include/kernel/spmc_sp_handler.h +++ b/core/arch/arm/include/kernel/spmc_sp_handler.h @@ -25,6 +25,8 @@ void spmc_sp_start_thread(struct thread_smc_args *args); int spmc_sp_add_share(struct ffa_rxtx *rxtx, size_t blen, uint64_t *global_handle, struct sp_session *owner_sp); +void spmc_sp_set_to_preempted(struct ts_session *ts_sess); +int spmc_sp_resume_from_preempted(uint16_t endpoint_id); #else static inline void spmc_sp_start_thread(struct thread_smc_args *args __unused) { @@ -37,6 +39,15 @@ static inline int spmc_sp_add_share(struct ffa_rxtx *rxtx __unused, { return FFA_NOT_SUPPORTED; } + +static inline void spmc_sp_set_to_preempted(struct ts_session *ts_sess __unused) +{ +} + +static inline int spmc_sp_resume_from_preempted(uint16_t endpoint_id __unused) +{ + return FFA_NOT_SUPPORTED; +} #endif #endif /* __KERNEL_SPMC_SP_HANDLER_H */ diff --git a/core/arch/arm/kernel/secure_partition.c b/core/arch/arm/kernel/secure_partition.c index d386f1e4d211..740be6d22e47 100644 --- a/core/arch/arm/kernel/secure_partition.c +++ b/core/arch/arm/kernel/secure_partition.c @@ -999,6 +999,8 @@ static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, struct sp_session *sp_s = to_sp_session(s); struct ts_session *sess = NULL; struct thread_ctx_regs *sp_regs = NULL; + uint32_t thread_id = THREAD_ID_INVALID; + uint32_t rpc_target_info = 0; uint32_t panicked = false; uint32_t panic_code = 0; @@ -1011,8 +1013,23 @@ static TEE_Result sp_enter_invoke_cmd(struct ts_session *s, sp_regs->cpsr = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT); exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); + + /* + * Store endpoint ID and thread ID in rpc_target_info. This will be used + * as w1 in FFA_INTERRUPT in case of a NWd interrupt. 
+ */ + rpc_target_info = thread_get_tsd()->rpc_target_info; + thread_id = thread_get_id(); + assert((thread_id & ~0xffff) == 0); + thread_get_tsd()->rpc_target_info = (sp_s->endpoint_id << 16) | + (thread_id & 0xffff); + __thread_enter_user_mode(sp_regs, &panicked, &panic_code); + sp_regs->cpsr = cpsr; + /* Restore rpc_target_info */ + thread_get_tsd()->rpc_target_info = rpc_target_info; + thread_unmask_exceptions(exceptions); thread_user_clear_vfp(&ctx->uctx); diff --git a/core/arch/arm/kernel/spmc_sp_handler.c b/core/arch/arm/kernel/spmc_sp_handler.c index 46a15646ecf0..12681151a796 100644 --- a/core/arch/arm/kernel/spmc_sp_handler.c +++ b/core/arch/arm/kernel/spmc_sp_handler.c @@ -366,6 +366,32 @@ cleanup: return res; } +void spmc_sp_set_to_preempted(struct ts_session *ts_sess) +{ + if (ts_sess && is_sp_ctx(ts_sess->ctx)) { + struct sp_session *sp_sess = to_sp_session(ts_sess); + + assert(sp_sess->state == sp_busy); + + sp_sess->state = sp_preempted; + } +} + +int spmc_sp_resume_from_preempted(uint16_t endpoint_id) +{ + struct sp_session *sp_sess = sp_get_session(endpoint_id); + + if (!sp_sess) + return FFA_INVALID_PARAMETERS; + + if (sp_sess->state != sp_preempted) + return FFA_DENIED; + + sp_sess->state = sp_busy; + + return FFA_OK; +} + static bool check_rxtx(struct ffa_rxtx *rxtx) { return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0; diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c index 1e7f9f96b558..8cd4dc961b02 100644 --- a/core/arch/arm/kernel/thread.c +++ b/core/arch/arm/kernel/thread.c @@ -531,6 +531,13 @@ int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc) core_mmu_set_user_map(NULL); } + if (IS_ENABLED(CFG_SECURE_PARTITION)) { + struct ts_session *ts_sess = + TAILQ_FIRST(&threads[ct].tsd.sess_stack); + + spmc_sp_set_to_preempted(ts_sess); + } + l->curr_thread = THREAD_ID_INVALID; if (IS_ENABLED(CFG_VIRTUALIZATION)) diff --git a/core/arch/arm/kernel/thread_spmc.c b/core/arch/arm/kernel/thread_spmc.c index 
3b4ac0b4e35c..bc4e7687d618 100644 --- a/core/arch/arm/kernel/thread_spmc.c +++ b/core/arch/arm/kernel/thread_spmc.c @@ -45,7 +45,7 @@ struct mem_frag_state { #endif /* Initialized in spmc_init() below */ -static uint16_t my_endpoint_id; +uint16_t my_endpoint_id; /* * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized. @@ -437,6 +437,32 @@ out: FFA_PARAM_MBZ, FFA_PARAM_MBZ); cpu_spin_unlock(&rxtx->spinlock); } + +static void spmc_handle_run(struct thread_smc_args *args) +{ + uint16_t endpoint = (args->a1 >> 16) & 0xffff; + uint16_t thread_id = (args->a1 & 0xffff); + uint32_t rc = 0; + + if (endpoint != my_endpoint_id) { + /* + * The endpoint should be an SP, try to resume the SP from + * preempted into busy state. + */ + rc = spmc_sp_resume_from_preempted(endpoint); + if (rc) + goto out; + } + + thread_resume_from_rpc(thread_id, 0, 0, 0, 0); + + /* thread_resume_from_rpc returns only if the thread_id is invalid */ + rc = FFA_INVALID_PARAMETERS; + +out: + spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ, + FFA_PARAM_MBZ, FFA_PARAM_MBZ); +} #endif /*CFG_CORE_SEL1_SPMC*/ static void handle_yielding_call(struct thread_smc_args *args) @@ -970,6 +996,9 @@ void thread_spmc_msg_recv(struct thread_smc_args *args) case FFA_PARTITION_INFO_GET: spmc_handle_partition_info_get(args, &nw_rxtx); break; + case FFA_RUN: + spmc_handle_run(args); + break; #endif /*CFG_CORE_SEL1_SPMC*/ case FFA_INTERRUPT: itr_core_handler(); diff --git a/core/arch/arm/kernel/thread_spmc_a64.S b/core/arch/arm/kernel/thread_spmc_a64.S index 21cb62513a42..7297005a6038 100644 --- a/core/arch/arm/kernel/thread_spmc_a64.S +++ b/core/arch/arm/kernel/thread_spmc_a64.S @@ -14,6 +14,20 @@ #include #include +#if CFG_SECURE_PARTITION +LOCAL_FUNC thread_ffa_interrupt , : + mov_imm x0, FFA_INTERRUPT /* FID */ + /* X1: Endpoint/vCPU IDs are set by caller */ + mov x2, #FFA_PARAM_MBZ /* Param MBZ */ + mov x3, #FFA_PARAM_MBZ /* Param MBZ */ + mov x4, #FFA_PARAM_MBZ /* Param MBZ */ + 
mov x5, #FFA_PARAM_MBZ /* Param MBZ */ + mov x6, #FFA_PARAM_MBZ /* Param MBZ */ + mov x7, #FFA_PARAM_MBZ /* Param MBZ */ + b .ffa_msg_loop +END_FUNC thread_ffa_interrupt +#endif /* CFG_SECURE_PARTITION */ + FUNC thread_ffa_msg_wait , : mov_imm x0, FFA_MSG_WAIT /* FID */ mov x1, #FFA_TARGET_INFO_MBZ /* Target info MBZ */ @@ -171,6 +185,14 @@ END_FUNC thread_rpc * The current thread as indicated by @thread_index has just been * suspended. The job here is just to inform normal world the thread id to * resume when returning. + * If the active FF-A endpoint is OP-TEE (or a TA) then this function sends an + * OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT message to the normal world via the + * FFA_MSG_SEND_DIRECT_RESP interface. This is handled by the OP-TEE + * driver in Linux so it can schedule a task to the thread. + * If the active endpoint is an SP the function sends an FFA_INTERRUPT. This is + * handled by the FF-A driver and after taking care of the NWd interrupts it + * returns via an FFA_RUN call. + * The active endpoint is determined by the upper 16 bits of rpc_target_info. */ FUNC thread_foreign_intr_exit , : /* load threads[w0].tsd.rpc_target_info into w1 */ @@ -178,6 +200,14 @@ FUNC thread_foreign_intr_exit , : adr_l x2, threads madd x1, x1, x0, x2 ldr w1, [x1, #THREAD_CTX_TSD_RPC_TARGET_INFO] +#if CFG_SECURE_PARTITION + adr_l x2, my_endpoint_id + ldrh w2, [x2] + lsr w3, w1, #16 + cmp w2, w3 + /* (threads[w0].tsd.rpc_target_info >> 16) != my_endpoint_id */ + bne thread_ffa_interrupt +#endif /* CFG_SECURE_PARTITION */ mov x2, #FFA_PARAM_MBZ mov w3, #FFA_PARAM_MBZ mov w4, #OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT