diff --git a/arch/arm/include/arm/irq.h b/arch/arm/include/arm/irq.h index a3321d7c62c05..246410e47d4f6 100644 --- a/arch/arm/include/arm/irq.h +++ b/arch/arm/include/arm/irq.h @@ -127,12 +127,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. */ diff --git a/arch/arm/include/armv6-m/irq.h b/arch/arm/include/armv6-m/irq.h index 752e3ed394008..6d142accfa988 100644 --- a/arch/arm/include/armv6-m/irq.h +++ b/arch/arm/include/armv6-m/irq.h @@ -152,12 +152,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. */ diff --git a/arch/arm/include/armv7-a/irq.h b/arch/arm/include/armv7-a/irq.h index 6ca190b07155c..803d5807d3eab 100644 --- a/arch/arm/include/armv7-a/irq.h +++ b/arch/arm/include/armv7-a/irq.h @@ -253,12 +253,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. */ diff --git a/arch/arm/include/armv7-m/irq.h b/arch/arm/include/armv7-m/irq.h index 88db5d3ba36d0..c387899f6053f 100644 --- a/arch/arm/include/armv7-m/irq.h +++ b/arch/arm/include/armv7-m/irq.h @@ -212,12 +212,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. 
*/ diff --git a/arch/arm/include/armv7-r/irq.h b/arch/arm/include/armv7-r/irq.h index 6a00fbdbc321b..402606a546a19 100644 --- a/arch/arm/include/armv7-r/irq.h +++ b/arch/arm/include/armv7-r/irq.h @@ -253,12 +253,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. */ diff --git a/arch/arm/include/armv8-m/irq.h b/arch/arm/include/armv8-m/irq.h index f14d84f6553e8..fb23d569b7db8 100644 --- a/arch/arm/include/armv8-m/irq.h +++ b/arch/arm/include/armv8-m/irq.h @@ -223,12 +223,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. */ diff --git a/arch/arm/include/armv8-r/irq.h b/arch/arm/include/armv8-r/irq.h index 360b4a74585f8..c3be8199f3bfd 100644 --- a/arch/arm/include/armv8-r/irq.h +++ b/arch/arm/include/armv8-r/irq.h @@ -253,12 +253,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. 
*/ diff --git a/arch/arm/include/cxd56xx/irq.h b/arch/arm/include/cxd56xx/irq.h index 325d2f0e6ad9a..4269825b5d695 100644 --- a/arch/arm/include/cxd56xx/irq.h +++ b/arch/arm/include/cxd56xx/irq.h @@ -158,7 +158,7 @@ #define CXD56_IRQ_SPH13 (CXD56_IRQ_EXTINT+93) /* SPH13 IRQ number */ #define CXD56_IRQ_SPH14 (CXD56_IRQ_EXTINT+94) /* SPH14 IRQ number */ #define CXD56_IRQ_SPH15 (CXD56_IRQ_EXTINT+95) /* SPH15 IRQ number */ -#define CXD56_IRQ_SW_INT (CXD56_IRQ_EXTINT+96) /* SW_INT IRQ number */ +#define CXD56_IRQ_SMP_CALL (CXD56_IRQ_EXTINT+96) /* SMP_CALL IRQ number */ #define CXD56_IRQ_TIMER0 (CXD56_IRQ_EXTINT+97) /* TIMER0 IRQ number */ #define CXD56_IRQ_TIMER1 (CXD56_IRQ_EXTINT+98) /* TIMER1 IRQ number */ #define CXD56_IRQ_TIMER2 (CXD56_IRQ_EXTINT+99) /* TIMER2 IRQ number */ diff --git a/arch/arm/include/lc823450/irq.h b/arch/arm/include/lc823450/irq.h index 72d8115aa6d50..56c4f96d1a60b 100644 --- a/arch/arm/include/lc823450/irq.h +++ b/arch/arm/include/lc823450/irq.h @@ -59,11 +59,11 @@ #define LC823450_IRQ_INTERRUPTS (16) /* Vector number of the first external interrupt */ #define LC823450_IRQ_CTXM3_00 (LC823450_IRQ_INTERRUPTS+0) /* 16: CortexM3_00 interrupt */ -#define LC823450_IRQ_CTXM3_01 (LC823450_IRQ_INTERRUPTS+1) /* 17: CortexM3_01 interrupt */ +#define LC823450_IRQ_SMP_CALL_01 (LC823450_IRQ_INTERRUPTS+1) /* 17: CortexM3_01 interrupt */ #define LC823450_IRQ_CTXM3_02 (LC823450_IRQ_INTERRUPTS+2) /* 18: CortexM3_02 interrupt */ #define LC823450_IRQ_CTXM3_03 (LC823450_IRQ_INTERRUPTS+3) /* 19: CortexM3_03 interrupt */ #define LC823450_IRQ_CTXM3_10 (LC823450_IRQ_INTERRUPTS+4) /* 20: CortexM3_00 interrupt */ -#define LC823450_IRQ_CTXM3_11 (LC823450_IRQ_INTERRUPTS+5) /* 21: CortexM3_01 interrupt */ +#define LC823450_IRQ_SMP_CALL_11 (LC823450_IRQ_INTERRUPTS+5) /* 21: CortexM3_01 interrupt */ #define LC823450_IRQ_CTXM3_12 (LC823450_IRQ_INTERRUPTS+6) /* 22: CortexM3_02 interrupt */ #define LC823450_IRQ_CTXM3_13 (LC823450_IRQ_INTERRUPTS+7) /* 23: CortexM3_03 interrupt */ 
#define LC823450_IRQ_LPDSP0 (LC823450_IRQ_INTERRUPTS+8) /* 24: LPDSP0 interrupt */ diff --git a/arch/arm/include/rp2040/irq.h b/arch/arm/include/rp2040/irq.h index a8b776f8c6b2e..0b79f6f0789cd 100644 --- a/arch/arm/include/rp2040/irq.h +++ b/arch/arm/include/rp2040/irq.h @@ -75,8 +75,8 @@ #define RP2040_DMA_IRQ_1 (RP2040_IRQ_EXTINT+12) #define RP2040_IO_IRQ_BANK0 (RP2040_IRQ_EXTINT+13) #define RP2040_IO_IRQ_QSPI (RP2040_IRQ_EXTINT+14) -#define RP2040_SIO_IRQ_PROC0 (RP2040_IRQ_EXTINT+15) -#define RP2040_SIO_IRQ_PROC1 (RP2040_IRQ_EXTINT+16) +#define RP2040_SMP_CALL_PROC0 (RP2040_IRQ_EXTINT+15) +#define RP2040_SMP_CALL_PROC1 (RP2040_IRQ_EXTINT+16) #define RP2040_CLOCKS_IRQ (RP2040_IRQ_EXTINT+17) #define RP2040_SPI0_IRQ (RP2040_IRQ_EXTINT+18) #define RP2040_SPI1_IRQ (RP2040_IRQ_EXTINT+19) diff --git a/arch/arm/include/sam34/sam4cm_irq.h b/arch/arm/include/sam34/sam4cm_irq.h index 959ef7d8b78d7..815fc5e5feb24 100644 --- a/arch/arm/include/sam34/sam4cm_irq.h +++ b/arch/arm/include/sam34/sam4cm_irq.h @@ -114,7 +114,7 @@ #define SAM_IRQ_TC5 (SAM_IRQ_EXTINT+SAM_PID_TC5) /* PID 28: Timer Counter 5 */ #define SAM_IRQ_ADC (SAM_IRQ_EXTINT+SAM_PID_ADC) /* PID 29: Analog To Digital Converter */ #define SAM_IRQ_ARM (SAM_IRQ_EXTINT+SAM_PID_ARM) /* PID 30: FPU signals (only on CM4P1 core): FPIXC, FPOFC, FPUFC, FPIOC, FPDZC, FPIDC, FPIXC */ -#define SAM_IRQ_IPC0 (SAM_IRQ_EXTINT+SAM_PID_IPC0) /* PID 31: Interprocessor communication 0 */ +#define SAM_IRQ_SMP_CALL0 (SAM_IRQ_EXTINT+SAM_PID_IPC0) /* PID 31: Interprocessor communication 0 */ #define SAM_IRQ_SLCDC (SAM_IRQ_EXTINT+SAM_PID_SLCDC) /* PID 32: Segment LCD Controller */ #define SAM_IRQ_TRNG (SAM_IRQ_EXTINT+SAM_PID_TRNG) /* PID 33: True Random Generator */ #define SAM_IRQ_ICM (SAM_IRQ_EXTINT+SAM_PID_ICM) /* PID 34: Integrity Check Module */ @@ -122,7 +122,7 @@ #define SAM_IRQ_AES (SAM_IRQ_EXTINT+SAM_PID_AES) /* PID 36: Advanced Enhanced Standard */ #define SAM_IRQ_PIOC (SAM_IRQ_EXTINT+SAM_PID_PIOC) /* PID 37: Parallel I/O 
Controller C */ #define SAM_IRQ_UART1 (SAM_IRQ_EXTINT+SAM_PID_UART1) /* PID 38: Universal Asynchronous Receiver Transmitter 1 */ -#define SAM_IRQ_IPC1 (SAM_IRQ_EXTINT+SAM_PID_IPC1) /* PID 39: Interprocessor communication 1 */ +#define SAM_IRQ_SMP_CALL1 (SAM_IRQ_EXTINT+SAM_PID_IPC1) /* PID 39: Interprocessor communication 1 */ #define SAM_IRQ_RESERVED_40 (SAM_IRQ_EXTINT+SAM_PID_RESERVED_40) /* PID 40: Reserved */ #define SAM_IRQ_PWM (SAM_IRQ_EXTINT+SAM_PID_PWM) /* PID 41: Pulse Width Modulation */ #define SAM_IRQ_SRAM (SAM_IRQ_EXTINT+SAM_PID_SRAM) /* PID 42: SRAM1 (I/D Code bus of CM4P1), SRAM2 (Systembus of CM4P1) */ diff --git a/arch/arm/include/tlsr82/irq.h b/arch/arm/include/tlsr82/irq.h index 72cb08c9bfcf9..c5c4161d51df8 100644 --- a/arch/arm/include/tlsr82/irq.h +++ b/arch/arm/include/tlsr82/irq.h @@ -158,12 +158,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved register array pointer used during * signal processing. */ diff --git a/arch/arm/src/arm/arm_schedulesigaction.c b/arch/arm/src/arm/arm_schedulesigaction.c index a0a4017bf4235..1eca541afadb6 100644 --- a/arch/arm/src/arm/arm_schedulesigaction.c +++ b/arch/arm/src/arm/arm_schedulesigaction.c @@ -75,70 +75,61 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. 
+ */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; + /* In this case just deliver the signal now. */ - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. - */ - - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + else + { + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. */ - else - { - /* Save the return lr and cpsr and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. - */ - - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. 
+ */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ - tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; - tcb->xcp.regs[REG_CPSR] = PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT; + tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_CPSR] = PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT; #ifdef CONFIG_ARM_THUMB - tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; + tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; #endif - } } } diff --git a/arch/arm/src/arm/arm_sigdeliver.c b/arch/arm/src/arm/arm_sigdeliver.c index 21a0b98baac5d..22589d1e6e77e 100644 --- a/arch/arm/src/arm/arm_sigdeliver.c +++ b/arch/arm/src/arm/arm_sigdeliver.c @@ -59,8 +59,8 @@ void arm_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); #ifndef CONFIG_SUPPRESS_INTERRUPTS /* Then make sure that interrupts are enabled. Signal handlers must always @@ -72,7 +72,7 @@ void arm_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -92,7 +92,7 @@ void arm_sigdeliver(void) * could be modified by a hostile program. 
*/ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/arch/arm/src/armv6-m/arm_schedulesigaction.c b/arch/arm/src/armv6-m/arm_schedulesigaction.c index 4e5da4c5efd95..9eeda2acb31cc 100644 --- a/arch/arm/src/armv6-m/arm_schedulesigaction.c +++ b/arch/arm/src/armv6-m/arm_schedulesigaction.c @@ -78,103 +78,60 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); - DEBUGASSERT(tcb != NULL && sigdeliver != NULL); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* In this case just deliver the signal now. + * REVISIT: Signal handle will run in a critical section! */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handle will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { - /* CASE 2: The task that needs to receive the signal is running. - * This could happen if the task is running on another CPU OR if - * we are in an interrupt handler and the task is running on this - * CPU. In the former case, we will have to PAUSE the other CPU - * first. 
But in either case, we will have to modify the return - * state as well as the state in the TCB. - */ - - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return PC, CPSR and either the BASEPRI or PRIMASK - * registers (and perhaps also the LR). These will be restored - * by the signal trampoline after the signal has been delivered. - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return PC, CPSR and either the BASEPRI or PRIMASK + * registers (and perhaps also the LR). These will be restored + * by the signal trampoline after the signal has been delivered. + */ - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled. We must already be in privileged thread mode to be - * here. - */ + /* Then set up to vector to the trampoline with interrupts + * disabled. We must already be in privileged thread mode to be + * here. 
+ */ - tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; - tcb->xcp.regs[REG_PRIMASK] = 1; - tcb->xcp.regs[REG_XPSR] = ARMV6M_XPSR_T; + tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_PRIMASK] = 1; + tcb->xcp.regs[REG_XPSR] = ARMV6M_XPSR_T; #ifdef CONFIG_BUILD_PROTECTED - tcb->xcp.regs[REG_LR] = EXC_RETURN_THREAD; - tcb->xcp.regs[REG_EXC_RETURN] = EXC_RETURN_THREAD; - tcb->xcp.regs[REG_CONTROL] = getcontrol() & ~CONTROL_NPRIV; -#endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } + tcb->xcp.regs[REG_LR] = EXC_RETURN_THREAD; + tcb->xcp.regs[REG_EXC_RETURN] = EXC_RETURN_THREAD; + tcb->xcp.regs[REG_CONTROL] = getcontrol() & ~CONTROL_NPRIV; #endif - } } } diff --git a/arch/arm/src/armv6-m/arm_sigdeliver.c b/arch/arm/src/armv6-m/arm_sigdeliver.c index d8390b375ca23..fb039d57d8851 100644 --- a/arch/arm/src/armv6-m/arm_sigdeliver.c +++ b/arch/arm/src/armv6-m/arm_sigdeliver.c @@ -68,8 +68,8 @@ void arm_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -101,7 +101,7 @@ void arm_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -148,7 +148,7 @@ void arm_sigdeliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. 
diff --git a/arch/arm/src/armv7-a/CMakeLists.txt b/arch/arm/src/armv7-a/CMakeLists.txt index 042be6cffbc71..3fcc9a3679789 100644 --- a/arch/arm/src/armv7-a/CMakeLists.txt +++ b/arch/arm/src/armv7-a/CMakeLists.txt @@ -109,7 +109,7 @@ if(CONFIG_ARCH_FPU) endif() if(CONFIG_SMP) - list(APPEND SRCS arm_cpustart.c arm_cpupause.c arm_cpuidlestack.c arm_scu.c) + list(APPEND SRCS arm_cpustart.c arm_smpcall.c arm_cpuidlestack.c arm_scu.c) endif() if(CONFIG_ARCH_HAVE_PSCI) diff --git a/arch/arm/src/armv7-a/Make.defs b/arch/arm/src/armv7-a/Make.defs index f757e3d11c454..8aa01008f38c7 100644 --- a/arch/arm/src/armv7-a/Make.defs +++ b/arch/arm/src/armv7-a/Make.defs @@ -94,7 +94,7 @@ ifeq ($(CONFIG_ARCH_FPU),y) endif ifeq ($(CONFIG_SMP),y) - CMN_CSRCS += arm_cpustart.c arm_cpupause.c arm_cpuidlestack.c + CMN_CSRCS += arm_cpustart.c arm_smpcall.c arm_cpuidlestack.c CMN_CSRCS += arm_scu.c endif diff --git a/arch/arm/src/armv7-a/arm_cpupause.c b/arch/arm/src/armv7-a/arm_cpupause.c deleted file mode 100644 index 81dc1a534e70d..0000000000000 --- a/arch/arm/src/armv7-a/arm_cpupause.c +++ /dev/null @@ -1,425 +0,0 @@ -/**************************************************************************** - * arch/arm/src/armv7-a/arm_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include - -#include -#include -#include -#include - -#include "arm_internal.h" -#include "gic.h" -#include "sched/sched.h" - -#ifdef CONFIG_SMP - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. - */ - -static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. 
- * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. 
Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Release the g_cpu_paused spinlock to synchronize with the - * requesting CPU. - */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - /* Wait for the spinlock to be released. The requesting CPU will release - * the spinlock when the CPU is resumed. - */ - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: arm_pause_async_handler - * - * Description: - * This is the handler for async pause. - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. 
It porcess g_delivertasks - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int arm_pause_async_handler(int irq, void *context, void *arg) -{ - int cpu = this_cpu(); - - nxsched_process_delivered(cpu); - return OK; -} - -/**************************************************************************** - * Name: arm_pause_handler - * - * Description: - * This is the handler for SGI2. It performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int arm_pause_handler(int irq, void *context, void *arg) -{ - int cpu = this_cpu(); - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused(). If the pause event has already - * been processed then g_cpu_paused[cpu] will not be locked. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. 
- */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - arm_cpu_sgi(GIC_SMP_CPUPAUSE_ASYNC, (1 << cpu)); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - /* Take the both spinlocks. 
The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - /* Release the spinlock. 
Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. - */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/armv7-a/arm_gicv2.c b/arch/arm/src/armv7-a/arm_gicv2.c index bb4dba37173c7..66d9f3547f34c 100644 --- a/arch/arm/src/armv7-a/arm_gicv2.c +++ b/arch/arm/src/armv7-a/arm_gicv2.c @@ -219,11 +219,8 @@ void arm_gic0_initialize(void) /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. */ DEBUGVERIFY(irq_attach(GIC_SMP_CPUSTART, arm_start_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm_pause_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, - arm_pause_async_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, - nxsched_smp_call_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_SCHED, arm_smp_sched_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CALL, nxsched_smp_call_handler, NULL)); #endif arm_gic_dump("Exit arm_gic0_initialize", true, 0); @@ -756,27 +753,6 @@ void arm_cpu_sgi(int sgi, unsigned int cpuset) putreg32(regval, GIC_ICDSGIR); } -#ifdef CONFIG_SMP -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. 
- * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - up_trigger_irq(GIC_SMP_CPUCALL, cpuset); -} -#endif - /**************************************************************************** * Name: up_get_legacy_irq * diff --git a/arch/arm/src/armv7-a/arm_schedulesigaction.c b/arch/arm/src/armv7-a/arm_schedulesigaction.c index 187af9b18ba63..9ac4edec49941 100644 --- a/arch/arm/src/armv7-a/arm_schedulesigaction.c +++ b/arch/arm/src/armv7-a/arm_schedulesigaction.c @@ -77,98 +77,56 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on this CPU. + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handler will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { - /* CASE 2: The task that needs to receive the signal is running. - * This could happen if the task is running on another CPU OR if - * we are in an interrupt handler and the task is running on this - * CPU. 
In the former case, we will have to PAUSE the other CPU - * first. But in either case, we will have to modify the return - * state as well as the state in the TCB. - */ - - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return lr and cpsr and one scratch register. These - * will be restored by the signal trampoline after the signals - * have been delivered. - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return lr and cpsr and one scratch register. These + * will be restored by the signal trampoline after the signals + * have been delivered. + */ - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. 
+ */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ - tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; - tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT); + tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT); #ifdef CONFIG_ARM_THUMB - tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; -#endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } + tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; #endif - } } } diff --git a/arch/arm/src/armv7-a/arm_sigdeliver.c b/arch/arm/src/armv7-a/arm_sigdeliver.c index e1777e5ae256e..98ac0e115b6fd 100644 --- a/arch/arm/src/armv7-a/arm_sigdeliver.c +++ b/arch/arm/src/armv7-a/arm_sigdeliver.c @@ -68,8 +68,8 @@ void arm_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -101,7 +101,7 @@ void arm_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -148,7 +148,7 @@ void arm_sigdeliver(void) * could be modified 
by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/arch/arm/src/armv7-a/arm_smpcall.c b/arch/arm/src/armv7-a/arm_smpcall.c new file mode 100644 index 0000000000000..29f5ecbcc31e6 --- /dev/null +++ b/arch/arm/src/armv7-a/arm_smpcall.c @@ -0,0 +1,122 @@ +/**************************************************************************** + * arch/arm/src/armv7-a/arm_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include + +#include +#include +#include +#include + +#include "arm_internal.h" +#include "gic.h" +#include "sched/sched.h" + +#ifdef CONFIG_SMP + +/**************************************************************************** + * Private Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: arm_smp_sched_handler + * + * Description: + * This is the handler for sched. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + ****************************************************************************/ + +int arm_smp_sched_handler(int irq, void *context, void *arg) +{ + int cpu = this_cpu(); + + nxsched_process_delivered(cpu); + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + arm_cpu_sgi(GIC_SMP_SCHED, (1 << cpu)); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + up_trigger_irq(GIC_SMP_CALL, cpuset); +} + +#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/armv7-a/arm_syscall.c b/arch/arm/src/armv7-a/arm_syscall.c index e2791e1817ceb..8ad689db5a8d7 100644 --- a/arch/arm/src/armv7-a/arm_syscall.c +++ b/arch/arm/src/armv7-a/arm_syscall.c @@ -430,7 +430,7 @@ uint32_t *arm_syscall(uint32_t *regs) /* Copy "info" into user stack */ - if (rtcb->xcp.sigdeliver) + if (rtcb->sigdeliver) { usp = rtcb->xcp.saved_regs[REG_SP]; } diff --git a/arch/arm/src/armv7-a/gic.h b/arch/arm/src/armv7-a/gic.h index ce7cbd0ec6cd7..48769ee644712 100644 --- a/arch/arm/src/armv7-a/gic.h +++ b/arch/arm/src/armv7-a/gic.h @@ -635,14 +635,12 @@ #ifdef CONFIG_ARCH_TRUSTZONE_SECURE # define GIC_SMP_CPUSTART GIC_IRQ_SGI9 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI11 -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI12 +# define GIC_SMP_CALL GIC_IRQ_SGI10 +# define GIC_SMP_SCHED GIC_IRQ_SGI11 #else # define GIC_SMP_CPUSTART GIC_IRQ_SGI1 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI3 -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI4 +# define GIC_SMP_CALL GIC_IRQ_SGI2 +# define GIC_SMP_SCHED GIC_IRQ_SGI3 #endif /**************************************************************************** @@ -836,34 +834,10 @@ int arm_start_handler(int irq, void *context, void *arg); 
#endif /**************************************************************************** - * Name: arm_pause_handler + * Name: arm_smp_sched_handler * * Description: - * This is the handler for SGI2. It performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -int arm_pause_handler(int irq, void *context, void *arg); -#endif - -/**************************************************************************** - * Name: arm_pause_async_handler - * - * Description: - * This is the handler for async pause. + * This is the handler for sched. * * 1. It saves the current task state at the head of the current assigned * task list. 
@@ -880,7 +854,7 @@ int arm_pause_handler(int irq, void *context, void *arg); ****************************************************************************/ #ifdef CONFIG_SMP -int arm_pause_async_handler(int irq, void *context, void *arg); +int arm_smp_sched_handler(int irq, void *context, void *arg); #endif /**************************************************************************** * Name: arm_gic_dump diff --git a/arch/arm/src/armv7-m/arm_schedulesigaction.c b/arch/arm/src/armv7-m/arm_schedulesigaction.c index 531e295bbd84d..8d4749ca2a492 100644 --- a/arch/arm/src/armv7-m/arm_schedulesigaction.c +++ b/arch/arm/src/armv7-m/arm_schedulesigaction.c @@ -79,107 +79,64 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); - DEBUGASSERT(tcb != NULL && sigdeliver != NULL); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* In this case just deliver the signal now. + * REVISIT: Signal handle will run in a critical section! */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handle will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { - /* CASE 2: The task that needs to receive the signal is running. 
- * This could happen if the task is running on another CPU OR if - * we are in an interrupt handler and the task is running on this - * CPU. In the former case, we will have to PAUSE the other CPU - * first. But in either case, we will have to modify the return - * state as well as the state in the TCB. - */ - - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return PC, CPSR and either the BASEPRI or PRIMASK - * registers (and perhaps also the LR). These will be restored - * by the signal trampoline after the signal has been delivered. - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return PC, CPSR and either the BASEPRI or PRIMASK + * registers (and perhaps also the LR). These will be restored + * by the signal trampoline after the signal has been delivered. + */ - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled. 
We must already be in privileged thread mode to be - * here. - */ + /* Then set up to vector to the trampoline with interrupts + * disabled. We must already be in privileged thread mode to be + * here. + */ - tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; #ifdef CONFIG_ARMV7M_USEBASEPRI - tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY; + tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY; #else - tcb->xcp.regs[REG_PRIMASK] = 1; + tcb->xcp.regs[REG_PRIMASK] = 1; #endif - tcb->xcp.regs[REG_XPSR] = ARMV7M_XPSR_T; + tcb->xcp.regs[REG_XPSR] = ARMV7M_XPSR_T; #ifdef CONFIG_BUILD_PROTECTED - tcb->xcp.regs[REG_LR] = EXC_RETURN_THREAD; - tcb->xcp.regs[REG_EXC_RETURN] = EXC_RETURN_THREAD; - tcb->xcp.regs[REG_CONTROL] = getcontrol() & ~CONTROL_NPRIV; -#endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } + tcb->xcp.regs[REG_LR] = EXC_RETURN_THREAD; + tcb->xcp.regs[REG_EXC_RETURN] = EXC_RETURN_THREAD; + tcb->xcp.regs[REG_CONTROL] = getcontrol() & ~CONTROL_NPRIV; #endif - } } } diff --git a/arch/arm/src/armv7-m/arm_sigdeliver.c b/arch/arm/src/armv7-m/arm_sigdeliver.c index 961ca31b59880..524f2c445b691 100644 --- a/arch/arm/src/armv7-m/arm_sigdeliver.c +++ b/arch/arm/src/armv7-m/arm_sigdeliver.c @@ -68,8 +68,8 @@ void arm_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -105,7 +105,7 @@ void arm_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -156,7 +156,7 @@ 
void arm_sigdeliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. diff --git a/arch/arm/src/armv7-r/CMakeLists.txt b/arch/arm/src/armv7-r/CMakeLists.txt index 784b9ab0946c6..0fbbeb6882591 100644 --- a/arch/arm/src/armv7-r/CMakeLists.txt +++ b/arch/arm/src/armv7-r/CMakeLists.txt @@ -71,7 +71,7 @@ if(CONFIG_SMP) SRCS arm_cpuhead.S arm_cpustart.c - arm_cpupause.c + arm_smpcall.c arm_cpuidlestack.c arm_scu.c) endif() diff --git a/arch/arm/src/armv7-r/Make.defs b/arch/arm/src/armv7-r/Make.defs index 8cf8645ba261f..bc33650802fbc 100644 --- a/arch/arm/src/armv7-r/Make.defs +++ b/arch/arm/src/armv7-r/Make.defs @@ -59,6 +59,6 @@ endif ifeq ($(CONFIG_SMP),y) CMN_ASRCS += arm_cpuhead.S - CMN_CSRCS += arm_cpustart.c arm_cpupause.c + CMN_CSRCS += arm_cpustart.c arm_smpcall.c CMN_CSRCS += arm_cpuidlestack.c arm_scu.c endif diff --git a/arch/arm/src/armv7-r/arm_cpupause.c b/arch/arm/src/armv7-r/arm_cpupause.c deleted file mode 100644 index c7d1ebe38eddb..0000000000000 --- a/arch/arm/src/armv7-r/arm_cpupause.c +++ /dev/null @@ -1,423 +0,0 @@ -/**************************************************************************** - * arch/arm/src/armv7-r/arm_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include - -#include -#include -#include -#include - -#include "arm_internal.h" -#include "gic.h" -#include "sched/sched.h" - -#ifdef CONFIG_SMP - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. 
- */ - -static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Release the g_cpu_paused spinlock to synchronize with the - * requesting CPU. - */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - /* Wait for the spinlock to be released. The requesting CPU will release - * the spinlock when the CPU is resumed. 
- */ - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: arm_pause_handler - * - * Description: - * This is the handler for SGI2. It performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int arm_pause_handler(int irq, void *context, void *arg) -{ - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused(). If the pause event has already - * been processed then g_cpu_paused[cpu] will not be locked. 
- */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. - */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - return OK; -} - -/**************************************************************************** - * Name: arm_pause_async_handler - * - * Description: - * This is the handler for async pause. - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It porcess g_delivertasks - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int arm_pause_async_handler(int irq, void *context, void *arg) -{ - int cpu = this_cpu(); - - nxsched_process_delivered(cpu); - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. 
- * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - arm_cpu_sgi(GIC_SMP_CPUPAUSE_ASYNC, (1 << cpu)); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - /* Take the both spinlocks. The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. 
- */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. 
- */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/armv7-r/arm_gicv2.c b/arch/arm/src/armv7-r/arm_gicv2.c index 431ace37a4cf5..4ddf85d04adff 100644 --- a/arch/arm/src/armv7-r/arm_gicv2.c +++ b/arch/arm/src/armv7-r/arm_gicv2.c @@ -160,11 +160,8 @@ void arm_gic0_initialize(void) /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. */ DEBUGVERIFY(irq_attach(GIC_SMP_CPUSTART, arm_start_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm_pause_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, - arm_pause_async_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, - nxsched_smp_call_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_SCHED, arm_smp_sched_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CALL, nxsched_smp_call_handler, NULL)); #endif arm_gic_dump("Exit arm_gic0_initialize", true, 0); @@ -661,24 +658,4 @@ int arm_gic_irq_trigger(int irq, bool edge) return -EINVAL; } -# ifdef CONFIG_SMP -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. 
- * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - up_trigger_irq(GIC_SMP_CPUCALL, cpuset); -} -# endif #endif /* CONFIG_ARMV7R_HAVE_GICv2 */ diff --git a/arch/arm/src/armv7-r/arm_schedulesigaction.c b/arch/arm/src/armv7-r/arm_schedulesigaction.c index b21a0c9188d59..4946d0e0ac39f 100644 --- a/arch/arm/src/armv7-r/arm_schedulesigaction.c +++ b/arch/arm/src/armv7-r/arm_schedulesigaction.c @@ -75,90 +75,56 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handler will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return lr and cpsr and one scratch register. 
These - * will be restored by the signal trampoline after the signals - * have been delivered. - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return lr and cpsr and one scratch register. These + * will be restored by the signal trampoline after the signals + * have been delivered. + */ - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ - tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; - tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT); + tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT); #ifdef CONFIG_ARM_THUMB - tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; -#endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } + tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; #endif - } } } diff --git a/arch/arm/src/armv7-r/arm_sigdeliver.c b/arch/arm/src/armv7-r/arm_sigdeliver.c index c21b77ed10b17..2f72c1beeee5f 100644 --- 
a/arch/arm/src/armv7-r/arm_sigdeliver.c +++ b/arch/arm/src/armv7-r/arm_sigdeliver.c @@ -68,8 +68,8 @@ void arm_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -101,7 +101,7 @@ void arm_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -145,7 +145,7 @@ void arm_sigdeliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/arch/arm/src/armv7-r/arm_smpcall.c b/arch/arm/src/armv7-r/arm_smpcall.c new file mode 100644 index 0000000000000..524e3740057db --- /dev/null +++ b/arch/arm/src/armv7-r/arm_smpcall.c @@ -0,0 +1,122 @@ +/**************************************************************************** + * arch/arm/src/armv7-r/arm_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include + +#include +#include +#include +#include + +#include "arm_internal.h" +#include "gic.h" +#include "sched/sched.h" + +#ifdef CONFIG_SMP + +/**************************************************************************** + * Private Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: arm_smp_sched_handler + * + * Description: + * This is the handler for sched. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + ****************************************************************************/ + +int arm_smp_sched_handler(int irq, void *context, void *arg) +{ + int cpu = this_cpu(); + + nxsched_process_delivered(cpu); + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * Pause task execution on the CPU, + * check whether there are tasks delivered to the specified CPU, + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + arm_cpu_sgi(GIC_SMP_SCHED, (1 << cpu)); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. 
+ * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + up_trigger_irq(GIC_SMP_CALL, cpuset); +} + +#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/armv7-r/arm_syscall.c b/arch/arm/src/armv7-r/arm_syscall.c index d7dcdbcfe9728..aa01170622e48 100644 --- a/arch/arm/src/armv7-r/arm_syscall.c +++ b/arch/arm/src/armv7-r/arm_syscall.c @@ -427,7 +427,7 @@ uint32_t *arm_syscall(uint32_t *regs) /* Copy "info" into user stack */ - if (rtcb->xcp.sigdeliver) + if (rtcb->sigdeliver) { usp = rtcb->xcp.saved_regs[REG_SP]; } diff --git a/arch/arm/src/armv7-r/gic.h b/arch/arm/src/armv7-r/gic.h index d8632330564ba..eef4e3cb90305 100644 --- a/arch/arm/src/armv7-r/gic.h +++ b/arch/arm/src/armv7-r/gic.h @@ -608,14 +608,12 @@ #ifdef CONFIG_ARCH_TRUSTZONE_SECURE # define GIC_SMP_CPUSTART GIC_IRQ_SGI9 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI11 -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI12 +# define GIC_SMP_CALL GIC_IRQ_SGI10 +# define GIC_SMP_SCHED GIC_IRQ_SGI11 #else # define GIC_SMP_CPUSTART GIC_IRQ_SGI1 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI3 -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI4 +# define GIC_SMP_CALL GIC_IRQ_SGI2 +# define GIC_SMP_SCHED GIC_IRQ_SGI3 #endif /**************************************************************************** @@ -806,34 +804,10 @@ int arm_start_handler(int irq, void *context, void *arg); #endif /**************************************************************************** - * Name: arm_pause_handler + * Name: arm_smp_sched_handler * * Description: - * This is the handler for SGI2. It performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. 
- * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -int arm_pause_handler(int irq, void *context, void *arg); -#endif - -/**************************************************************************** - * Name: arm_pause_async_handler - * - * Description: - * This is the handler for async pause. + * This is the handler for sched. * * 1. It saves the current task state at the head of the current assigned * task list. @@ -850,7 +824,7 @@ int arm_pause_handler(int irq, void *context, void *arg); ****************************************************************************/ #ifdef CONFIG_SMP -int arm_pause_async_handler(int irq, void *context, void *arg); +int arm_smp_sched_handler(int irq, void *context, void *arg); #endif /**************************************************************************** diff --git a/arch/arm/src/armv8-m/arm_schedulesigaction.c b/arch/arm/src/armv8-m/arm_schedulesigaction.c index 42b0ccfb06445..ea12ca1e488e4 100644 --- a/arch/arm/src/armv8-m/arm_schedulesigaction.c +++ b/arch/arm/src/armv8-m/arm_schedulesigaction.c @@ -79,107 +79,64 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); - DEBUGASSERT(tcb != NULL && sigdeliver != NULL); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. 
+ */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* In this case just deliver the signal now. + * REVISIT: Signal handle will run in a critical section! */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handle will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { - /* CASE 2: The task that needs to receive the signal is running. - * This could happen if the task is running on another CPU OR if - * we are in an interrupt handler and the task is running on this - * CPU. In the former case, we will have to PAUSE the other CPU - * first. But in either case, we will have to modify the return - * state as well as the state in the TCB. - */ - - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return PC, CPSR and either the BASEPRI or PRIMASK - * registers (and perhaps also the LR). These will be restored - * by the signal trampoline after the signal has been delivered. - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return PC, CPSR and either the BASEPRI or PRIMASK + * registers (and perhaps also the LR). These will be restored + * by the signal trampoline after the signal has been delivered. + */ - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. 
These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled. We must already be in privileged thread mode to be - * here. - */ + /* Then set up to vector to the trampoline with interrupts + * disabled. We must already be in privileged thread mode to be + * here. + */ - tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; #ifdef CONFIG_ARMV8M_USEBASEPRI - tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY; + tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY; #else - tcb->xcp.regs[REG_PRIMASK] = 1; + tcb->xcp.regs[REG_PRIMASK] = 1; #endif - tcb->xcp.regs[REG_XPSR] = ARMV8M_XPSR_T; + tcb->xcp.regs[REG_XPSR] = ARMV8M_XPSR_T; #ifdef CONFIG_BUILD_PROTECTED - tcb->xcp.regs[REG_LR] = EXC_RETURN_THREAD; - tcb->xcp.regs[REG_EXC_RETURN] = EXC_RETURN_THREAD; - tcb->xcp.regs[REG_CONTROL] = getcontrol() & ~CONTROL_NPRIV; -#endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } + tcb->xcp.regs[REG_LR] = EXC_RETURN_THREAD; + tcb->xcp.regs[REG_EXC_RETURN] = EXC_RETURN_THREAD; + tcb->xcp.regs[REG_CONTROL] = getcontrol() & ~CONTROL_NPRIV; #endif - } } } diff --git a/arch/arm/src/armv8-m/arm_sigdeliver.c b/arch/arm/src/armv8-m/arm_sigdeliver.c index f8588136fbc97..2db03f17d166b 100644 --- 
a/arch/arm/src/armv8-m/arm_sigdeliver.c +++ b/arch/arm/src/armv8-m/arm_sigdeliver.c @@ -68,8 +68,8 @@ void arm_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -105,7 +105,7 @@ void arm_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -156,7 +156,7 @@ void arm_sigdeliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. 
diff --git a/arch/arm/src/armv8-r/arm_gic.h b/arch/arm/src/armv8-r/arm_gic.h index 717e3660d22e0..44057a4378067 100644 --- a/arch/arm/src/armv8-r/arm_gic.h +++ b/arch/arm/src/armv8-r/arm_gic.h @@ -311,14 +311,12 @@ #ifdef CONFIG_ARCH_TRUSTZONE_SECURE # define GIC_SMP_CPUSTART GIC_IRQ_SGI9 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI11 -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI12 +# define GIC_SMP_CALL GIC_IRQ_SGI10 +# define GIC_SMP_SCHED GIC_IRQ_SGI11 #else # define GIC_SMP_CPUSTART GIC_IRQ_SGI1 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI3 -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI4 +# define GIC_SMP_CALL GIC_IRQ_SGI2 +# define GIC_SMP_SCHED GIC_IRQ_SGI3 #endif /**************************************************************************** @@ -335,32 +333,7 @@ int arm_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list); #ifdef CONFIG_SMP -/**************************************************************************** - * Name: arm_pause_handler - * - * Description: - * This is the handler for SGI2. It performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. 
- * - ****************************************************************************/ - -int arm_pause_handler(int irq, void *context, void *arg); - -#ifdef CONFIG_SMP -int arm_pause_async_handler(int irq, void *context, void *arg); -#endif - +int arm_smp_sched_handler(int irq, void *context, void *arg); void arm_gic_secondary_init(void); #endif diff --git a/arch/arm/src/armv8-r/arm_gicv3.c b/arch/arm/src/armv8-r/arm_gicv3.c index d32db05ac7735..15aac99960e41 100644 --- a/arch/arm/src/armv8-r/arm_gicv3.c +++ b/arch/arm/src/armv8-r/arm_gicv3.c @@ -567,11 +567,8 @@ static void gicv3_dist_init(void) #ifdef CONFIG_SMP /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. */ - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, - arm64_pause_async_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, - nxsched_smp_call_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_SCHED, arm64_smp_sched_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CALL, nxsched_smp_call_handler, NULL)); #endif } @@ -815,8 +812,8 @@ static void arm_gic_init(void) gicv3_cpuif_init(); #ifdef CONFIG_SMP - up_enable_irq(GIC_SMP_CPUPAUSE); - up_enable_irq(GIC_SMP_CPUPAUSE_ASYNC); + up_enable_irq(GIC_SMP_CALL); + up_enable_irq(GIC_SMP_SCHED); #endif } @@ -844,24 +841,4 @@ void arm_gic_secondary_init(void) arm_gic_init(); } -# ifdef CONFIG_SMP -/*************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. 
- * - ***************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - up_trigger_irq(GIC_SMP_CPUCALL, cpuset); -} -# endif #endif diff --git a/arch/arm/src/armv8-r/arm_schedulesigaction.c b/arch/arm/src/armv8-r/arm_schedulesigaction.c index 1ceba997f7f34..94f1111c37b1c 100644 --- a/arch/arm/src/armv8-r/arm_schedulesigaction.c +++ b/arch/arm/src/armv8-r/arm_schedulesigaction.c @@ -75,90 +75,56 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handler will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return lr and cpsr and one scratch register. 
These - * will be restored by the signal trampoline after the signals - * have been delivered. - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return lr and cpsr and one scratch register. These + * will be restored by the signal trampoline after the signals + * have been delivered. + */ - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ - tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; - tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT); + tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT); #ifdef CONFIG_ARM_THUMB - tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; -#endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } + tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT; #endif - } } } diff --git a/arch/arm/src/armv8-r/arm_sigdeliver.c b/arch/arm/src/armv8-r/arm_sigdeliver.c index e249869434dd1..29a422040c2fb 100644 --- 
a/arch/arm/src/armv8-r/arm_sigdeliver.c +++ b/arch/arm/src/armv8-r/arm_sigdeliver.c @@ -68,8 +68,8 @@ void arm_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -101,7 +101,7 @@ void arm_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -145,7 +145,7 @@ void arm_sigdeliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. 
*/ diff --git a/arch/arm/src/armv8-r/arm_syscall.c b/arch/arm/src/armv8-r/arm_syscall.c index 285613ed4b057..abf3b191f4743 100644 --- a/arch/arm/src/armv8-r/arm_syscall.c +++ b/arch/arm/src/armv8-r/arm_syscall.c @@ -426,7 +426,7 @@ uint32_t *arm_syscall(uint32_t *regs) /* Copy "info" into user stack */ - if (rtcb->xcp.sigdeliver) + if (rtcb->sigdeliver) { usp = rtcb->xcp.saved_regs[REG_SP]; } diff --git a/arch/arm/src/cxd56xx/CMakeLists.txt b/arch/arm/src/cxd56xx/CMakeLists.txt index a19a29a15dd2c..cb7315f33b20a 100644 --- a/arch/arm/src/cxd56xx/CMakeLists.txt +++ b/arch/arm/src/cxd56xx/CMakeLists.txt @@ -42,7 +42,7 @@ set(SRCS if(CONFIG_SMP) list(APPEND SRCS cxd56_cpuidlestack.c) list(APPEND SRCS cxd56_cpuindex.c) - list(APPEND SRCS cxd56_cpupause.c) + list(APPEND SRCS cxd56_smpcall.c) list(APPEND SRCS cxd56_cpustart.c) if(CONFIG_CXD56_TESTSET) list(APPEND SRCS cxd56_testset.c) diff --git a/arch/arm/src/cxd56xx/Make.defs b/arch/arm/src/cxd56xx/Make.defs index 54c653c050d1b..ece4ac2b546b0 100644 --- a/arch/arm/src/cxd56xx/Make.defs +++ b/arch/arm/src/cxd56xx/Make.defs @@ -41,7 +41,7 @@ CHIP_CSRCS += cxd56_sysctl.c ifeq ($(CONFIG_SMP), y) CHIP_CSRCS += cxd56_cpuidlestack.c CHIP_CSRCS += cxd56_cpuindex.c -CHIP_CSRCS += cxd56_cpupause.c +CHIP_CSRCS += cxd56_smpcall.c CHIP_CSRCS += cxd56_cpustart.c ifeq ($(CONFIG_CXD56_TESTSET),y) CHIP_CSRCS += cxd56_testset.c diff --git a/arch/arm/src/cxd56xx/cxd56_cpupause.c b/arch/arm/src/cxd56xx/cxd56_cpupause.c deleted file mode 100644 index 42807545bf909..0000000000000 --- a/arch/arm/src/cxd56xx/cxd56_cpupause.c +++ /dev/null @@ -1,578 +0,0 @@ -/**************************************************************************** - * arch/arm/src/cxd56xx/cxd56_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "sched/sched.h" -#include "arm_internal.h" -#include "hardware/cxd5602_memorymap.h" - -#ifdef CONFIG_SMP - -/**************************************************************************** - * Pre-processor Definitions - ****************************************************************************/ - -#if 0 -# define DPRINTF(fmt, args...) _err(fmt, ##args) -#else -# define DPRINTF(fmt, args...) do {} while (0) -#endif - -#define CXD56_CPU_P2_INT (CXD56_SWINT_BASE + 0x8) /* for APP_DSP0 */ - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. 
CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. - */ - -static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -static volatile int g_irq_to_handle[CONFIG_SMP_NCPUS][2]; - -/**************************************************************************** - * Private Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: handle_irqreq - * - * Description: - * If an irq handling request is found on cpu, call up_enable_irq() or - * up_disable_irq(), then return true. 
- * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = an irq handling request is found - * false = no irq handling request is found - * - ****************************************************************************/ - -static bool handle_irqreq(int cpu) -{ - int i; - bool handled = false; - - /* Check both cases */ - - for (i = 0; i < 2; i++) - { - int irqreq = g_irq_to_handle[cpu][i]; - - if (irqreq) - { - /* Unlock the spinlock first */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Then wait for the spinlock to be released */ - - spin_lock(&g_cpu_wait[cpu]); - - /* Clear g_irq_to_handle[cpu][i] */ - - g_irq_to_handle[cpu][i] = 0; - - if (0 == i) - { - up_enable_irq(irqreq); - } - else - { - up_disable_irq(irqreq); - } - - /* Finally unlock the spinlock */ - - spin_unlock(&g_cpu_wait[cpu]); - handled = true; - - break; - } - } - - return handled; -} - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. 
Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Fistly, check if this IPI is to enable/disable IRQ */ - - if (handle_irqreq(cpu)) - { - return OK; - } - - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: arm_pause_handler - * - * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK - * - ****************************************************************************/ - -int arm_pause_handler(int irq, void *c, void *arg) -{ - int cpu = this_cpu(); - int ret = OK; - - nxsched_smp_call_handler(irq, c, arg); - - DPRINTF("cpu%d will be paused\n", cpu); - - /* Clear SW_INT for APP_DSP(cpu) */ - - putreg32(0, CXD56_CPU_P2_INT + (4 * cpu)); - - /* Check for false alarms. 
Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() would call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. - */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: Normally, we do not call up_cpu_paused() here because - * the above enter_critical_setion() would call up_cpu_paused() - * inside because the caller holds a crtical section. - * However, cxd56's remote IRQ control logic also uses this handler - * and a caller might not take a critical section to avoid a deadlock - * during up_enable_irq() and up_disable_irq(). This is allowed - * because IRQ control logic does not interact wtih the scheduler. - * This means that if the request was not handled above, we need - * to call up_cpu_paused() here again. - */ - - if (up_cpu_pausereq(cpu)) - { - ret = up_cpu_paused(cpu); - } - - leave_critical_section(flags); - } - - nxsched_process_delivered(cpu); - - return ret; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. 
- * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - /* Generate IRQ for CPU(cpu) */ - - putreg32(1, CXD56_CPU_P2_INT + (4 * cpu)); - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - int cpu; - - for (; cpuset != 0; cpuset &= ~(1 << cpu)) - { - cpu = ffs(cpuset) - 1; - up_cpu_pause_async(cpu); - } -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped/ - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - /* Take the both spinlocks. The g_cpu_wait spinlock will prevent the - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. 
- * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - /* Generate IRQ for CPU(cpu) */ - - up_cpu_pause_async(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. 
- */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - return OK; -} - -/**************************************************************************** - * Name: up_send_irqreq() - * - * Description: - * Send up_enable_irq() / up_disable_irq() request to the specified cpu - * - * This function is called from up_enable_irq() or up_disable_irq() - * to be handled on specified CPU. Locking protocol in the sequence is - * the same as up_pause_cpu() plus up_resume_cpu(). - * - * Input Parameters: - * idx - The request index (0: enable, 1: disable) - * irq - The IRQ number to be handled - * cpu - The index of the CPU which will handle the request - * - ****************************************************************************/ - -void up_send_irqreq(int idx, int irq, int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Wait for the spinlocks to be released */ - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - /* Set irq for the cpu */ - - g_irq_to_handle[cpu][idx] = irq; - - /* Generate IRQ for CPU(cpu) */ - - putreg32(1, CXD56_CPU_P2_INT + (4 * cpu)); - - /* Wait for the handler is executed on cpu */ - - spin_lock(&g_cpu_paused[cpu]); - spin_unlock(&g_cpu_paused[cpu]); - - /* Finally unlock the spinlock to proceed the handler */ - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); -} - -#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/cxd56xx/cxd56_cpustart.c b/arch/arm/src/cxd56xx/cxd56_cpustart.c index dcac15f589723..be32c9e53003c 100644 --- a/arch/arm/src/cxd56xx/cxd56_cpustart.c +++ b/arch/arm/src/cxd56xx/cxd56_cpustart.c @@ -72,7 +72,7 @@ volatile static spinlock_t 
g_appdsp_boot; -extern int arm_pause_handler(int irq, void *c, void *arg); +extern int cxd56_smp_call_handler(int irq, void *c, void *arg); /**************************************************************************** * Private Functions @@ -117,8 +117,8 @@ static void appdsp_boot(void) /* Enable SW_INT */ - irq_attach(CXD56_IRQ_SW_INT, arm_pause_handler, NULL); - up_enable_irq(CXD56_IRQ_SW_INT); + irq_attach(CXD56_IRQ_SMP_CALL, cxd56_smp_call_handler, NULL); + up_enable_irq(CXD56_IRQ_SMP_CALL); spin_unlock(&g_appdsp_boot); @@ -232,8 +232,8 @@ int up_cpu_start(int cpu) /* Setup SW_INT for this APP_DSP0 */ - irq_attach(CXD56_IRQ_SW_INT, arm_pause_handler, NULL); - up_enable_irq(CXD56_IRQ_SW_INT); + irq_attach(CXD56_IRQ_SMP_CALL, cxd56_smp_call_handler, NULL); + up_enable_irq(CXD56_IRQ_SMP_CALL); } spin_lock(&g_appdsp_boot); diff --git a/arch/arm/src/cxd56xx/cxd56_irq.c b/arch/arm/src/cxd56xx/cxd56_irq.c index 1bc9ba49d5c8b..d9d24ad4966f9 100644 --- a/arch/arm/src/cxd56xx/cxd56_irq.c +++ b/arch/arm/src/cxd56xx/cxd56_irq.c @@ -470,7 +470,7 @@ void up_enable_irq(int irq) /* EXTINT needs to be handled on CPU0 to avoid deadlock */ - if (irq > CXD56_IRQ_EXTINT && irq != CXD56_IRQ_SW_INT && 0 != cpu) + if (irq > CXD56_IRQ_EXTINT && irq != CXD56_IRQ_SMP_CALL && 0 != cpu) { up_send_irqreq(0, irq, 0); return; diff --git a/arch/arm/src/cxd56xx/cxd56_smpcall.c b/arch/arm/src/cxd56xx/cxd56_smpcall.c new file mode 100644 index 0000000000000..360e83ad2f082 --- /dev/null +++ b/arch/arm/src/cxd56xx/cxd56_smpcall.c @@ -0,0 +1,231 @@ +/**************************************************************************** + * arch/arm/src/cxd56xx/cxd56_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sched/sched.h" +#include "arm_internal.h" +#include "hardware/cxd5602_memorymap.h" + +#ifdef CONFIG_SMP + +/**************************************************************************** + * Pre-processor Definitions + ****************************************************************************/ + +#if 0 +# define DPRINTF(fmt, args...) _err(fmt, ##args) +#else +# define DPRINTF(fmt, args...) 
do {} while (0) +#endif + +#define CXD56_CPU_P2_INT (CXD56_SWINT_BASE + 0x8) /* for APP_DSP0 */ + +/**************************************************************************** + * Private Data + ****************************************************************************/ + +static volatile int g_irq_to_handle[CONFIG_SMP_NCPUS][2]; + +/**************************************************************************** + * Private Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: handle_irqreq + * + * Description: + * If an irq handling request is found on cpu, call up_enable_irq() or + * up_disable_irq(), then return true. + * + * Input Parameters: + * cpu - The index of the CPU to be queried + * + * Returned Value: + * true = an irq handling request is found + * false = no irq handling request is found + * + ****************************************************************************/ + +static bool handle_irqreq(int cpu) +{ + int i; + bool handled = false; + + /* Check both cases */ + + for (i = 0; i < 2; i++) + { + int irqreq = g_irq_to_handle[cpu][i]; + + if (irqreq) + { + /* Clear g_irq_to_handle[cpu][i] */ + + g_irq_to_handle[cpu][i] = 0; + + if (0 == i) + { + up_enable_irq(irqreq); + } + else + { + up_disable_irq(irqreq); + } + + handled = true; + + break; + } + } + + return handled; +} + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: cxd56_smp_call_handler + * + * Description: + * This is the handler for SMP_CALL. 
+ * + ****************************************************************************/ + +int cxd56_smp_call_handler(int irq, void *c, void *arg) +{ + int cpu = this_cpu(); + int ret = OK; + + handle_irqreq(cpu); + + nxsched_smp_call_handler(irq, c, arg); + + DPRINTF("cpu%d will be paused\n", cpu); + + /* Clear SW_INT for APP_DSP(cpu) */ + + putreg32(0, CXD56_CPU_P2_INT + (4 * cpu)); + + nxsched_process_delivered(cpu); + + return ret; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * Generate an inter-CPU interrupt so that the target CPU runs + * its SMP call handler and processes any tasks that have been + * delivered to it. + * + * Input Parameters: + * cpu - The index of the CPU to be signaled. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + /* Generate IRQ for CPU(cpu) */ + + putreg32(1, CXD56_CPU_P2_INT + (4 * cpu)); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + int cpu; + + for (; cpuset != 0; cpuset &= ~(1 << cpu)) + { + cpu = ffs(cpuset) - 1; + up_send_smp_sched(cpu); + } +} + +/**************************************************************************** + * Name: up_send_irqreq() + * + * Description: + * Send up_enable_irq() / up_disable_irq() request to the specified cpu + * + * This function is called from up_enable_irq() or up_disable_irq() + * to be handled on specified CPU. 
Locking protocol in the sequence is + * the same as up_pause_cpu() plus up_resume_cpu(). + * + * Input Parameters: + * idx - The request index (0: enable, 1: disable) + * irq - The IRQ number to be handled + * cpu - The index of the CPU which will handle the request + * + ****************************************************************************/ + +void up_send_irqreq(int idx, int irq, int cpu) +{ + DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); + + /* Set irq for the cpu */ + + g_irq_to_handle[cpu][idx] = irq; + + /* Generate IRQ for CPU(cpu) */ + + putreg32(1, CXD56_CPU_P2_INT + (4 * cpu)); +} + +#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/lc823450/Make.defs b/arch/arm/src/lc823450/Make.defs index 905f3a1786dc3..4b3bf6ca42843 100644 --- a/arch/arm/src/lc823450/Make.defs +++ b/arch/arm/src/lc823450/Make.defs @@ -92,7 +92,7 @@ endif ifeq ($(CONFIG_SMP), y) CHIP_CSRCS += lc823450_cpuidlestack.c CHIP_CSRCS += lc823450_cpuindex.c -CHIP_CSRCS += lc823450_cpupause.c +CHIP_CSRCS += lc823450_smpcall.c CHIP_CSRCS += lc823450_cpustart.c CHIP_CSRCS += lc823450_testset.c CMN_ASRCS := $(filter-out arm_testset.S,$(CMN_ASRCS)) diff --git a/arch/arm/src/lc823450/lc823450_cpupause.c b/arch/arm/src/lc823450/lc823450_cpupause.c deleted file mode 100644 index b4249b36d9a16..0000000000000 --- a/arch/arm/src/lc823450/lc823450_cpupause.c +++ /dev/null @@ -1,450 +0,0 @@ -/**************************************************************************** - * arch/arm/src/lc823450/lc823450_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "sched/sched.h" -#include "arm_internal.h" -#include "lc823450_intc.h" - -/**************************************************************************** - * Pre-processor Definitions - ****************************************************************************/ - -#if 0 -#define DPRINTF(fmt, args...) llinfo(fmt, ##args) -#else -#define DPRINTF(fmt, args...) do {} while (0) -#endif - -/**************************************************************************** - * Public Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. 
CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. - */ - -volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. 
- * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: lc823450_pause_handler - * - * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK - * - ****************************************************************************/ - -int lc823450_pause_handler(int irq, void *c, void *arg) -{ - int cpu = this_cpu(); - - nxsched_smp_call_handler(irq, c, arg); - - /* Clear : Pause IRQ */ - - if (irq == LC823450_IRQ_CTXM3_01) - { - DPRINTF("CPU0 -> CPU1\n"); - putreg32(IPICLR_INTISR0_CLR_1, IPICLR); - } - else - { - DPRINTF("CPU1 -> CPU0\n"); - putreg32(IPICLR_INTISR1_CLR_1, IPICLR); - } - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. 
- */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - nxsched_process_delivered(cpu); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - /* Execute Pause IRQ to CPU(cpu) */ - - if (cpu == 1) - { - putreg32(IPIREG_INTISR0_1, IPIREG); - } - else - { - putreg32(IPIREG_INTISR1_1, IPIREG); - } - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - int cpu; - - for (; cpuset != 0; cpuset &= ~(1 << cpu)) - { - cpu = ffs(cpuset) - 1; - up_cpu_pause_async(cpu); - } -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. 
- * - * Input Parameters: - * cpu - The index of the CPU to be stopped/ - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Take the both spinlocks. The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - /* Execute Pause IRQ to CPU(cpu) */ - - up_cpu_pause_async(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. 
- * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. - */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} diff --git a/arch/arm/src/lc823450/lc823450_cpustart.c b/arch/arm/src/lc823450/lc823450_cpustart.c index 8611eea093750..26d1d8cbc2435 100644 --- a/arch/arm/src/lc823450/lc823450_cpustart.c +++ b/arch/arm/src/lc823450/lc823450_cpustart.c @@ -63,13 +63,13 @@ * Public Data ****************************************************************************/ -extern volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; +static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; /**************************************************************************** * Private Functions ****************************************************************************/ -extern int lc823450_pause_handler(int irq, void *c, void *arg); +extern int lc823450_smp_call_handler(int irq, void *c, void *arg); /**************************************************************************** * Name: cpu1_boot @@ -106,8 +106,8 @@ static void cpu1_boot(void) up_enable_irq(LC823450_IRQ_MEMFAULT); #endif - irq_attach(LC823450_IRQ_CTXM3_01, lc823450_pause_handler, NULL); - 
up_enable_irq(LC823450_IRQ_CTXM3_01); + irq_attach(LC823450_IRQ_SMP_CALL_01, lc823450_smp_call_handler, NULL); + up_enable_irq(LC823450_IRQ_SMP_CALL_01); } spin_unlock(&g_cpu_wait[0]); @@ -193,8 +193,8 @@ int up_cpu_start(int cpu) /* IRQ setup CPU1->CPU0 */ - irq_attach(LC823450_IRQ_CTXM3_11, lc823450_pause_handler, NULL); - up_enable_irq(LC823450_IRQ_CTXM3_11); + irq_attach(LC823450_IRQ_SMP_CALL_11, lc823450_smp_call_handler, NULL); + up_enable_irq(LC823450_IRQ_SMP_CALL_11); spin_lock(&g_cpu_wait[0]); diff --git a/arch/arm/src/lc823450/lc823450_smpcall.c b/arch/arm/src/lc823450/lc823450_smpcall.c new file mode 100644 index 0000000000000..8aa5bce3f89c6 --- /dev/null +++ b/arch/arm/src/lc823450/lc823450_smpcall.c @@ -0,0 +1,149 @@ +/**************************************************************************** + * arch/arm/src/lc823450/lc823450_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sched/sched.h" +#include "arm_internal.h" +#include "lc823450_intc.h" + +/**************************************************************************** + * Pre-processor Definitions + ****************************************************************************/ + +#if 0 +#define DPRINTF(fmt, args...) llinfo(fmt, ##args) +#else +#define DPRINTF(fmt, args...) do {} while (0) +#endif + +/**************************************************************************** + * Public Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: lc823450_smp_call_handler + * + * Description: + * This is the handler for SMP_CALL. 
+ * + ****************************************************************************/ + +int lc823450_smp_call_handler(int irq, void *c, void *arg) +{ + int cpu = this_cpu(); + + nxsched_smp_call_handler(irq, c, arg); + + /* Clear : Pause IRQ */ + + if (irq == LC823450_IRQ_SMP_CALL_01) + { + DPRINTF("CPU0 -> CPU1\n"); + putreg32(IPICLR_INTISR0_CLR_1, IPICLR); + } + else + { + DPRINTF("CPU1 -> CPU0\n"); + putreg32(IPICLR_INTISR1_CLR_1, IPICLR); + } + + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + /* Execute Pause IRQ to CPU(cpu) */ + + if (cpu == 1) + { + putreg32(IPIREG_INTISR0_1, IPIREG); + } + else + { + putreg32(IPIREG_INTISR1_1, IPIREG); + } + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. 
+ * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + int cpu; + + for (; cpuset != 0; cpuset &= ~(1 << cpu)) + { + cpu = ffs(cpuset) - 1; + up_send_smp_sched(cpu); + } +} diff --git a/arch/arm/src/rp2040/Make.defs b/arch/arm/src/rp2040/Make.defs index 76aab709273f5..1fbf4b4cd02b9 100644 --- a/arch/arm/src/rp2040/Make.defs +++ b/arch/arm/src/rp2040/Make.defs @@ -37,7 +37,7 @@ CHIP_CSRCS += rp2040_pll.c ifeq ($(CONFIG_SMP),y) CHIP_CSRCS += rp2040_cpuindex.c CHIP_CSRCS += rp2040_cpustart.c -CHIP_CSRCS += rp2040_cpupause.c +CHIP_CSRCS += rp2040_smpcall.c CHIP_CSRCS += rp2040_cpuidlestack.c CHIP_CSRCS += rp2040_testset.c CMN_ASRCS := $(filter-out arm_testset.S,$(CMN_ASRCS)) diff --git a/arch/arm/src/rp2040/rp2040_cpupause.c b/arch/arm/src/rp2040/rp2040_cpupause.c deleted file mode 100644 index d71a3c5f6508c..0000000000000 --- a/arch/arm/src/rp2040/rp2040_cpupause.c +++ /dev/null @@ -1,547 +0,0 @@ -/**************************************************************************** - * arch/arm/src/rp2040/rp2040_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "sched/sched.h" -#include "arm_internal.h" -#include "hardware/rp2040_sio.h" - -#ifdef CONFIG_SMP - -/**************************************************************************** - * Pre-processor Definitions - ****************************************************************************/ - -#if 0 -#define DPRINTF(fmt, args...) llinfo(fmt, ##args) -#else -#define DPRINTF(fmt, args...) do {} while (0) -#endif - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. 
- */ - -static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Name: rp2040_handle_irqreq - * - * Description: - * If an irq handling request is found on cpu, call up_enable_irq() or - * up_disable_irq(). - * - * Input Parameters: - * irqreq - The IRQ number to be handled (>0 : enable / <0 : disable) - * - ****************************************************************************/ - -static void rp2040_handle_irqreq(int irqreq) -{ - DEBUGASSERT(this_cpu() == 0); - - /* Unlock the spinlock first */ - - spin_unlock(&g_cpu_paused[0]); - - /* Then wait for the spinlock to be released */ - - spin_lock(&g_cpu_wait[0]); - - if (irqreq > 0) - { - up_enable_irq(irqreq); - } - else - { - up_disable_irq(-irqreq); - } - - /* Finally unlock the spinlock */ - - spin_unlock(&g_cpu_wait[0]); -} - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. 
Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: arm_pause_handler - * - * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK - * - ****************************************************************************/ - -int arm_pause_handler(int irq, void *c, void *arg) -{ - int cpu = this_cpu(); - int irqreq; - uint32_t stat; - - nxsched_smp_call_handler(irq, c, arg); - - stat = getreg32(RP2040_SIO_FIFO_ST); - if (stat & (RP2040_SIO_FIFO_ST_ROE | RP2040_SIO_FIFO_ST_WOF)) - { - /* Clear sticky flag */ - - putreg32(0, RP2040_SIO_FIFO_ST); - } - - if (!(stat & RP2040_SIO_FIFO_ST_VLD)) - { - /* No data received */ - - return 
OK; - } - - irqreq = getreg32(RP2040_SIO_FIFO_RD); - - if (irqreq != 0) - { - /* Handle IRQ enable/disable request */ - - rp2040_handle_irqreq(irqreq); - return OK; - } - - DPRINTF("cpu%d will be paused\n", cpu); - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. - */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - nxsched_process_delivered(cpu); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - /* Generate IRQ for CPU(cpu) */ - - while (!(getreg32(RP2040_SIO_FIFO_ST) & RP2040_SIO_FIFO_ST_RDY)) - ; - putreg32(0, RP2040_SIO_FIFO_WR); - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. 
- * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - int cpu; - - for (; cpuset != 0; cpuset &= ~(1 << cpu)) - { - cpu = ffs(cpuset) - 1; - up_cpu_pause_async(cpu); - } -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped/ - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - /* Take the both spinlocks. The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. 
- */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - DEBUGASSERT(cpu != this_cpu()); - - /* Generate IRQ for CPU(cpu) */ - - up_cpu_pause_async(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. 
- */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - return OK; -} - -/**************************************************************************** - * Name: rp2040_send_irqreq() - * - * Description: - * Send up_enable_irq() / up_disable_irq() request to the Core #0 - * - * This function is called from up_enable_irq() or up_disable_irq() - * to be handled on specified CPU. Locking protocol in the sequence is - * the same as up_pause_cpu() plus up_resume_cpu(). - * - * Input Parameters: - * irqreq - The IRQ number to be handled (>0 : enable / <0 : disable) - * - ****************************************************************************/ - -void rp2040_send_irqreq(int irqreq) -{ - /* Wait for the spinlocks to be released */ - - spin_lock(&g_cpu_wait[0]); - spin_lock(&g_cpu_paused[0]); - - /* Send IRQ number to Core #0 */ - - while (!(getreg32(RP2040_SIO_FIFO_ST) & RP2040_SIO_FIFO_ST_RDY)) - ; - putreg32(irqreq, RP2040_SIO_FIFO_WR); - - /* Wait for the handler is executed on cpu */ - - spin_lock(&g_cpu_paused[0]); - spin_unlock(&g_cpu_paused[0]); - - /* Finally unlock the spinlock to proceed the handler */ - - spin_unlock(&g_cpu_wait[0]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[0]); - - spin_unlock(&g_cpu_resumed[0]); -} - -#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/rp2040/rp2040_cpustart.c b/arch/arm/src/rp2040/rp2040_cpustart.c index 230246497e685..18893edfd8333 100644 --- a/arch/arm/src/rp2040/rp2040_cpustart.c +++ b/arch/arm/src/rp2040/rp2040_cpustart.c @@ -67,7 +67,7 @@ volatile static spinlock_t g_core1_boot; -extern int arm_pause_handler(int irq, void *c, void *arg); +extern int rp2040_smp_call_handler(int irq, void *c, void *arg); 
/**************************************************************************** * Private Functions @@ -151,8 +151,8 @@ static void core1_boot(void) /* Enable inter-processor FIFO interrupt */ - irq_attach(RP2040_SIO_IRQ_PROC1, arm_pause_handler, NULL); - up_enable_irq(RP2040_SIO_IRQ_PROC1); + irq_attach(RP2040_SMP_CALL_PROC1, rp2040_smp_call_handler, NULL); + up_enable_irq(RP2040_SMP_CALL_PROC1); spin_unlock(&g_core1_boot); @@ -247,8 +247,8 @@ int up_cpu_start(int cpu) /* Enable inter-processor FIFO interrupt */ - irq_attach(RP2040_SIO_IRQ_PROC0, arm_pause_handler, NULL); - up_enable_irq(RP2040_SIO_IRQ_PROC0); + irq_attach(RP2040_SMP_CALL_PROC0, rp2040_smp_call_handler, NULL); + up_enable_irq(RP2040_SMP_CALL_PROC0); spin_lock(&g_core1_boot); diff --git a/arch/arm/src/rp2040/rp2040_irq.c b/arch/arm/src/rp2040/rp2040_irq.c index 4791a5bb1b721..8317f52b4d2fc 100644 --- a/arch/arm/src/rp2040/rp2040_irq.c +++ b/arch/arm/src/rp2040/rp2040_irq.c @@ -276,7 +276,7 @@ void up_disable_irq(int irq) DEBUGASSERT((unsigned)irq < NR_IRQS); #ifdef CONFIG_SMP - if (irq >= RP2040_IRQ_EXTINT && irq != RP2040_SIO_IRQ_PROC1 && + if (irq >= RP2040_IRQ_EXTINT && irq != RP2040_SMP_CALL_PROC1 && this_cpu() != 0) { /* Must be handled by Core 0 */ @@ -324,7 +324,7 @@ void up_enable_irq(int irq) DEBUGASSERT((unsigned)irq < NR_IRQS); #ifdef CONFIG_SMP - if (irq >= RP2040_IRQ_EXTINT && irq != RP2040_SIO_IRQ_PROC1 && + if (irq >= RP2040_IRQ_EXTINT && irq != RP2040_SMP_CALL_PROC1 && this_cpu() != 0) { /* Must be handled by Core 0 */ diff --git a/arch/arm/src/rp2040/rp2040_smpcall.c b/arch/arm/src/rp2040/rp2040_smpcall.c new file mode 100644 index 0000000000000..ff889320f9367 --- /dev/null +++ b/arch/arm/src/rp2040/rp2040_smpcall.c @@ -0,0 +1,210 @@ +/**************************************************************************** + * arch/arm/src/rp2040/rp2040_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sched/sched.h" +#include "arm_internal.h" +#include "hardware/rp2040_sio.h" + +#ifdef CONFIG_SMP + +/**************************************************************************** + * Pre-processor Definitions + ****************************************************************************/ + +#if 0 +#define DPRINTF(fmt, args...) llinfo(fmt, ##args) +#else +#define DPRINTF(fmt, args...) do {} while (0) +#endif + +/**************************************************************************** + * Name: rp2040_handle_irqreq + * + * Description: + * If an irq handling request is found on cpu, call up_enable_irq() or + * up_disable_irq(). 
+ * + * Input Parameters: + * irqreq - The IRQ number to be handled (>0 : enable / <0 : disable) + * + ****************************************************************************/ + +static void rp2040_handle_irqreq(int irqreq) +{ + DEBUGASSERT(this_cpu() == 0); + + if (irqreq > 0) + { + up_enable_irq(irqreq); + } + else + { + up_disable_irq(-irqreq); + } +} + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: rp2040_smp_call_handler + * + * Description: + * This is the handler for SMP_CALL. + * + ****************************************************************************/ + +int rp2040_smp_call_handler(int irq, void *c, void *arg) +{ + int cpu = this_cpu(); + int irqreq; + uint32_t stat; + + nxsched_smp_call_handler(irq, c, arg); + + stat = getreg32(RP2040_SIO_FIFO_ST); + if (stat & (RP2040_SIO_FIFO_ST_ROE | RP2040_SIO_FIFO_ST_WOF)) + { + /* Clear sticky flag */ + + putreg32(0, RP2040_SIO_FIFO_ST); + } + + if (!(stat & RP2040_SIO_FIFO_ST_VLD)) + { + /* No data received */ + + return OK; + } + + irqreq = getreg32(RP2040_SIO_FIFO_RD); + + if (irqreq != 0) + { + /* Handle IRQ enable/disable request */ + + rp2040_handle_irqreq(irqreq); + return OK; + } + + DPRINTF("cpu%d will be paused\n", cpu); + + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + /* Generate IRQ for CPU(cpu) */ + + while (!(getreg32(RP2040_SIO_FIFO_ST) & RP2040_SIO_FIFO_ST_RDY)) + ; + putreg32(0, RP2040_SIO_FIFO_WR); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + int cpu; + + for (; cpuset != 0; cpuset &= ~(1 << cpu)) + { + cpu = ffs(cpuset) - 1; + up_send_smp_sched(cpu); + } +} + +/**************************************************************************** + * Name: rp2040_send_irqreq() + * + * Description: + * Send up_enable_irq() / up_disable_irq() request to the Core #0 + * + * This function is called from up_enable_irq() or up_disable_irq() + * to be handled on specified CPU. Locking protocol in the sequence is + * the same as up_pause_cpu() plus up_resume_cpu(). 
+ * + * Input Parameters: + * irqreq - The IRQ number to be handled (>0 : enable / <0 : disable) + * + ****************************************************************************/ + +void rp2040_send_irqreq(int irqreq) +{ + /* Send IRQ number to Core #0 */ + + while (!(getreg32(RP2040_SIO_FIFO_ST) & RP2040_SIO_FIFO_ST_RDY)) + ; + putreg32(irqreq, RP2040_SIO_FIFO_WR); +} + +#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/sam34/Make.defs b/arch/arm/src/sam34/Make.defs index faad35b62fbd9..ca7bfb75ed9f1 100644 --- a/arch/arm/src/sam34/Make.defs +++ b/arch/arm/src/sam34/Make.defs @@ -138,7 +138,7 @@ endif # CONFIG_SAM34_TC ifeq ($(CONFIG_SMP),y) CHIP_CSRCS += sam4cm_cpuindex.c sam4cm_cpuidlestack.c -CHIP_CSRCS += sam4cm_cpupause.c sam4cm_cpustart.c +CHIP_CSRCS += sam4cm_smpcall.c sam4cm_cpustart.c ifneq ($(CONFIG_ARCH_IDLE_CUSTOM),y) CHIP_CSRCS += sam4cm_idle.c endif diff --git a/arch/arm/src/sam34/sam4cm_cpupause.c b/arch/arm/src/sam34/sam4cm_cpupause.c deleted file mode 100644 index 2ebc5ea2d9f5c..0000000000000 --- a/arch/arm/src/sam34/sam4cm_cpupause.c +++ /dev/null @@ -1,446 +0,0 @@ -/**************************************************************************** - * arch/arm/src/sam34/sam4cm_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "sched/sched.h" -#include "arm_internal.h" -#include "hardware/sam4cm_ipc.h" - -#ifdef CONFIG_SMP - -/**************************************************************************** - * Pre-processor Definitions - ****************************************************************************/ - -#if 0 -# define DPRINTF(fmt, args...) _err(fmt, ##args) -#else -# define DPRINTF(fmt, args...) do {} while (0) -#endif - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. 
- */ - -static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. 
- * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: arm_pause_handler - * - * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK - * - ****************************************************************************/ - -int arm_pause_handler(int irq, void *c, void *arg) -{ - int cpu = this_cpu(); - - nxsched_smp_call_handler(irq, c, arg); - - /* Clear : Pause IRQ */ - - /* IPC Interrupt Clear Command Register (write-only) */ - - if (1 == cpu) - { - DPRINTF("CPU0 -> CPU1\n"); - putreg32(0x1, SAM_IPC1_ICCR); - } - else - { - DPRINTF("CPU1 -> CPU0\n"); - putreg32(0x1, SAM_IPC0_ICCR); - } - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused. - */ - - if (spin_is_locked(&g_cpu_paused[cpu])) - { - return up_cpu_paused(cpu); - } - - nxsched_process_delivered(cpu); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. 
- * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - /* Execute Pause IRQ to CPU(cpu) */ - - /* Set IPC Interrupt (IRQ0) (write-only) */ - - if (cpu == 1) - { - putreg32(0x1, SAM_IPC1_ISCR); - } - else - { - putreg32(0x1, SAM_IPC0_ISCR); - } - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - int cpu; - - for (; cpuset != 0; cpuset &= ~(1 << cpu)) - { - cpu = ffs(cpuset) - 1; - up_cpu_pause_async(cpu); - } -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped/ - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - /* Take the both spinlocks. 
The g_cpu_wait spinlock will prevent the - * interrupt handler from returning until up_cpu_resume() is called; - * g_cpu_paused is a handshake that will prefent this function from - * returning until the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - /* Execute Pause IRQ to CPU(cpu) */ - - /* Set IPC Interrupt (IRQ0) (write-only) */ - - up_cpu_pause_async(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. 
- * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DPRINTF("cpu=%d\n", cpu); - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. - */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/sam34/sam4cm_cpustart.c b/arch/arm/src/sam34/sam4cm_cpustart.c index 4e0ffc30206cc..70357960c1fab 100644 --- a/arch/arm/src/sam34/sam4cm_cpustart.c +++ b/arch/arm/src/sam34/sam4cm_cpustart.c @@ -64,7 +64,7 @@ ****************************************************************************/ volatile static spinlock_t g_cpu1_boot; -extern int arm_pause_handler(int irq, void *c, void *arg); +extern int sam4cm_smp_call_handler(int irq, void *c, void *arg); /**************************************************************************** * Public Functions @@ -108,8 +108,8 @@ static void cpu1_boot(void) /* Enable : write-only */ putreg32(0x1, SAM_IPC1_IECR); - irq_attach(SAM_IRQ_IPC1, arm_pause_handler, NULL); - up_enable_irq(SAM_IRQ_IPC1); + irq_attach(SAM_IRQ_SMP_CALL1, sam4cm_smp_call_handler, NULL); + up_enable_irq(SAM_IRQ_SMP_CALL1); } spin_unlock(&g_cpu1_boot); @@ -218,8 +218,8 @@ int up_cpu_start(int cpu) sam_ipc0_enableclk(); putreg32(0x1, SAM_IPC0_ICCR); /* clear : write-only */ putreg32(0x1, SAM_IPC0_IECR); /* enable : write-only */ - irq_attach(SAM_IRQ_IPC0, arm_pause_handler, NULL); - up_enable_irq(SAM_IRQ_IPC0); + 
irq_attach(SAM_IRQ_SMP_CALL0, sam4cm_smp_call_handler, NULL); + up_enable_irq(SAM_IRQ_SMP_CALL0); spin_lock(&g_cpu1_boot); diff --git a/arch/arm/src/sam34/sam4cm_smpcall.c b/arch/arm/src/sam34/sam4cm_smpcall.c new file mode 100644 index 0000000000000..ff92026230f71 --- /dev/null +++ b/arch/arm/src/sam34/sam4cm_smpcall.c @@ -0,0 +1,157 @@ +/**************************************************************************** + * arch/arm/src/sam34/sam4cm_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sched/sched.h" +#include "arm_internal.h" +#include "hardware/sam4cm_ipc.h" + +#ifdef CONFIG_SMP + +/**************************************************************************** + * Pre-processor Definitions + ****************************************************************************/ + +#if 0 +# define DPRINTF(fmt, args...) 
_err(fmt, ##args) +#else +# define DPRINTF(fmt, args...) do {} while (0) +#endif + +/**************************************************************************** + * Private Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: sam4cm_smp_call_handler + * + * Description: + * This is the handler for SMP_CALL. + * + ****************************************************************************/ + +int sam4cm_smp_call_handler(int irq, void *c, void *arg) +{ + int cpu = this_cpu(); + + nxsched_smp_call_handler(irq, c, arg); + + /* Clear : Pause IRQ */ + + /* IPC Interrupt Clear Command Register (write-only) */ + + if (1 == cpu) + { + DPRINTF("CPU0 -> CPU1\n"); + putreg32(0x1, SAM_IPC1_ICCR); + } + else + { + DPRINTF("CPU1 -> CPU0\n"); + putreg32(0x1, SAM_IPC0_ICCR); + } + + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + /* Execute Pause IRQ to CPU(cpu) */ + + /* Set IPC Interrupt (IRQ0) (write-only) */ + + if (cpu == 1) + { + putreg32(0x1, SAM_IPC1_ISCR); + } + else + { + putreg32(0x1, SAM_IPC0_ISCR); + } + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + int cpu; + + for (; cpuset != 0; cpuset &= ~(1 << cpu)) + { + cpu = ffs(cpuset) - 1; + up_send_smp_sched(cpu); + } +} + +#endif /* CONFIG_SMP */ diff --git a/arch/arm/src/tlsr82/tc32/tc32_schedulesigaction.c b/arch/arm/src/tlsr82/tc32/tc32_schedulesigaction.c index 4e90afee3a804..cf2176e3ad46b 100644 --- a/arch/arm/src/tlsr82/tc32/tc32_schedulesigaction.c +++ b/arch/arm/src/tlsr82/tc32/tc32_schedulesigaction.c @@ -75,67 +75,58 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; + /* In this case just deliver the signal now. 
*/ - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. - */ - - sinfo("rtcb=%p current_regs=%p\n", this_task(), - this_task()->xcp.regs); - - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + else + { + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. */ - else - { - /* Save the return lr and cpsr and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. - */ - - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. 
+ */ - tcb->xcp.regs = (void *)((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *)((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ - tcb->xcp.regs[REG_LR] = (uint32_t)arm_sigdeliver; - tcb->xcp.regs[REG_CPSR] = PSR_MODE_SVC | PSR_I_BIT; - tcb->xcp.regs[REG_IRQ_EN] = 0; - } + tcb->xcp.regs[REG_LR] = (uint32_t)arm_sigdeliver; + tcb->xcp.regs[REG_CPSR] = PSR_MODE_SVC | PSR_I_BIT; + tcb->xcp.regs[REG_IRQ_EN] = 0; } } diff --git a/arch/arm64/include/irq.h b/arch/arm64/include/irq.h index 19ed567ad18e6..2f721ad2e0e3b 100644 --- a/arch/arm64/include/irq.h +++ b/arch/arm64/include/irq.h @@ -242,12 +242,6 @@ extern "C" struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - #ifdef CONFIG_BUILD_KERNEL /* This is the saved address to use when returning from a user-space * signal handler. 
diff --git a/arch/arm64/src/common/CMakeLists.txt b/arch/arm64/src/common/CMakeLists.txt index 6f8877a37a566..03da77a59da09 100644 --- a/arch/arm64/src/common/CMakeLists.txt +++ b/arch/arm64/src/common/CMakeLists.txt @@ -71,7 +71,7 @@ endif() if(CONFIG_SMP) list(APPEND SRCS arm64_cpuidlestack.c arm64_cpustart.c) - list(APPEND SRCS arm64_cpupause.c) + list(APPEND SRCS arm64_smpcall.c) endif() if(CONFIG_BUILD_KERNEL) diff --git a/arch/arm64/src/common/Make.defs b/arch/arm64/src/common/Make.defs index 0c9bf06db0d36..4a0470f0cb3ee 100644 --- a/arch/arm64/src/common/Make.defs +++ b/arch/arm64/src/common/Make.defs @@ -84,7 +84,7 @@ endif ifeq ($(CONFIG_SMP),y) CMN_CSRCS += arm64_cpuidlestack.c arm64_cpustart.c -CMN_CSRCS += arm64_cpupause.c +CMN_CSRCS += arm64_smpcall.c endif ifeq ($(CONFIG_BUILD_KERNEL),y) diff --git a/arch/arm64/src/common/arm64_arch.h b/arch/arm64/src/common/arm64_arch.h index 06935472fb595..d2310128e8368 100644 --- a/arch/arm64/src/common/arm64_arch.h +++ b/arch/arm64/src/common/arm64_arch.h @@ -299,6 +299,9 @@ struct regs_context uint64_t spsr; uint64_t sp_el0; uint64_t exe_depth; +#ifdef CONFIG_ARCH_FPU + struct fpu_reg fpu_regs; +#endif }; /**************************************************************************** @@ -391,6 +394,26 @@ static inline void arch_nop(void) __asm__ volatile ("nop"); } +/**************************************************************************** + * Name: + * arm64_current_el() + * + * Description: + * + * Get current execution level + * + ****************************************************************************/ + +#define arm64_current_el() \ + ({ \ + uint64_t __el; \ + int __ret; \ + __asm__ volatile ("mrs %0, CurrentEL" \ + : "=r" (__el)); \ + __ret = GET_EL(__el); \ + __ret; \ + }) + /**************************************************************************** * Name: * read_/write_/zero_ sysreg diff --git a/arch/arm64/src/common/arm64_cpupause.c b/arch/arm64/src/common/arm64_cpupause.c deleted file mode 
100644 index da81259d8d558..0000000000000 --- a/arch/arm64/src/common/arm64_cpupause.c +++ /dev/null @@ -1,430 +0,0 @@ -/**************************************************************************** - * arch/arm64/src/common/arm64_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include - -#include -#include -#include -#include - -#include "arm64_internal.h" -#include "arm64_gic.h" -#include "sched/sched.h" - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. 
CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. - */ - -static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Release the g_cpu_paused spinlock to synchronize with the - * requesting CPU. - */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - /* Wait for the spinlock to be released. The requesting CPU will release - * the spinlock when the CPU is resumed. - */ - - spin_lock(&g_cpu_wait[cpu]); - - /* This CPU has been resumed. Restore the exception context of the TCB at - * the (new) head of the assigned task list. 
- */ - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: arm64_pause_async_handler - * - * Description: - * This is the handler for async pause. - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It porcess g_delivertasks - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int arm64_pause_async_handler(int irq, void *context, void *arg) -{ - int cpu = this_cpu(); - - nxsched_process_delivered(cpu); - - return OK; -} - -/**************************************************************************** - * Name: arm64_pause_handler - * - * Description: - * This is the handler for SGI2. It performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. 
It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int arm64_pause_handler(int irq, void *context, void *arg) -{ - int cpu = this_cpu(); - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused(). If the pause event has already - * been processed then g_cpu_paused[cpu] will not be locked. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. - */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. 
- * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - /* Execute SGI2 */ - - arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE_ASYNC, (1 << cpu)); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - /* Take the both spinlocks. The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. 
- */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. 
- */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} diff --git a/arch/arm64/src/common/arm64_exit.c b/arch/arm64/src/common/arm64_exit.c index 0052adac94a74..d8e6bc4596f7b 100644 --- a/arch/arm64/src/common/arm64_exit.c +++ b/arch/arm64/src/common/arm64_exit.c @@ -67,9 +67,6 @@ void up_exit(int status) enter_critical_section(); /* Destroy the task at the head of the ready to run list. */ -#ifdef CONFIG_ARCH_FPU - arm64_destory_fpu(tcb); -#endif nxtask_exit(); diff --git a/arch/arm64/src/common/arm64_fatal.c b/arch/arm64/src/common/arm64_fatal.c index 3486fe1d5d18d..5c1b5241904c3 100644 --- a/arch/arm64/src/common/arm64_fatal.c +++ b/arch/arm64/src/common/arm64_fatal.c @@ -39,6 +39,7 @@ #include #include "sched/sched.h" #include "irq/irq.h" + #include "arm64_arch.h" #include "arm64_internal.h" #include "arm64_fatal.h" @@ -51,326 +52,527 @@ #endif /**************************************************************************** - * Private Functions + * Private Type Declarations ****************************************************************************/ +struct fatal_handle_info +{ + fatal_handle_func_t handle_fn; + const char *name; +}; + /**************************************************************************** - * Name: print_ec_cause + * Private Functions Declarations ****************************************************************************/ +/* Default callback handler for debug and fatal event + * Can be override by other handler + */ + +static int default_debug_handler(struct regs_context *regs, + uint64_t far, uint64_t esr); +static int default_fatal_handler(struct regs_context *regs, + uint64_t far, uint64_t esr); + +/**************************************************************************** + * Private Data + 
****************************************************************************/ + +static const char *g_esr_class_str[] = +{ + [0 ... ESR_ELX_EC_MAX] = "UNRECOGNIZED EC", + [ESR_ELX_EC_UNKNOWN] = "Unknown/Uncategorized", + [ESR_ELX_EC_WFX] = "WFI/WFE", + [ESR_ELX_EC_CP15_32] = "CP15 MCR/MRC", + [ESR_ELX_EC_CP15_64] = "CP15 MCRR/MRRC", + [ESR_ELX_EC_CP14_MR] = "CP14 MCR/MRC", + [ESR_ELX_EC_CP14_LS] = "CP14 LDC/STC", + [ESR_ELX_EC_FP_ASIMD] = "ASIMD", + [ESR_ELX_EC_CP10_ID] = "CP10 MRC/VMRS", + [ESR_ELX_EC_PAC] = "PAC", + [ESR_ELX_EC_CP14_64] = "CP14 MCRR/MRRC", + [ESR_ELX_EC_BTI] = "BTI", + [ESR_ELX_EC_ILL] = "PSTATE.IL", + [ESR_ELX_EC_SVC32] = "SVC (AArch32)", + [ESR_ELX_EC_HVC32] = "HVC (AArch32)", + [ESR_ELX_EC_SMC32] = "SMC (AArch32)", + [ESR_ELX_EC_SVC64] = "SVC (AArch64)", + [ESR_ELX_EC_HVC64] = "HVC (AArch64)", + [ESR_ELX_EC_SMC64] = "SMC (AArch64)", + [ESR_ELX_EC_SYS64] = "MSR/MRS (AArch64)", + [ESR_ELX_EC_SVE] = "SVE", + [ESR_ELX_EC_ERET] = "ERET/ERETAA/ERETAB", + [ESR_ELX_EC_FPAC] = "FPAC", + [ESR_ELX_EC_SME] = "SME", + [ESR_ELX_EC_IMP_DEF] = "EL3 IMP DEF", + [ESR_ELX_EC_IABT_LOW] = "IABT (lower EL)", + [ESR_ELX_EC_IABT_CUR] = "IABT (current EL)", + [ESR_ELX_EC_PC_ALIGN] = "PC Alignment", + [ESR_ELX_EC_DABT_LOW] = "DABT (lower EL)", + [ESR_ELX_EC_DABT_CUR] = "DABT (current EL)", + [ESR_ELX_EC_SP_ALIGN] = "SP Alignment", + [ESR_ELX_EC_MOPS] = "MOPS", + [ESR_ELX_EC_FP_EXC32] = "FP (AArch32)", + [ESR_ELX_EC_FP_EXC64] = "FP (AArch64)", + [ESR_ELX_EC_SERROR] = "SError", + [ESR_ELX_EC_BREAKPT_LOW] = "Breakpoint (lower EL)", + [ESR_ELX_EC_BREAKPT_CUR] = "Breakpoint (current EL)", + [ESR_ELX_EC_SOFTSTP_LOW] = "Software Step (lower EL)", + [ESR_ELX_EC_SOFTSTP_CUR] = "Software Step (current EL)", + [ESR_ELX_EC_WATCHPT_LOW] = "Watchpoint (lower EL)", + [ESR_ELX_EC_WATCHPT_CUR] = "Watchpoint (current EL)", + [ESR_ELX_EC_BKPT32] = "BKPT (AArch32)", + [ESR_ELX_EC_VECTOR32] = "Vector catch (AArch32)", + [ESR_ELX_EC_BRK64] = "BRK (AArch64)", +}; + +static const char 
*g_esr_desc_str[] = +{ + [0 ... ESR_ELX_EC_MAX] = "UNRECOGNIZED EC", + [ESR_ELX_EC_UNKNOWN] = "Unknown/Uncategorized", + [ESR_ELX_EC_WFX] = "Trapped WFI or WFE instruction execution", + [ESR_ELX_EC_CP15_32] = "Trapped MCR or MRC access with" + "(coproc==0b1111) " + "that is not reported using EC 0b000000", + [ESR_ELX_EC_CP15_64] = "Trapped MCRR or MRRC access with" + "(coproc==0b1111) " + "that is not reported using EC 0b000000", + [ESR_ELX_EC_CP14_MR] = "Trapped MCR or MRC access with (coproc==0b1110)", + [ESR_ELX_EC_CP14_LS] = "Trapped LDC or STC access", + [ESR_ELX_EC_FP_ASIMD] = "Trapped access to SVE, Advanced SIMD, or " + "floating-point functionality", + [ESR_ELX_EC_CP10_ID] = "CP10 MRC/VMRS", + [ESR_ELX_EC_PAC] = "PAC", + [ESR_ELX_EC_CP14_64] = "Trapped MRRC access with (coproc==0b1110)", + [ESR_ELX_EC_BTI] = "Branch Target Exception", + [ESR_ELX_EC_ILL] = "Illegal Execution state", + [ESR_ELX_EC_SVC32] = "SVC instruction execution in AArch32 state", + [ESR_ELX_EC_HVC32] = "HVC (AArch32)", + [ESR_ELX_EC_SMC32] = "SMC (AArch32)", + [ESR_ELX_EC_SVC64] = "SVC (AArch64)", + [ESR_ELX_EC_HVC64] = "HVC (AArch64)", + [ESR_ELX_EC_SMC64] = "SMC (AArch64)", + [ESR_ELX_EC_SYS64] = "Trapped MSR, MRS or System instruction " + "execution in AArch64 state, that is not " + "reported using EC 0b000000, 0b000001 or 0b000111", + [ESR_ELX_EC_SVE] = "Trapped access to SVE functionality", + [ESR_ELX_EC_ERET] = "ERET/ERETAA/ERETAB", + [ESR_ELX_EC_FPAC] = "Exception from a Pointer Authentication " + "instruction authentication failure", + [ESR_ELX_EC_SME] = "SME", + [ESR_ELX_EC_IMP_DEF] = "EL3 IMP DEF", + [ESR_ELX_EC_IABT_LOW] = "Instruction Abort from a lower Exception level, " + "that might be using AArch32 or AArch64", + [ESR_ELX_EC_IABT_CUR] = "Instruction Abort taken without a change " + "in Exception level", + [ESR_ELX_EC_PC_ALIGN] = "PC alignment fault exception.", + [ESR_ELX_EC_DABT_LOW] = "Data Abort from a lower Exception level, " + "that might be using AArch32 or 
AArch64", + [ESR_ELX_EC_DABT_CUR] = "Data Abort taken without a change in " + "Exception level", + [ESR_ELX_EC_SP_ALIGN] = "SP alignment fault exception", + [ESR_ELX_EC_MOPS] = "MOPS", + [ESR_ELX_EC_FP_EXC32] = "Trapped floating-point exception taken from " + "AArch32 state", + [ESR_ELX_EC_FP_EXC64] = "Trapped floating-point exception taken from " + "AArch64 state", + [ESR_ELX_EC_SERROR] = "SError interrupt", + [ESR_ELX_EC_BREAKPT_LOW] = "Breakpoint exception from a lower " + "Exception level, " + "that might be using AArch32 or AArch64", + [ESR_ELX_EC_BREAKPT_CUR] = "Breakpoint exception taken without a change " + "in Exception level", + [ESR_ELX_EC_SOFTSTP_LOW] = "Software Step exception from a lower " + "Exception level," + "that might be using AArch32 or AArch64", + [ESR_ELX_EC_SOFTSTP_CUR] = "Software Step exception taken without a " + "change in Exception level", + [ESR_ELX_EC_WATCHPT_LOW] = "Watchpoint exception from a lower " + "Exception level, " + "that might be using AArch32 or AArch64", + [ESR_ELX_EC_WATCHPT_CUR] = "Watchpoint exception taken without " + "a change in Exception level.", + [ESR_ELX_EC_BKPT32] = "BKPT instruction execution in AArch32 state", + [ESR_ELX_EC_VECTOR32] = "Vector catch (AArch32)", + [ESR_ELX_EC_BRK64] = "BRK instruction execution in AArch64 state.", +}; + +static struct fatal_handle_info g_fatal_handler[] = +{ + { default_fatal_handler, "ttbr address size fault" }, + { default_fatal_handler, "level 1 address size fault" }, + { default_fatal_handler, "level 2 address size fault" }, + { default_fatal_handler, "level 3 address size fault" }, + { default_fatal_handler, "level 0 translation fault" }, + { default_fatal_handler, "level 1 translation fault" }, + { default_fatal_handler, "level 2 translation fault" }, + { default_fatal_handler, "level 3 translation fault" }, + { default_fatal_handler, "unknown 8" }, + { default_fatal_handler, "level 1 access flag fault" }, + { default_fatal_handler, "level 2 access flag fault" }, + { 
default_fatal_handler, "level 3 access flag fault" }, + { default_fatal_handler, "unknown 12" }, + { default_fatal_handler, "level 1 permission fault" }, + { default_fatal_handler, "level 2 permission fault" }, + { default_fatal_handler, "level 3 permission fault" }, + { default_fatal_handler, "synchronous external abort" }, + { default_fatal_handler, "synchronous tag check fault" }, + { default_fatal_handler, "unknown 18" }, + { default_fatal_handler, "unknown 19" }, + { default_fatal_handler, "level 0 (translation table walk)" }, + { default_fatal_handler, "level 1 (translation table walk)" }, + { default_fatal_handler, "level 2 (translation table walk)" }, + { default_fatal_handler, "level 3 (translation table walk)" }, + { default_fatal_handler, "synchronous parity or ECC error" }, + { default_fatal_handler, "unknown 25" }, + { default_fatal_handler, "unknown 26" }, + { default_fatal_handler, "unknown 27" }, + { default_fatal_handler, "level 0 synchronous parity " + "error (translation table walk)" }, + { default_fatal_handler, "level 1 synchronous parity " + "error (translation table walk)" }, + { default_fatal_handler, "level 2 synchronous parity " + "error (translation table walk)" }, + { default_fatal_handler, "level 3 synchronous parity " + "error (translation table walk)" }, + { default_fatal_handler, "unknown 32" }, + { default_fatal_handler, "alignment fault" }, + { default_fatal_handler, "unknown 34" }, + { default_fatal_handler, "unknown 35" }, + { default_fatal_handler, "unknown 36" }, + { default_fatal_handler, "unknown 37" }, + { default_fatal_handler, "unknown 38" }, + { default_fatal_handler, "unknown 39" }, + { default_fatal_handler, "unknown 40" }, + { default_fatal_handler, "unknown 41" }, + { default_fatal_handler, "unknown 42" }, + { default_fatal_handler, "unknown 43" }, + { default_fatal_handler, "unknown 44" }, + { default_fatal_handler, "unknown 45" }, + { default_fatal_handler, "unknown 46" }, + { default_fatal_handler, "unknown 47" }, 
+ { default_fatal_handler, "TLB conflict abort" }, + { default_fatal_handler, "Unsupported atomic hardware update fault" }, + { default_fatal_handler, "unknown 50" }, + { default_fatal_handler, "unknown 51" }, + { default_fatal_handler, "implementation fault (lockdown abort)" }, + { default_fatal_handler, "implementation fault (unsupported exclusive)" }, + { default_fatal_handler, "unknown 54" }, + { default_fatal_handler, "unknown 55" }, + { default_fatal_handler, "unknown 56" }, + { default_fatal_handler, "unknown 57" }, + { default_fatal_handler, "unknown 58" }, + { default_fatal_handler, "unknown 59" }, + { default_fatal_handler, "unknown 60" }, + { default_fatal_handler, "section domain fault" }, + { default_fatal_handler, "page domain fault" }, + { default_fatal_handler, "unknown 63" }, +}; + +static struct fatal_handle_info g_debug_handler[] = +{ + { default_debug_handler, "hardware breakpoint" }, + { default_debug_handler, "hardware single-step" }, + { default_debug_handler, "hardware watchpoint" }, + { default_debug_handler, "unknown 3" }, + { default_debug_handler, "aarch32 BKPT" }, + { default_debug_handler, "aarch32 vector catch" }, + { default_debug_handler, "aarch64 BRK" }, + { default_debug_handler, "unknown 7" }, +}; + +/**************************************************************************** + * Private Functions + ****************************************************************************/ + +static const char *esr_get_class_string(uint64_t esr) +{ + uint32_t ec = ESR_ELX_EC(esr); + + return g_esr_class_str[ec]; +} + +static const char *esr_get_desc_string(uint64_t esr) +{ + uint32_t ec = ESR_ELX_EC(esr); + + return g_esr_desc_str[ec]; +} + static void print_ec_cause(uint64_t esr) { - uint32_t ec = (uint32_t)esr >> 26; + sinfo("%s\n", esr_get_class_string(esr)); + sinfo("%s\n", esr_get_desc_string(esr)); +} - switch (ec) - { - case 0b000000: - { - sinfo("Unknown reason\n"); - break; - } +static int default_fatal_handler(struct regs_context 
*regs, + uint64_t far, uint64_t esr) +{ + struct fatal_handle_info *inf = g_fatal_handler + (esr & ESR_ELX_FSC); - case 0b000001: - { - sinfo("Trapped WFI or WFE instruction execution\n"); - break; - } + /* Data Fault Status Code. */ - case 0b000011: - { - sinfo( - "Trapped MCR or MRC access with (coproc==0b1111) that " - "is not reported using EC 0b000000\n"); - break; - } + sinfo("(IFSC/DFSC) for Data/Instruction aborts: %s\n", inf->name); - case 0b000100: - { - sinfo( - "Trapped MCRR or MRRC access with (coproc==0b1111) " - "that is not reported using EC 0b000000\n"); - break; - } + return -EINVAL; /* "fault" */ +} - case 0b000101: - { - sinfo("Trapped MCR or MRC access with (coproc==0b1110)\n"); - break; - } +static int default_debug_handler(struct regs_context *regs, + uint64_t far, uint64_t esr) +{ + struct fatal_handle_info *inf = g_debug_handler + DBG_ESR_EVT(esr); - case 0b000110: - { - sinfo("Trapped LDC or STC access\n"); - break; - } + sinfo("Default Debug Handler: %s\n", inf->name); + return -1; /* "fault" */ +} - case 0b000111: - { - sinfo( - "Trapped access to SVE, Advanced SIMD, or " - "floating-point functionality\n"); - break; - } +static int arm64_el1_abort(struct regs_context *regs, uint64_t esr) +{ + uint64_t far = read_sysreg(far_el1); + struct fatal_handle_info *inf = g_fatal_handler + (esr & ESR_ELX_FSC); - case 0b001100: - { - sinfo("Trapped MRRC access with (coproc==0b1110)\n"); - break; - } + return inf->handle_fn(regs, far, esr); +} - case 0b001101: - { - sinfo("Branch Target Exception\n"); - break; - } +static int arm64_el1_pc(struct regs_context *regs, uint64_t esr) +{ + uint64_t far = read_sysreg(far_el1); - case 0b001110: - { - sinfo("Illegal Execution state\n"); - break; - } + sinfo("SP/PC alignment exception at 0x%" PRIx64 "\n", far); + return -EINVAL; /* "fault" */ +} - case 0b010001: - { - sinfo("SVC instruction execution in AArch32 state\n"); - break; - } +static int arm64_el1_bti(struct regs_context *regs, uint64_t esr) +{ + 
uint64_t far = read_sysreg(far_el1); - case 0b011000: - { - sinfo( - "Trapped MSR, MRS or System instruction execution in " - "AArch64 state, that is not reported using EC " - "0b000000, 0b000001 or 0b000111\n"); - break; - } + sinfo("BTI exception at 0x%" PRIx64 "\n", far); + return -EINVAL; /* "fault" */ +} - case 0b011001: - { - sinfo("Trapped access to SVE functionality\n"); - break; - } +static int arm64_el1_undef(struct regs_context *regs, uint64_t esr) +{ + uint32_t insn; + + sinfo("Undefined instruction at 0x%" PRIx64 ", dump:\n", regs->elr); + memcpy(&insn, (void *)(regs->elr - 8), 4); + sinfo("0x%" PRIx64 " : 0x%" PRIx32 "\n", regs->elr - 8, insn); + memcpy(&insn, (void *)(regs->elr - 4), 4); + sinfo("0x%" PRIx64 " : 0x%" PRIx32 "\n", regs->elr - 4, insn); + memcpy(&insn, (void *)(regs->elr), 4); + sinfo("0x%" PRIx64 " : 0x%" PRIx32 "\n", regs->elr, insn); + memcpy(&insn, (void *)(regs->elr + 4), 4); + sinfo("0x%" PRIx64 " : 0x%" PRIx32 "\n", regs->elr + 4, insn); + memcpy(&insn, (void *)(regs->elr + 8), 4); + sinfo("0x%" PRIx64 " : 0x%" PRIx32 "\n", regs->elr + 8, insn); + + return -1; +} - case 0b100000: - { - sinfo( - "Instruction Abort from a lower Exception level, that " - "might be using AArch32 or AArch64\n"); - break; - } +static int arm64_el1_fpac(struct regs_context *regs, uint64_t esr) +{ + uint64_t far = read_sysreg(far_el1); - case 0b100001: - { - sinfo( - "Instruction Abort taken without a change " - "in Exception level.\n"); - break; - } + /* Unexpected FPAC exception in the kernel. 
*/ - case 0b100010: - { - sinfo("PC alignment fault exception.\n"); - break; - } + sinfo("Unexpected FPAC exception at 0x%" PRIx64 "\n", far); + return -EINVAL; +} - case 0b100100: - { - sinfo( - "Data Abort from a lower Exception level, that might " - "be using AArch32 or AArch64\n"); - break; - } +static int arm64_el1_dbg(struct regs_context *regs, uint64_t esr) +{ + uint64_t far = read_sysreg(far_el1); + struct fatal_handle_info *inf = g_debug_handler + DBG_ESR_EVT(esr); - case 0b100101: - { - sinfo("Data Abort taken without a change in Exception level\n"); - break; - } + return inf->handle_fn(regs, far, esr); +} - case 0b100110: - { - sinfo("SP alignment fault exception\n"); - break; - } +static int arm64_el1_exception_handler(uint64_t esr, + struct regs_context *regs) +{ + uint32_t ec = ESR_ELX_EC(esr); + int ret; - case 0b101000: - { - sinfo( - "Trapped floating-point exception " - "taken from AArch32 state\n"); - break; - } + switch (ec) + { + /* Data/Instruction Abort at EL1 */ - case 0b101100: + case ESR_ELX_EC_DABT_CUR: + case ESR_ELX_EC_IABT_CUR: { - sinfo( - "Trapped floating-point exception " - "taken from AArch64 state.\n"); + ret = arm64_el1_abort(regs, esr); break; } - case 0b101111: - { - sinfo("SError interrupt\n"); - break; - } + /* PC alignment fault exception. 
*/ - case 0b110000: + case ESR_ELX_EC_PC_ALIGN: { - sinfo( - "Breakpoint exception from a lower Exception level, " - "that might be using AArch32 or AArch64\n"); + ret = arm64_el1_pc(regs, esr); break; } - case 0b110001: - { - sinfo( - "Breakpoint exception taken without a change in " - "Exception level\n"); - break; - } + /* Trapped MSR, MRS or System instruction execution + * in AArch64 state + */ - case 0b110010: + case ESR_ELX_EC_SYS64: + case ESR_ELX_EC_UNKNOWN: { - sinfo( - "Software Step exception from a lower Exception level, " - "that might be using AArch32 or AArch64\n"); + ret = arm64_el1_undef(regs, esr); break; } - case 0b110011: - { - sinfo( - "Software Step exception taken without a change in " - "Exception level\n"); - break; - } + /* Branch Target Exception */ - case 0b110100: + case ESR_ELX_EC_BTI: { - sinfo( - "Watchpoint exception from a lower Exception level, " - "that might be using AArch32 or AArch64\n"); + ret = arm64_el1_bti(regs, esr); break; } - case 0b110101: + case ESR_ELX_EC_BREAKPT_CUR: + + /* Breakpoint exception taken in current Exception level */ + + case ESR_ELX_EC_SOFTSTP_CUR: + + /* Software Step exception taken in current Exception level */ + + case ESR_ELX_EC_WATCHPT_CUR: + + /* Watchpoint exception taken in current Exception level */ + + case ESR_ELX_EC_BRK64: { - sinfo( - "Watchpoint exception taken without a change in " - "Exception level.\n"); + /* BRK instruction execution in AArch64 state */ + + ret = arm64_el1_dbg(regs, esr); break; } - case 0b111000: + case ESR_ELX_EC_FPAC: { - sinfo("BKPT instruction execution in AArch32 state\n"); + /* Exception from a Pointer Authentication + * instruction authentication failure + */ + + ret = arm64_el1_fpac(regs, esr); break; } - case 0b111100: + default: { - sinfo("BRK instruction execution in AArch64 state.\n"); - break; + sinfo("64-bit el1h sync, esr = 0x%x", ec); + ret = -EINVAL; } + } - default: - break; - } + return ret; } 
-/**************************************************************************** - * Public Functions - ****************************************************************************/ +static int arm64_exception_handler(struct regs_context *regs) +{ + uint64_t el; + uint64_t esr; + uint64_t elr; + uint64_t far; + const char *el_str; + int ret = -EINVAL; + + el = arm64_current_el(); + + switch (el) + { + case MODE_EL1: + { + el_str = "MODE_EL1"; + esr = read_sysreg(esr_el1); + far = read_sysreg(far_el1); + elr = read_sysreg(elr_el1); + ret = arm64_el1_exception_handler(esr, regs); + break; + } -/**************************************************************************** - * Name: up_mdelay - ****************************************************************************/ + case MODE_EL2: + { + el_str = "MODE_EL2"; + esr = read_sysreg(esr_el2); + far = read_sysreg(far_el2); + elr = read_sysreg(elr_el2); + break; + } -void up_mdelay(unsigned int milliseconds) -{ - volatile unsigned int i; - volatile unsigned int j; +#ifdef CONFIG_ARCH_HAVE_EL3 + case MODE_EL3: + { + el_str = "MODE_EL3"; + esr = read_sysreg(esr_el3); + far = read_sysreg(far_el3); + elr = read_sysreg(elr_el3); + break; + } - for (i = 0; i < milliseconds; i++) +#endif + default: { - for (j = 0; j < CONFIG_BOARD_LOOPSPERMSEC; j++) - { - } + el_str = "Unknown"; + + /* Just to keep the compiler happy */ + + esr = elr = far = 0; + break; + } + } + + if (ret != 0) + { + sinfo("CurrentEL: %s\n", el_str); + sinfo("ESR_ELn: 0x%" PRIx64 "\n", esr); + sinfo("FAR_ELn: 0x%" PRIx64 "\n", far); + sinfo("ELR_ELn: 0x%" PRIx64 "\n", elr); + + print_ec_cause(esr); } + + return ret; } /**************************************************************************** - * Name: arm64_fatal_error - * - * Description: - * + * Public Functions ****************************************************************************/ -void arm64_fatal_error(unsigned int reason, struct regs_context * reg) +void arm64_fatal_handler(struct regs_context 
*regs) { - uint64_t el, esr, elr, far; + int ret; - sinfo("reason = %d\n", reason); + /* Nested exception are not supported */ - up_set_current_regs((uint64_t *)reg); + DEBUGASSERT(up_current_regs() == NULL); - if (reason != K_ERR_SPURIOUS_IRQ) - { - __asm__ volatile ("mrs %0, CurrentEL" : "=r" (el)); + up_set_current_regs((uint64_t *)regs); - switch (GET_EL(el)) - { - case MODE_EL1: - { - sinfo("CurrentEL: MODE_EL1\n"); - __asm__ volatile ("mrs %0, esr_el1" : "=r" (esr)); - __asm__ volatile ("mrs %0, far_el1" : "=r" (far)); - __asm__ volatile ("mrs %0, elr_el1" : "=r" (elr)); - break; - } - - case MODE_EL2: - { - sinfo("CurrentEL: MODE_EL2\n"); - __asm__ volatile ("mrs %0, esr_el2" : "=r" (esr)); - __asm__ volatile ("mrs %0, far_el2" : "=r" (far)); - __asm__ volatile ("mrs %0, elr_el2" : "=r" (elr)); - break; - } + ret = arm64_exception_handler(regs); -#ifdef CONFIG_ARCH_HAVE_EL3 - case MODE_EL3: - { - sinfo("CurrentEL: MODE_EL3\n"); - __asm__ volatile ("mrs %0, esr_el3" : "=r" (esr)); - __asm__ volatile ("mrs %0, far_el3" : "=r" (far)); - __asm__ volatile ("mrs %0, elr_el3" : "=r" (elr)); - break; - } -#endif + if (ret != 0) + { + /* The fatal is not handled, print error and hung */ - default: - { - sinfo("CurrentEL: unknown\n"); + PANIC_WITH_REGS("panic", regs); + } - /* Just to keep the compiler happy */ + /* Set CURRENT_REGS to NULL to indicate that we are no longer in an + * Exception handler. 
+ */ - esr = elr = far = 0; - break; - } - } + up_set_current_regs(NULL); +} - if (GET_EL(el) != MODE_EL0) - { - sinfo("ESR_ELn: 0x%"PRIx64"\n", esr); - sinfo("FAR_ELn: 0x%"PRIx64"\n", far); - sinfo("ELR_ELn: 0x%"PRIx64"\n", elr); +void arm64_register_debug_hook(int nr, fatal_handle_func_t fn) +{ + DEBUGVERIFY(nr > 0 && nr <= nitems(g_debug_handler)); - print_ec_cause(esr); - } - } + /* Override the default handler */ - PANIC_WITH_REGS("panic", reg); + g_debug_handler[nr].handle_fn = fn; } diff --git a/arch/arm64/src/common/arm64_fatal.h b/arch/arm64/src/common/arm64_fatal.h index 360a28e8fa975..301e158f00011 100644 --- a/arch/arm64/src/common/arm64_fatal.h +++ b/arch/arm64/src/common/arm64_fatal.h @@ -21,12 +21,6 @@ #ifndef __ARCH_ARM64_SRC_COMMON_ARM64_FATAL_H #define __ARCH_ARM64_SRC_COMMON_ARM64_FATAL_H -/* Fatal error APIs */ - -#define K_ERR_CPU_EXCEPTION (0) -#define K_ERR_CPU_MODE32 (1) -#define K_ERR_SPURIOUS_IRQ (2) - #ifndef __ASSEMBLY__ /**************************************************************************** @@ -43,6 +37,123 @@ * Pre-processor Definitions ****************************************************************************/ +#define ESR_ELX_EC_UNKNOWN (0x00) +#define ESR_ELX_EC_WFX (0x01) + +/* Unallocated EC: 0x02 */ + +#define ESR_ELX_EC_CP15_32 (0x03) +#define ESR_ELX_EC_CP15_64 (0x04) +#define ESR_ELX_EC_CP14_MR (0x05) +#define ESR_ELX_EC_CP14_LS (0x06) +#define ESR_ELX_EC_FP_ASIMD (0x07) +#define ESR_ELX_EC_CP10_ID (0x08) /* EL2 only */ +#define ESR_ELX_EC_PAC (0x09) /* EL2 and above */ + +/* Unallocated EC: 0x0A - 0x0B */ + +#define ESR_ELX_EC_CP14_64 (0x0C) +#define ESR_ELX_EC_BTI (0x0D) +#define ESR_ELX_EC_ILL (0x0E) + +/* Unallocated EC: 0x0F - 0x10 */ + +#define ESR_ELX_EC_SVC32 (0x11) +#define ESR_ELX_EC_HVC32 (0x12) /* EL2 only */ +#define ESR_ELX_EC_SMC32 (0x13) /* EL2 and above */ + +/* Unallocated EC: 0x14 */ + +#define ESR_ELX_EC_SVC64 (0x15) +#define ESR_ELX_EC_HVC64 (0x16) /* EL2 and above */ +#define ESR_ELX_EC_SMC64 
(0x17) /* EL2 and above */ +#define ESR_ELX_EC_SYS64 (0x18) +#define ESR_ELX_EC_SVE (0x19) +#define ESR_ELX_EC_ERET (0x1a) /* EL2 only */ + +/* Unallocated EC: 0x1B */ + +#define ESR_ELX_EC_FPAC (0x1C) /* EL1 and above */ +#define ESR_ELX_EC_SME (0x1D) + +/* Unallocated EC: 0x1D - 0x1E */ + +#define ESR_ELX_EC_IMP_DEF (0x1f) /* EL3 only */ +#define ESR_ELX_EC_IABT_LOW (0x20) +#define ESR_ELX_EC_IABT_CUR (0x21) +#define ESR_ELX_EC_PC_ALIGN (0x22) + +/* Unallocated EC: 0x23 */ + +#define ESR_ELX_EC_DABT_LOW (0x24) +#define ESR_ELX_EC_DABT_CUR (0x25) +#define ESR_ELX_EC_SP_ALIGN (0x26) +#define ESR_ELX_EC_MOPS (0x27) +#define ESR_ELX_EC_FP_EXC32 (0x28) + +/* Unallocated EC: 0x29 - 0x2B */ + +#define ESR_ELX_EC_FP_EXC64 (0x2C) + +/* Unallocated EC: 0x2D - 0x2E */ + +#define ESR_ELX_EC_SERROR (0x2F) +#define ESR_ELX_EC_BREAKPT_LOW (0x30) +#define ESR_ELX_EC_BREAKPT_CUR (0x31) +#define ESR_ELX_EC_SOFTSTP_LOW (0x32) +#define ESR_ELX_EC_SOFTSTP_CUR (0x33) +#define ESR_ELX_EC_WATCHPT_LOW (0x34) +#define ESR_ELX_EC_WATCHPT_CUR (0x35) + +/* Unallocated EC: 0x36 - 0x37 */ + +#define ESR_ELX_EC_BKPT32 (0x38) + +/* Unallocated EC: 0x39 */ + +#define ESR_ELX_EC_VECTOR32 (0x3A) /* EL2 only */ + +/* Unallocated EC: 0x3B */ + +#define ESR_ELX_EC_BRK64 (0x3C) + +/* Unallocated EC: 0x3D - 0x3F */ + +#define ESR_ELX_EC_MAX (0x3F) + +#define ESR_ELX_EC_SHIFT (26) +#define ESR_ELX_EC_WIDTH (6) +#define ESR_ELX_EC_MASK (0x3F << ESR_ELX_EC_SHIFT) +#define ESR_ELX_EC(esr) (((esr) & ESR_ELX_EC_MASK) \ + >> ESR_ELX_EC_SHIFT) + +/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */ + +#define ESR_ELX_FSC (0x3F) +#define ESR_ELX_FSC_TYPE (0x3C) +#define ESR_ELX_FSC_LEVEL (0x03) +#define ESR_ELX_FSC_EXTABT (0x10) +#define ESR_ELX_FSC_MTE (0x11) +#define ESR_ELX_FSC_SERROR (0x11) +#define ESR_ELX_FSC_ACCESS (0x08) +#define ESR_ELX_FSC_FAULT (0x04) +#define ESR_ELX_FSC_PERM (0x0C) +#define ESR_ELX_FSC_SEA_TTW0 (0x14) +#define ESR_ELX_FSC_SEA_TTW1 (0x15) +#define 
ESR_ELX_FSC_SEA_TTW2 (0x16) +#define ESR_ELX_FSC_SEA_TTW3 (0x17) +#define ESR_ELX_FSC_SECC (0x18) +#define ESR_ELX_FSC_SECC_TTW0 (0x1c) +#define ESR_ELX_FSC_SECC_TTW1 (0x1d) +#define ESR_ELX_FSC_SECC_TTW2 (0x1e) +#define ESR_ELX_FSC_SECC_TTW3 (0x1f) + +#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7) +#define DBG_ESR_EVT_HWBP (0x0) +#define DBG_ESR_EVT_HWSS (0x1) +#define DBG_ESR_EVT_HWWP (0x2) +#define DBG_ESR_EVT_BRK (0x6) + #define __builtin_unreachable() \ do \ { \ @@ -51,28 +162,49 @@ } while (true) /**************************************************************************** - * Public Data + * Public Type Declarations ****************************************************************************/ +typedef int (*fatal_handle_func_t)(struct regs_context *regs, + uint64_t far, uint64_t esr); + /**************************************************************************** * Public Function Prototypes ****************************************************************************/ /**************************************************************************** - * Name: arm64_fatal_error + * Name: arm64_fatal_handler * * Description: - * fatal error handle for arm64 + * Fatal handle for arm64 * Input Parameters: - * reason: error reason * reg: exception stack reg context * - * Returned Value: + * Returned Value: None + * If the function return, the exception has been handled + * + ****************************************************************************/ + +void arm64_fatal_handler(struct regs_context *reg); + +/**************************************************************************** + * Name: arm64_register_debug_hook + * + * Description: + * Register a hook function for DEBUG event + * Input Parameters: + * nr: DEBUG event + * DBG_ESR_EVT_HWBP : Hardware BreakPoint + * DBG_ESR_EVT_HWSS : Hardware SingleStep + * DBG_ESR_EVT_HWWP : Hardware WatchPoint + * DBG_ESR_EVT_BRK : Brk instruction trigger + * fn: hook function + * + * Returned Value: none * 
****************************************************************************/ -void arm64_fatal_error(unsigned int reason, struct regs_context * reg); -void arm64_dump_fatal(struct regs_context * reg); +void arm64_register_debug_hook(int nr, fatal_handle_func_t fn); #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/src/common/arm64_fpu.c b/arch/arm64/src/common/arm64_fpu.c index c1accb48ebfa5..e80e7240aee29 100644 --- a/arch/arm64/src/common/arm64_fpu.c +++ b/arch/arm64/src/common/arm64_fpu.c @@ -77,11 +77,10 @@ struct arm64_fpu_procfs_file_s * Private Data ***************************************************************************/ -static struct fpu_reg g_idle_thread_fpu[CONFIG_SMP_NCPUS]; -static struct arm64_cpu_fpu_context g_cpu_fpu_ctx[CONFIG_SMP_NCPUS]; - #ifdef CONFIG_FS_PROCFS_REGISTER +static struct arm64_cpu_fpu_context g_cpu_fpu_ctx[CONFIG_SMP_NCPUS]; + /* procfs methods */ static int arm64_fpu_procfs_open(struct file *filep, const char *relpath, @@ -262,141 +261,6 @@ static int arm64_fpu_procfs_stat(const char *relpath, struct stat *buf) } #endif -/*************************************************************************** - * Public Functions - ***************************************************************************/ - -void arm64_init_fpu(struct tcb_s *tcb) -{ - if (tcb->pid < CONFIG_SMP_NCPUS) - { -#ifdef CONFIG_SMP - int cpu = tcb->cpu; -#else - int cpu = 0; -#endif - memset(&g_cpu_fpu_ctx[cpu], 0, - sizeof(struct arm64_cpu_fpu_context)); - g_cpu_fpu_ctx[cpu].idle_thread = tcb; - - tcb->xcp.fpu_regs = (uint64_t *)&g_idle_thread_fpu[cpu]; - } - - memset(tcb->xcp.fpu_regs, 0, sizeof(struct fpu_reg)); -} - -void arm64_destory_fpu(struct tcb_s *tcb) -{ - struct tcb_s *owner; - - /* save current fpu owner's context */ - - owner = g_cpu_fpu_ctx[this_cpu()].fpu_owner; - - if (owner == tcb) - { - g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL; - } -} - -/*************************************************************************** - * Name: 
arm64_fpu_enter_exception - * - * Description: - * called at every time get into a exception - * - ***************************************************************************/ - -void arm64_fpu_enter_exception(void) -{ -} - -void arm64_fpu_exit_exception(void) -{ -} - -void arm64_fpu_trap(struct regs_context *regs) -{ - struct tcb_s *owner; - - UNUSED(regs); - - /* disable fpu trap access */ - - arm64_fpu_access_trap_disable(); - - /* save current fpu owner's context */ - - owner = g_cpu_fpu_ctx[this_cpu()].fpu_owner; - - if (owner != NULL) - { - arm64_fpu_save((struct fpu_reg *)owner->xcp.fpu_regs); - ARM64_DSB(); - g_cpu_fpu_ctx[this_cpu()].save_count++; - g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL; - } - - if (arch_get_exception_depth() > 1) - { - /* if get_exception_depth > 1 - * it means FPU access exception occurred in exception context - * switch FPU owner to idle thread - */ - - owner = g_cpu_fpu_ctx[this_cpu()].idle_thread; - } - else - { - owner = running_task(); - } - - /* restore our context */ - - arm64_fpu_restore((struct fpu_reg *)owner->xcp.fpu_regs); - g_cpu_fpu_ctx[this_cpu()].restore_count++; - - /* become new owner */ - - g_cpu_fpu_ctx[this_cpu()].fpu_owner = owner; -} - -void arm64_fpu_context_restore(void) -{ - struct tcb_s *new_tcb = running_task(); - - arm64_fpu_access_trap_disable(); - - /* FPU trap has happened at this task */ - - if (new_tcb == g_cpu_fpu_ctx[this_cpu()].fpu_owner) - { - arm64_fpu_access_trap_disable(); - } - else - { - arm64_fpu_access_trap_enable(); - } - - g_cpu_fpu_ctx[this_cpu()].switch_count++; -} - -#ifdef CONFIG_SMP -void arm64_fpu_context_save(void) -{ - struct tcb_s *tcb = running_task(); - - if (tcb == g_cpu_fpu_ctx[this_cpu()].fpu_owner) - { - arm64_fpu_access_trap_disable(); - arm64_fpu_save((struct fpu_reg *)tcb->xcp.fpu_regs); - ARM64_DSB(); - g_cpu_fpu_ctx[this_cpu()].save_count++; - g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL; - } -} -#endif - void arm64_fpu_enable(void) { irqstate_t flags = up_irq_save(); 
diff --git a/arch/arm64/src/common/arm64_fpu_func.S b/arch/arm64/src/common/arm64_fpu_func.S index da68a0c94cb1b..13481ea098c26 100644 --- a/arch/arm64/src/common/arm64_fpu_func.S +++ b/arch/arm64/src/common/arm64_fpu_func.S @@ -66,10 +66,10 @@ SECTION_FUNC(text, arm64_fpu_save) stp q28, q29, [x0, #(16 * FPU_REG_Q28)] stp q30, q31, [x0, #(16 * FPU_REG_Q30)] - mrs x1, fpsr - mrs x2, fpcr - str w1, [x0, #(16 * 32 + 0)] - str w2, [x0, #(16 * 32 + 4)] + mrs x10, fpsr + mrs x11, fpcr + str w10, [x0, #(16 * 32 + 0)] + str w11, [x0, #(16 * 32 + 4)] ret @@ -93,9 +93,9 @@ SECTION_FUNC(text, arm64_fpu_restore) ldp q28, q29, [x0, #(16 * FPU_REG_Q28)] ldp q30, q31, [x0, #(16 * FPU_REG_Q30)] - ldr w1, [x0, #(16 * 32 + 0)] - ldr w2, [x0, #(16 * 32 + 4)] - msr fpsr, x1 - msr fpcr, x2 + ldr w10, [x0, #(16 * 32 + 0)] + ldr w11, [x0, #(16 * 32 + 4)] + msr fpsr, x10 + msr fpcr, x11 ret diff --git a/arch/arm64/src/common/arm64_gic.h b/arch/arm64/src/common/arm64_gic.h index 191e40aa7cc21..ce31277205edf 100644 --- a/arch/arm64/src/common/arm64_gic.h +++ b/arch/arm64/src/common/arm64_gic.h @@ -281,15 +281,13 @@ #define GIC_IRQ_SGI15 15 #ifdef CONFIG_ARCH_TRUSTZONE_SECURE -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI8 -# define GIC_SMP_CPUSTART GIC_IRQ_SGI9 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI11 +# define GIC_SMP_SCHED GIC_IRQ_SGI9 +# define GIC_SMP_CPUSTART GIC_IRQ_SGI10 +# define GIC_SMP_CALL GIC_IRQ_SGI11 #else -# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI0 -# define GIC_SMP_CPUSTART GIC_IRQ_SGI1 -# define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 -# define GIC_SMP_CPUCALL GIC_IRQ_SGI3 +# define GIC_SMP_SCHED GIC_IRQ_SGI1 +# define GIC_SMP_CPUSTART GIC_IRQ_SGI2 +# define GIC_SMP_CALL GIC_IRQ_SGI3 #endif /**************************************************************************** @@ -329,32 +327,10 @@ int arm64_gic_v2m_initialize(void); #ifdef CONFIG_SMP /**************************************************************************** - * Name: 
arm64_pause_handler + * Name: arm64_smp_sched_handler * * Description: - * This is the handler for SGI2. It performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int arm64_pause_handler(int irq, void *context, void *arg); - -/**************************************************************************** - * Name: arm64_pause_async_handler - * - * Description: - * This is the handler for async pause. + * This is the handler for sched. * * 1. It saves the current task state at the head of the current assigned * task list. @@ -370,7 +346,7 @@ int arm64_pause_handler(int irq, void *context, void *arg); * ****************************************************************************/ -int arm64_pause_async_handler(int irq, void *context, void *arg); +int arm64_smp_sched_handler(int irq, void *context, void *arg); void arm64_gic_secondary_init(void); diff --git a/arch/arm64/src/common/arm64_gicv2.c b/arch/arm64/src/common/arm64_gicv2.c index a69c881305a15..06d85292b799a 100644 --- a/arch/arm64/src/common/arm64_gicv2.c +++ b/arch/arm64/src/common/arm64_gicv2.c @@ -864,11 +864,8 @@ static void arm_gic0_initialize(void) #ifdef CONFIG_SMP /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. 
*/ - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, - arm64_pause_async_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, - nxsched_smp_call_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_SCHED, arm64_smp_sched_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CALL, nxsched_smp_call_handler, NULL)); #endif } @@ -1490,26 +1487,6 @@ void arm64_gic_raise_sgi(unsigned int sgi, uint16_t cpuset) arm_cpu_sgi(sgi, cpuset); } -# ifdef CONFIG_SMP -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - up_trigger_irq(GIC_SMP_CPUCALL, cpuset); -} -# endif #endif /* CONFIG_SMP */ /**************************************************************************** diff --git a/arch/arm64/src/common/arm64_gicv3.c b/arch/arm64/src/common/arm64_gicv3.c index 81fe66bb492a5..aad758376c97f 100644 --- a/arch/arm64/src/common/arm64_gicv3.c +++ b/arch/arm64/src/common/arm64_gicv3.c @@ -653,11 +653,8 @@ static void gicv3_dist_init(void) #ifdef CONFIG_SMP /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. 
*/ - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, - arm64_pause_async_handler, NULL)); - DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, - nxsched_smp_call_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_SCHED, arm64_smp_sched_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CALL, nxsched_smp_call_handler, NULL)); #endif } @@ -953,9 +950,8 @@ static void arm64_gic_init(void) gicv3_cpuif_init(); #ifdef CONFIG_SMP - up_enable_irq(GIC_SMP_CPUPAUSE); - up_enable_irq(GIC_SMP_CPUPAUSE_ASYNC); - up_enable_irq(GIC_SMP_CPUCALL); + up_enable_irq(GIC_SMP_SCHED); + up_enable_irq(GIC_SMP_CALL); #endif } @@ -982,27 +978,6 @@ void arm64_gic_secondary_init(void) { arm64_gic_init(); } - -# ifdef CONFIG_SMP -/*************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. 
- * - ***************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - up_trigger_irq(GIC_SMP_CPUCALL, cpuset); -} -# endif #endif /*************************************************************************** diff --git a/arch/arm64/src/common/arm64_initialstate.c b/arch/arm64/src/common/arm64_initialstate.c index 93aa822fff6ec..32058db4855d4 100644 --- a/arch/arm64/src/common/arm64_initialstate.c +++ b/arch/arm64/src/common/arm64_initialstate.c @@ -47,10 +47,6 @@ #include "chip.h" #include "arm64_fatal.h" -#ifdef CONFIG_ARCH_FPU -#include "arm64_fpu.h" -#endif - /**************************************************************************** * Public Functions ****************************************************************************/ @@ -70,17 +66,6 @@ void arm64_new_task(struct tcb_s * tcb) } #endif -#ifdef CONFIG_ARCH_FPU - struct fpu_reg *pfpuctx; - pfpuctx = STACK_PTR_TO_FRAME(struct fpu_reg, stack_ptr); - tcb->xcp.fpu_regs = (uint64_t *)pfpuctx; - - /* set fpu context */ - - arm64_init_fpu(tcb); - stack_ptr = (uintptr_t)pfpuctx; -#endif - pinitctx = STACK_PTR_TO_FRAME(struct regs_context, stack_ptr); memset(pinitctx, 0, sizeof(struct regs_context)); pinitctx->elr = (uint64_t)tcb->start; @@ -150,11 +135,6 @@ void up_initial_state(struct tcb_s *tcb) tcb->stack_base_ptr = tcb->stack_alloc_ptr; tcb->adj_stack_size = CONFIG_IDLETHREAD_STACKSIZE; -#ifdef CONFIG_ARCH_FPU - /* set fpu context */ - - arm64_init_fpu(tcb); -#endif /* set initialize idle thread tcb and exception depth * core 0, idle0 */ diff --git a/arch/arm64/src/common/arm64_internal.h b/arch/arm64/src/common/arm64_internal.h index 6f4ab45b2c93b..6610a966ee823 100644 --- a/arch/arm64/src/common/arm64_internal.h +++ b/arch/arm64/src/common/arm64_internal.h @@ -264,7 +264,7 @@ EXTERN uint8_t g_idle_topstack[]; /* End+1 of heap */ void arm64_new_task(struct tcb_s *tak_new); void arm64_jump_to_user(uint64_t entry, uint64_t x0, uint64_t x1, - 
uint64_t *regs) noreturn_function; + uint64_t sp_el0, uint64_t *regs) noreturn_function; /* Low level initialization provided by chip logic */ @@ -305,8 +305,7 @@ void arm64_pginitialize(void); # define arm64_pginitialize() #endif /* CONFIG_LEGACY_PAGING */ -uint64_t * arm64_syscall_switch(uint64_t *regs); -int arm64_syscall(uint64_t *regs); +uint64_t *arm64_syscall(uint64_t *regs); /* Low level serial output **************************************************/ diff --git a/arch/arm64/src/common/arm64_pthread_start.c b/arch/arm64/src/common/arm64_pthread_start.c index 8132884cf8394..3b693455b858c 100644 --- a/arch/arm64/src/common/arm64_pthread_start.c +++ b/arch/arm64/src/common/arm64_pthread_start.c @@ -68,6 +68,8 @@ void up_pthread_start(pthread_trampoline_t startup, pthread_startroutine_t entrypt, pthread_addr_t arg) { + struct tcb_s *rtcb = this_task(); + /* Set up to enter the user-space pthread start-up function in * unprivileged mode. We need: * @@ -78,7 +80,7 @@ void up_pthread_start(pthread_trampoline_t startup, */ arm64_jump_to_user((uint64_t)startup, (uint64_t)entrypt, (uint64_t)arg, - this_task()->xcp.initregs); + (uint64_t)rtcb->xcp.ustkptr, rtcb->xcp.initregs); } #endif /* !CONFIG_BUILD_FLAT && __KERNEL__ && !CONFIG_DISABLE_PTHREAD */ diff --git a/arch/arm64/src/common/arm64_schedulesigaction.c b/arch/arm64/src/common/arm64_schedulesigaction.c index 6301f0bb6f248..cbfc897dedec4 100644 --- a/arch/arm64/src/common/arm64_schedulesigaction.c +++ b/arch/arm64/src/common/arm64_schedulesigaction.c @@ -38,10 +38,6 @@ #include "irq/irq.h" #include "arm64_fatal.h" -#ifdef CONFIG_ARCH_FPU -#include "arm64_fpu.h" -#endif - /**************************************************************************** * Public Functions ****************************************************************************/ @@ -58,16 +54,6 @@ void arm64_init_signal_process(struct tcb_s *tcb, struct regs_context *regs) struct regs_context *psigctx; char *stack_ptr = (char *)pctx->sp_elx - 
sizeof(struct regs_context); -#ifdef CONFIG_ARCH_FPU - struct fpu_reg *pfpuctx; - pfpuctx = STACK_PTR_TO_FRAME(struct fpu_reg, stack_ptr); - tcb->xcp.fpu_regs = (uint64_t *)pfpuctx; - - /* set fpu context */ - - arm64_init_fpu(tcb); - stack_ptr = (char *)pfpuctx; -#endif psigctx = STACK_PTR_TO_FRAME(struct regs_context, stack_ptr); memset(psigctx, 0, sizeof(struct regs_context)); psigctx->elr = (uint64_t)arm64_sigdeliver; @@ -126,65 +112,35 @@ void arm64_init_signal_process(struct tcb_s *tcb, struct regs_context *regs) * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! */ - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handler will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return lr and cpsr and one scratch register. These - * will be restored by the signal trampoline after the signals - * have been delivered. 
- */ - -#ifdef CONFIG_ARCH_FPU - tcb->xcp.saved_fpu_regs = tcb->xcp.fpu_regs; -#endif - tcb->xcp.saved_reg = tcb->xcp.regs; - - /* create signal process context */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return lr and cpsr and one scratch register. These + * will be restored by the signal trampoline after the signals + * have been delivered. + */ - arm64_init_signal_process(tcb, NULL); + tcb->xcp.saved_reg = tcb->xcp.regs; -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ + /* create signal process context */ - if (cpu != me) - { - up_cpu_resume(cpu); - } -#endif - } + arm64_init_signal_process(tcb, NULL); } } diff --git a/arch/arm64/src/common/arm64_sigdeliver.c b/arch/arm64/src/common/arm64_sigdeliver.c index 15e7946c7c007..2a0fdd65cc343 100644 --- a/arch/arm64/src/common/arm64_sigdeliver.c +++ b/arch/arm64/src/common/arm64_sigdeliver.c @@ -38,10 +38,6 @@ #include "irq/irq.h" #include "arm64_fatal.h" -#ifdef CONFIG_ARCH_FPU -#include "arm64_fpu.h" -#endif - /**************************************************************************** * Public Functions ****************************************************************************/ @@ -74,8 +70,8 @@ void arm64_sigdeliver(void) #endif sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -107,7 +103,7 @@ void arm64_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -154,14 +150,9 @@ void arm64_sigdeliver(void) * could be modified by a hostile program. 
*/ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ rtcb->xcp.regs = rtcb->xcp.saved_reg; -#ifdef CONFIG_ARCH_FPU - arm64_destory_fpu(rtcb); - rtcb->xcp.fpu_regs = rtcb->xcp.saved_fpu_regs; -#endif - /* Then restore the correct state for this thread of execution. */ #ifdef CONFIG_SMP diff --git a/arch/arm64/src/common/arm64_smpcall.c b/arch/arm64/src/common/arm64_smpcall.c new file mode 100644 index 0000000000000..e1672bd21b998 --- /dev/null +++ b/arch/arm64/src/common/arm64_smpcall.c @@ -0,0 +1,122 @@ +/**************************************************************************** + * arch/arm64/src/common/arm64_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "arm64_internal.h" +#include "arm64_gic.h" +#include "sched/sched.h" + +/**************************************************************************** + * Private Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: arm64_smp_sched_handler + * + * Description: + * This is the handler for sched. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It porcess g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + ****************************************************************************/ + +int arm64_smp_sched_handler(int irq, void *context, void *arg) +{ + int cpu = this_cpu(); + + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + /* Execute SGI2 */ + + arm64_gic_raise_sgi(GIC_SMP_SCHED, (1 << cpu)); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + up_trigger_irq(GIC_SMP_CALL, cpuset); +} diff --git a/arch/arm64/src/common/arm64_syscall.c b/arch/arm64/src/common/arm64_syscall.c index 5a6c91b0c4c5f..a192cabafba92 100644 --- a/arch/arm64/src/common/arm64_syscall.c +++ b/arch/arm64/src/common/arm64_syscall.c @@ -53,18 +53,18 @@ typedef uintptr_t (*syscall_t)(unsigned int, ...); ****************************************************************************/ static void arm64_dump_syscall(const char *tag, uint64_t cmd, - const struct regs_context * f_regs) + const uint64_t *regs) { - svcinfo("SYSCALL %s: regs: %p cmd: %" PRId64 "\n", tag, f_regs, cmd); + svcinfo("SYSCALL %s: regs: %p cmd: %" PRId64 "\n", tag, regs, cmd); svcinfo("x0: 0x%-16lx x1: 0x%lx\n", - f_regs->regs[REG_X0], f_regs->regs[REG_X1]); + regs[REG_X0], regs[REG_X1]); svcinfo("x2: 0x%-16lx x3: 0x%lx\n", - f_regs->regs[REG_X2], f_regs->regs[REG_X3]); + regs[REG_X2], regs[REG_X3]); svcinfo("x4: 0x%-16lx x5: 0x%lx\n", - f_regs->regs[REG_X4], f_regs->regs[REG_X5]); + regs[REG_X4], regs[REG_X5]); svcinfo("x6: 0x%-16lx x7: 0x%lx\n", - f_regs->regs[REG_X6], f_regs->regs[REG_X7]); + regs[REG_X6], regs[REG_X7]); } #ifdef CONFIG_LIB_SYSCALL @@ -145,32 +145,32 @@ uintptr_t dispatch_syscall(unsigned int nbr, uintptr_t parm1, #endif 
/**************************************************************************** - * Name: arm64_syscall_switch + * Name: arm64_syscall * * Description: * task switch syscall * ****************************************************************************/ -uint64_t *arm64_syscall_switch(uint64_t * regs) +uint64_t *arm64_syscall(uint64_t *regs) { + uint64_t *ret_regs = regs; uint64_t cmd; - struct regs_context *f_regs; - uint64_t *ret_regs; struct tcb_s *tcb; int cpu; +#ifdef CONFIG_BUILD_KERNEL + uint64_t spsr; +#endif /* Nested interrupts are not supported */ DEBUGASSERT(regs); - f_regs = (struct regs_context *)regs; - /* The SYSCALL command is in x0 on entry. Parameters follow in x1..x7 */ - cmd = f_regs->regs[REG_X0]; + cmd = regs[REG_X0]; - arm64_dump_syscall(__func__, cmd, f_regs); + arm64_dump_syscall(__func__, cmd, regs); switch (cmd) { @@ -192,8 +192,8 @@ uint64_t *arm64_syscall_switch(uint64_t * regs) * set will determine the restored context. */ - ret_regs = (uint64_t *)f_regs->regs[REG_X1]; - f_regs->regs[REG_X1] = 0; /* set the saveregs = 0 */ + ret_regs = (uint64_t *)regs[REG_X1]; + regs[REG_X1] = 0; /* set the saveregs = 0 */ DEBUGASSERT(ret_regs); } @@ -217,91 +217,13 @@ uint64_t *arm64_syscall_switch(uint64_t * regs) case SYS_switch_context: { - DEBUGASSERT(f_regs->regs[REG_X1] != 0 && - f_regs->regs[REG_X2] != 0); - *(uint64_t **)f_regs->regs[REG_X1] = regs; - - ret_regs = (uint64_t *) f_regs->regs[REG_X2]; - } - break; + DEBUGASSERT(regs[REG_X1] != 0 && regs[REG_X2] != 0); + *(uint64_t **)regs[REG_X1] = regs; - default: - { - svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd); - ret_regs = 0; - return 0; + ret_regs = (uint64_t *)regs[REG_X2]; } break; - } - - if ((uint64_t *)f_regs != ret_regs) - { - cpu = this_cpu(); - tcb = current_task(cpu); - -#ifdef CONFIG_ARCH_ADDRENV - /* Make sure that the address environment for the previously - * running task is closed down gracefully (data caches dump, - * MMU flushed) and set up the address environment 
for the new - * thread at the head of the ready-to-run list. - */ - - addrenv_switch(NULL); -#endif - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(g_running_tasks[cpu]); - nxsched_resume_scheduler(tcb); - - /* Record the new "running" task. g_running_tasks[] is only used by - * assertion logic for reporting crashes. - */ - - g_running_tasks[cpu] = tcb; - - /* Restore the cpu lock */ - - restore_critical_section(tcb, cpu); - } - - return ret_regs; -} - -/**************************************************************************** - * Name: arm64_syscall - * - * Description: - * SVC interrupts will vector here with insn=the SVC instruction and - * xcp=the interrupt context - * - * The handler may get the SVC number be de-referencing the return - * address saved in the xcp and decoding the SVC instruction - * - ****************************************************************************/ -int arm64_syscall(uint64_t *regs) -{ - uint64_t cmd; - struct regs_context *f_regs; -#ifdef CONFIG_BUILD_KERNEL - uint64_t spsr; -#endif - - /* Nested interrupts are not supported */ - - DEBUGASSERT(regs); - - f_regs = (struct regs_context *)regs; - - /* The SYSCALL command is in x0 on entry. Parameters follow in x1..x7 */ - - cmd = f_regs->regs[REG_X0]; - - arm64_dump_syscall(__func__, cmd, f_regs); - - switch (cmd) - { #ifdef CONFIG_BUILD_KERNEL /* R0=SYS_signal_handler: This a user signal handler callback * @@ -404,10 +326,44 @@ int arm64_syscall(uint64_t *regs) #endif default: + { + svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd); + ret_regs = 0; + return 0; + } + break; + } + + if ((uint64_t *)regs != ret_regs) + { + cpu = this_cpu(); + tcb = current_task(cpu); + +#ifdef CONFIG_ARCH_ADDRENV + /* Make sure that the address environment for the previously + * running task is closed down gracefully (data caches dump, + * MMU flushed) and set up the address environment for the new + * thread at the head of the ready-to-run list. 
+ */ + + addrenv_switch(NULL); +#endif + + /* Update scheduler parameters */ - DEBUGPANIC(); - break; + nxsched_suspend_scheduler(g_running_tasks[cpu]); + nxsched_resume_scheduler(tcb); + + /* Record the new "running" task. g_running_tasks[] is only used by + * assertion logic for reporting crashes. + */ + + g_running_tasks[cpu] = tcb; + + /* Restore the cpu lock */ + + restore_critical_section(tcb, cpu); } - return 0; + return ret_regs; } diff --git a/arch/arm64/src/common/arm64_task_start.c b/arch/arm64/src/common/arm64_task_start.c index 0d20f693e5a04..a0219a5cf6850 100644 --- a/arch/arm64/src/common/arm64_task_start.c +++ b/arch/arm64/src/common/arm64_task_start.c @@ -65,6 +65,8 @@ void up_task_start(main_t taskentry, int argc, char *argv[]) { + struct tcb_s *rtcb = this_task(); + /* Set up to return to the user-space _start function in * unprivileged mode. We need: * @@ -75,7 +77,7 @@ void up_task_start(main_t taskentry, int argc, char *argv[]) */ arm64_jump_to_user((uint64_t)taskentry, (uint64_t)argc, (uint64_t)argv, - this_task()->xcp.initregs); + (uint64_t)rtcb->xcp.ustkptr, rtcb->xcp.initregs); } #endif /* !CONFIG_BUILD_FLAT */ diff --git a/arch/arm64/src/common/arm64_vector_table.S b/arch/arm64/src/common/arm64_vector_table.S index fa81c4e888e7f..8f58891c542bd 100644 --- a/arch/arm64/src/common/arm64_vector_table.S +++ b/arch/arm64/src/common/arm64_vector_table.S @@ -46,7 +46,7 @@ */ .macro arm64_enter_exception xreg0, xreg1 - sub sp, sp, #8 * XCPTCONTEXT_GP_REGS + sub sp, sp, #8 * XCPTCONTEXT_REGS stp x0, x1, [sp, #8 * REG_X0] stp x2, x3, [sp, #8 * REG_X2] @@ -65,8 +65,8 @@ stp x28, x29, [sp, #8 * REG_X28] /* Save the current task's SP_ELx and x30 */ - add \xreg0, sp, #8 * XCPTCONTEXT_GP_REGS - stp x30, \xreg0, [sp, #8 * REG_X30] + add \xreg0, sp, #8 * XCPTCONTEXT_REGS + stp x30, \xreg0, [sp, #8 * REG_X30] /* ELR and SPSR */ #if CONFIG_ARCH_ARM64_EXCEPTION_LEVEL == 3 @@ -78,17 +78,24 @@ #endif stp \xreg0, \xreg1, [sp, #8 * REG_ELR] - /* increment 
exception depth */ + mrs \xreg0, sp_el0 + mrs \xreg1, tpidrro_el0 + stp \xreg0, \xreg1, [sp, #8 * REG_SP_EL0] + + /* Increment exception depth */ mrs \xreg0, tpidrro_el0 mov \xreg1, #1 add \xreg0, \xreg0, \xreg1 msr tpidrro_el0, \xreg0 + /* Save the FPU registers */ + #ifdef CONFIG_ARCH_FPU - bl arm64_fpu_enter_exception + add x0, sp, #8 * XCPTCONTEXT_GP_REGS + bl arm64_fpu_save + ldr x0, [sp, #8 * REG_X0] #endif - .endm /**************************************************************************** @@ -216,25 +223,25 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table) .align 7 arm64_enter_exception x0, x1 - b arm64_mode32_error + b arm64_mode32_handler /* Lower EL using AArch32 / IRQ */ .align 7 arm64_enter_exception x0, x1 - b arm64_mode32_error + b arm64_mode32_handler /* Lower EL using AArch32 / FIQ */ .align 7 arm64_enter_exception x0, x1 - b arm64_mode32_error + b arm64_mode32_handler /* Lower EL using AArch32 / SError */ .align 7 arm64_enter_exception x0, x1 - b arm64_mode32_error + b arm64_mode32_handler /* Restore Corruptible Registers and exception context * from the task stack. 
@@ -243,9 +250,8 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table) GTEXT(arm64_exit_exception) SECTION_FUNC(text, arm64_exit_exception) #ifdef CONFIG_ARCH_FPU - bl arm64_fpu_exit_exception -GTEXT(arm64_exit_exc_fpu_done) -arm64_exit_exc_fpu_done: + add x0, sp, #8 * XCPTCONTEXT_GP_REGS + bl arm64_fpu_restore #endif /* restore spsr and elr at el1*/ @@ -259,6 +265,10 @@ arm64_exit_exc_fpu_done: msr spsr_el1, x1 #endif + ldp x0, x1, [sp, #8 * REG_SP_EL0] + msr sp_el0, x0 + msr tpidrro_el0, x1 + /* decrement exception depth */ mrs x0, tpidrro_el0 @@ -283,6 +293,6 @@ arm64_exit_exc_fpu_done: ldp x28, x29, [sp, #8 * REG_X28] ldp x30, xzr, [sp, #8 * REG_X30] - add sp, sp, #8 * XCPTCONTEXT_GP_REGS + add sp, sp, #8 * XCPTCONTEXT_REGS eret diff --git a/arch/arm64/src/common/arm64_vectors.S b/arch/arm64/src/common/arm64_vectors.S index d2d1e5c95a309..0c0605ca72b8b 100644 --- a/arch/arm64/src/common/arm64_vectors.S +++ b/arch/arm64/src/common/arm64_vectors.S @@ -99,63 +99,6 @@ SECTION_FUNC(text, up_saveusercontext) ret -/**************************************************************************** - * Function: arm64_context_switch - * - * Description: - * Routine to handle context switch - * - * arm64_context_switch( x0, x1) - * x0: restore thread stack context - * x1: save thread stack context - * note: - * x1 = 0, only restore x0 - * - ****************************************************************************/ - -GTEXT(arm64_context_switch) -SECTION_FUNC(text, arm64_context_switch) - cmp x1, #0x0 - beq restore_new - -#if defined(CONFIG_ARCH_FPU) && defined(CONFIG_SMP) - stp x0, x1, [sp, #-16]! - stp xzr, x30, [sp, #-16]! 
- bl arm64_fpu_context_save - ldp xzr, x30, [sp], #16 - ldp x0, x1, [sp], #16 -#endif - - /* Save the current SP_ELx */ - add x4, sp, #8 * XCPTCONTEXT_GP_REGS - str x4, [x1, #8 * REG_SP_ELX] - - /* Save the current task's SP_EL0 and exception depth */ - mrs x4, sp_el0 - mrs x5, tpidrro_el0 - stp x4, x5, [x1, #8 * REG_SP_EL0] - -restore_new: - - /* Restore SP_EL0 and thread's exception dept */ - ldp x4, x5, [x0, #8 * REG_SP_EL0] - msr tpidrro_el0, x5 - msr sp_el0, x4 - - /* retrieve new thread's SP_ELx */ - ldr x4, [x0, #8 * REG_SP_ELX] - sub sp, x4, #8 * XCPTCONTEXT_GP_REGS - -#ifdef CONFIG_ARCH_FPU - stp xzr, x30, [sp, #-16]! - bl arm64_fpu_context_restore - ldp xzr, x30, [sp], #16 -#endif - - /* Return to arm64_sync_exc() or arm64_irq_handler() */ - - ret - /**************************************************************************** * Function: arm64_jump_to_user * @@ -164,10 +107,11 @@ restore_new: * the kernel is ready to give control to the user task in user space. * * arm64_jump_to_user(entry, x0, x1, regs) - * entry: process entry point - * x0: parameter 0 for process - * x1: parameter 1 for process - * regs: integer register save area to use + * entry: process entry point + * x0: parameter 0 for process + * x1: parameter 1 for process + * sp_el0: user stack pointer + * regs: integer register save area to use * ****************************************************************************/ @@ -175,10 +119,11 @@ restore_new: GTEXT(arm64_jump_to_user) SECTION_FUNC(text, arm64_jump_to_user) msr daifset, #IRQ_DAIF_MASK - mov sp, x3 + mov sp, x4 str x0, [sp, #8 * REG_ELR] str x1, [sp, #8 * REG_X0] str x2, [sp, #8 * REG_X1] + str x3, [sp, #8 * REG_SP_EL0] mrs x0, spsr_el1 and x0, x0, #~SPSR_MODE_MASK #orr x0, x0, #SPSR_MODE_EL0T # EL0T=0x00, out of range for orr @@ -205,26 +150,13 @@ SECTION_FUNC(text, arm64_sync_exc) #endif lsr x10, x9, #26 -#ifdef CONFIG_ARCH_FPU - /* fpu trap */ - - cmp x10, #0x07 /* Access to SIMD or floating-point */ - bne 1f - mov x0, sp /* 
x0 = context */ - bl arm64_fpu_trap - - /* when the fpu trap is handled */ - - b arm64_exit_exc_fpu_done -1: -#endif /* 0x15 = SVC system call */ cmp x10, #0x15 /* if this is a svc call ?*/ - bne exc_handle + bne arm64_fatal_handler #ifdef CONFIG_LIB_SYSCALL /* Handle user system calls separately */ @@ -255,120 +187,23 @@ SECTION_FUNC(text, arm64_sync_exc) reserved_syscall: #endif - /* x0 = syscall_cmd - * if ( x0 <= SYS_switch_context ) { - * call context_switch - * it's a context switch syscall, so context need to be done - * } - * #define SYS_save_context (0) - * #define SYS_restore_context (1) - * #define SYS_switch_context (2) - */ - - ldr x0, [sp, #8 * REG_X0] - cmp x0, #SYS_save_context - beq save_context - - cmp x0, #SYS_switch_context - beq context_switch - - cmp x0, #SYS_restore_context - beq context_switch - - /* Normal syscall, thread context will not switch - * - * call the SVC handler with interrupts disabled. - * void arm64_syscall(uint64_t *regs) - * in: - * regs = pointer to struct reg_context allocating - * from stack, esf_reg has put on it - * regs[REG_X0]: syscall cmd - * regs[REG_X1] ~ regs[REG_X6]: syscall parameter - * out: - * x0: return by arm64_syscall - */ - - mov x0, sp /* x0 = reg frame */ - - /* Call arm64_syscall() on the user stack */ - - bl arm64_syscall /* Call the handler */ - - /* Return from exception */ - - b arm64_exit_exception - -context_switch: - /* Call arm64_syscall_switch() for context switch - * - * uint64_t * arm64_syscall_switch(uint64_t * regs) - * out: - * x0: return by arm64_syscall_switch, restore task context - * regs[REG_X1]: save task context, if x1 = 0, only restore x0 - */ - - mov x0, sp - bl arm64_syscall_switch - - /* get save task reg context pointer */ - - ldr x1, [sp, #8 * REG_X1] - cmp x1, #0x0 - - beq do_switch - ldr x1, [x1] - -do_switch: + /* Switch to IRQ stack and save current sp on it. 
*/ #ifdef CONFIG_SMP - /* Notes: - * Complete any pending TLB or cache maintenance on this CPU in case - * the thread migrates to a different CPU. - * This full barrier is also required by the membarrier system - * call. - */ - - dsb ish -#endif - - bl arm64_context_switch - -#ifdef CONFIG_ARCH_FPU - /* when the fpu trap is handled */ - - b arm64_exit_exc_fpu_done + get_cpu_id x0 + ldr x1, =(g_cpu_int_stacktop) + lsl x0, x0, #3 + ldr x1, [x1, x0] #else - b arm64_exit_exception + ldr x1, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK) #endif -save_context: - arm64_exception_context_save x0 x1 sp - mov x0, sp - bl arm64_syscall_save_context - - /* Save the return value into the ESF */ - - str x0, [sp, #8 * REG_X0] - - /* Return from exception */ - - b arm64_exit_exception - -exc_handle: - arm64_exception_context_save x0 x1 sp - mov x0, #K_ERR_CPU_EXCEPTION - mov x1, sp - - /* void arm64_fatal_error(unsigned int reason, const uint64_t *regs) - * x0 = reason - * x1 = Exception stack frame - */ - - bl arm64_fatal_error + mov sp, x1 - /* Return here only in case of recoverable error */ + bl arm64_syscall /* Call the handler */ - b arm64_exit_exception + mov sp, x0 + b arm64_exit_exception /**************************************************************************** * Name: arm64_irq_handler @@ -382,20 +217,16 @@ GTEXT(arm64_irq_handler) SECTION_FUNC(text, arm64_irq_handler) /* Switch to IRQ stack and save current sp on it. */ #ifdef CONFIG_SMP - get_cpu_id x1 - ldr x0, =(g_cpu_int_stacktop) - lsl x1, x1, #3 - ldr x0, [x0, x1] + get_cpu_id x0 + ldr x1, =(g_cpu_int_stacktop) + lsl x0, x0, #3 + ldr x1, [x1, x0] #else - ldr x0, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK) + ldr x1, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK) #endif - /* Save the task's stack and switch irq stack */ - mov x1, sp - mov sp, x0 - str x1, [sp, #-16]! 
- - mov x0, x1 /* x0 = reg frame */ + mov x0, sp + mov sp, x1 /* Call arm64_decodeirq() on the interrupt stack * with interrupts disabled @@ -403,149 +234,79 @@ SECTION_FUNC(text, arm64_irq_handler) bl arm64_decodeirq - /* Upon return from arm64_decodeirq, x0 holds the pointer to the - * call reg context area, which can be use to restore context. - * This may or may not be the same value that was passed to arm64_decodeirq: - * It will differ if a context switch is required. - */ - - ldr x1, [sp], #16 - - /* retrieve the task's stack. */ - - mov sp, x1 - - cmp x0, x1 - beq irq_exit - -irq_context_switch: -#ifdef CONFIG_SMP - /* Notes: - * Complete any pending TLB or cache maintenance on this CPU in case - * the thread migrates to a different CPU. - * This full barrier is also required by the membarrier system - * call. - */ - dsb ish - -#endif - - /* Switch thread - * - x0: restore task reg context, return by arm64_decodeirq, - * - x1: save task reg context, save before call arm64_decodeirq - * call arm64_context_switch(x0) to switch - */ - bl arm64_context_switch -#ifdef CONFIG_ARCH_FPU - /* when the fpu trap is handled */ - - b arm64_exit_exc_fpu_done -#endif - -irq_exit: - b arm64_exit_exception + mov sp, x0 + b arm64_exit_exception -/* TODO: if the arm64_fatal_error return success, maybe need context switch */ +/**************************************************************************** + * Name: arm64_serror_handler + * + * Description: + * SError exception handler + * + ****************************************************************************/ GTEXT(arm64_serror_handler) SECTION_FUNC(text, arm64_serror_handler) - arm64_exception_context_save x0 x1 sp - - mov x0, #K_ERR_CPU_EXCEPTION - mov x1, sp - - bl arm64_fatal_error + mov x0, sp + bl arm64_fatal_handler /* Return here only in case of recoverable error */ - b arm64_exit_exception - -GTEXT(arm64_mode32_error) -SECTION_FUNC(text, arm64_mode32_error) - arm64_exception_context_save x0 x1 sp + b 
arm64_exit_exception - mov x1, sp - mov x0, #K_ERR_CPU_MODE32 +/**************************************************************************** + * Name: arm64_mode32_handler + * + * Description: + * Mode32 exception handler + * + ****************************************************************************/ - bl arm64_fatal_error +GTEXT(arm64_mode32_handler) +SECTION_FUNC(text, arm64_mode32_handler) + mov x0, sp + bl arm64_fatal_handler /* Return here only in case of recoverable error */ - b arm64_exit_exception + b arm64_exit_exception + +/**************************************************************************** + * Name: arm64_fiq_handler + * + * Description: + * Interrupt exception handler + * + ****************************************************************************/ GTEXT(arm64_fiq_handler) SECTION_FUNC(text, arm64_fiq_handler) #ifndef CONFIG_ARM64_DECODEFIQ - arm64_exception_context_save x0 x1 sp - mov x1, sp - mov x0, #K_ERR_SPURIOUS_IRQ /* K_ERR_SPURIOUS_IRQ */ - - bl arm64_fatal_error + mov x0, sp + bl arm64_fatal_handler /* Return here only in case of recoverable error */ - b arm64_exit_exception + b arm64_exit_exception #else /* Switch to FIQ stack and save current sp on it. */ #ifdef CONFIG_SMP - get_cpu_id x1 - ldr x0, =(g_cpu_int_fiq_stacktop) - lsl x1, x1, #3 - ldr x0, [x0, x1] + get_cpu_id x0 + ldr x1, =(g_cpu_int_fiq_stacktop) + lsl x0, x0, #3 + ldr x1, [x1, x0] #else - ldr x0, =(g_interrupt_fiq_stack + CONFIG_ARCH_INTERRUPTSTACK) + ldr x1, =(g_interrupt_fiq_stack + CONFIG_ARCH_INTERRUPTSTACK) #endif - /* Save the task's stack and switch fiq stack */ - - mov x1, sp - mov sp, x0 - str x1, [sp, #-16]! - - mov x0, x1 /* x0 = reg frame */ - - /* Call arm64_decodefiq() on the interrupt stack - * with interrupts disabled - */ - - bl arm64_decodefiq - - /* Upon return from arm64_decodefiq, x0 holds the pointer to the - * call reg context area, which can be use to restore context. 
- * This may or may not be the same value that was passed to arm64_decodefiq: - * It will differ if a context switch is required. - */ - - ldr x1, [sp], #16 - - /* retrieve the task's stack. */ + mov x0, sp mov sp, x1 - cmp x0, x1 - beq fiq_exit - - -#ifdef CONFIG_SMP - /* Notes: - * Complete any pending TLB or cache maintenance on this CPU in case - * the thread migrates to a different CPU. - * This full barrier is also required by the membarrier system - * call. + /* Call arm64_decodeirq() on the interrupt stack + * with interrupts disabled */ - dsb ish - -#endif - /* Switch thread - * - x0: restore task reg context, return by arm64_decodefiq, - * - x1: save task reg context, save before call arm64_decodefiq - * call arm64_context_switch(x0) to switch - */ - bl arm64_context_switch -#ifdef CONFIG_ARCH_FPU - /* when the fpu trap is handled */ + bl arm64_decodeirq - b arm64_exit_exc_fpu_done -#endif -fiq_exit: - b arm64_exit_exception + mov sp, x0 + b arm64_exit_exception #endif diff --git a/arch/avr/include/avr/irq.h b/arch/avr/include/avr/irq.h index a0df8a95e2e49..df9a82dbae498 100644 --- a/arch/avr/include/avr/irq.h +++ b/arch/avr/include/avr/irq.h @@ -93,12 +93,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of PC and SR used during signal processing. * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/avr/include/avr32/irq.h b/arch/avr/include/avr32/irq.h index 755d9086ceb49..ed8e064bc62dc 100644 --- a/arch/avr/include/avr32/irq.h +++ b/arch/avr/include/avr32/irq.h @@ -93,12 +93,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. 
- */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of PC and SR used during signal processing. * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/avr/src/avr/avr_schedulesigaction.c b/arch/avr/src/avr/avr_schedulesigaction.c index 251556aa91b63..02f7d16132966 100644 --- a/arch/avr/src/avr/avr_schedulesigaction.c +++ b/arch/avr/src/avr/avr_schedulesigaction.c @@ -75,120 +75,112 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { uintptr_t reg_ptr = (uintptr_t)avr_sigdeliver; - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ + /* In this case just deliver the signal now. */ - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - * - * Hmmm... 
there looks like a latent bug here: The following - * logic would fail in the strange case where we are in an - * interrupt handler, the thread is signalling itself, but - * a context switch to another task has occurred so that - * g_current_regs does not refer to the thread of this_task()! - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } - else - { - /* Save registers that must be protected while the signal - * handler runs. These will be restored by the signal - * trampoline after the signal(s) have been delivered. - */ + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following + * logic would fail in the strange case where we are in an + * interrupt handler, the thread is signalling itself, but + * a context switch to another task has occurred so that + * g_current_regs does not refer to the thread of this_task()! + */ - tcb->xcp.saved_pc0 = up_current_regs()[REG_PC0]; - tcb->xcp.saved_pc1 = up_current_regs()[REG_PC1]; + else + { + /* Save registers that must be protected while the signal + * handler runs. These will be restored by the signal + * trampoline after the signal(s) have been delivered. 
+ */ + + tcb->xcp.saved_pc0 = up_current_regs()[REG_PC0]; + tcb->xcp.saved_pc1 = up_current_regs()[REG_PC1]; #if defined(REG_PC2) - tcb->xcp.saved_pc2 = up_current_regs()[REG_PC2]; + tcb->xcp.saved_pc2 = up_current_regs()[REG_PC2]; #endif - tcb->xcp.saved_sreg = up_current_regs()[REG_SREG]; + tcb->xcp.saved_sreg = up_current_regs()[REG_SREG]; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ #if !defined(REG_PC2) - up_current_regs()[REG_PC0] = (uint16_t)reg_ptr >> 8; - up_current_regs()[REG_PC1] = (uint16_t)reg_ptr & 0xff; + up_current_regs()[REG_PC0] = (uint16_t)reg_ptr >> 8; + up_current_regs()[REG_PC1] = (uint16_t)reg_ptr & 0xff; #else - up_current_regs()[REG_PC0] = (uint32_t)reg_ptr >> 16; - up_current_regs()[REG_PC1] = (uint32_t)reg_ptr >> 8; - up_current_regs()[REG_PC2] = (uint32_t)reg_ptr & 0xff; + up_current_regs()[REG_PC0] = (uint32_t)reg_ptr >> 16; + up_current_regs()[REG_PC1] = (uint32_t)reg_ptr >> 8; + up_current_regs()[REG_PC2] = (uint32_t)reg_ptr & 0xff; #endif - up_current_regs()[REG_SREG] &= ~(1 << SREG_I); + up_current_regs()[REG_SREG] &= ~(1 << SREG_I); - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ - avr_savestate(tcb->xcp.regs); - } + avr_savestate(tcb->xcp.regs); } + } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. - */ + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ - else - { - /* Save registers that must be protected while the signal handler - * runs. 
These will be restored by the signal trampoline after - * the signals have been delivered. - */ + else + { + /* Save registers that must be protected while the signal handler + * runs. These will be restored by the signal trampoline after + * the signals have been delivered. + */ - tcb->xcp.saved_pc0 = tcb->xcp.regs[REG_PC0]; - tcb->xcp.saved_pc1 = tcb->xcp.regs[REG_PC1]; + tcb->xcp.saved_pc0 = tcb->xcp.regs[REG_PC0]; + tcb->xcp.saved_pc1 = tcb->xcp.regs[REG_PC1]; #if defined(REG_PC2) - tcb->xcp.saved_pc2 = tcb->xcp.regs[REG_PC2]; + tcb->xcp.saved_pc2 = tcb->xcp.regs[REG_PC2]; #endif - tcb->xcp.saved_sreg = tcb->xcp.regs[REG_SREG]; + tcb->xcp.saved_sreg = tcb->xcp.regs[REG_SREG]; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ #if !defined(REG_PC2) - tcb->xcp.regs[REG_PC0] = (uint16_t)reg_ptr >> 8; - tcb->xcp.regs[REG_PC1] = (uint16_t)reg_ptr & 0xff; + tcb->xcp.regs[REG_PC0] = (uint16_t)reg_ptr >> 8; + tcb->xcp.regs[REG_PC1] = (uint16_t)reg_ptr & 0xff; #else - tcb->xcp.regs[REG_PC0] = (uint32_t)reg_ptr >> 16; - tcb->xcp.regs[REG_PC1] = (uint32_t)reg_ptr >> 8; - tcb->xcp.regs[REG_PC2] = (uint32_t)reg_ptr & 0xff; + tcb->xcp.regs[REG_PC0] = (uint32_t)reg_ptr >> 16; + tcb->xcp.regs[REG_PC1] = (uint32_t)reg_ptr >> 8; + tcb->xcp.regs[REG_PC2] = (uint32_t)reg_ptr & 0xff; #endif - tcb->xcp.regs[REG_SREG] &= ~(1 << SREG_I); - } + tcb->xcp.regs[REG_SREG] &= ~(1 << SREG_I); } } diff --git a/arch/avr/src/avr/avr_sigdeliver.c b/arch/avr/src/avr/avr_sigdeliver.c index 24b56fc8ee03e..be1093bcfb30c 100644 --- a/arch/avr/src/avr/avr_sigdeliver.c +++ b/arch/avr/src/avr/avr_sigdeliver.c @@ -59,8 +59,8 @@ void avr_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + 
DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -76,7 +76,7 @@ void avr_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -96,13 +96,13 @@ void avr_sigdeliver(void) * could be modified by a hostile program. */ - regs[REG_PC0] = rtcb->xcp.saved_pc0; - regs[REG_PC1] = rtcb->xcp.saved_pc1; + regs[REG_PC0] = rtcb->xcp.saved_pc0; + regs[REG_PC1] = rtcb->xcp.saved_pc1; #if defined(REG_PC2) - regs[REG_PC2] = rtcb->xcp.saved_pc2; + regs[REG_PC2] = rtcb->xcp.saved_pc2; #endif - regs[REG_SREG] = rtcb->xcp.saved_sreg; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_SREG] = rtcb->xcp.saved_sreg; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. This is an * unusual case that must be handled by up_fullcontextresore. This case is diff --git a/arch/avr/src/avr32/avr_initialstate.c b/arch/avr/src/avr32/avr_initialstate.c index a48315678e49f..14e9886a4c662 100644 --- a/arch/avr/src/avr32/avr_initialstate.c +++ b/arch/avr/src/avr32/avr_initialstate.c @@ -87,7 +87,7 @@ void up_initial_state(struct tcb_s *tcb) #else /* No pending signal delivery */ - xcp->sigdeliver = NULL; + tcb->sigdeliver = NULL; /* Clear the frame pointer and link register since this is the outermost * frame. 
diff --git a/arch/avr/src/avr32/avr_schedulesigaction.c b/arch/avr/src/avr32/avr_schedulesigaction.c index 0a84063baada1..66ead0617ab21 100644 --- a/arch/avr/src/avr32/avr_schedulesigaction.c +++ b/arch/avr/src/avr32/avr_schedulesigaction.c @@ -75,95 +75,87 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - * - * Hmmm... there looks like a latent bug here: The following - * logic would fail in the strange case where we are in an - * interrupt handler, the thread is signalling itself, but - * a context switch to another task has occurred so that - * g_current_regs does not refer to the thread of this_task()! 
- */ - - else - { - /* Save registers that must be protected while the signal - * handler runs. These will be restored by the signal - * trampoline after the signal(s) have been delivered. - */ + /* In this case just deliver the signal now. */ - tcb->xcp.saved_pc = up_current_regs()[REG_PC]; - tcb->xcp.saved_sr = up_current_regs()[REG_SR]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_PC] = (uint32_t)avr_sigdeliver; - up_current_regs()[REG_SR] |= AVR32_SR_GM_MASK; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ - - avr_savestate(tcb->xcp.regs); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following + * logic would fail in the strange case where we are in an + * interrupt handler, the thread is signalling itself, but + * a context switch to another task has occurred so that + * g_current_regs does not refer to the thread of this_task()! */ else { - /* Save registers that must be protected while the signal handler - * runs. These will be restored by the signal trampoline after - * the signals have been delivered. + /* Save registers that must be protected while the signal + * handler runs. These will be restored by the signal + * trampoline after the signal(s) have been delivered. 
*/ - tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; - tcb->xcp.saved_sr = tcb->xcp.regs[REG_SR]; + tcb->xcp.saved_pc = up_current_regs()[REG_PC]; + tcb->xcp.saved_sr = up_current_regs()[REG_SR]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_PC] = (uint32_t)avr_sigdeliver; - tcb->xcp.regs[REG_SR] |= AVR32_SR_GM_MASK; + up_current_regs()[REG_PC] = (uint32_t)avr_sigdeliver; + up_current_regs()[REG_SR] |= AVR32_SR_GM_MASK; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + avr_savestate(tcb->xcp.regs); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save registers that must be protected while the signal handler + * runs. These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_sr = tcb->xcp.regs[REG_SR]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)avr_sigdeliver; + tcb->xcp.regs[REG_SR] |= AVR32_SR_GM_MASK; + } } diff --git a/arch/avr/src/avr32/avr_sigdeliver.c b/arch/avr/src/avr32/avr_sigdeliver.c index ca81e807ced99..331ac29fd624f 100644 --- a/arch/avr/src/avr32/avr_sigdeliver.c +++ b/arch/avr/src/avr32/avr_sigdeliver.c @@ -63,8 +63,8 @@ void avr_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. 
*/ @@ -80,7 +80,7 @@ void avr_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -100,9 +100,9 @@ void avr_sigdeliver(void) * could be modified by a hostile program. */ - regs[REG_PC] = rtcb->xcp.saved_pc; - regs[REG_SR] = rtcb->xcp.saved_sr; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_PC] = rtcb->xcp.saved_pc; + regs[REG_SR] = rtcb->xcp.saved_sr; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. This is an * unusual case that must be handled by up_fullcontextresore. This case is diff --git a/arch/ceva/include/xc5/irq.h b/arch/ceva/include/xc5/irq.h index cf77787e0af16..0d413daccb70f 100644 --- a/arch/ceva/include/xc5/irq.h +++ b/arch/ceva/include/xc5/irq.h @@ -127,12 +127,6 @@ struct xcpt_syscall_s struct xcptcontext { #ifndef CONFIG_DISABLE_SIGNALS - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. */ diff --git a/arch/ceva/include/xm6/irq.h b/arch/ceva/include/xm6/irq.h index 0a735828ec2ef..44732f3d9e4ec 100644 --- a/arch/ceva/include/xm6/irq.h +++ b/arch/ceva/include/xm6/irq.h @@ -130,12 +130,6 @@ struct xcpt_syscall_s struct xcptcontext { #ifndef CONFIG_DISABLE_SIGNALS - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. 
*/ diff --git a/arch/ceva/src/common/ceva_schedulesigaction.c b/arch/ceva/src/common/ceva_schedulesigaction.c index 2eea396b0b994..c96a741141280 100644 --- a/arch/ceva/src/common/ceva_schedulesigaction.c +++ b/arch/ceva/src/common/ceva_schedulesigaction.c @@ -72,138 +72,105 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); - DEBUGASSERT(tcb != NULL && sigdeliver != NULL); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb->task_state == TSTATE_TASK_RUNNING) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. - */ - - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb->task_state == TSTATE_TASK_RUNNING) - { - uint8_t me = this_cpu(); + uint8_t me = this_cpu(); #ifdef CONFIG_SMP - uint8_t cpu = tcb->cpu; + uint8_t cpu = tcb->cpu; #else - uint8_t cpu = 0; + uint8_t cpu = 0; #endif - /* CASE 1: We are not in an interrupt handler and a task is - * signaling itself for some reason. - */ - - if (cpu == me && !up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: The task that needs to receive the signal is running. - * This could happen if the task is running on another CPU OR if - * we are in an interrupt handler and the task is running on this - * CPU. In the former case, we will have to PAUSE the other CPU - * first. 
But in either case, we will have to modify the return - * state as well as the state in the TCB. - */ - - else - { -#ifdef CONFIG_SMP - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ + /* CASE 1: We are not in an interrupt handler and a task is + * signaling itself for some reason. + */ - if (cpu != me) - { - /* Pause the CPU */ + if (cpu == me && !up_current_regs()) + { + /* In this case just deliver the signal now. */ - up_cpu_pause(cpu); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } - /* Now tcb on the other CPU can be accessed safely */ -#endif + /* CASE 2: The task that needs to receive the signal is running. + * This could happen if the task is running on another CPU OR if + * we are in an interrupt handler and the task is running on this + * CPU. In the former case, we will have to PAUSE the other CPU + * first. But in either case, we will have to modify the return + * state as well as the state in the TCB. + */ - /* Save the current register context location */ + else + { + /* Save the current register context location */ - tcb->xcp.saved_regs = up_current_regs(); + tcb->xcp.saved_regs = up_current_regs(); - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ - up_current_regs() -= XCPTCONTEXT_REGS; - memcpy(up_current_regs(), up_current_regs() + - XCPTCONTEXT_REGS, XCPTCONTEXT_SIZE); + up_current_regs() -= XCPTCONTEXT_REGS; + memcpy(up_current_regs(), up_current_regs() + + XCPTCONTEXT_REGS, XCPTCONTEXT_SIZE); - up_current_regs()[REG_SP] = (uint32_t)up_current_regs(); + up_current_regs()[REG_SP] = (uint32_t)up_current_regs(); - /* Then set up to vector to the trampoline with interrupts - * unchanged. We must already be in privileged thread mode - * to be here. 
- */ + /* Then set up to vector to the trampoline with interrupts + * unchanged. We must already be in privileged thread mode + * to be here. + */ - up_current_regs()[REG_PC] = (uint32_t)ceva_sigdeliver; + up_current_regs()[REG_PC] = (uint32_t)ceva_sigdeliver; #ifdef REG_OM - up_current_regs()[REG_OM] &= ~REG_OM_MASK; - up_current_regs()[REG_OM] |= REG_OM_KERNEL; -#endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } + up_current_regs()[REG_OM] &= ~REG_OM_MASK; + up_current_regs()[REG_OM] |= REG_OM_KERNEL; #endif - } } + } - /* Otherwise, we are (1) signaling a task is not running from an - * interrupt handler or (2) we are not in an interrupt handler and the - * running task is signaling some other non-running task. - */ + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signaling some other non-running task. + */ - else - { - /* Save the current register context location */ + else + { + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be restored - * by the signal trampoline after the signal has been delivered. - */ + /* Duplicate the register context. These will be restored + * by the signal trampoline after the signal has been delivered. + */ - tcb->xcp.regs -= XCPTCONTEXT_REGS; - memcpy(tcb->xcp.regs, tcb->xcp.regs + - XCPTCONTEXT_REGS, XCPTCONTEXT_SIZE); + tcb->xcp.regs -= XCPTCONTEXT_REGS; + memcpy(tcb->xcp.regs, tcb->xcp.regs + + XCPTCONTEXT_REGS, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs; + tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs; - /* Then set up to vector to the trampoline with interrupts - * unchanged. We must already be in privileged thread mode to be - * here. 
- */ + /* Then set up to vector to the trampoline with interrupts + * unchanged. We must already be in privileged thread mode to be + * here. + */ - tcb->xcp.regs[REG_PC] = (uint32_t)ceva_sigdeliver; + tcb->xcp.regs[REG_PC] = (uint32_t)ceva_sigdeliver; #ifdef REG_OM - tcb->xcp.regs[REG_OM] &= ~REG_OM_MASK; - tcb->xcp.regs[REG_OM] |= REG_OM_KERNEL; + tcb->xcp.regs[REG_OM] &= ~REG_OM_MASK; + tcb->xcp.regs[REG_OM] |= REG_OM_KERNEL; #endif - } } } diff --git a/arch/ceva/src/common/ceva_sigdeliver.c b/arch/ceva/src/common/ceva_sigdeliver.c index 3bf25abdffafb..9d0deb4bf380e 100644 --- a/arch/ceva/src/common/ceva_sigdeliver.c +++ b/arch/ceva/src/common/ceva_sigdeliver.c @@ -62,16 +62,16 @@ void ceva_sigdeliver(void) int saved_errno = rtcb->pterrno; sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Get a local copy of the sigdeliver function pointer. We do this so that * we can nullify the sigdeliver function pointer in the TCB and accept * more signal deliveries while processing the current pending signals. */ - sigdeliver = (sig_deliver_t)rtcb->xcp.sigdeliver; - rtcb->xcp.sigdeliver = NULL; + sigdeliver = rtcb->sigdeliver; + rtcb->sigdeliver = NULL; /* Deliver the signal */ diff --git a/arch/mips/include/mips32/irq.h b/arch/mips/include/mips32/irq.h index 228f95a56585c..031ba4fe2b233 100644 --- a/arch/mips/include/mips32/irq.h +++ b/arch/mips/include/mips32/irq.h @@ -321,12 +321,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-NULL if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These additional register save locations are used to implement the * signal delivery trampoline. 
* diff --git a/arch/mips/src/mips32/mips_schedulesigaction.c b/arch/mips/src/mips32/mips_schedulesigaction.c index acbb8318fb586..32501b02eb4ae 100644 --- a/arch/mips/src/mips32/mips_schedulesigaction.c +++ b/arch/mips/src/mips32/mips_schedulesigaction.c @@ -76,88 +76,41 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { uint32_t status; - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - * - * Hmmm... there looks like a latent bug here: The following - * logic would fail in the strange case where we are in an - * interrupt handler, the thread is signalling itself, but - * a context switch to another task has occurred so that - * g_current_regs does not refer to the thread of this_task()! 
- */ + /* In this case just deliver the signal now. */ - else - { - /* Save the return EPC and STATUS registers. These will be - * restored by the signal trampoline after the signals have - * been delivered. - */ - - tcb->xcp.saved_epc = up_current_regs()[REG_EPC]; - tcb->xcp.saved_status = up_current_regs()[REG_STATUS]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_EPC] = (uint32_t)mips_sigdeliver; - status = up_current_regs()[REG_STATUS]; - status &= ~CP0_STATUS_INT_MASK; - status |= CP0_STATUS_INT_SW0; - up_current_regs()[REG_STATUS] = status; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ - - mips_savestate(tcb->xcp.regs); - - sinfo("PC/STATUS Saved: %08" PRIx32 "/%08" PRIx32 - " New: %08" PRIx32 "/%08" PRIx32 "\n", - tcb->xcp.saved_epc, tcb->xcp.saved_status, - up_current_regs()[REG_EPC], - up_current_regs()[REG_STATUS]); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following + * logic would fail in the strange case where we are in an + * interrupt handler, the thread is signalling itself, but + * a context switch to another task has occurred so that + * g_current_regs does not refer to the thread of this_task()! */ else @@ -167,23 +120,62 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) * been delivered. 
*/ - tcb->xcp.saved_epc = tcb->xcp.regs[REG_EPC]; - tcb->xcp.saved_status = tcb->xcp.regs[REG_STATUS]; + tcb->xcp.saved_epc = up_current_regs()[REG_EPC]; + tcb->xcp.saved_status = up_current_regs()[REG_STATUS]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_EPC] = (uint32_t)mips_sigdeliver; - status = tcb->xcp.regs[REG_STATUS]; - status &= ~CP0_STATUS_INT_MASK; - status |= CP0_STATUS_INT_SW0; - tcb->xcp.regs[REG_STATUS] = status; + up_current_regs()[REG_EPC] = (uint32_t)mips_sigdeliver; + status = up_current_regs()[REG_STATUS]; + status &= ~CP0_STATUS_INT_MASK; + status |= CP0_STATUS_INT_SW0; + up_current_regs()[REG_STATUS] = status; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + mips_savestate(tcb->xcp.regs); sinfo("PC/STATUS Saved: %08" PRIx32 "/%08" PRIx32 " New: %08" PRIx32 "/%08" PRIx32 "\n", tcb->xcp.saved_epc, tcb->xcp.saved_status, - tcb->xcp.regs[REG_EPC], tcb->xcp.regs[REG_STATUS]); + up_current_regs()[REG_EPC], + up_current_regs()[REG_STATUS]); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save the return EPC and STATUS registers. These will be + * restored by the signal trampoline after the signals have + * been delivered. 
+ */ + + tcb->xcp.saved_epc = tcb->xcp.regs[REG_EPC]; + tcb->xcp.saved_status = tcb->xcp.regs[REG_STATUS]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_EPC] = (uint32_t)mips_sigdeliver; + status = tcb->xcp.regs[REG_STATUS]; + status &= ~CP0_STATUS_INT_MASK; + status |= CP0_STATUS_INT_SW0; + tcb->xcp.regs[REG_STATUS] = status; + + sinfo("PC/STATUS Saved: %08" PRIx32 "/%08" PRIx32 + " New: %08" PRIx32 "/%08" PRIx32 "\n", + tcb->xcp.saved_epc, tcb->xcp.saved_status, + tcb->xcp.regs[REG_EPC], tcb->xcp.regs[REG_STATUS]); + } } diff --git a/arch/mips/src/mips32/mips_sigdeliver.c b/arch/mips/src/mips32/mips_sigdeliver.c index ea810a56386f3..d942689c918c4 100644 --- a/arch/mips/src/mips32/mips_sigdeliver.c +++ b/arch/mips/src/mips32/mips_sigdeliver.c @@ -61,8 +61,8 @@ void mips_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -78,7 +78,7 @@ void mips_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -100,9 +100,9 @@ void mips_sigdeliver(void) * could be modified by a hostile program. */ - regs[REG_EPC] = rtcb->xcp.saved_epc; - regs[REG_STATUS] = rtcb->xcp.saved_status; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_EPC] = rtcb->xcp.saved_epc; + regs[REG_STATUS] = rtcb->xcp.saved_status; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. 
diff --git a/arch/misoc/include/lm32/irq.h b/arch/misoc/include/lm32/irq.h index 69bdd8a271ff0..799cba3d5da95 100644 --- a/arch/misoc/include/lm32/irq.h +++ b/arch/misoc/include/lm32/irq.h @@ -179,12 +179,6 @@ struct xcptcontext { - /* The following function pointer is non-NULL if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These additional register save locations are used to implement the * signal delivery trampoline. * diff --git a/arch/misoc/include/minerva/irq.h b/arch/misoc/include/minerva/irq.h index 4b81cec28141e..c34c49861ec31 100644 --- a/arch/misoc/include/minerva/irq.h +++ b/arch/misoc/include/minerva/irq.h @@ -261,12 +261,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-NULL if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These additional register save locations are used to implement the * signal delivery trampoline. */ diff --git a/arch/misoc/src/lm32/lm32_schedulesigaction.c b/arch/misoc/src/lm32/lm32_schedulesigaction.c index afc8edafe8d5a..437f180d758de 100644 --- a/arch/misoc/src/lm32/lm32_schedulesigaction.c +++ b/arch/misoc/src/lm32/lm32_schedulesigaction.c @@ -75,81 +75,39 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. 
+ /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - * - * Hmmm... there looks like a latent bug here: The following - * logic would fail in the strange case where we are in an - * interrupt handler, the thread is signalling itself, but - * a context switch to another task has occurred so that - * g_current_regs does not refer to the thread of this_task()! - */ - - else - { - /* Save the return EPC and STATUS registers. These will be - * restored by the signal trampoline after the signals have - * been delivered. - */ + /* In this case just deliver the signal now. */ - tcb->xcp.saved_epc = up_current_regs()[REG_EPC]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_EPC] = (uint32_t)lm32_sigdeliver; - up_current_regs()[REG_INT_CTX] = 0; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. 
- */ - - misoc_savestate(tcb->xcp.regs); - - sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n", - tcb->xcp.saved_epc, tcb->xcp.saved_status, - up_current_regs()[REG_EPC], - up_current_regs()[REG_STATUS]); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following + * logic would fail in the strange case where we are in an + * interrupt handler, the thread is signalling itself, but + * a context switch to another task has occurred so that + * g_current_regs does not refer to the thread of this_task()! */ else @@ -159,19 +117,53 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) * been delivered. */ - tcb->xcp.saved_epc = tcb->xcp.regs[REG_EPC]; - tcb->xcp.saved_int_ctx = tcb->xcp.regs[REG_INT_CTX]; + tcb->xcp.saved_epc = up_current_regs()[REG_EPC]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_EPC] = (uint32_t)lm32_sigdeliver; - tcb->xcp.regs[REG_INT_CTX] = 0; + up_current_regs()[REG_EPC] = (uint32_t)lm32_sigdeliver; + up_current_regs()[REG_INT_CTX] = 0; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. 
+ */ + + misoc_savestate(tcb->xcp.regs); sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n", tcb->xcp.saved_epc, tcb->xcp.saved_status, - tcb->xcp.regs[REG_EPC], tcb->xcp.regs[REG_STATUS]); + up_current_regs()[REG_EPC], + up_current_regs()[REG_STATUS]); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save the return EPC and STATUS registers. These will be + * restored by the signal trampoline after the signals have + * been delivered. + */ + + tcb->xcp.saved_epc = tcb->xcp.regs[REG_EPC]; + tcb->xcp.saved_int_ctx = tcb->xcp.regs[REG_INT_CTX]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_EPC] = (uint32_t)lm32_sigdeliver; + tcb->xcp.regs[REG_INT_CTX] = 0; + + sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n", + tcb->xcp.saved_epc, tcb->xcp.saved_status, + tcb->xcp.regs[REG_EPC], tcb->xcp.regs[REG_STATUS]); + } } diff --git a/arch/misoc/src/lm32/lm32_sigdeliver.c b/arch/misoc/src/lm32/lm32_sigdeliver.c index 4adc84955909f..081f40d6e3739 100644 --- a/arch/misoc/src/lm32/lm32_sigdeliver.c +++ b/arch/misoc/src/lm32/lm32_sigdeliver.c @@ -60,8 +60,8 @@ void lm32_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. 
*/ @@ -77,7 +77,7 @@ void lm32_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -99,9 +99,9 @@ void lm32_sigdeliver(void) * could be modified by a hostile program. */ - regs[REG_EPC] = rtcb->xcp.saved_epc; - regs[REG_INT_CTX] = rtcb->xcp.saved_int_ctx; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_EPC] = rtcb->xcp.saved_epc; + regs[REG_INT_CTX] = rtcb->xcp.saved_int_ctx; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. diff --git a/arch/misoc/src/minerva/minerva_schedulesigaction.c b/arch/misoc/src/minerva/minerva_schedulesigaction.c index e0f2509aff032..9d8487dc689e0 100644 --- a/arch/misoc/src/minerva/minerva_schedulesigaction.c +++ b/arch/misoc/src/minerva/minerva_schedulesigaction.c @@ -76,103 +76,95 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. 
*/ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and a task is - * signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the interrupted task - * is the same as the one that must receive the signal, then we - * will have to modify the return state as well as the state in - * the TCB. - * - * Hmmm... there looks like a latent bug here: The following logic - * would fail in the strange case where we are in an interrupt - * handler, the thread is signalling itself, but a context switch - * to another task has occurred so that g_current_regs does not - * refer to the thread of this_task()! - */ - - else - { - /* Save the return EPC and STATUS registers. These will be - * restored by the signal trampoline after the signals have - * been delivered. - */ + /* In this case just deliver the signal now. */ - tcb->xcp.saved_epc = up_current_regs()[REG_CSR_MEPC]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_CSR_MEPC] = - (uint32_t)minerva_sigdeliver; - up_current_regs()[REG_CSR_MSTATUS] &= ~CSR_MSTATUS_MIE; - - /* And make sure that the saved context in the TCB is the same - * as the interrupt return context. - */ - - misoc_savestate(tcb->xcp.regs); - - sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n", - tcb->xcp.saved_epc, tcb->xcp.saved_status, - up_current_regs()[REG_CSR_MEPC], - up_current_regs()[REG_CSR_MSTATUS]); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running from an - * interrupt handler or (2) we are not in an interrupt handler and the - * running task is signalling some non-running task. 
+ /* CASE 2: We are in an interrupt handler AND the interrupted task + * is the same as the one that must receive the signal, then we + * will have to modify the return state as well as the state in + * the TCB. + * + * Hmmm... there looks like a latent bug here: The following logic + * would fail in the strange case where we are in an interrupt + * handler, the thread is signalling itself, but a context switch + * to another task has occurred so that g_current_regs does not + * refer to the thread of this_task()! */ else { /* Save the return EPC and STATUS registers. These will be - * restored by the signal trampoline after the signals have been - * delivered. + * restored by the signal trampoline after the signals have + * been delivered. */ - tcb->xcp.saved_epc = tcb->xcp.regs[REG_CSR_MEPC]; - tcb->xcp.saved_int_ctx = tcb->xcp.regs[REG_CSR_MSTATUS]; + tcb->xcp.saved_epc = up_current_regs()[REG_CSR_MEPC]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_CSR_MEPC] = (uint32_t) minerva_sigdeliver; + up_current_regs()[REG_CSR_MEPC] = + (uint32_t)minerva_sigdeliver; up_current_regs()[REG_CSR_MSTATUS] &= ~CSR_MSTATUS_MIE; + /* And make sure that the saved context in the TCB is the same + * as the interrupt return context. + */ + + misoc_savestate(tcb->xcp.regs); + sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n", tcb->xcp.saved_epc, tcb->xcp.saved_status, - tcb->xcp.regs[REG_CSR_MEPC], tcb->xcp.regs[REG_CSR_MSTATUS]); + up_current_regs()[REG_CSR_MEPC], + up_current_regs()[REG_CSR_MSTATUS]); } } + + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signalling some non-running task. + */ + + else + { + /* Save the return EPC and STATUS registers. These will be + * restored by the signal trampoline after the signals have been + * delivered. 
+ */ + + tcb->xcp.saved_epc = tcb->xcp.regs[REG_CSR_MEPC]; + tcb->xcp.saved_int_ctx = tcb->xcp.regs[REG_CSR_MSTATUS]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_CSR_MEPC] = (uint32_t) minerva_sigdeliver; + up_current_regs()[REG_CSR_MSTATUS] &= ~CSR_MSTATUS_MIE; + + sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n", + tcb->xcp.saved_epc, tcb->xcp.saved_status, + tcb->xcp.regs[REG_CSR_MEPC], tcb->xcp.regs[REG_CSR_MSTATUS]); + } } diff --git a/arch/misoc/src/minerva/minerva_sigdeliver.c b/arch/misoc/src/minerva/minerva_sigdeliver.c index ac2603f5046e3..306a1a77dfd61 100644 --- a/arch/misoc/src/minerva/minerva_sigdeliver.c +++ b/arch/misoc/src/minerva/minerva_sigdeliver.c @@ -62,8 +62,8 @@ void minerva_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the real return state on the stack. */ @@ -76,8 +76,8 @@ void minerva_sigdeliver(void) * more signal deliveries while processing the current pending signals. */ - sigdeliver = rtcb->xcp.sigdeliver; - rtcb->xcp.sigdeliver = NULL; + sigdeliver = rtcb->sigdeliver; + rtcb->sigdeliver = NULL; # ifndef CONFIG_SUPPRESS_INTERRUPTS /* Then make sure that interrupts are enabled. Signal handlers must always diff --git a/arch/or1k/include/mor1kx/irq.h b/arch/or1k/include/mor1kx/irq.h index ba1e5f736bd68..f45e5949ed287 100644 --- a/arch/or1k/include/mor1kx/irq.h +++ b/arch/or1k/include/mor1kx/irq.h @@ -170,12 +170,6 @@ struct xcptcontext uint32_t regs[XCPTCONTEXT_REGS]; - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of LR and CPSR used during * signal processing. 
* diff --git a/arch/or1k/src/common/or1k_schedulesigaction.c b/arch/or1k/src/common/or1k_schedulesigaction.c index 9ee20473d89af..c7f56ede5b856 100644 --- a/arch/or1k/src/common/or1k_schedulesigaction.c +++ b/arch/or1k/src/common/or1k_schedulesigaction.c @@ -74,80 +74,39 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - * - * Hmmm... there looks like a latent bug here: The following - * logic would fail in the strange case where we are in an - * interrupt handler, the thread is signalling itself, but - * a context switch to another task has occurred so that - * current_regs does not refer to the thread of this_task()! 
- */ - - else - { - /* Save the return lr and cpsr and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. - */ - - /* tcb->xcp.saved_pc = up_current_regs()[REG_PC]; - * tcb->xcp.saved_cpsr = up_current_regs()[REG_CPSR]; - */ + /* In this case just deliver the signal now. */ - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - /* up_current_regs()[REG_PC] = (uint32_t)or1k_sigdeliver; - * up_current_regs()[REG_CPSR] = SVC_MODE | PSR_I_BIT | - * PSR_F_BIT; - */ - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ - - or1k_savestate(tcb->xcp.regs); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following + * logic would fail in the strange case where we are in an + * interrupt handler, the thread is signalling itself, but + * a context switch to another task has occurred so that + * current_regs does not refer to the thread of this_task()! */ else @@ -157,17 +116,50 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) * the signals have been delivered. 
*/ - tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; - - /* tcb->xcp.saved_cpsr = tcb->xcp.regs[REG_CPSR]; */ + /* tcb->xcp.saved_pc = up_current_regs()[REG_PC]; + * tcb->xcp.saved_cpsr = up_current_regs()[REG_CPSR]; + */ /* Then set up to vector to the trampoline with interrupts * disabled */ - /* tcb->xcp.regs[REG_PC] = (uint32_t)or1k_sigdeliver; - * tcb->xcp.regs[REG_CPSR] = SVC_MODE | PSR_I_BIT | PSR_F_BIT; + /* up_current_regs()[REG_PC] = (uint32_t)or1k_sigdeliver; + * up_current_regs()[REG_CPSR] = SVC_MODE | PSR_I_BIT | + * PSR_F_BIT; + */ + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. */ + + or1k_savestate(tcb->xcp.regs); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + + /* tcb->xcp.saved_cpsr = tcb->xcp.regs[REG_CPSR]; */ + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + /* tcb->xcp.regs[REG_PC] = (uint32_t)or1k_sigdeliver; + * tcb->xcp.regs[REG_CPSR] = SVC_MODE | PSR_I_BIT | PSR_F_BIT; + */ + } } diff --git a/arch/renesas/include/m16c/irq.h b/arch/renesas/include/m16c/irq.h index 5b66067746ff9..f06aae80d8bb0 100644 --- a/arch/renesas/include/m16c/irq.h +++ b/arch/renesas/include/m16c/irq.h @@ -228,12 +228,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of LR and SR used during signal processing. 
* * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/renesas/include/rx65n/irq.h b/arch/renesas/include/rx65n/irq.h index 649af4ea4355e..42c5f62dd08c0 100644 --- a/arch/renesas/include/rx65n/irq.h +++ b/arch/renesas/include/rx65n/irq.h @@ -986,12 +986,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of LR and SR used during signal processing. */ uint32_t saved_pc; diff --git a/arch/renesas/include/sh1/irq.h b/arch/renesas/include/sh1/irq.h index 6ce374cf68078..2f45b100bf8c7 100644 --- a/arch/renesas/include/sh1/irq.h +++ b/arch/renesas/include/sh1/irq.h @@ -449,12 +449,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of LR and SR used during signal processing. * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/renesas/src/m16c/m16c_schedulesigaction.c b/arch/renesas/src/m16c/m16c_schedulesigaction.c index 8217b0e7d466c..58a5fe23e7cb8 100644 --- a/arch/renesas/src/m16c/m16c_schedulesigaction.c +++ b/arch/renesas/src/m16c/m16c_schedulesigaction.c @@ -74,73 +74,33 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. 
+ */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - */ - - else - { - /* Save the return PC and SR and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. - */ - - tcb->xcp.saved_pc[0] = up_current_regs()[REG_PC]; - tcb->xcp.saved_pc[1] = up_current_regs()[REG_PC + 1]; - tcb->xcp.saved_flg = up_current_regs()[REG_FLG]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_PC] = (uint32_t)renesas_sigdeliver >> 8; - up_current_regs()[REG_PC + 1] = (uint32_t)renesas_sigdeliver; - up_current_regs()[REG_FLG] &= ~M16C_FLG_I; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* In this case just deliver the signal now. */ - renesas_copystate(tcb->xcp.regs, up_current_regs()); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. 
+ /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. */ else @@ -150,17 +110,49 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) * the signals have been delivered. */ - tcb->xcp.saved_pc[0] = tcb->xcp.regs[REG_PC]; - tcb->xcp.saved_pc[1] = tcb->xcp.regs[REG_PC + 1]; - tcb->xcp.saved_flg = tcb->xcp.regs[REG_FLG]; + tcb->xcp.saved_pc[0] = up_current_regs()[REG_PC]; + tcb->xcp.saved_pc[1] = up_current_regs()[REG_PC + 1]; + tcb->xcp.saved_flg = up_current_regs()[REG_FLG]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_PC] = (uint32_t)renesas_sigdeliver >> 8; - tcb->xcp.regs[REG_PC + 1] = (uint32_t)renesas_sigdeliver; - tcb->xcp.regs[REG_FLG] &= ~M16C_FLG_I; + up_current_regs()[REG_PC] = (uint32_t)renesas_sigdeliver >> 8; + up_current_regs()[REG_PC + 1] = (uint32_t)renesas_sigdeliver; + up_current_regs()[REG_FLG] &= ~M16C_FLG_I; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + renesas_copystate(tcb->xcp.regs, up_current_regs()); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save the return PC and SR and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. 
+ */ + + tcb->xcp.saved_pc[0] = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_pc[1] = tcb->xcp.regs[REG_PC + 1]; + tcb->xcp.saved_flg = tcb->xcp.regs[REG_FLG]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)renesas_sigdeliver >> 8; + tcb->xcp.regs[REG_PC + 1] = (uint32_t)renesas_sigdeliver; + tcb->xcp.regs[REG_FLG] &= ~M16C_FLG_I; + } } diff --git a/arch/renesas/src/m16c/m16c_sigdeliver.c b/arch/renesas/src/m16c/m16c_sigdeliver.c index 52cd6703af367..9fe660a59e9c7 100644 --- a/arch/renesas/src/m16c/m16c_sigdeliver.c +++ b/arch/renesas/src/m16c/m16c_sigdeliver.c @@ -58,8 +58,8 @@ void renesas_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -75,7 +75,7 @@ void renesas_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)sig_rtcb->xcp.sigdeliver)(rtcb); + (sig_rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -95,10 +95,10 @@ void renesas_sigdeliver(void) * could be modified by a hostile program. */ - regs[REG_PC] = rtcb->xcp.saved_pc[0]; - regs[REG_PC + 1] = rtcb->xcp.saved_pc[1]; - regs[REG_FLG] = rtcb->xcp.saved_flg; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_PC] = rtcb->xcp.saved_pc[0]; + regs[REG_PC + 1] = rtcb->xcp.saved_pc[1]; + regs[REG_FLG] = rtcb->xcp.saved_flg; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. 
diff --git a/arch/renesas/src/rx65n/rx65n_schedulesigaction.c b/arch/renesas/src/rx65n/rx65n_schedulesigaction.c index 7798d47258d95..bdbe03cb96a17 100644 --- a/arch/renesas/src/rx65n/rx65n_schedulesigaction.c +++ b/arch/renesas/src/rx65n/rx65n_schedulesigaction.c @@ -74,71 +74,33 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - */ - - else - { - /* Save the return PC and SR and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. 
- */ - - tcb->xcp.saved_pc = up_current_regs()[REG_PC]; - tcb->xcp.saved_sr = up_current_regs()[REG_PSW]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_PC] = (uint32_t)renesas_sigdeliver; - up_current_regs()[REG_PSW] |= 0x00030000; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* In this case just deliver the signal now. */ - renesas_copystate(tcb->xcp.regs, up_current_regs()); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. */ else @@ -148,15 +110,45 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) * the signals have been delivered. */ - tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; - tcb->xcp.saved_sr = tcb->xcp.regs[REG_PSW]; + tcb->xcp.saved_pc = up_current_regs()[REG_PC]; + tcb->xcp.saved_sr = up_current_regs()[REG_PSW]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_PC] = (uint32_t)renesas_sigdeliver; - tcb->xcp.regs[REG_PSW] |= 0x00030000; + up_current_regs()[REG_PC] = (uint32_t)renesas_sigdeliver; + up_current_regs()[REG_PSW] |= 0x00030000; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + renesas_copystate(tcb->xcp.regs, up_current_regs()); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. 
+ */ + + else + { + /* Save the return PC and SR and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_sr = tcb->xcp.regs[REG_PSW]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)renesas_sigdeliver; + tcb->xcp.regs[REG_PSW] |= 0x00030000; + } } diff --git a/arch/renesas/src/rx65n/rx65n_sigdeliver.c b/arch/renesas/src/rx65n/rx65n_sigdeliver.c index 5c8d50b1092eb..d18f632f461fd 100644 --- a/arch/renesas/src/rx65n/rx65n_sigdeliver.c +++ b/arch/renesas/src/rx65n/rx65n_sigdeliver.c @@ -60,8 +60,8 @@ void renesas_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the real return state on the stack. */ @@ -75,8 +75,8 @@ void renesas_sigdeliver(void) * signals. */ - sigdeliver = rtcb->xcp.sigdeliver; - rtcb->xcp.sigdeliver = NULL; + sigdeliver = rtcb->sigdeliver; + rtcb->sigdeliver = NULL; #ifndef CONFIG_SUPPRESS_INTERRUPTS /* Then make sure that interrupts are enabled. 
Signal handlers must always diff --git a/arch/renesas/src/sh1/sh1_schedulesigaction.c b/arch/renesas/src/sh1/sh1_schedulesigaction.c index 8c020741f3052..c5271a566ed33 100644 --- a/arch/renesas/src/sh1/sh1_schedulesigaction.c +++ b/arch/renesas/src/sh1/sh1_schedulesigaction.c @@ -74,71 +74,33 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - */ - - else - { - /* Save the return PC and SR and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. 
- */ - - tcb->xcp.saved_pc = up_current_regs()[REG_PC]; - tcb->xcp.saved_sr = up_current_regs()[REG_SR]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_PC] = (uint32_t)renesas_sigdeliver; - up_current_regs()[REG_SR] |= 0x000000f0; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* In this case just deliver the signal now. */ - renesas_copystate(tcb->xcp.regs, up_current_regs()); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. */ else @@ -148,15 +110,45 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) * the signals have been delivered. */ - tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; - tcb->xcp.saved_sr = tcb->xcp.regs[REG_SR]; + tcb->xcp.saved_pc = up_current_regs()[REG_PC]; + tcb->xcp.saved_sr = up_current_regs()[REG_SR]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_PC] = (uint32_t)renesas_sigdeliver; - tcb->xcp.regs[REG_SR] |= 0x000000f0 ; + up_current_regs()[REG_PC] = (uint32_t)renesas_sigdeliver; + up_current_regs()[REG_SR] |= 0x000000f0; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + renesas_copystate(tcb->xcp.regs, up_current_regs()); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. 
+ */ + + else + { + /* Save the return PC and SR and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_sr = tcb->xcp.regs[REG_SR]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)renesas_sigdeliver; + tcb->xcp.regs[REG_SR] |= 0x000000f0 ; + } } diff --git a/arch/renesas/src/sh1/sh1_sigdeliver.c b/arch/renesas/src/sh1/sh1_sigdeliver.c index d32166906c4be..48b2cc7f5d8ee 100644 --- a/arch/renesas/src/sh1/sh1_sigdeliver.c +++ b/arch/renesas/src/sh1/sh1_sigdeliver.c @@ -58,8 +58,8 @@ void renesas_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -75,7 +75,7 @@ void renesas_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -95,9 +95,9 @@ void renesas_sigdeliver(void) * could be modified by a hostile program. */ - regs[REG_PC] = rtcb->xcp.saved_pc; - regs[REG_SR] = rtcb->xcp.saved_sr; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_PC] = rtcb->xcp.saved_pc; + regs[REG_SR] = rtcb->xcp.saved_sr; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. 
*/ diff --git a/arch/risc-v/include/irq.h b/arch/risc-v/include/irq.h index cd2665cd88631..ea47dbe31e5cd 100644 --- a/arch/risc-v/include/irq.h +++ b/arch/risc-v/include/irq.h @@ -567,12 +567,6 @@ struct xcptcontext { - /* The following function pointer is non-NULL if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These additional register save locations are used to implement the * signal delivery trampoline. * diff --git a/arch/risc-v/src/common/CMakeLists.txt b/arch/risc-v/src/common/CMakeLists.txt index 1a41dc1f30c01..9340255205162 100644 --- a/arch/risc-v/src/common/CMakeLists.txt +++ b/arch/risc-v/src/common/CMakeLists.txt @@ -41,7 +41,7 @@ if(NOT CONFIG_ALARM_ARCH) endif() if(CONFIG_SMP) - list(APPEND SRCS riscv_cpuindex.c riscv_cpupause.c riscv_cpustart.c) + list(APPEND SRCS riscv_cpuindex.c riscv_smpcall.c riscv_cpustart.c) endif() if(CONFIG_RISCV_MISALIGNED_HANDLER) diff --git a/arch/risc-v/src/common/Make.defs b/arch/risc-v/src/common/Make.defs index 61cae3bbcb998..4a66e52682b48 100644 --- a/arch/risc-v/src/common/Make.defs +++ b/arch/risc-v/src/common/Make.defs @@ -43,7 +43,7 @@ ifneq ($(CONFIG_ALARM_ARCH),y) endif ifeq ($(CONFIG_SMP),y) -CMN_CSRCS += riscv_cpuindex.c riscv_cpupause.c riscv_cpustart.c +CMN_CSRCS += riscv_cpuindex.c riscv_smpcall.c riscv_cpustart.c endif ifeq ($(CONFIG_RISCV_MISALIGNED_HANDLER),y) diff --git a/arch/risc-v/src/common/riscv_cpupause.c b/arch/risc-v/src/common/riscv_cpupause.c deleted file mode 100644 index 148706ca449da..0000000000000 --- a/arch/risc-v/src/common/riscv_cpupause.c +++ /dev/null @@ -1,438 +0,0 @@ -/**************************************************************************** - * arch/risc-v/src/common/riscv_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "sched/sched.h" -#include "riscv_internal.h" -#include "riscv_ipi.h" -#include "chip.h" - -/**************************************************************************** - * Public Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. 
- */ - -volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - /* Save the current context at current_regs into the TCB at the head - * of the assigned task list for this CPU. 
- */ - - riscv_savecontext(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - /* Then switch contexts. Any necessary address environment changes - * will be made when the interrupt returns. - */ - - riscv_restorecontext(tcb); - - return OK; -} - -/**************************************************************************** - * Name: riscv_pause_handler - * - * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK - * - ****************************************************************************/ - -int riscv_pause_handler(int irq, void *c, void *arg) -{ - struct tcb_s *tcb; - int cpu = this_cpu(); - - nxsched_smp_call_handler(irq, c, arg); - - /* Clear IPI (Inter-Processor-Interrupt) */ - - riscv_ipi_clear(cpu); - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. 
- */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - tcb = current_task(cpu); - riscv_savecontext(tcb); - nxsched_process_delivered(cpu); - tcb = current_task(cpu); - riscv_restorecontext(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - /* Execute Pause IRQ to CPU(cpu) */ - - riscv_ipi_send(cpu); - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - int cpu; - - for (; cpuset != 0; cpuset &= ~(1 << cpu)) - { - cpu = ffs(cpuset) - 1; - riscv_ipi_send(cpu); - } -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. 
- * - * Input Parameters: - * cpu - The index of the CPU to be stopped/ - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - sinfo("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Take the both spinlocks. The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - /* Execute Pause IRQ to CPU(cpu) */ - - riscv_ipi_send(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. 
- * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - sinfo("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. - */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} diff --git a/arch/risc-v/src/common/riscv_exception.c b/arch/risc-v/src/common/riscv_exception.c index e77a3c8ee8bb3..1995f12ada22b 100644 --- a/arch/risc-v/src/common/riscv_exception.c +++ b/arch/risc-v/src/common/riscv_exception.c @@ -301,7 +301,7 @@ void riscv_exception_attach(void) irq_attach(RISCV_IRQ_RESERVED14, riscv_exception, NULL); #ifdef CONFIG_SMP - irq_attach(RISCV_IRQ_SOFT, riscv_pause_handler, NULL); + irq_attach(RISCV_IRQ_SOFT, riscv_smp_call_handler, NULL); #else irq_attach(RISCV_IRQ_SOFT, riscv_exception, NULL); #endif diff --git a/arch/risc-v/src/common/riscv_internal.h b/arch/risc-v/src/common/riscv_internal.h index d9165356a97c1..6e69c9550b1ac 100644 --- a/arch/risc-v/src/common/riscv_internal.h +++ b/arch/risc-v/src/common/riscv_internal.h @@ -412,7 +412,7 @@ void riscv_stack_color(void *stackbase, size_t nbytes); #ifdef CONFIG_SMP void riscv_cpu_boot(int cpu); -int riscv_pause_handler(int irq, void *c, void *arg); +int riscv_smp_call_handler(int irq, void *c, void *arg); #endif 
/**************************************************************************** diff --git a/arch/risc-v/src/common/riscv_schedulesigaction.c b/arch/risc-v/src/common/riscv_schedulesigaction.c index d886443d9ee0e..076d11f1fb1ec 100644 --- a/arch/risc-v/src/common/riscv_schedulesigaction.c +++ b/arch/risc-v/src/common/riscv_schedulesigaction.c @@ -76,89 +76,63 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { uintptr_t int_ctx; - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! */ - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handler will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else - { -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the return EPC and STATUS registers. These will be - * by the signal trampoline after the signal has been delivered. - */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the return EPC and STATUS registers. These will be + * by the signal trampoline after the signal has been delivered. 
+ */ - /* Save the current register context location */ + /* Save the current register context location */ - tcb->xcp.saved_regs = tcb->xcp.regs; + tcb->xcp.saved_regs = tcb->xcp.regs; - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ - tcb->xcp.regs = (uintreg_t *) - ((uintptr_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); + tcb->xcp.regs = (uintreg_t *) + ((uintptr_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_SP] = (uintptr_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_SP] = (uintptr_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled. We must already be in privileged thread mode to be - * here. - */ + /* Then set up to vector to the trampoline with interrupts + * disabled. We must already be in privileged thread mode to be + * here. 
+ */ - tcb->xcp.regs[REG_EPC] = (uintptr_t)riscv_sigdeliver; + tcb->xcp.regs[REG_EPC] = (uintptr_t)riscv_sigdeliver; - int_ctx = tcb->xcp.regs[REG_INT_CTX]; - int_ctx &= ~STATUS_PIE; + int_ctx = tcb->xcp.regs[REG_INT_CTX]; + int_ctx &= ~STATUS_PIE; #ifndef CONFIG_BUILD_FLAT - int_ctx |= STATUS_PPP; + int_ctx |= STATUS_PPP; #endif - tcb->xcp.regs[REG_INT_CTX] = int_ctx; -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } -#endif - } + tcb->xcp.regs[REG_INT_CTX] = int_ctx; } } diff --git a/arch/risc-v/src/common/riscv_sigdeliver.c b/arch/risc-v/src/common/riscv_sigdeliver.c index a9d63a66b3cd0..3d8e8f8841531 100644 --- a/arch/risc-v/src/common/riscv_sigdeliver.c +++ b/arch/risc-v/src/common/riscv_sigdeliver.c @@ -69,8 +69,8 @@ void riscv_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -102,7 +102,7 @@ void riscv_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -147,7 +147,7 @@ void riscv_sigdeliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. 
diff --git a/arch/risc-v/src/common/riscv_smpcall.c b/arch/risc-v/src/common/riscv_smpcall.c new file mode 100644 index 0000000000000..46b5f17bda7b7 --- /dev/null +++ b/arch/risc-v/src/common/riscv_smpcall.c @@ -0,0 +1,129 @@ +/**************************************************************************** + * arch/risc-v/src/common/riscv_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sched/sched.h" +#include "riscv_internal.h" +#include "riscv_ipi.h" +#include "chip.h" + +/**************************************************************************** + * Public Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: riscv_smp_call_handler + * + * Description: + * This is the handler for SMP_CALL. + * + ****************************************************************************/ + +int riscv_smp_call_handler(int irq, void *c, void *arg) +{ + struct tcb_s *tcb; + int cpu = this_cpu(); + + nxsched_smp_call_handler(irq, c, arg); + + /* Clear IPI (Inter-Processor-Interrupt) */ + + riscv_ipi_clear(cpu); + + tcb = current_task(cpu); + riscv_savecontext(tcb); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + riscv_restorecontext(tcb); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + /* Execute Pause IRQ to CPU(cpu) */ + + riscv_ipi_send(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + int cpu; + + for (; cpuset != 0; cpuset &= ~(1 << cpu)) + { + cpu = ffs(cpuset) - 1; + up_send_smp_sched(cpu); + } +} diff --git a/arch/risc-v/src/jh7110/jh7110_timerisr.c b/arch/risc-v/src/jh7110/jh7110_timerisr.c index 920361fff6161..8277ffcc4ba4b 100644 --- a/arch/risc-v/src/jh7110/jh7110_timerisr.c +++ b/arch/risc-v/src/jh7110/jh7110_timerisr.c @@ -79,7 +79,7 @@ static int jh7110_ssoft_interrupt(int irq, void *context, void *arg) { /* We assume IPI has been issued */ - riscv_pause_handler(irq, context, arg); + riscv_smp_call_handler(irq, context, arg); } #endif diff --git a/arch/sim/include/irq.h b/arch/sim/include/irq.h index 1b2d31b024287..af25d57f5d53a 100644 --- a/arch/sim/include/irq.h +++ b/arch/sim/include/irq.h @@ -51,7 +51,6 @@ struct xcptcontext { - void *sigdeliver; /* Actual type is sig_deliver_t */ jmp_buf regs; }; diff --git a/arch/sim/src/sim/posix/sim_hostsmp.c b/arch/sim/src/sim/posix/sim_hostsmp.c index f338c5b38ac75..beed1dd332cd5 100644 --- a/arch/sim/src/sim/posix/sim_hostsmp.c +++ b/arch/sim/src/sim/posix/sim_hostsmp.c @@ -106,6 +106,8 @@ static void *sim_idle_trampoline(void *arg) host_cpu_started(); + sim_unlock(); + /* The idle Loop */ for (; ; ) diff --git a/arch/sim/src/sim/sim_doirq.c b/arch/sim/src/sim/sim_doirq.c index 62fba5aaea44e..5adcf15cdfadc 100644 --- 
a/arch/sim/src/sim/sim_doirq.c +++ b/arch/sim/src/sim/sim_doirq.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "sim_internal.h" @@ -34,6 +35,15 @@ * Public Functions ****************************************************************************/ +void sim_unlock(void) +{ + /* wait until cpu0 in idle() */ + + while (!OSINIT_IDLELOOP()); + + sched_unlock(); +} + /**************************************************************************** * Name: sim_doirq ****************************************************************************/ diff --git a/arch/sim/src/sim/sim_internal.h b/arch/sim/src/sim/sim_internal.h index 5660d9d129a80..1c35be0d965f7 100644 --- a/arch/sim/src/sim/sim_internal.h +++ b/arch/sim/src/sim/sim_internal.h @@ -206,6 +206,7 @@ extern char **g_argv; void sim_copyfullstate(xcpt_reg_t *dest, xcpt_reg_t *src); void *sim_doirq(int irq, void *regs); +void sim_unlock(void); /* sim_hostmisc.c ***********************************************************/ diff --git a/arch/sim/src/sim/sim_schedulesigaction.c b/arch/sim/src/sim/sim_schedulesigaction.c index 4e6ab46831f27..6bc8aa535b81d 100644 --- a/arch/sim/src/sim/sim_schedulesigaction.c +++ b/arch/sim/src/sim/sim_schedulesigaction.c @@ -71,27 +71,15 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - irqstate_t flags; - /* We don't have to anything complex for the simulated target */ - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); - - /* Make sure that interrupts are disabled */ + sinfo("tcb=%p\n", tcb); - flags = enter_critical_section(); - - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - if (tcb == this_task()) - { - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - - leave_critical_section(flags); } diff --git 
a/arch/sim/src/sim/sim_sigdeliver.c b/arch/sim/src/sim/sim_sigdeliver.c index eca8c3bf91045..238599c757066 100644 --- a/arch/sim/src/sim/sim_sigdeliver.c +++ b/arch/sim/src/sim/sim_sigdeliver.c @@ -61,7 +61,7 @@ void sim_sigdeliver(void) int16_t saved_irqcount; irqstate_t flags; #endif - if (NULL == (rtcb->xcp.sigdeliver)) + if (NULL == (rtcb->sigdeliver)) { return; } @@ -75,8 +75,8 @@ void sim_sigdeliver(void) #endif sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* NOTE: we do not save the return state for sim */ @@ -103,7 +103,7 @@ void sim_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -135,7 +135,7 @@ void sim_sigdeliver(void) /* Allows next handler to be scheduled */ - rtcb->xcp.sigdeliver = NULL; + rtcb->sigdeliver = NULL; /* NOTE: we leave a critical section here for sim */ diff --git a/arch/sim/src/sim/sim_smpsignal.c b/arch/sim/src/sim/sim_smpsignal.c index 140ea6f879e66..f535767aa83c8 100644 --- a/arch/sim/src/sim/sim_smpsignal.c +++ b/arch/sim/src/sim/sim_smpsignal.c @@ -33,77 +33,26 @@ #include "sched/sched.h" #include "sim_internal.h" -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. 
- * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. - */ - -static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - /**************************************************************************** * Private Functions ****************************************************************************/ /**************************************************************************** - * Name: sim_cpupause_handler + * Name: sim_smp_call_handler * * Description: - * This is the SIGUSR signal handler. It implements the core logic of - * up_cpu_pause() on the thread of execution the simulated CPU. - * - * Input Parameters: - * irq - the interrupt number - * context - not used - * arg - not used - * - * Returned Value: - * In case of success OK (0) is returned otherwise a negative value. + * This is the handler for SMP_CALL. * ****************************************************************************/ -static int sim_cpupause_handler(int irq, void *context, void *arg) +static int sim_smp_call_handler(int irq, void *context, void *arg) { struct tcb_s *tcb; int cpu = this_cpu(); - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused(). If the pause event has already - * been processed then g_cpu_paused[cpu] will not be locked. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. 
- * Then this CPU will acquire g_cpu_irqlock in the function. - */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - tcb = current_task(cpu); sim_savestate(tcb->xcp.regs); - nxsched_process_delivered(cpu); + nxsched_smp_call_handler(irq, context, arg); tcb = current_task(cpu); sim_restorestate(tcb->xcp.regs); @@ -111,155 +60,31 @@ static int sim_cpupause_handler(int irq, void *context, void *arg) } /**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq + * Name: sim_smp_sched_handler * * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. + * This is the handler for smp. * ****************************************************************************/ -bool up_cpu_pausereq(int cpu) +static int sim_smp_sched_handler(int irq, void *context, void *arg) { - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - /* Save the current context at current_regs into the TCB at the head - * of the assigned task list for this CPU. - */ + struct tcb_s *tcb; + int cpu = this_cpu(); + tcb = current_task(cpu); sim_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + sim_restorestate(tcb->xcp.regs); return OK; } /**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. 
- * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * + * Public Functions ****************************************************************************/ -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - /* Then switch contexts. Any necessary address environment changes - * will be made when the interrupt returns. 
- */ - - sim_restorestate(tcb->xcp.regs); - - return OK; -} - /**************************************************************************** * Name: host_cpu_started * @@ -339,11 +164,11 @@ int up_cpu_start(int cpu) int sim_init_ipi(int irq) { up_enable_irq(irq); - return irq_attach(irq, sim_cpupause_handler, NULL); + return irq_attach(irq, sim_smp_sched_handler, NULL); } /**************************************************************************** - * Name: up_cpu_pause_async + * Name: up_send_smp_sched * * Description: * pause task execution on the CPU @@ -361,7 +186,7 @@ int sim_init_ipi(int irq) * ****************************************************************************/ -inline_function int up_cpu_pause_async(int cpu) +int up_send_smp_sched(int cpu) { /* Generate IRQ for CPU(cpu) */ @@ -370,115 +195,6 @@ inline_function int up_cpu_pause_async(int cpu) return OK; } -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - /* Take the both spinlocks. 
The g_cpu_wait spinlock will prevent the - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - up_cpu_pause_async(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. 
- */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -#ifdef CONFIG_SMP - /**************************************************************************** * Name: sim_init_func_call_ipi * @@ -495,7 +211,7 @@ int up_cpu_resume(int cpu) int sim_init_func_call_ipi(int irq) { up_enable_irq(irq); - return irq_attach(irq, nxsched_smp_call_handler, NULL); + return irq_attach(irq, sim_smp_call_handler, NULL); } /**************************************************************************** @@ -516,4 +232,3 @@ void up_send_smp_call(cpu_set_t cpuset) host_send_func_call_ipi(cpu); } } -#endif diff --git a/arch/sparc/include/sparc_v8/irq.h b/arch/sparc/include/sparc_v8/irq.h index fa5c77b362ed1..527f756a7c47c 100644 --- a/arch/sparc/include/sparc_v8/irq.h +++ b/arch/sparc/include/sparc_v8/irq.h @@ -427,12 +427,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-NULL if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These additional register save locations are used to implement the * signal delivery trampoline. 
* diff --git a/arch/sparc/src/s698pm/Make.defs b/arch/sparc/src/s698pm/Make.defs index c3ce0323c8b6d..ff1935e7ad6b8 100644 --- a/arch/sparc/src/s698pm/Make.defs +++ b/arch/sparc/src/s698pm/Make.defs @@ -54,5 +54,5 @@ endif # Configuration-dependent files ifeq ($(CONFIG_SMP),y) -CHIP_CSRCS += s698pm_cpuindex.c s698pm_cpustart.c s698pm_cpupause.c s698pm_cpuidlestack.c +CHIP_CSRCS += s698pm_cpuindex.c s698pm_cpustart.c s698pm_smpcall.c s698pm_cpuidlestack.c endif diff --git a/arch/sparc/src/s698pm/s698pm-irq.c b/arch/sparc/src/s698pm/s698pm-irq.c index bc2fc173ecf62..de7f004859170 100644 --- a/arch/sparc/src/s698pm/s698pm-irq.c +++ b/arch/sparc/src/s698pm/s698pm-irq.c @@ -185,7 +185,7 @@ int s698pm_cpuint_initialize(void) #if defined CONFIG_SMP /* Attach IPI interrupts */ - irq_attach(S698PM_IPI_IRQ, s698pm_pause_handler, NULL); + irq_attach(S698PM_IPI_IRQ, s698pm_smp_call_handler, NULL); (void)s698pm_setup_irq(cpu, S698PM_IPI_IRQ, 0); diff --git a/arch/sparc/src/s698pm/s698pm.h b/arch/sparc/src/s698pm/s698pm.h index 6182b238551f1..0686e3fdc2398 100644 --- a/arch/sparc/src/s698pm/s698pm.h +++ b/arch/sparc/src/s698pm/s698pm.h @@ -416,21 +416,15 @@ void gpio_irqdisable(int irq); #endif /**************************************************************************** - * Name: s698pm_pause_handler + * Name: s698pm_smp_call_handler * * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK + * This is the handler for SMP_CALL. 
* ****************************************************************************/ #ifdef CONFIG_SMP -int s698pm_pause_handler(int irq, void *c, void *arg); +int s698pm_smp_call_handler(int irq, void *c, void *arg); #endif #undef EXTERN diff --git a/arch/sparc/src/s698pm/s698pm_cpupause.c b/arch/sparc/src/s698pm/s698pm_cpupause.c deleted file mode 100644 index 225ef9ec24132..0000000000000 --- a/arch/sparc/src/s698pm/s698pm_cpupause.c +++ /dev/null @@ -1,440 +0,0 @@ -/**************************************************************************** - * arch/sparc/src/s698pm/s698pm_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "sched/sched.h" -#include "sparc_internal.h" -#include "chip.h" - -/**************************************************************************** - * Public Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. - */ - -volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. 
- * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - /* Save the current context at current_regs into the TCB at the head - * of the assigned task list for this CPU. - */ - - sparc_savestate(tcb->xcp.regs); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. 
Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - /* Then switch contexts. Any necessary address environment changes - * will be made when the interrupt returns. 
- */ - - sparc_restorestate(tcb->xcp.regs); - - return OK; -} - -/**************************************************************************** - * Name: s698pm_pause_handler - * - * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK - * - ****************************************************************************/ - -int s698pm_pause_handler(int irq, void *c, void *arg) -{ - struct tcb_s *tcb; - int cpu = this_cpu(); - - nxsched_smp_call_handler(irq, c, arg); - - /* Clear IPI (Inter-Processor-Interrupt) */ - - putreg32(1 << S698PM_IPI_VECTOR, S698PM_IRQREG_ICLEAR); - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. - */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - tcb = current_task(cpu); - sparc_savestate(tcb->xcp.regs); - nxsched_process_delivered(cpu); - tcb = current_task(cpu); - sparc_restorestate(tcb->xcp.regs); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. 
- * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - uintptr_t regaddr; - - /* Execute Pause IRQ to CPU(cpu) */ - - regaddr = (uintptr_t)S698PM_IRQREG_P0_FORCE + (4 * cpu); - putreg32(1 << S698PM_IPI_VECTOR, regaddr); - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - int cpu; - - for (; cpuset != 0; cpuset &= ~(1 << cpu)) - { - cpu = ffs(cpuset) - 1; - up_cpu_pause_async(cpu); - } -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped/ - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - sinfo("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Take the both spinlocks. 
The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - /* Execute Pause IRQ to CPU(cpu) */ - - up_cpu_pause_async(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - sinfo("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Release the spinlock. 
Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. - */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} diff --git a/arch/sparc/src/s698pm/s698pm_smpcall.c b/arch/sparc/src/s698pm/s698pm_smpcall.c new file mode 100644 index 0000000000000..c08272d7224c7 --- /dev/null +++ b/arch/sparc/src/s698pm/s698pm_smpcall.c @@ -0,0 +1,130 @@ +/**************************************************************************** + * arch/sparc/src/s698pm/s698pm_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sched/sched.h" +#include "sparc_internal.h" +#include "chip.h" + +/**************************************************************************** + * Public Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: s698pm_smp_call_handler + * + * Description: + * This is the handler for SMP_CALL. + * + ****************************************************************************/ + +int s698pm_smp_call_handler(int irq, void *c, void *arg) +{ + struct tcb_s *tcb; + int cpu = this_cpu(); + + /* Clear IPI (Inter-Processor-Interrupt) */ + + putreg32(1 << S698PM_IPI_VECTOR, S698PM_IRQREG_ICLEAR); + + tcb = current_task(cpu); + sparc_savestate(tcb->xcp.regs); + nxsched_smp_call_handler(irq, c, arg); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + sparc_restorestate(tcb->xcp.regs); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + uintptr_t regaddr; + + /* Execute Pause IRQ to CPU(cpu) */ + + regaddr = (uintptr_t)S698PM_IRQREG_P0_FORCE + (4 * cpu); + putreg32(1 << S698PM_IPI_VECTOR, regaddr); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + int cpu; + + for (; cpuset != 0; cpuset &= ~(1 << cpu)) + { + cpu = ffs(cpuset) - 1; + up_send_smp_sched(cpu); + } +} diff --git a/arch/sparc/src/sparc_v8/sparc_v8_schedulesigaction.c b/arch/sparc/src/sparc_v8/sparc_v8_schedulesigaction.c index 2f50eb397226f..e54871576aa3a 100644 --- a/arch/sparc/src/sparc_v8/sparc_v8_schedulesigaction.c +++ b/arch/sparc/src/sparc_v8/sparc_v8_schedulesigaction.c @@ -72,249 +72,140 @@ ****************************************************************************/ #ifndef CONFIG_SMP -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - irqstate_t flags; + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. 
+ */ - /* Make sure that interrupts are disabled */ - - flags = enter_critical_section(); - - /* Refuse to handle nested signal actions */ - - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - * - * Hmmm... there looks like a latent bug here: The following - * logic would fail in the strange case where we are in an - * interrupt handler, the thread is signalling itself, but - * a context switch to another task has occurred so that - * current_regs does not refer to the thread of this_task()! - */ - - else - { - /* Save registers that must be protected while the signal - * handler runs. These will be restored by the signal - * trampoline after the signal(s) have been delivered. 
- */ - - tcb->xcp.saved_pc = up_current_regs()[REG_PC]; - tcb->xcp.saved_npc = up_current_regs()[REG_NPC]; - tcb->xcp.saved_status = up_current_regs()[REG_PSR]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_PC] = (uint32_t)sparc_sigdeliver; - up_current_regs()[REG_NPC] = (uint32_t)sparc_sigdeliver + 4; - up_current_regs()[REG_PSR] |= SPARC_PSR_ET_MASK; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* In this case just deliver the signal now. */ - sparc_savestate(tcb->xcp.regs); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following + * logic would fail in the strange case where we are in an + * interrupt handler, the thread is signalling itself, but + * a context switch to another task has occurred so that + * current_regs does not refer to the thread of this_task()! */ else { - /* Save registers that must be protected while the signal handler - * runs. These will be restored by the signal trampoline after - * the signals have been delivered. + /* Save registers that must be protected while the signal + * handler runs. These will be restored by the signal + * trampoline after the signal(s) have been delivered. 
*/ - tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; - tcb->xcp.saved_npc = tcb->xcp.regs[REG_NPC]; - tcb->xcp.saved_status = tcb->xcp.regs[REG_PSR]; + tcb->xcp.saved_pc = up_current_regs()[REG_PC]; + tcb->xcp.saved_npc = up_current_regs()[REG_NPC]; + tcb->xcp.saved_status = up_current_regs()[REG_PSR]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_PC] = (uint32_t)sparc_sigdeliver; - tcb->xcp.regs[REG_NPC] = (uint32_t)sparc_sigdeliver + 4; - tcb->xcp.regs[REG_PSR] |= SPARC_PSR_ET_MASK; + up_current_regs()[REG_PC] = (uint32_t)sparc_sigdeliver; + up_current_regs()[REG_NPC] = (uint32_t)sparc_sigdeliver + 4; + up_current_regs()[REG_PSR] |= SPARC_PSR_ET_MASK; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + sparc_savestate(tcb->xcp.regs); } } - leave_critical_section(flags); + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save registers that must be protected while the signal handler + * runs. These will be restored by the signal trampoline after + * the signals have been delivered. 
+ */ + + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_npc = tcb->xcp.regs[REG_NPC]; + tcb->xcp.saved_status = tcb->xcp.regs[REG_PSR]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)sparc_sigdeliver; + tcb->xcp.regs[REG_NPC] = (uint32_t)sparc_sigdeliver + 4; + tcb->xcp.regs[REG_PSR] |= SPARC_PSR_ET_MASK; + } } #endif /* !CONFIG_SMP */ #ifdef CONFIG_SMP -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - irqstate_t flags; int cpu; int me; - sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver); - - /* Make sure that interrupts are disabled */ + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - flags = enter_critical_section(); + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. + */ - /* Refuse to handle nested signal actions */ - - if (!tcb->xcp.sigdeliver) + if (tcb->task_state == TSTATE_TASK_RUNNING) { - tcb->xcp.sigdeliver = sigdeliver; + me = this_cpu(); + cpu = tcb->cpu; - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. + /* CASE 1: We are not in an interrupt handler and a task is + * signaling itself for some reason. */ - sinfo("rtcb=0x%p current_regs=0x%p\n", this_task(), up_current_regs()); - - if (tcb->task_state == TSTATE_TASK_RUNNING) + if (cpu == me && !up_current_regs()) { - me = this_cpu(); - cpu = tcb->cpu; - - /* CASE 1: We are not in an interrupt handler and a task is - * signaling itself for some reason. + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! */ - if (cpu == me && !up_current_regs()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handler will run in a critical section! 
- */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: The task that needs to receive the signal is running. - * This could happen if the task is running on another CPU OR if - * we are in an interrupt handler and the task is running on this - * CPU. In the former case, we will have to PAUSE the other CPU - * first. But in either case, we will have to modify the return - * state as well as the state in the TCB. - */ - - else - { - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - - /* Now tcb on the other CPU can be accessed safely */ - - /* Copy tcb->xcp.regs to tcp.xcp.saved. These will be - * restored by the signal trampoline after the signal has - * been delivered. - */ - - tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; - tcb->xcp.saved_npc = tcb->xcp.regs[REG_NPC]; - tcb->xcp.saved_status = tcb->xcp.regs[REG_PSR]; - - /* Then set up vector to the trampoline with interrupts - * disabled. We must already be in privileged thread mode - * to be here. - */ - - tcb->xcp.regs[REG_PC] = (uint32_t)sparc_sigdeliver; - tcb->xcp.regs[REG_NPC] = (uint32_t)sparc_sigdeliver + 4; - tcb->xcp.regs[REG_PSR] |= SPARC_PSR_ET_MASK; - } - else - { - /* tcb is running on the same CPU */ - - /* Save registers that must be protected while the signal - * handler runs. These will be restored by the signal - * trampoline after the signal(s) have been delivered. - */ - - tcb->xcp.saved_pc = up_current_regs()[REG_PC]; - tcb->xcp.saved_npc = up_current_regs()[REG_NPC]; - tcb->xcp.saved_status = up_current_regs()[REG_PSR]; - - /* Then set up vector to the trampoline with interrupts - * disabled. The kernel-space trampoline must run in - * privileged thread mode. 
- */ - - up_current_regs()[REG_PC] = (uint32_t)sparc_sigdeliver; - up_current_regs()[REG_NPC] = (uint32_t)sparc_sigdeliver - + 4; - up_current_regs()[REG_PSR] |= SPARC_PSR_ET_MASK; - - /* And make sure that the saved context in the TCB is the - * same as the interrupt return context. - */ - - sparc_savestate(tcb->xcp.regs); - } - - /* NOTE: If the task runs on another CPU(cpu), adjusting - * global IRQ controls will be done in the pause handler - * on the CPU(cpu) by taking a critical section. - * If the task is scheduled on this CPU(me), do nothing - * because this CPU already took a critical section - */ - - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running from an - * interrupt handler or (2) we are not in an interrupt handler and the - * running task is signaling some other non-running task. + /* CASE 2: The task that needs to receive the signal is running. + * This could happen if the task is running on another CPU OR if + * we are in an interrupt handler and the task is running on this + * CPU. In the former case, we will have to PAUSE the other CPU + * first. But in either case, we will have to modify the return + * state as well as the state in the TCB. */ else { + /* tcb is running on the same CPU */ + /* Save registers that must be protected while the signal * handler runs. These will be restored by the signal * trampoline after the signal(s) have been delivered. @@ -324,17 +215,48 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) tcb->xcp.saved_npc = up_current_regs()[REG_NPC]; tcb->xcp.saved_status = up_current_regs()[REG_PSR]; - /* Then set up to vector to the trampoline with interrupts - * disabled. We must already be in privileged thread mode to be - * here. + /* Then set up vector to the trampoline with interrupts + * disabled. 
The kernel-space trampoline must run in + * privileged thread mode. + */ + + up_current_regs()[REG_PC] = (uint32_t)sparc_sigdeliver; + up_current_regs()[REG_NPC] = (uint32_t)sparc_sigdeliver + + 4; + up_current_regs()[REG_PSR] |= SPARC_PSR_ET_MASK; + + /* And make sure that the saved context in the TCB is the + * same as the interrupt return context. */ - tcb->xcp.regs[REG_PC] = (uint32_t)sparc_sigdeliver; - tcb->xcp.regs[REG_NPC] = (uint32_t)sparc_sigdeliver + 4; - tcb->xcp.regs[REG_PSR] |= SPARC_PSR_ET_MASK; + sparc_savestate(tcb->xcp.regs); } } - leave_critical_section(flags); + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signaling some other non-running task. + */ + + else + { + /* Save registers that must be protected while the signal + * handler runs. These will be restored by the signal + * trampoline after the signal(s) have been delivered. + */ + + tcb->xcp.saved_pc = up_current_regs()[REG_PC]; + tcb->xcp.saved_npc = up_current_regs()[REG_NPC]; + tcb->xcp.saved_status = up_current_regs()[REG_PSR]; + + /* Then set up to vector to the trampoline with interrupts + * disabled. We must already be in privileged thread mode to be + * here. 
+ */ + + tcb->xcp.regs[REG_PC] = (uint32_t)sparc_sigdeliver; + tcb->xcp.regs[REG_NPC] = (uint32_t)sparc_sigdeliver + 4; + tcb->xcp.regs[REG_PSR] |= SPARC_PSR_ET_MASK; + } } #endif /* CONFIG_SMP */ diff --git a/arch/sparc/src/sparc_v8/sparc_v8_sigdeliver.c b/arch/sparc/src/sparc_v8/sparc_v8_sigdeliver.c index 06fa3c5b7770f..69caac640ecad 100644 --- a/arch/sparc/src/sparc_v8/sparc_v8_sigdeliver.c +++ b/arch/sparc/src/sparc_v8/sparc_v8_sigdeliver.c @@ -77,8 +77,8 @@ void sparc_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -114,7 +114,7 @@ void sparc_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -163,10 +163,10 @@ void sparc_sigdeliver(void) * could be modified by a hostile program. 
*/ - regs[REG_PC] = rtcb->xcp.saved_pc; - regs[REG_NPC] = rtcb->xcp.saved_npc; - regs[REG_PSR] = rtcb->xcp.saved_status; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_PC] = rtcb->xcp.saved_pc; + regs[REG_NPC] = rtcb->xcp.saved_npc; + regs[REG_PSR] = rtcb->xcp.saved_status; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ #ifdef CONFIG_SMP /* Restore the saved 'irqcount' and recover the critical section diff --git a/arch/tricore/include/tc3xx/irq.h b/arch/tricore/include/tc3xx/irq.h index 7b0394fd4e628..39b35ba97ae81 100644 --- a/arch/tricore/include/tc3xx/irq.h +++ b/arch/tricore/include/tc3xx/irq.h @@ -116,12 +116,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of the context used during * signal processing. */ diff --git a/arch/tricore/src/common/tricore_schedulesigaction.c b/arch/tricore/src/common/tricore_schedulesigaction.c index 7b82a95746ccf..bc62436d4b0bb 100644 --- a/arch/tricore/src/common/tricore_schedulesigaction.c +++ b/arch/tricore/src/common/tricore_schedulesigaction.c @@ -76,88 +76,81 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. 
*/ - if (tcb == this_task()) + if (up_current_regs() == NULL) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (up_current_regs() == NULL) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the - * interrupted task is the same as the one that - * must receive the signal, then we will have to modify - * the return state as well as the state in the TCB. - * - * Hmmm... there looks like a latent bug here: The following - * logic would fail in the strange case where we are in an - * interrupt handler, the thread is signalling itself, but - * a context switch to another task has occurred so that - * g_current_regs does not refer to the thread of this_task()! - */ - - else - { - /* Save the context registers. These will be - * restored by the signal trampoline after the signals have - * been delivered. - */ + /* In this case just deliver the signal now. */ - tricore_savestate(tcb->xcp.saved_regs); - - /* Create a new CSA for signal delivery. The new context - * will borrow the process stack of the current tcb. - */ - - up_set_current_regs(tricore_alloc_csa((uintptr_t) - tricore_sigdeliver, - STACK_ALIGN_DOWN(up_getusrsp(tcb->xcp.regs)), - PSW_IO_SUPERVISOR | PSW_CDE, true)); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the + * interrupted task is the same as the one that + * must receive the signal, then we will have to modify + * the return state as well as the state in the TCB. + * + * Hmmm... 
there looks like a latent bug here: The following + * logic would fail in the strange case where we are in an + * interrupt handler, the thread is signalling itself, but + * a context switch to another task has occurred so that + * g_current_regs does not refer to the thread of this_task()! */ else { - /* Save the return EPC and STATUS registers. These will be + /* Save the context registers. These will be * restored by the signal trampoline after the signals have * been delivered. */ - /* Save the current register context location */ - - tcb->xcp.saved_regs = tcb->xcp.regs; + tricore_savestate(tcb->xcp.saved_regs); /* Create a new CSA for signal delivery. The new context * will borrow the process stack of the current tcb. */ - tcb->xcp.regs = tricore_alloc_csa((uintptr_t)tricore_sigdeliver, - STACK_ALIGN_DOWN(up_getusrsp(tcb->xcp.regs)), - PSW_IO_SUPERVISOR | PSW_CDE, true); + up_set_current_regs(tricore_alloc_csa((uintptr_t) + tricore_sigdeliver, + STACK_ALIGN_DOWN(up_getusrsp(tcb->xcp.regs)), + PSW_IO_SUPERVISOR | PSW_CDE, true)); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save the return EPC and STATUS registers. These will be + * restored by the signal trampoline after the signals have + * been delivered. + */ + + /* Save the current register context location */ + + tcb->xcp.saved_regs = tcb->xcp.regs; + + /* Create a new CSA for signal delivery. The new context + * will borrow the process stack of the current tcb. 
+ */ + + tcb->xcp.regs = tricore_alloc_csa((uintptr_t)tricore_sigdeliver, + STACK_ALIGN_DOWN(up_getusrsp(tcb->xcp.regs)), + PSW_IO_SUPERVISOR | PSW_CDE, true); + } } diff --git a/arch/tricore/src/common/tricore_sigdeliver.c b/arch/tricore/src/common/tricore_sigdeliver.c index 9888597b9b7c1..e06ac8d046bc8 100644 --- a/arch/tricore/src/common/tricore_sigdeliver.c +++ b/arch/tricore/src/common/tricore_sigdeliver.c @@ -59,8 +59,8 @@ void tricore_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: @@ -74,7 +74,7 @@ void tricore_sigdeliver(void) /* Deliver the signal */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -106,7 +106,7 @@ void tricore_sigdeliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of * execution. diff --git a/arch/x86/include/i486/irq.h b/arch/x86/include/i486/irq.h index ce6c58bade18b..144618fa4640f 100644 --- a/arch/x86/include/i486/irq.h +++ b/arch/x86/include/i486/irq.h @@ -151,12 +151,6 @@ #ifndef __ASSEMBLY__ struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of instruction pointer and EFLAGS used during * signal processing. 
* diff --git a/arch/x86/src/i486/i486_schedulesigaction.c b/arch/x86/src/i486/i486_schedulesigaction.c index 30770529ecedd..219d3aa9bbd75 100644 --- a/arch/x86/src/i486/i486_schedulesigaction.c +++ b/arch/x86/src/i486/i486_schedulesigaction.c @@ -70,95 +70,87 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and a task is - * signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the interrupted task - * is the same as the one that must receive the signal, then we - * will have to modify the return state as well as the state in the - * TCB. - * - * Hmmm... there looks like a latent bug here: The following logic - * would fail in the strange case where we are in an interrupt - * handler, the thread is signalling itself, but a context switch - * to another task has occurred so that g_current_regs does not - * refer to the thread of this_task()! 
- */ - - else - { - /* Save the return lr and cpsr and one scratch register. These - * will be restored by the signal trampoline after the signals - * have been delivered. - */ + /* In this case just deliver the signal now. */ - tcb->xcp.saved_eip = up_current_regs()[REG_EIP]; - tcb->xcp.saved_eflags = up_current_regs()[REG_EFLAGS]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_EIP] = (uint32_t)x86_sigdeliver; - up_current_regs()[REG_EFLAGS] = 0; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ - - x86_savestate(tcb->xcp.regs); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the interrupted task + * is the same as the one that must receive the signal, then we + * will have to modify the return state as well as the state in the + * TCB. + * + * Hmmm... there looks like a latent bug here: The following logic + * would fail in the strange case where we are in an interrupt + * handler, the thread is signalling itself, but a context switch + * to another task has occurred so that g_current_regs does not + * refer to the thread of this_task()! */ else { - /* Save the return lr and cpsr and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. + /* Save the return lr and cpsr and one scratch register. These + * will be restored by the signal trampoline after the signals + * have been delivered. 
*/ - tcb->xcp.saved_eip = tcb->xcp.regs[REG_EIP]; - tcb->xcp.saved_eflags = tcb->xcp.regs[REG_EFLAGS]; + tcb->xcp.saved_eip = up_current_regs()[REG_EIP]; + tcb->xcp.saved_eflags = up_current_regs()[REG_EFLAGS]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_EIP] = (uint32_t)x86_sigdeliver; - tcb->xcp.regs[REG_EFLAGS] = 0; + up_current_regs()[REG_EIP] = (uint32_t)x86_sigdeliver; + up_current_regs()[REG_EFLAGS] = 0; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + x86_savestate(tcb->xcp.regs); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.saved_eip = tcb->xcp.regs[REG_EIP]; + tcb->xcp.saved_eflags = tcb->xcp.regs[REG_EFLAGS]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_EIP] = (uint32_t)x86_sigdeliver; + tcb->xcp.regs[REG_EFLAGS] = 0; + } } diff --git a/arch/x86/src/i486/i486_sigdeliver.c b/arch/x86/src/i486/i486_sigdeliver.c index 9ddf0ace415b9..48ef001468091 100644 --- a/arch/x86/src/i486/i486_sigdeliver.c +++ b/arch/x86/src/i486/i486_sigdeliver.c @@ -59,8 +59,8 @@ void x86_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. 
*/ @@ -76,7 +76,7 @@ void x86_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -96,9 +96,9 @@ void x86_sigdeliver(void) * could be modified by a hostile program. */ - regs[REG_EIP] = rtcb->xcp.saved_eip; - regs[REG_EFLAGS] = rtcb->xcp.saved_eflags; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_EIP] = rtcb->xcp.saved_eip; + regs[REG_EFLAGS] = rtcb->xcp.saved_eflags; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/arch/x86_64/include/intel64/irq.h b/arch/x86_64/include/intel64/irq.h index fbcdf54500039..ebb178e0b3f16 100644 --- a/arch/x86_64/include/intel64/irq.h +++ b/arch/x86_64/include/intel64/irq.h @@ -346,10 +346,18 @@ #define HPET0_IRQ IRQ2 #define HPET1_IRQ IRQ8 -/* Use IRQ15 IRQ16 for SMP */ +/* NuttX custom interrupts configuration starts from here. + * IRQ16-IRQ23 are reserved for GOLDFISH so we start from IRQ24. + */ + +/* Use IRQ24 IRQ25 for SMP */ + +#define SMP_IPI_CALL_IRQ IRQ24 +#define SMP_IPI_SCHED_IRQ IRQ25 + +/* Use IRQ32 and above for MSI */ -#define SMP_IPI_IRQ IRQ15 -#define SMP_IPI_ASYNC_IRQ IRQ16 +#define IRQ_MSI_START IRQ32 /* Common register save structure created by up_saveusercontext() and by * ISR/IRQ interrupt processing. @@ -487,12 +495,6 @@ enum ioapic_trigger_mode struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. - */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of instruction pointer and EFLAGS used during * signal processing. 
*/ diff --git a/arch/x86_64/src/intel64/CMakeLists.txt b/arch/x86_64/src/intel64/CMakeLists.txt index f5b1a76977269..d92242b5e0dad 100644 --- a/arch/x86_64/src/intel64/CMakeLists.txt +++ b/arch/x86_64/src/intel64/CMakeLists.txt @@ -57,7 +57,7 @@ if(CONFIG_ARCH_HAVE_TESTSET) endif() if(CONFIG_SMP) - list(APPEND SRCS intel64_cpuidlestack.c intel64_cpupause.c intel64_cpustart.c) + list(APPEND SRCS intel64_cpuidlestack.c intel64_smpcall.c intel64_cpustart.c) endif() if(CONFIG_MULTBOOT2_FB_TERM) diff --git a/arch/x86_64/src/intel64/Make.defs b/arch/x86_64/src/intel64/Make.defs index e1c8579c977c9..1fc6f1de52acf 100644 --- a/arch/x86_64/src/intel64/Make.defs +++ b/arch/x86_64/src/intel64/Make.defs @@ -44,7 +44,7 @@ endif ifeq ($(CONFIG_SMP),y) CHIP_CSRCS += intel64_cpuidlestack.c -CHIP_CSRCS += intel64_cpupause.c +CHIP_CSRCS += intel64_smpcall.c CHIP_CSRCS += intel64_cpustart.c endif diff --git a/arch/x86_64/src/intel64/intel64_cpuidlestack.c b/arch/x86_64/src/intel64/intel64_cpuidlestack.c index 05d8771b43c28..e9aec20395628 100644 --- a/arch/x86_64/src/intel64/intel64_cpuidlestack.c +++ b/arch/x86_64/src/intel64/intel64_cpuidlestack.c @@ -88,7 +88,8 @@ int up_cpu_idlestack(int cpu, struct tcb_s *tcb, size_t stack_size) /* Get the top of the stack */ - stack_alloc = (uintptr_t)g_idle_topstack[cpu]; + stack_alloc = (uintptr_t)g_idle_topstack[cpu] - + CONFIG_IDLETHREAD_STACKSIZE; tcb->adj_stack_size = stack_size - 8; tcb->stack_alloc_ptr = (void *)stack_alloc; tcb->stack_base_ptr = tcb->stack_alloc_ptr; diff --git a/arch/x86_64/src/intel64/intel64_cpupause.c b/arch/x86_64/src/intel64/intel64_cpupause.c deleted file mode 100644 index 7f6891d24cabb..0000000000000 --- a/arch/x86_64/src/intel64/intel64_cpupause.c +++ /dev/null @@ -1,464 +0,0 @@ -/**************************************************************************** - * arch/x86_64/src/intel64/intel64_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include "sched/sched.h" - -#include "x86_64_internal.h" - -/**************************************************************************** - * Public Data - ****************************************************************************/ - -/* These spinlocks are used in the SMP configuration in order to implement - * up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows - * - * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m] - * and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m]. - * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and - * (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second - * blocks CPUm in the interrupt handler. - * - * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler - * on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m] - * so that it will be ready for the next pause operation. 
- */ - -volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - /* Save the current context at current_regs into the TCB at the head - * of the assigned task list for this CPU. 
- */ - - x86_64_savestate(tcb->xcp.regs); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Release the g_cpu_paused spinlock to synchronize with the - * requesting CPU. - */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - /* Wait for the spinlock to be released. The requesting CPU will release - * the spinlock when the CPU is resumed. - */ - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. 
Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - /* Then switch contexts. Any necessary address environment changes - * will be made when the interrupt returns. - */ - - x86_64_restorestate(tcb->xcp.regs); - - return OK; -} - -/**************************************************************************** - * Name: up_pause_handler - * - * Description: - * Inter-CPU interrupt handler - * - * Input Parameters: - * Standard interrupt handler inputs - * - * Returned Value: - * Should always return OK - * - ****************************************************************************/ - -int up_pause_handler(int irq, void *c, void *arg) -{ - int cpu = this_cpu(); - - nxsched_smp_call_handler(irq, c, arg); - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the SG2 - * interrupt by calling up_cpu_paused. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. 
- */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - return OK; -} - -/**************************************************************************** - * Name: up_pause_async_handler - * - * Description: - * This is the handler for async pause. - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It porcess g_delivertasks - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * Standard interrupt handling - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_pause_async_handler(int irq, void *c, void *arg) -{ - int cpu = this_cpu(); - - nxsched_process_delivered(cpu); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - cpu_set_t cpuset; - - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); - - up_trigger_irq(SMP_IPI_ASYNC_IRQ, cpuset); - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. 
- * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - up_trigger_irq(SMP_IPI_IRQ, cpuset); -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped/ - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ - cpu_set_t cpuset; - sinfo("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Take the both spinlocks. The g_cpu_wait spinlock will prevent the SGI2 - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prefent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. 
- */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - /* Execute Pause IRQ to CPU(cpu) */ - - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); - - up_trigger_irq(SMP_IPI_IRQ, cpuset); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return 0; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ - sinfo("cpu=%d\n", cpu); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Release the spinlock. Releasing the spinlock will cause the SGI2 - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. 
- */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return 0; -} diff --git a/arch/x86_64/src/intel64/intel64_cpustart.c b/arch/x86_64/src/intel64/intel64_cpustart.c index 2fe404349a032..efd347982ba06 100644 --- a/arch/x86_64/src/intel64/intel64_cpustart.c +++ b/arch/x86_64/src/intel64/intel64_cpustart.c @@ -32,6 +32,7 @@ #include #include +#include "sched/sched.h" #include "init/init.h" #include "intel64_lowsetup.h" @@ -46,8 +47,8 @@ ****************************************************************************/ extern void __ap_entry(void); -extern int up_pause_handler(int irq, void *c, void *arg); -extern int up_pause_async_handler(int irq, void *c, void *arg); +extern int x86_64_smp_call_handler(int irq, void *c, void *arg); +extern int x86_64_smp_sched_handler(int irq, void *c, void *arg); /**************************************************************************** * Private Functions @@ -128,8 +129,11 @@ static int x86_64_ap_startup(int cpu) void x86_64_ap_boot(void) { + struct tcb_s *tcb = this_task(); uint8_t cpu = 0; + UNUSED(tcb); + /* Do some checking on CPU compatibilities at the top of this function */ x86_64_check_and_enable_capability(); @@ -153,17 +157,26 @@ void x86_64_ap_boot(void) #ifdef CONFIG_SCHED_INSTRUMENTATION /* Notify that this CPU has started */ - sched_note_cpu_started(this_task()); + sched_note_cpu_started(tcb); #endif sinfo("cpu=%d\n", cpu); /* Connect Pause IRQ to CPU */ - irq_attach(SMP_IPI_IRQ, up_pause_handler, NULL); - irq_attach(SMP_IPI_ASYNC_IRQ, up_pause_async_handler, NULL); - up_enable_irq(SMP_IPI_IRQ); - up_enable_irq(SMP_IPI_ASYNC_IRQ); + irq_attach(SMP_IPI_CALL_IRQ, x86_64_smp_call_handler, NULL); + irq_attach(SMP_IPI_SCHED_IRQ, x86_64_smp_sched_handler, NULL); + up_enable_irq(SMP_IPI_CALL_IRQ); + 
up_enable_irq(SMP_IPI_SCHED_IRQ); + +#ifdef CONFIG_STACK_COLORATION + /* If stack debug is enabled, then fill the stack with a + * recognizable value that we can use later to test for high + * water marks. + */ + + x86_64_stack_color(tcb->stack_alloc_ptr, 0); +#endif /* CPU ready */ diff --git a/arch/x86_64/src/intel64/intel64_head.S b/arch/x86_64/src/intel64/intel64_head.S index 5792067c2ce46..764b8c3ca9733 100644 --- a/arch/x86_64/src/intel64/intel64_head.S +++ b/arch/x86_64/src/intel64/intel64_head.S @@ -364,6 +364,10 @@ ap_start: add %rax, %rbx mov (%rbx), %rsp + /* Move initial RSP below IDLE TCB regs */ + sub $XCPTCONTEXT_SIZE, %rsp + and $(~XCPTCONTEXT_SIZE), %rsp + /* Jump to ap_start routine */ movabs $x86_64_ap_boot, %rbx jmp *%rbx diff --git a/arch/x86_64/src/intel64/intel64_irq.c b/arch/x86_64/src/intel64/intel64_irq.c index 374a240c68140..a71cd198d695c 100644 --- a/arch/x86_64/src/intel64/intel64_irq.c +++ b/arch/x86_64/src/intel64/intel64_irq.c @@ -48,7 +48,6 @@ * Pre-processor Definitions ****************************************************************************/ -#define IRQ_MSI_START IRQ32 #define X86_64_MAR_DEST 0xfee00000 #define X86_64_MDR_TYPE 0x4000 diff --git a/arch/x86_64/src/intel64/intel64_schedulesigaction.c b/arch/x86_64/src/intel64/intel64_schedulesigaction.c index 0763b2cab5bf8..d9d01d23efae2 100644 --- a/arch/x86_64/src/intel64/intel64_schedulesigaction.c +++ b/arch/x86_64/src/intel64/intel64_schedulesigaction.c @@ -71,245 +71,188 @@ ****************************************************************************/ #ifndef CONFIG_SMP -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); - sinfo("rtcb=%p g_current_regs=%p\n", this_task(), up_current_regs()); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special 
cases when the signal is being delivered + * to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. */ - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and a task is - * signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal with a function call - * now. - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the interrupted task - * is the same as the one that must receive the signal, then we - * will have to modify the return state as well as the state in the - * TCB. - * - * Hmmm... there looks like a latent bug here: The following logic - * would fail in the strange case where we are in an interrupt - * handler, the thread is signalling itself, but a context switch - * to another task has occurred so that current_regs does not - * refer to the thread of this_task()! + /* In this case just deliver the signal with a function call + * now. */ - else - { - /* Save the return lr and cpsr and one scratch register. These - * will be restored by the signal trampoline after the signals - * have been delivered. - */ - - tcb->xcp.saved_rip = up_current_regs()[REG_RIP]; - tcb->xcp.saved_rsp = up_current_regs()[REG_RSP]; - tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_RIP] = (uint64_t)x86_64_sigdeliver; - up_current_regs()[REG_RFLAGS] = 0; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. 
- */ - - x86_64_savestate(tcb->xcp.regs); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the interrupted task + * is the same as the one that must receive the signal, then we + * will have to modify the return state as well as the state in the + * TCB. + * + * Hmmm... there looks like a latent bug here: The following logic + * would fail in the strange case where we are in an interrupt + * handler, the thread is signalling itself, but a context switch + * to another task has occurred so that current_regs does not + * refer to the thread of this_task()! */ else { - /* Save the return lr and cpsr and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. + /* Save the return lr and cpsr and one scratch register. These + * will be restored by the signal trampoline after the signals + * have been delivered. */ - tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP]; - tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP]; - tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS]; + tcb->xcp.saved_rip = up_current_regs()[REG_RIP]; + tcb->xcp.saved_rsp = up_current_regs()[REG_RSP]; + tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver; - tcb->xcp.regs[REG_RFLAGS] = 0; + up_current_regs()[REG_RIP] = (uint64_t)x86_64_sigdeliver; + up_current_regs()[REG_RSP] = up_current_regs()[REG_RSP] - 8; + up_current_regs()[REG_RFLAGS] = 0; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. 
+ */ + + x86_64_savestate(tcb->xcp.regs); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP]; + tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP]; + tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver; + tcb->xcp.regs[REG_RIP] = tcb->xcp.regs[REG_RIP] - 8; + tcb->xcp.regs[REG_RFLAGS] = 0; + } } #else /* !CONFIG_SMP */ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { int cpu; int me; - sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb->task_state == TSTATE_TASK_RUNNING) { - tcb->xcp.sigdeliver = sigdeliver; + me = this_cpu(); + cpu = tcb->cpu; - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. + /* CASE 1: We are not in an interrupt handler and a task is + * signaling itself for some reason. */ - sinfo("rtcb=0x%p current_regs=0x%p\n", this_task(), - up_current_regs()); - - if (tcb->task_state == TSTATE_TASK_RUNNING) + if (cpu == me && !up_current_regs()) { - me = this_cpu(); - cpu = tcb->cpu; - - /* CASE 1: We are not in an interrupt handler and a task is - * signaling itself for some reason. 
- */ - - if (cpu == me && !up_current_regs()) - { - /* In this case just deliver the signal now. - * REVISIT: Signal handler will run in a critical section! - */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: The task that needs to receive the signal is running. - * This could happen if the task is running on another CPU OR if - * we are in an interrupt handler and the task is running on this - * CPU. In the former case, we will have to PAUSE the other CPU - * first. But in either case, we will have to modify the return - * state as well as the state in the TCB. + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! */ - else - { - /* If we signaling a task running on the other CPU, we have - * to PAUSE the other CPU. - */ - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - - /* Now tcb on the other CPU can be accessed safely */ - - /* Copy tcb->xcp.regs to tcp.xcp.saved. These will be - * restored by the signal trampoline after the signal has - * been delivered. - */ - - tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP]; - tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP]; - tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver; - tcb->xcp.regs[REG_RFLAGS] = 0; - } - else - { - /* tcb is running on the same CPU */ - - /* Save the return lr and cpsr and one scratch register. - * These will be restored by the signal trampoline after - * the signals have been delivered. 
- */ - - tcb->xcp.saved_rip = up_current_regs()[REG_RIP]; - tcb->xcp.saved_rsp = up_current_regs()[REG_RSP]; - tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - up_current_regs()[REG_RIP] = (uint64_t)x86_64_sigdeliver; - up_current_regs()[REG_RFLAGS] = 0; - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ - - x86_64_savestate(tcb->xcp.regs); - } - - /* NOTE: If the task runs on another CPU(cpu), adjusting - * global IRQ controls will be done in the pause handler - * on the CPU(cpu) by taking a critical section. - * If the task is scheduled on this CPU(me), do nothing - * because this CPU already took a critical section - */ - - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running from an - * interrupt handler or (2) we are not in an interrupt handler and the - * running task is signaling some other non-running task. + /* CASE 2: The task that needs to receive the signal is running. + * This could happen if the task is running on another CPU OR if + * we are in an interrupt handler and the task is running on this + * CPU. In the former case, we will have to PAUSE the other CPU + * first. But in either case, we will have to modify the return + * state as well as the state in the TCB. */ else { - /* Save the return lr and cpsr and one scratch register + /* tcb is running on the same CPU */ + + /* Save the return lr and cpsr and one scratch register. * These will be restored by the signal trampoline after * the signals have been delivered. 
 */ - tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP]; - tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP]; - tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS]; + tcb->xcp.saved_rip = up_current_regs()[REG_RIP]; + tcb->xcp.saved_rsp = up_current_regs()[REG_RSP]; + tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver; - tcb->xcp.regs[REG_RFLAGS] = 0; + up_current_regs()[REG_RIP] = (uint64_t)x86_64_sigdeliver; + up_current_regs()[REG_RSP] = up_current_regs()[REG_RSP] - 8; + up_current_regs()[REG_RFLAGS] = 0; + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + x86_64_savestate(tcb->xcp.regs); } } + + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signaling some other non-running task. + */ + + else + { + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. 
+ */ + + tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP]; + tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP]; + tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver; + tcb->xcp.regs[REG_RSP] = tcb->xcp.regs[REG_RSP] - 8; + tcb->xcp.regs[REG_RFLAGS] = 0; + } } #endif /* CONFIG_SMP */ diff --git a/arch/x86_64/src/intel64/intel64_sigdeliver.c b/arch/x86_64/src/intel64/intel64_sigdeliver.c index a7f5763b424e7..5701291820e6a 100644 --- a/arch/x86_64/src/intel64/intel64_sigdeliver.c +++ b/arch/x86_64/src/intel64/intel64_sigdeliver.c @@ -69,8 +69,8 @@ void x86_64_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Align regs to 64 byte boundary for XSAVE */ @@ -113,7 +113,7 @@ void x86_64_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -138,10 +138,10 @@ void x86_64_sigdeliver(void) * could be modified by a hostile program. 
*/ - regs[REG_RIP] = rtcb->xcp.saved_rip; - regs[REG_RSP] = rtcb->xcp.saved_rsp; - regs[REG_RFLAGS] = rtcb->xcp.saved_rflags; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[REG_RIP] = rtcb->xcp.saved_rip; + regs[REG_RSP] = rtcb->xcp.saved_rsp; + regs[REG_RFLAGS] = rtcb->xcp.saved_rflags; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ #ifdef CONFIG_SMP /* Restore the saved 'irqcount' and recover the critical section diff --git a/arch/x86_64/src/intel64/intel64_smpcall.c b/arch/x86_64/src/intel64/intel64_smpcall.c new file mode 100644 index 0000000000000..24f23d8e2f338 --- /dev/null +++ b/arch/x86_64/src/intel64/intel64_smpcall.c @@ -0,0 +1,157 @@ +/**************************************************************************** + * arch/x86_64/src/intel64/intel64_smpcall.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
 + * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "sched/sched.h" + +#include "x86_64_internal.h" + +/**************************************************************************** + * Public Data + ****************************************************************************/ + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: x86_64_smp_call_handler + * + * Description: + * This is the handler for SMP_CALL. + * + ****************************************************************************/ + +int x86_64_smp_call_handler(int irq, void *c, void *arg) +{ + struct tcb_s *tcb; + int cpu = this_cpu(); + + tcb = current_task(cpu); + x86_64_savestate(tcb->xcp.regs); + nxsched_smp_call_handler(irq, c, arg); + tcb = current_task(cpu); + x86_64_restorestate(tcb->xcp.regs); + + return OK; +} + +/**************************************************************************** + * Name: x86_64_smp_sched_handler + * + * Description: + * This is the handler for SMP sched. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + ****************************************************************************/ + +int x86_64_smp_sched_handler(int irq, void *c, void *arg) +{ + struct tcb_s *tcb; + int cpu = this_cpu(); + + tcb = current_task(cpu); + nxsched_suspend_scheduler(tcb); + x86_64_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + nxsched_resume_scheduler(tcb); + x86_64_restorestate(tcb->xcp.regs); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_sched + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +int up_send_smp_sched(int cpu) +{ + cpu_set_t cpuset; + + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + + up_trigger_irq(SMP_IPI_SCHED_IRQ, cpuset); + + return OK; +} + +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ + +void up_send_smp_call(cpu_set_t cpuset) +{ + up_trigger_irq(SMP_IPI_CALL_IRQ, cpuset); +} diff --git a/arch/xtensa/include/irq.h b/arch/xtensa/include/irq.h index 21ce8f20f437b..8fc4ff1887b12 100644 --- a/arch/xtensa/include/irq.h +++ b/arch/xtensa/include/irq.h @@ -178,12 +178,6 @@ struct xcpt_syscall_s struct xcptcontext { - /* The following function pointer is non-zero if there are pending signals - * to be processed. 
- */ - - void *sigdeliver; /* Actual type is sig_deliver_t */ - /* These are saved copies of registers used during signal processing. * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/xtensa/src/common/Make.defs b/arch/xtensa/src/common/Make.defs index 46997a6e78b85..061575726be59 100644 --- a/arch/xtensa/src/common/Make.defs +++ b/arch/xtensa/src/common/Make.defs @@ -53,7 +53,7 @@ ifeq ($(CONFIG_SCHED_BACKTRACE),y) endif ifeq ($(CONFIG_SMP),y) - CMN_CSRCS += xtensa_cpupause.c + CMN_CSRCS += xtensa_smpcall.c endif ifeq ($(CONFIG_STACK_COLORATION),y) diff --git a/arch/xtensa/src/common/xtensa.h b/arch/xtensa/src/common/xtensa.h index bae9bd1f814ff..f0eb3a5ff3479 100644 --- a/arch/xtensa/src/common/xtensa.h +++ b/arch/xtensa/src/common/xtensa.h @@ -234,7 +234,7 @@ uint32_t *xtensa_user(int exccause, uint32_t *regs); #ifdef CONFIG_SMP int xtensa_intercpu_interrupt(int tocpu, int intcode); -void xtensa_pause_handler(void); +void xtensa_smp_call_handler(int irq, void *context, void *arg); #endif /* Signals */ diff --git a/arch/xtensa/src/common/xtensa_cpupause.c b/arch/xtensa/src/common/xtensa_cpupause.c deleted file mode 100644 index 1eeb13c5844db..0000000000000 --- a/arch/xtensa/src/common/xtensa_cpupause.c +++ /dev/null @@ -1,405 +0,0 @@ -/**************************************************************************** - * arch/xtensa/src/common/xtensa_cpupause.c - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include - -#include -#include - -#include -#include -#include -#include - -#include "xtensa.h" -#include "sched/sched.h" - -#ifdef CONFIG_SMP - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS]; -static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS]; -static spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. 
- * - ****************************************************************************/ - -bool up_cpu_pausereq(int cpu) -{ - return spin_is_locked(&g_cpu_paused[cpu]); -} - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_save(void) -{ - struct tcb_s *tcb = this_task(); - - /* Update scheduler parameters */ - - nxsched_suspend_scheduler(tcb); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we are paused */ - - sched_note_cpu_paused(tcb); -#endif - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending pause - * operation in other contexts where the interrupt cannot be taken in - * order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. 
- * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused(int cpu) -{ - /* Wait for the spinlock to be released */ - - spin_unlock(&g_cpu_paused[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_lock(&g_cpu_wait[cpu]); - - spin_unlock(&g_cpu_wait[cpu]); - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -int up_cpu_paused_restore(void) -{ - struct tcb_s *tcb = this_task(); - -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify that we have resumed */ - - sched_note_cpu_resumed(tcb); -#endif - - /* Reset scheduler parameters */ - - nxsched_resume_scheduler(tcb); - - UNUSED(tcb); - - return OK; -} - -/**************************************************************************** - * Name: xtensa_pause_handler - * - * Description: - * This is the handler for CPU_INTCODE_PAUSE CPU interrupt. This - * implements up_cpu_pause() by performing the following operations: - * - * 1. The current task state at the head of the current assigned task - * list was saved when the interrupt was entered. - * 2. This function simply waits on a spinlock, then returns. - * 3. 
Upon return, the interrupt exit logic will restore the state of - * the new task at the head of the ready to run list. - * - * Input Parameters: - * None - * - * Returned Value: - * None - * - ****************************************************************************/ - -void xtensa_pause_handler(void) -{ - struct tcb_s *tcb; - int cpu = this_cpu(); - - /* Check for false alarms. Such false could occur as a consequence of - * some deadlock breaking logic that might have already serviced the - * interrupt by calling up_cpu_paused. - */ - - if (up_cpu_pausereq(cpu)) - { - /* NOTE: The following enter_critical_section() will call - * up_cpu_paused() to process a pause request to break a deadlock - * because the caller held a critical section. Once up_cpu_paused() - * finished, the caller will proceed and release the g_cpu_irqlock. - * Then this CPU will acquire g_cpu_irqlock in the function. - */ - - irqstate_t flags = enter_critical_section(); - - /* NOTE: the pause request should not exist here */ - - DEBUGVERIFY(!up_cpu_pausereq(cpu)); - - leave_critical_section(flags); - } - - nxsched_process_delivered(cpu); -} - -/**************************************************************************** - * Name: up_cpu_pause_async - * - * Description: - * pause task execution on the CPU - * check whether there are tasks delivered to specified cpu - * and try to run them. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. 
- * - * Assumptions: - * Called from within a critical section; - * - ****************************************************************************/ - -inline_function int up_cpu_pause_async(int cpu) -{ - /* Execute the intercpu interrupt */ - - xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE); - - return OK; -} - -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu. - * - * Input Parameters: - * cpuset - The set of CPUs to receive the SGI. - * - * Returned Value: - * None. - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset) -{ - int cpu; - - for (; cpuset != 0; cpuset &= ~(1 << cpu)) - { - cpu = ffs(cpuset) - 1; - xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE); - } -} - -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be stopped. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_pause(int cpu) -{ -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the pause event */ - - sched_note_cpu_pause(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Take both spinlocks. 
The g_cpu_wait spinlock will prevent the interrupt - * handler from returning until up_cpu_resume() is called; g_cpu_paused - * is a handshake that will prevent this function from returning until - * the CPU is actually paused. - * Note that we might spin before getting g_cpu_wait, this just means that - * the other CPU still hasn't finished responding to the previous resume - * request. - */ - - DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu])); - - spin_lock(&g_cpu_wait[cpu]); - spin_lock(&g_cpu_paused[cpu]); - - up_cpu_pause_async(cpu); - - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - - spin_unlock(&g_cpu_paused[cpu]); - - /* On successful return g_cpu_wait will be locked, the other CPU will be - * spinning on g_cpu_wait and will not continue until g_cpu_resume() is - * called. g_cpu_paused will be unlocked in any case. - */ - - return OK; -} - -/**************************************************************************** - * Name: up_cpu_resume - * - * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order resume operation of - * the CPU after modifying its g_assignedtasks[cpu] list. - * - * Input Parameters: - * cpu - The index of the CPU being re-started. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - ****************************************************************************/ - -int up_cpu_resume(int cpu) -{ -#ifdef CONFIG_SCHED_INSTRUMENTATION - /* Notify of the resume event */ - - sched_note_cpu_resume(this_task(), cpu); -#endif - - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); - - /* Release the spinlock. 
Releasing the spinlock will cause the interrupt - * handler on 'cpu' to continue and return from interrupt to the newly - * established thread. - */ - - DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) && - !spin_is_locked(&g_cpu_paused[cpu])); - - spin_unlock(&g_cpu_wait[cpu]); - - /* Ensure the CPU has been resumed to avoid causing a deadlock */ - - spin_lock(&g_cpu_resumed[cpu]); - - spin_unlock(&g_cpu_resumed[cpu]); - - return OK; -} - -#endif /* CONFIG_SMP */ diff --git a/arch/xtensa/src/common/xtensa_schedsigaction.c b/arch/xtensa/src/common/xtensa_schedsigaction.c index 2d3a381583d3a..0a37b5bccada0 100644 --- a/arch/xtensa/src/common/xtensa_schedsigaction.c +++ b/arch/xtensa/src/common/xtensa_schedsigaction.c @@ -78,94 +78,67 @@ * ****************************************************************************/ -void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, this_task(), + this_task()->xcp.regs); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task() && !up_interrupt_context()) { - tcb->xcp.sigdeliver = sigdeliver; + /* In this case just deliver the signal now. + * REVISIT: Signal handler will run in a critical section! + */ - /* First, handle some special cases when the signal is being delivered - * to task that is currently executing on any CPU. + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } + else + { + /* Save the context registers. These will be restored by the + * signal trampoline after the signals have been delivered. + * + * NOTE: that hi-priority interrupts are not disabled. */ - if (tcb == this_task() && !up_interrupt_context()) - { - /* In this case just deliver the signal now. 
- * REVISIT: Signal handler will run in a critical section! - */ + tcb->xcp.saved_regs = tcb->xcp.regs; - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - else + if ((tcb->xcp.saved_regs[REG_PS] & PS_EXCM_MASK) != 0) { -#ifdef CONFIG_SMP - int cpu = tcb->cpu; - int me = this_cpu(); - - if (cpu != me) - { - /* Pause the CPU */ - - up_cpu_pause(cpu); - } -#endif - - /* Save the context registers. These will be restored by the - * signal trampoline after the signals have been delivered. - * - * NOTE: that hi-priority interrupts are not disabled. - */ - - tcb->xcp.saved_regs = tcb->xcp.regs; - - if ((tcb->xcp.saved_regs[REG_PS] & PS_EXCM_MASK) != 0) - { - tcb->xcp.saved_regs[REG_PS] &= ~PS_EXCM_MASK; - } + tcb->xcp.saved_regs[REG_PS] &= ~PS_EXCM_MASK; + } - /* Duplicate the register context. These will be - * restored by the signal trampoline after the signal has been - * delivered. - */ + /* Duplicate the register context. These will be + * restored by the signal trampoline after the signal has been + * delivered. 
+ */ - tcb->xcp.regs = (void *) - ((uint32_t)tcb->xcp.regs - - XCPTCONTEXT_SIZE); - memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); + tcb->xcp.regs = (void *) + ((uint32_t)tcb->xcp.regs - + XCPTCONTEXT_SIZE); + memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE); - tcb->xcp.regs[REG_A1] = (uint32_t)tcb->xcp.regs + - XCPTCONTEXT_SIZE; + tcb->xcp.regs[REG_A1] = (uint32_t)tcb->xcp.regs + + XCPTCONTEXT_SIZE; - /* Then set up to vector to the trampoline with interrupts - * disabled - */ + /* Then set up to vector to the trampoline with interrupts + * disabled + */ - tcb->xcp.regs[REG_PC] = (uint32_t)xtensa_sig_deliver; + tcb->xcp.regs[REG_PC] = (uint32_t)xtensa_sig_deliver; #ifdef __XTENSA_CALL0_ABI__ - tcb->xcp.regs[REG_PS] = (uint32_t) - (PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM); + tcb->xcp.regs[REG_PS] = (uint32_t) + (PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM); #else - tcb->xcp.regs[REG_PS] = (uint32_t) - (PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | - PS_WOE | PS_CALLINC(1)); + tcb->xcp.regs[REG_PS] = (uint32_t) + (PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | + PS_WOE | PS_CALLINC(1)); #endif #ifndef CONFIG_BUILD_FLAT - xtensa_raiseprivilege(tcb->xcp.regs); + xtensa_raiseprivilege(tcb->xcp.regs); #endif - -#ifdef CONFIG_SMP - /* RESUME the other CPU if it was PAUSED */ - - if (cpu != me) - { - up_cpu_resume(cpu); - } -#endif - } } } diff --git a/arch/xtensa/src/common/xtensa_sigdeliver.c b/arch/xtensa/src/common/xtensa_sigdeliver.c index 97c4f3ded9330..3ea04d309918a 100644 --- a/arch/xtensa/src/common/xtensa_sigdeliver.c +++ b/arch/xtensa/src/common/xtensa_sigdeliver.c @@ -68,8 +68,8 @@ void xtensa_sig_deliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); retry: #ifdef CONFIG_SMP @@ -102,7 +102,7 @@ void 
xtensa_sig_deliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -146,7 +146,7 @@ void xtensa_sig_deliver(void) * could be modified by a hostile program. */ - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/sched/sched/sched_cpupause.c b/arch/xtensa/src/common/xtensa_smpcall.c similarity index 51% rename from sched/sched/sched_cpupause.c rename to arch/xtensa/src/common/xtensa_smpcall.c index 571c1a928b8a2..f92f442e2f135 100644 --- a/sched/sched/sched_cpupause.c +++ b/arch/xtensa/src/common/xtensa_smpcall.c @@ -1,7 +1,5 @@ /**************************************************************************** - * sched/sched/sched_cpupause.c - * - * SPDX-License-Identifier: Apache-2.0 + * arch/xtensa/src/common/xtensa_smpcall.c * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -26,81 +24,92 @@ #include -#include +#include #include -#include #include +#include +#include #include +#include "xtensa.h" #include "sched/sched.h" #ifdef CONFIG_SMP +/**************************************************************************** + * Private Data + ****************************************************************************/ + /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** - * Name: nxsched_pause_cpu + * Name: xtensa_smp_call_handler + * + * Description: + * This is the handler for smp. 
+ * + ****************************************************************************/ + +void xtensa_smp_call_handler(int irq, void *context, void *arg) +{ + nxsched_smp_call_handler(irq, context, arg); + nxsched_process_delivered(this_cpu()); +} + +/**************************************************************************** + * Name: up_send_smp_sched * * Description: - * Check if task associated with 'tcb' is running on a different CPU. If - * so then pause that CPU and return its CPU index. + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. * * Input Parameters: - * tcb - The TCB of the task to be conditionally paused. + * cpu - The index of the CPU to be paused. * * Returned Value: - * If a CPU is pauses its non-negative CPU index is returned. This index - * may then be used to resume the CPU. If the task is not running at all - * (or if an error occurs), then a negated errno value is returned. -ESRCH - * is returned in the case where the task is not running on any CPU. + * Zero on success; a negated errno value on failure. * * Assumptions: - * This function was called in a critical section. In that case, no tasks - * may started or may exit until the we leave the critical section. This - * critical section should extend until up_cpu_resume() is called in the - * typical case. 
+ * Called from within a critical section; * ****************************************************************************/ -int nxsched_pause_cpu(FAR struct tcb_s *tcb) +int up_send_smp_sched(int cpu) { - int cpu; - int ret; + /* Execute the intercpu interrupt */ - DEBUGASSERT(tcb != NULL); - - /* If the task is not running at all then our job is easy */ - - cpu = tcb->cpu; - if (tcb->task_state != TSTATE_TASK_RUNNING) - { - return -ESRCH; - } + xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE); - /* Check the CPU that the task is running on */ - - DEBUGASSERT(cpu != this_cpu() && (unsigned int)cpu < CONFIG_SMP_NCPUS); - if (cpu == this_cpu()) - { - /* We can't pause ourself */ + return OK; +} - return -EACCES; - } +/**************************************************************************** + * Name: up_send_smp_call + * + * Description: + * Send smp call to target cpu. + * + * Input Parameters: + * cpuset - The set of CPUs to receive the SGI. + * + * Returned Value: + * None. + * + ****************************************************************************/ - /* Pause the CPU that the task is running on */ +void up_send_smp_call(cpu_set_t cpuset) +{ + int cpu; - ret = up_cpu_pause(cpu); - if (ret < 0) + for (; cpuset != 0; cpuset &= ~(1 << cpu)) { - return ret; + cpu = ffs(cpuset) - 1; + up_send_smp_sched(cpu); } - - /* Return the CPU that the task is running on */ - - return cpu; } #endif /* CONFIG_SMP */ diff --git a/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c b/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c index d43a09ed3d810..96e189bc86386 100644 --- a/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c +++ b/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c @@ -49,7 +49,8 @@ * ****************************************************************************/ -static int IRAM_ATTR esp32_fromcpu_interrupt(int fromcpu) +static int IRAM_ATTR esp32_fromcpu_interrupt(int irq, void *context, + void *arg, int fromcpu) { uintptr_t regaddr; @@ -62,9 +63,9 @@ static 
int IRAM_ATTR esp32_fromcpu_interrupt(int fromcpu) DPORT_CPU_INTR_FROM_CPU_1_REG; putreg32(0, regaddr); - /* Call pause handler */ + /* Smp call handler */ - xtensa_pause_handler(); + xtensa_smp_call_handler(irq, context, arg); return OK; } @@ -83,14 +84,12 @@ static int IRAM_ATTR esp32_fromcpu_interrupt(int fromcpu) int IRAM_ATTR esp32_fromcpu0_interrupt(int irq, void *context, void *arg) { - nxsched_smp_call_handler(irq, context, arg); - return esp32_fromcpu_interrupt(0); + return esp32_fromcpu_interrupt(irq, context, arg, 0); } int IRAM_ATTR esp32_fromcpu1_interrupt(int irq, void *context, void *arg) { - nxsched_smp_call_handler(irq, context, arg); - return esp32_fromcpu_interrupt(1); + return esp32_fromcpu_interrupt(irq, context, arg, 1); } /**************************************************************************** diff --git a/arch/xtensa/src/esp32/esp32_spiram.c b/arch/xtensa/src/esp32/esp32_spiram.c index 091ccea4b09e9..8310dbf991786 100644 --- a/arch/xtensa/src/esp32/esp32_spiram.c +++ b/arch/xtensa/src/esp32/esp32_spiram.c @@ -74,6 +74,26 @@ static bool spiram_inited = false; +/**************************************************************************** + * Private Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: pause_cpu_handler + ****************************************************************************/ + +#ifdef CONFIG_SMP +static volatile bool g_cpu_wait = true; +static volatile bool g_cpu_pause = false; +static int pause_cpu_handler(FAR void *cookie) +{ + g_cpu_pause = true; + while (g_cpu_wait); + + return OK; +} +#endif + /**************************************************************************** * Public Functions ****************************************************************************/ @@ -183,7 +203,10 @@ unsigned int IRAM_ATTR cache_sram_mmu_set(int cpu_no, int pid, if (os_ready) { cpu_to_stop = 
this_cpu() == 1 ? 0 : 1; - up_cpu_pause(cpu_to_stop); + g_cpu_wait = true; + g_cpu_pause = false; + nxsched_smp_call_single(cpu_to_stop, pause_cpu_handler, NULL, false); + while (!g_cpu_pause); } spi_disable_cache(1); @@ -221,7 +244,7 @@ unsigned int IRAM_ATTR cache_sram_mmu_set(int cpu_no, int pid, if (os_ready) { - up_cpu_resume(cpu_to_stop); + g_cpu_wait = false; } #endif diff --git a/arch/xtensa/src/esp32s3/esp32s3_intercpu_interrupt.c b/arch/xtensa/src/esp32s3/esp32s3_intercpu_interrupt.c index f991d00310efe..52ed92e0dd94d 100644 --- a/arch/xtensa/src/esp32s3/esp32s3_intercpu_interrupt.c +++ b/arch/xtensa/src/esp32s3/esp32s3_intercpu_interrupt.c @@ -50,7 +50,8 @@ * ****************************************************************************/ -static int IRAM_ATTR esp32s3_fromcpu_interrupt(int fromcpu) +static int IRAM_ATTR esp32s3_fromcpu_interrupt(int irq, void *context, + void *arg, int fromcpu) { uintptr_t regaddr; @@ -63,9 +64,9 @@ static int IRAM_ATTR esp32s3_fromcpu_interrupt(int fromcpu) SYSTEM_CPU_INTR_FROM_CPU_1_REG; putreg32(0, regaddr); - /* Call pause handler */ + /* Smp call handler */ - xtensa_pause_handler(); + xtensa_smp_call_handler(irq, context, arg); return OK; } @@ -84,14 +85,12 @@ static int IRAM_ATTR esp32s3_fromcpu_interrupt(int fromcpu) int IRAM_ATTR esp32s3_fromcpu0_interrupt(int irq, void *context, void *arg) { - nxsched_smp_call_handler(irq, context, arg); - return esp32s3_fromcpu_interrupt(0); + return esp32s3_fromcpu_interrupt(irq, context, arg, 0); } int IRAM_ATTR esp32s3_fromcpu1_interrupt(int irq, void *context, void *arg) { - nxsched_smp_call_handler(irq, context, arg); - return esp32s3_fromcpu_interrupt(1); + return esp32s3_fromcpu_interrupt(irq, context, arg, 1); } /**************************************************************************** diff --git a/arch/xtensa/src/esp32s3/esp32s3_spiram.c b/arch/xtensa/src/esp32s3/esp32s3_spiram.c index 51b8ae72bb1eb..2b9d0f01fe983 100644 --- a/arch/xtensa/src/esp32s3/esp32s3_spiram.c 
+++ b/arch/xtensa/src/esp32s3/esp32s3_spiram.c @@ -263,6 +263,22 @@ static int IRAM_ATTR esp_mmu_map_region(uint32_t vaddr, uint32_t paddr, return ret; } +/**************************************************************************** + * Name: pause_cpu_handler + ****************************************************************************/ + +#ifdef CONFIG_SMP +static volatile bool g_cpu_wait = true; +static volatile bool g_cpu_pause = false; +static int pause_cpu_handler(FAR void *cookie) +{ + g_cpu_pause = true; + while (g_cpu_wait); + + return OK; +} +#endif + /**************************************************************************** * Public Functions ****************************************************************************/ @@ -309,7 +325,10 @@ int IRAM_ATTR cache_dbus_mmu_map(int vaddr, int paddr, int num) if (smp_start) { - up_cpu_pause(other_cpu); + g_cpu_wait = true; + g_cpu_pause = false; + nxsched_smp_call_single(other_cpu, pause_cpu_handler, NULL, false); + while (!g_cpu_pause); } cache_state[other_cpu] = cache_suspend_dcache(); @@ -336,7 +355,7 @@ int IRAM_ATTR cache_dbus_mmu_map(int vaddr, int paddr, int num) cache_resume_dcache(cache_state[other_cpu]); if (smp_start) { - up_cpu_resume(other_cpu); + g_cpu_wait = false; } #endif diff --git a/arch/z16/include/z16f/irq.h b/arch/z16/include/z16f/irq.h index 4a2be42ed43f3..0782cefe3979f 100644 --- a/arch/z16/include/z16f/irq.h +++ b/arch/z16/include/z16f/irq.h @@ -165,12 +165,6 @@ struct xcptcontext uint16_t regs[XCPTCONTEXT_REGS]; - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - CODE void *sigdeliver; /* Actual type is sig_deliver_t */ - /* The following retains that state during signal execution. 
* * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/z16/src/common/z16_schedulesigaction.c b/arch/z16/src/common/z16_schedulesigaction.c index ca8f6bcb15b15..51cceea436159 100644 --- a/arch/z16/src/common/z16_schedulesigaction.c +++ b/arch/z16/src/common/z16_schedulesigaction.c @@ -74,93 +74,85 @@ * ****************************************************************************/ -void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(FAR struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=0x%06x\n", tcb, (uint32_t)sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is + * being delivered to the currently executing task. + */ - if (!tcb->xcp.sigdeliver) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is - * being delivered to the currently executing task. + /* CASE 1: We are not in an interrupt handler and + * a task is signalling itself for some reason. */ - sinfo("rtcb=%p current_regs=%p\n", this_task(), up_current_regs()); - - if (tcb == this_task()) + if (!up_current_regs()) { - /* CASE 1: We are not in an interrupt handler and - * a task is signalling itself for some reason. - */ - - if (!up_current_regs()) - { - /* In this case just deliver the signal now. */ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the interrupted - * task is the same as the one that must receive the signal, then - * we will have to modify the return state as well as the state - * in the TCB. - */ - - else - { - FAR uint32_t *current_pc = - (FAR uint32_t *)&up_current_regs()[REG_PC]; - - /* Save the return address and interrupt state. These will be - * restored by the signal trampoline after the signals have - * been delivered. 
- */ - - tcb->xcp.saved_pc = *current_pc; - tcb->xcp.saved_i = up_current_regs()[REG_FLAGS]; - - /* Then set up to vector to the trampoline with interrupts - * disabled - */ - - *current_pc = (uint32_t)z16_sigdeliver; - up_current_regs()[REG_FLAGS] = 0; - - /* And make sure that the saved context in the TCB is the - * same as the interrupt return context. - */ + /* In this case just deliver the signal now. */ - z16_copystate(tcb->xcp.regs, up_current_regs()); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running from an - * interrupt handler or (2) we are not in an interrupt handler - * and the running task is signalling some non-running task. + /* CASE 2: We are in an interrupt handler AND the interrupted + * task is the same as the one that must receive the signal, then + * we will have to modify the return state as well as the state + * in the TCB. */ else { - FAR uint32_t *saved_pc = (FAR uint32_t *)&tcb->xcp.regs[REG_PC]; + FAR uint32_t *current_pc = + (FAR uint32_t *)&up_current_regs()[REG_PC]; - /* Save the return lr and cpsr and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. + /* Save the return address and interrupt state. These will be + * restored by the signal trampoline after the signals have + * been delivered. */ - tcb->xcp.saved_pc = *saved_pc; - tcb->xcp.saved_i = tcb->xcp.regs[REG_FLAGS]; + tcb->xcp.saved_pc = *current_pc; + tcb->xcp.saved_i = up_current_regs()[REG_FLAGS]; /* Then set up to vector to the trampoline with interrupts * disabled */ - *saved_pc = (uint32_t)z16_sigdeliver; - tcb->xcp.regs[REG_FLAGS] = 0; + *current_pc = (uint32_t)z16_sigdeliver; + up_current_regs()[REG_FLAGS] = 0; + + /* And make sure that the saved context in the TCB is the + * same as the interrupt return context. 
+ */ + + z16_copystate(tcb->xcp.regs, up_current_regs()); } } + + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler + * and the running task is signalling some non-running task. + */ + + else + { + FAR uint32_t *saved_pc = (FAR uint32_t *)&tcb->xcp.regs[REG_PC]; + + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.saved_pc = *saved_pc; + tcb->xcp.saved_i = tcb->xcp.regs[REG_FLAGS]; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + *saved_pc = (uint32_t)z16_sigdeliver; + tcb->xcp.regs[REG_FLAGS] = 0; + } } diff --git a/arch/z16/src/common/z16_sigdeliver.c b/arch/z16/src/common/z16_sigdeliver.c index 68e17b0afdb6a..f0d037c8d777f 100644 --- a/arch/z16/src/common/z16_sigdeliver.c +++ b/arch/z16/src/common/z16_sigdeliver.c @@ -60,8 +60,8 @@ void z16_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -77,7 +77,7 @@ void z16_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -97,9 +97,9 @@ void z16_sigdeliver(void) * could be modified by a hostile program. 
*/ - regs32[REG_PC / 2] = rtcb->xcp.saved_pc; - regs[REG_FLAGS] = rtcb->xcp.saved_i; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs32[REG_PC / 2] = rtcb->xcp.saved_pc; + regs[REG_FLAGS] = rtcb->xcp.saved_i; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/arch/z80/include/ez80/irq.h b/arch/z80/include/ez80/irq.h index 351f6f94c1923..83f3bed2ff5f2 100644 --- a/arch/z80/include/ez80/irq.h +++ b/arch/z80/include/ez80/irq.h @@ -246,12 +246,6 @@ struct xcptcontext chipreg_t regs[XCPTCONTEXT_REGS]; - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - CODE void *sigdeliver; /* Actual type is sig_deliver_t */ - /* The following retains that state during signal execution * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/z80/include/z180/irq.h b/arch/z80/include/z180/irq.h index 3fb3b25cd032e..125422714a6d5 100644 --- a/arch/z80/include/z180/irq.h +++ b/arch/z80/include/z180/irq.h @@ -173,12 +173,6 @@ struct xcptcontext chipreg_t regs[XCPTCONTEXT_REGS]; - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - CODE void *sigdeliver; /* Actual type is sig_deliver_t */ - /* The following retains that state during signal execution * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/z80/include/z8/irq.h b/arch/z80/include/z8/irq.h index b50fe0f04cfb6..7faff99d001d7 100644 --- a/arch/z80/include/z8/irq.h +++ b/arch/z80/include/z8/irq.h @@ -304,12 +304,6 @@ struct xcptcontext chipreg_t regs[XCPTCONTEXT_REGS]; - /* The following function pointer is non-zero if there - * are pending signals to be processed. 
- */ - - CODE void *sigdeliver; /* Actual type is sig_deliver_t */ - /* The following retains that state during signal execution * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/z80/include/z80/irq.h b/arch/z80/include/z80/irq.h index 861fbb2822545..e0308d9707f1d 100644 --- a/arch/z80/include/z80/irq.h +++ b/arch/z80/include/z80/irq.h @@ -88,12 +88,6 @@ struct xcptcontext chipreg_t regs[XCPTCONTEXT_REGS]; - /* The following function pointer is non-zero if there - * are pending signals to be processed. - */ - - CODE void *sigdeliver; /* Actual type is sig_deliver_t */ - /* The following retains that state during signal execution. * * REVISIT: Because there is only one copy of these save areas, diff --git a/arch/z80/src/ez80/ez80_schedulesigaction.c b/arch/z80/src/ez80/ez80_schedulesigaction.c index d0d0be93b9bb5..e96611102e0a9 100644 --- a/arch/z80/src/ez80/ez80_schedulesigaction.c +++ b/arch/z80/src/ez80/ez80_schedulesigaction.c @@ -43,8 +43,7 @@ * Name: ez80_sigsetup ****************************************************************************/ -static void ez80_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, - FAR chipreg_t *regs) +static void ez80_sigsetup(FAR struct tcb_s *tcb, FAR chipreg_t *regs) { /* Save the return address and interrupt state. These will be restored by * the signal trampoline after the signals have been delivered. 
@@ -99,66 +98,60 @@ static void ez80_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, * ****************************************************************************/ -void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(FAR struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=0x%06" PRIx32 "\n", tcb, (uint32_t)sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to the currently executing task. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. */ - if (tcb == this_task()) + if (!IN_INTERRUPT()) { - /* CASE 1: We are not in an interrupt handler and a task is - * signalling itself for some reason. - */ + /* In this case just deliver the signal now. */ - if (!IN_INTERRUPT()) - { - /* In this case just deliver the signal now. */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } + /* CASE 2: We are in an interrupt handler AND the interrupted task + * is the same as the one that must receive the signal, then we + * will have to modify the return state as well as the state in + * the TCB. + */ - /* CASE 2: We are in an interrupt handler AND the interrupted task - * is the same as the one that must receive the signal, then we - * will have to modify the return state as well as the state in - * the TCB. + else + { + /* Set up to vector to the trampoline with interrupts + * disabled. */ - else - { - /* Set up to vector to the trampoline with interrupts - * disabled. 
- */ - - ez80_sigsetup(tcb, sigdeliver, (chipreg_t *)IRQ_STATE()); + ez80_sigsetup(tcb, (chipreg_t *)IRQ_STATE()); - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ - SAVE_IRQCONTEXT(tcb); - } + SAVE_IRQCONTEXT(tcb); } + } - /* Otherwise, we are (1) signaling a task is not running from an - * interrupt handler or (2) we are not in an interrupt handler and the - * running task is signaling some non-running task. - */ + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signaling some non-running task. + */ - else - { - /* Set up to vector to the trampoline with interrupts disabled. */ + else + { + /* Set up to vector to the trampoline with interrupts disabled. */ - ez80_sigsetup(tcb, sigdeliver, tcb->xcp.regs); - } + ez80_sigsetup(tcb, tcb->xcp.regs); } } diff --git a/arch/z80/src/ez80/ez80_sigdeliver.c b/arch/z80/src/ez80/ez80_sigdeliver.c index cff88ad2e3a75..3047267075cf3 100644 --- a/arch/z80/src/ez80/ez80_sigdeliver.c +++ b/arch/z80/src/ez80/ez80_sigdeliver.c @@ -61,8 +61,8 @@ void z80_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -78,7 +78,7 @@ void z80_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -98,9 +98,9 @@ void z80_sigdeliver(void) * could be modified by a hostile program. 
*/ - regs[XCPT_PC] = rtcb->xcp.saved_pc; - regs[XCPT_I] = rtcb->xcp.saved_i; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[XCPT_PC] = rtcb->xcp.saved_pc; + regs[XCPT_I] = rtcb->xcp.saved_i; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Modify the saved return state with the actual saved values in the * TCB. This depends on the fact that nested signal handling is diff --git a/arch/z80/src/z180/z180_schedulesigaction.c b/arch/z80/src/z180/z180_schedulesigaction.c index 21f40053b5cd0..c577745c3f910 100644 --- a/arch/z80/src/z180/z180_schedulesigaction.c +++ b/arch/z80/src/z180/z180_schedulesigaction.c @@ -46,8 +46,7 @@ * Name: z180_sigsetup ****************************************************************************/ -static void z180_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, - FAR chipreg_t *regs) +static void z180_sigsetup(FAR struct tcb_s *tcb, FAR chipreg_t *regs) { /* Save the return address and interrupt state. These will be restored by * the signal trampoline after the signals have been delivered. @@ -102,67 +101,61 @@ static void z180_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, * ****************************************************************************/ -void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(FAR struct tcb_s *tcb) { - _info("tcb=%p sigdeliver=0x%04x\n", tcb, (uint16_t)sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to the currently executing task. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. 
+ /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. */ - if (tcb == this_task()) + if (!IN_INTERRUPT()) { - /* CASE 1: We are not in an interrupt handler and a task is - * signalling itself for some reason. - */ + /* In this case just deliver the signal now. */ - if (!IN_INTERRUPT()) - { - /* In this case just deliver the signal now. */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } + /* CASE 2: We are in an interrupt handler AND the interrupted task + * is the same as the one that must receive the signal, then we + * will have to modify the return state as well as the state in + * the TCB. + */ - /* CASE 2: We are in an interrupt handler AND the interrupted task - * is the same as the one that must receive the signal, then we - * will have to modify the return state as well as the state in - * the TCB. + else + { + /* Set up to vector to the trampoline with interrupts + * disabled. */ - else - { - /* Set up to vector to the trampoline with interrupts - * disabled. - */ - - z180_sigsetup(tcb, sigdeliver, IRQ_STATE()); + z180_sigsetup(tcb, IRQ_STATE()); - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ - SAVE_IRQCONTEXT(tcb); - } + SAVE_IRQCONTEXT(tcb); } + } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. - */ + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ - else - { - /* Set up to vector to the trampoline with interrupts disabled. 
*/ + else + { + /* Set up to vector to the trampoline with interrupts disabled. */ - z180_sigsetup(tcb, sigdeliver, tcb->xcp.regs); - } + z180_sigsetup(tcb, tcb->xcp.regs); } } diff --git a/arch/z80/src/z180/z180_sigdeliver.c b/arch/z80/src/z180/z180_sigdeliver.c index 10979e651543a..7bebc5e4c60a6 100644 --- a/arch/z80/src/z180/z180_sigdeliver.c +++ b/arch/z80/src/z180/z180_sigdeliver.c @@ -58,8 +58,8 @@ void z80_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -75,7 +75,7 @@ void z80_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -95,9 +95,9 @@ void z80_sigdeliver(void) * could be modified by a hostile program. */ - regs[XCPT_PC] = rtcb->xcp.saved_pc; - regs[XCPT_I] = rtcb->xcp.saved_i; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[XCPT_PC] = rtcb->xcp.saved_pc; + regs[XCPT_I] = rtcb->xcp.saved_i; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. 
*/ diff --git a/arch/z80/src/z8/z8_schedulesigaction.c b/arch/z80/src/z8/z8_schedulesigaction.c index b018bbe3aac89..cddf5c6af3880 100644 --- a/arch/z80/src/z8/z8_schedulesigaction.c +++ b/arch/z80/src/z8/z8_schedulesigaction.c @@ -43,8 +43,7 @@ * Name: z8_sigsetup ****************************************************************************/ -static void z8_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, - FAR chipreg_t *regs) +static void z8_sigsetup(FAR struct tcb_s *tcb, FAR chipreg_t *regs) { /* Save the return address and interrupt state. These will be restored by * the signal trampoline after the signals have been delivered. @@ -99,60 +98,33 @@ static void z8_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, * ****************************************************************************/ -void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(FAR struct tcb_s *tcb) { - sinfo("tcb=%p sigdeliver=0x%04x\n", tcb, (uint16_t)sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to the currently executing task. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. */ - if (tcb == this_task()) + if (!IN_INTERRUPT()) { - /* CASE 1: We are not in an interrupt handler and a task is - * signalling itself for some reason. - */ - - if (!IN_INTERRUPT()) - { - /* In this case just deliver the signal now. 
*/ - - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } - - /* CASE 2: We are in an interrupt handler AND the interrupted task - * is the same as the one that must receive the signal, then we - * will have to modify the return state as well as the state in - * the TCB. - */ - - else - { - /* Set up to vector to the trampoline with interrupts - * disabled. - */ - - z8_sigsetup(tcb, sigdeliver, IRQ_STATE()); - - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* In this case just deliver the signal now. */ - SAVE_IRQCONTEXT(tcb); - } + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. + /* CASE 2: We are in an interrupt handler AND the interrupted task + * is the same as the one that must receive the signal, then we + * will have to modify the return state as well as the state in + * the TCB. */ else @@ -161,7 +133,28 @@ void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver) * disabled. */ - z8_sigsetup(tcb, sigdeliver, tcb->xcp.regs); + z8_sigsetup(tcb, IRQ_STATE()); + + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ + + SAVE_IRQCONTEXT(tcb); } } + + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ + + else + { + /* Set up to vector to the trampoline with interrupts + * disabled. 
+ */ + + z8_sigsetup(tcb, tcb->xcp.regs); + } } diff --git a/arch/z80/src/z8/z8_sigdeliver.c b/arch/z80/src/z8/z8_sigdeliver.c index 3c93c08536b44..2c13db2e0c09e 100644 --- a/arch/z80/src/z8/z8_sigdeliver.c +++ b/arch/z80/src/z8/z8_sigdeliver.c @@ -77,8 +77,8 @@ void z80_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. */ @@ -94,7 +94,7 @@ void z80_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -114,9 +114,9 @@ void z80_sigdeliver(void) * could be modified by a hostile program. */ - regs[XCPT_PC] = rtcb->xcp.saved_pc; - regs[XCPT_IRQCTL] = rtcb->xcp.saved_irqctl; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[XCPT_PC] = rtcb->xcp.saved_pc; + regs[XCPT_IRQCTL] = rtcb->xcp.saved_irqctl; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/arch/z80/src/z80/z80_schedulesigaction.c b/arch/z80/src/z80/z80_schedulesigaction.c index 4bfba686db88b..f03690e784a65 100644 --- a/arch/z80/src/z80/z80_schedulesigaction.c +++ b/arch/z80/src/z80/z80_schedulesigaction.c @@ -44,8 +44,7 @@ * Name: z80_sigsetup ****************************************************************************/ -static void z80_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, - FAR chipreg_t *regs) +static void z80_sigsetup(FAR struct tcb_s *tcb, FAR chipreg_t *regs) { /* Save the return address and interrupt state. 
These will be restored by * the signal trampoline after the signals have been delivered. @@ -100,67 +99,61 @@ static void z80_sigsetup(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver, * ****************************************************************************/ -void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver) +void up_schedule_sigaction(FAR struct tcb_s *tcb) { - _info("tcb=%p sigdeliver=0x%04x\n", tcb, (uint16_t)sigdeliver); + sinfo("tcb=%p, rtcb=%p current_regs=%p\n", tcb, + this_task(), up_current_regs()); - /* Refuse to handle nested signal actions */ + /* First, handle some special cases when the signal is being delivered + * to the currently executing task. + */ - if (tcb->xcp.sigdeliver == NULL) + if (tcb == this_task()) { - tcb->xcp.sigdeliver = sigdeliver; - - /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. */ - if (tcb == this_task()) + if (!IN_INTERRUPT()) { - /* CASE 1: We are not in an interrupt handler and a task is - * signalling itself for some reason. - */ + /* In this case just deliver the signal now. */ - if (!IN_INTERRUPT()) - { - /* In this case just deliver the signal now. */ + (tcb->sigdeliver)(tcb); + tcb->sigdeliver = NULL; + } - sigdeliver(tcb); - tcb->xcp.sigdeliver = NULL; - } + /* CASE 2: We are in an interrupt handler AND the interrupted task + * is the same as the one that must receive the signal, then we + * will have to modify the return state as well as the state in + * the TCB. + */ - /* CASE 2: We are in an interrupt handler AND the interrupted task - * is the same as the one that must receive the signal, then we - * will have to modify the return state as well as the state in - * the TCB. + else + { + /* Set up to vector to the trampoline with interrupts + * disabled. 
*/ - else - { - /* Set up to vector to the trampoline with interrupts - * disabled. - */ - - z80_sigsetup(tcb, sigdeliver, IRQ_STATE()); + z80_sigsetup(tcb, IRQ_STATE()); - /* And make sure that the saved context in the TCB - * is the same as the interrupt return context. - */ + /* And make sure that the saved context in the TCB + * is the same as the interrupt return context. + */ - SAVE_IRQCONTEXT(tcb); - } + SAVE_IRQCONTEXT(tcb); } + } - /* Otherwise, we are (1) signaling a task is not running - * from an interrupt handler or (2) we are not in an - * interrupt handler and the running task is signalling - * some non-running task. - */ + /* Otherwise, we are (1) signaling a task is not running + * from an interrupt handler or (2) we are not in an + * interrupt handler and the running task is signalling + * some non-running task. + */ - else - { - /* Set up to vector to the trampoline with interrupts disabled. */ + else + { + /* Set up to vector to the trampoline with interrupts disabled. */ - z80_sigsetup(tcb, sigdeliver, tcb->xcp.regs); - } + z80_sigsetup(tcb, tcb->xcp.regs); } } diff --git a/arch/z80/src/z80/z80_sigdeliver.c b/arch/z80/src/z80/z80_sigdeliver.c index 3d793cadc7d78..ce2a6d9cbbadc 100644 --- a/arch/z80/src/z80/z80_sigdeliver.c +++ b/arch/z80/src/z80/z80_sigdeliver.c @@ -58,8 +58,8 @@ void z80_sigdeliver(void) board_autoled_on(LED_SIGNAL); sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n", - rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head); - DEBUGASSERT(rtcb->xcp.sigdeliver != NULL); + rtcb, rtcb->sigdeliver, rtcb->sigpendactionq.head); + DEBUGASSERT(rtcb->sigdeliver != NULL); /* Save the return state on the stack. 
*/ @@ -75,7 +75,7 @@ void z80_sigdeliver(void) /* Deliver the signals */ - ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb); + (rtcb->sigdeliver)(rtcb); /* Output any debug messages BEFORE restoring errno (because they may * alter errno), then disable interrupts again and restore the original @@ -95,9 +95,9 @@ void z80_sigdeliver(void) * could be modified by a hostile program. */ - regs[XCPT_PC] = rtcb->xcp.saved_pc; - regs[XCPT_I] = rtcb->xcp.saved_i; - rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */ + regs[XCPT_PC] = rtcb->xcp.saved_pc; + regs[XCPT_I] = rtcb->xcp.saved_i; + rtcb->sigdeliver = NULL; /* Allows next handler to be scheduled */ /* Then restore the correct state for this thread of execution. */ diff --git a/boards/boardctl.c b/boards/boardctl.c index 388e1e00a7844..9e652560d6c02 100644 --- a/boards/boardctl.c +++ b/boards/boardctl.c @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -394,6 +395,7 @@ int boardctl(unsigned int cmd, uintptr_t arg) case BOARDIOC_POWEROFF: { reboot_notifier_call_chain(SYS_POWER_OFF, (FAR void *)arg); + up_flush_dcache_all(); ret = board_power_off((int)arg); } break; @@ -410,6 +412,7 @@ int boardctl(unsigned int cmd, uintptr_t arg) case BOARDIOC_RESET: { reboot_notifier_call_chain(SYS_RESTART, (FAR void *)arg); + up_flush_dcache_all(); ret = board_reset((int)arg); } break; diff --git a/include/nuttx/arch.h b/include/nuttx/arch.h index 77b4e392269a1..c9b46b126588f 100644 --- a/include/nuttx/arch.h +++ b/include/nuttx/arch.h @@ -101,7 +101,6 @@ * Public Types ****************************************************************************/ -typedef CODE void (*sig_deliver_t)(FAR struct tcb_s *tcb); typedef CODE void (*phy_enable_t)(bool enable); typedef CODE void (*initializer_t)(void); typedef CODE void (*debug_callback_t)(int type, FAR void *addr, size_t size, @@ -542,7 +541,7 @@ int up_backtrace(FAR struct tcb_s *tcb, * 
****************************************************************************/ -void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver); +void up_schedule_sigaction(FAR struct tcb_s *tcb); /**************************************************************************** * Name: up_task_start @@ -1745,18 +1744,6 @@ void up_secure_irq(int irq, bool secure); # define up_secure_irq(i, s) #endif -#ifdef CONFIG_SMP -/**************************************************************************** - * Name: up_send_smp_call - * - * Description: - * Send smp call to target cpu - * - ****************************************************************************/ - -void up_send_smp_call(cpu_set_t cpuset); -#endif - /**************************************************************************** * Name: up_secure_irq_all * @@ -2289,36 +2276,9 @@ int up_cpu_idlestack(int cpu, FAR struct tcb_s *tcb, size_t stack_size); int up_cpu_start(int cpu); #endif -/**************************************************************************** - * Name: up_cpu_pause - * - * Description: - * Save the state of the current task at the head of the - * g_assignedtasks[cpu] task list and then pause task execution on the - * CPU. - * - * This function is called by the OS when the logic executing on one CPU - * needs to modify the state of the g_assignedtasks[cpu] list for another - * CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be paused. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; up_cpu_resume() must be called - * later while still within the same critical section. 
- * - ****************************************************************************/ - #ifdef CONFIG_SMP -int up_cpu_pause(int cpu); -#endif - /**************************************************************************** - * Name: up_cpu_pause_async + * Name: up_send_smp_sched * * Description: * pause task execution on the CPU @@ -2336,128 +2296,17 @@ int up_cpu_pause(int cpu); * ****************************************************************************/ -#ifdef CONFIG_SMP -int up_cpu_pause_async(int cpu); -#endif - -/**************************************************************************** - * Name: up_cpu_pausereq - * - * Description: - * Return true if a pause request is pending for this CPU. - * - * Input Parameters: - * cpu - The index of the CPU to be queried - * - * Returned Value: - * true = a pause request is pending. - * false = no pasue request is pending. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -bool up_cpu_pausereq(int cpu); -#endif - -/**************************************************************************** - * Name: up_cpu_paused_save - * - * Description: - * Handle a pause request from another CPU. Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending - * pause operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -int up_cpu_paused_save(void); -#endif +int up_send_smp_sched(int cpu); /**************************************************************************** - * Name: up_cpu_paused - * - * Description: - * Handle a pause request from another CPU. 
Normally, this logic is - * executed from interrupt handling logic within the architecture-specific - * However, it is sometimes necessary to perform the pending pause - * operation in other contexts where the interrupt cannot be taken - * in order to avoid deadlocks. - * - * This function performs the following operations: - * - * 1. It saves the current task state at the head of the current assigned - * task list. - * 2. It waits on a spinlock, then - * 3. Returns from interrupt, restoring the state of the new task at the - * head of the ready to run list. - * - * Input Parameters: - * cpu - The index of the CPU to be paused - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -int up_cpu_paused(int cpu); -#endif - -/**************************************************************************** - * Name: up_cpu_paused_restore - * - * Description: - * Restore the state of the CPU after it was paused via up_cpu_pause(), - * and resume normal tasking. - * - * Input Parameters: - * None - * - * Returned Value: - * On success, OK is returned. Otherwise, a negated errno value indicating - * the nature of the failure is returned. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -int up_cpu_paused_restore(void); -#endif - -/**************************************************************************** - * Name: up_cpu_resume + * Name: up_send_smp_call * * Description: - * Restart the cpu after it was paused via up_cpu_pause(), restoring the - * state of the task at the head of the g_assignedtasks[cpu] list, and - * resume normal tasking. - * - * This function is called after up_cpu_pause in order to resume operation - * of the CPU after modifying its g_assignedtasks[cpu] list. 
- * - * Input Parameters: - * cpu - The index of the CPU being resumed. - * - * Returned Value: - * Zero on success; a negated errno value on failure. - * - * Assumptions: - * Called from within a critical section; up_cpu_pause() must have - * previously been called within the same critical section. + * Send smp call to target cpu * ****************************************************************************/ -#ifdef CONFIG_SMP -int up_cpu_resume(int cpu); +void up_send_smp_call(cpu_set_t cpuset); #endif /**************************************************************************** diff --git a/include/nuttx/init.h b/include/nuttx/init.h index 63e5b266b41a1..4d0dc4023f36c 100644 --- a/include/nuttx/init.h +++ b/include/nuttx/init.h @@ -69,7 +69,8 @@ enum nx_initstate_e * initialization. */ OSINIT_OSREADY = 5, /* The OS is fully initialized and multi-tasking is * active. */ - OSINIT_IDLELOOP = 6 /* The OS enter idle loop */ + OSINIT_IDLELOOP = 6, /* The OS enter idle loop. */ + OSINIT_PANIC = 7 /* Fatal error happened. */ }; /**************************************************************************** diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h index 2177440c98604..6ee2dd8ed0170 100644 --- a/include/nuttx/irq.h +++ b/include/nuttx/irq.h @@ -78,9 +78,7 @@ do \ { \ g_cpu_irqset = 0; \ - SP_DMB(); \ - g_cpu_irqlock = SP_UNLOCKED; \ - SP_DSB(); \ + spin_unlock_wo_note(&g_cpu_irqlock); \ } \ while (0) #endif diff --git a/include/nuttx/sched.h b/include/nuttx/sched.h index e607802bb6aca..37b06cb6e4723 100644 --- a/include/nuttx/sched.h +++ b/include/nuttx/sched.h @@ -289,6 +289,7 @@ typedef enum tstate_e tstate_t; /* The following is the form of a thread start-up function */ typedef CODE void (*start_t)(void); +typedef CODE void (*sig_deliver_t)(FAR struct tcb_s *tcb); /* This is the entry point into the main thread of the task or into a created * pthread within the task. 
@@ -706,6 +707,11 @@ struct tcb_s struct xcptcontext xcp; /* Interrupt register save area */ + /* The following function pointer is non-zero if there are pending signals + * to be processed. + */ + + sig_deliver_t sigdeliver; #if CONFIG_TASK_NAME_SIZE > 0 char name[CONFIG_TASK_NAME_SIZE + 1]; /* Task name (with NUL terminator) */ #endif diff --git a/sched/Kconfig b/sched/Kconfig index 4fc761319411f..bf210765f332f 100644 --- a/sched/Kconfig +++ b/sched/Kconfig @@ -1989,3 +1989,11 @@ config SCHED_EVENTS This option enables event objects. Threads may wait on event objects for specific events, but both threads and ISRs may deliver events to event objects. + +config ASSERT_PAUSE_CPU_TIMEOUT + int "Timeout in milisecond to pause another CPU when assert" + default 2000 + depends on SMP + ---help--- + Timeout in milisecond to pause another CPU when assert. Only available + when SMP is enabled. diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c index 3397741a76f7a..a0432f7fe9202 100644 --- a/sched/irq/irq_csection.c +++ b/sched/irq/irq_csection.c @@ -75,96 +75,6 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS]; * Private Functions ****************************************************************************/ -/**************************************************************************** - * Name: irq_waitlock - * - * Description: - * Spin to get g_cpu_irqlock, handling a known deadlock condition: - * - * A deadlock may occur if enter_critical_section is called from an - * interrupt handler. Suppose: - * - * - CPUn is in a critical section and has the g_cpu_irqlock spinlock. - * - CPUm takes an interrupt and attempts to enter the critical section. - * - It spins waiting on g_cpu_irqlock with interrupts disabled. - * - CPUn calls up_cpu_pause() to pause operation on CPUm. This will - * issue an inter-CPU interrupt to CPUm - * - But interrupts are disabled on CPUm so the up_cpu_pause() is never - * handled, causing the deadlock. 
- * - * This same deadlock can occur in the normal tasking case: - * - * - A task on CPUn enters a critical section and has the g_cpu_irqlock - * spinlock. - * - Another task on CPUm attempts to enter the critical section but has - * to wait, spinning to get g_cpu_irqlock with interrupts disabled. - * - The task on CPUn causes a new task to become ready-to-run and the - * scheduler selects CPUm. CPUm is requested to pause via a pause - * interrupt. - * - But the task on CPUm is also attempting to enter the critical - * section. Since it is spinning with interrupts disabled, CPUm cannot - * process the pending pause interrupt, causing the deadlock. - * - * This function detects this deadlock condition while spinning with - * interrupts disabled. - * - * Input Parameters: - * cpu - The index of CPU that is trying to enter the critical section. - * - * Returned Value: - * True: The g_cpu_irqlock spinlock has been taken. - * False: The g_cpu_irqlock spinlock has not been taken yet, but there is - * a pending pause interrupt request. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -static inline_function bool irq_waitlock(int cpu) -{ -#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS - FAR struct tcb_s *tcb = current_task(cpu); - - /* Notify that we are waiting for a spinlock */ - - sched_note_spinlock(tcb, &g_cpu_irqlock, NOTE_SPINLOCK_LOCK); -#endif - - /* Duplicate the spin_lock() logic from spinlock.c, but adding the check - * for the deadlock condition. - */ - - while (!spin_trylock_wo_note(&g_cpu_irqlock)) - { - /* Is a pause request pending? */ - - if (up_cpu_pausereq(cpu)) - { - /* Yes.. some other CPU is requesting to pause this CPU! - * Abort the wait and return false. 
- */ - -#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS - /* Notify that we have aborted the wait for the spinlock */ - - sched_note_spinlock(tcb, &g_cpu_irqlock, NOTE_SPINLOCK_ABORT); -#endif - - return false; - } - } - - /* We have g_cpu_irqlock! */ - -#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS - /* Notify that we have the spinlock */ - - sched_note_spinlock(tcb, &g_cpu_irqlock, NOTE_SPINLOCK_LOCKED); -#endif - - return true; -} -#endif - /**************************************************************************** * Public Functions ****************************************************************************/ @@ -198,7 +108,6 @@ irqstate_t enter_critical_section(void) * the local CPU. */ -try_again: ret = up_irq_save(); /* If called from an interrupt handler, then just take the spinlock. @@ -259,8 +168,6 @@ irqstate_t enter_critical_section(void) else { - int paused = false; - /* Make sure that the g_cpu_irqset was not already set * by previous logic on this CPU that was executed by the * interrupt handler. We know that the bit in g_cpu_irqset @@ -274,43 +181,14 @@ irqstate_t enter_critical_section(void) * no longer blocked by the critical section). */ -try_again_in_irq: - if (!irq_waitlock(cpu)) - { - /* We are in a deadlock condition due to a pending - * pause request interrupt. Break the deadlock by - * handling the pause request now. - */ - - if (!paused) - { - up_cpu_paused_save(); - } - - DEBUGVERIFY(up_cpu_paused(cpu)); - paused = true; - - DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0); - - /* NOTE: Here, this CPU does not hold g_cpu_irqlock, - * so call irq_waitlock(cpu) to acquire g_cpu_irqlock. 
- */ - - goto try_again_in_irq; - } - - cpu_irqlock_set(cpu); + spin_lock(&g_cpu_irqlock); + cpu_irqlock_set(cpu); } /* In any event, the nesting count is now one */ g_cpu_nestcount[cpu] = 1; - if (paused) - { - up_cpu_paused_restore(); - } - DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) && (g_cpu_irqset & (1 << cpu)) != 0); } @@ -354,18 +232,7 @@ irqstate_t enter_critical_section(void) DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0); - if (!irq_waitlock(cpu)) - { - /* We are in a deadlock condition due to a pending pause - * request interrupt. Re-enable interrupts on this CPU - * and try again. Briefly re-enabling interrupts should - * be sufficient to permit processing the pending pause - * request. - */ - - up_irq_restore(ret); - goto try_again; - } + spin_lock(&g_cpu_irqlock); /* Then set the lock count to 1. * diff --git a/sched/misc/assert.c b/sched/misc/assert.c index fb9c1a91077c5..53b16d297ef1e 100644 --- a/sched/misc/assert.c +++ b/sched/misc/assert.c @@ -28,12 +28,16 @@ #include #include +#include #include -#include +#include +#include #include #include +#include #include #include +#include #ifdef CONFIG_ARCH_LEDS # include #endif @@ -41,11 +45,13 @@ #include #include #include +#include #include #include #include #include +#include #include #include "irq/irq.h" @@ -93,8 +99,13 @@ * Private Data ****************************************************************************/ -static uintptr_t -g_last_regs[XCPTCONTEXT_REGS] aligned_data(XCPTCONTEXT_ALIGN); +#ifdef CONFIG_SMP +static bool g_cpu_paused[CONFIG_SMP_NCPUS]; +#endif + +static uintptr_t g_last_regs[CONFIG_SMP_NCPUS][XCPTCONTEXT_REGS] + aligned_data(XCPTCONTEXT_ALIGN); + static FAR const char * const g_policy[4] = { "FIFO", "RR", "SPORADIC" @@ -108,8 +119,6 @@ static FAR const char * const g_ttypenames[4] = "Invalid" }; -static bool g_fatal_assert = false; - /**************************************************************************** * Private Functions 
****************************************************************************/ @@ -212,8 +221,14 @@ static void dump_stack(FAR const char *tag, uintptr_t sp, static void dump_stacks(FAR struct tcb_s *rtcb, uintptr_t sp) { +#ifdef CONFIG_SMP + int cpu = rtcb->cpu; +#else + int cpu = this_cpu(); + UNUSED(cpu); +#endif #if CONFIG_ARCH_INTERRUPTSTACK > 0 - uintptr_t intstack_base = up_get_intstackbase(this_cpu()); + uintptr_t intstack_base = up_get_intstackbase(cpu); size_t intstack_size = CONFIG_ARCH_INTERRUPTSTACK; uintptr_t intstack_top = intstack_base + intstack_size; uintptr_t intstack_sp = 0; @@ -262,13 +277,16 @@ static void dump_stacks(FAR struct tcb_s *rtcb, uintptr_t sp) intstack_base, intstack_size, #ifdef CONFIG_STACK_COLORATION - up_check_intstack(this_cpu()) + up_check_intstack(cpu) #else 0 #endif ); - tcbstack_sp = up_getusrsp((FAR void *)up_current_regs()); + /* Try to restore SP from current_regs if assert from interrupt. */ + + tcbstack_sp = up_interrupt_context() ? + up_getusrsp((FAR void *)up_current_regs()) : 0; if (tcbstack_sp < tcbstack_base || tcbstack_sp >= tcbstack_top) { tcbstack_sp = 0; @@ -363,7 +381,7 @@ static void dump_task(FAR struct tcb_s *tcb, FAR void *arg) #ifdef CONFIG_SMP " %4d" #endif - " %3d %-8s %-7s %c" + " %3d %-8s %-7s %-3c" " %-18s" " " SIGSET_FMT " %p" @@ -542,110 +560,91 @@ static void dump_deadlock(void) } #endif +#ifdef CONFIG_SMP + +/**************************************************************************** + * Name: pause_cpu_handler + ****************************************************************************/ + +static noreturn_function int pause_cpu_handler(FAR void *arg) +{ + memcpy(g_last_regs[this_cpu()], up_current_regs(), sizeof(g_last_regs[0])); + g_cpu_paused[this_cpu()] = true; + up_flush_dcache_all(); + while (1); +} + /**************************************************************************** * Name: pause_all_cpu ****************************************************************************/ -#ifdef 
CONFIG_SMP static void pause_all_cpu(void) { - int cpu; + cpu_set_t cpus = (1 << CONFIG_SMP_NCPUS) - 1; + int delay = CONFIG_ASSERT_PAUSE_CPU_TIMEOUT; - for (cpu = 0; cpu < CONFIG_SMP_NCPUS; cpu++) + CPU_CLR(this_cpu(), &cpus); + nxsched_smp_call(cpus, pause_cpu_handler, NULL, false); + g_cpu_paused[this_cpu()] = true; + + /* Check if all CPUs paused with timeout */ + + cpus = 0; + while (delay-- > 0 && cpus < CONFIG_SMP_NCPUS) { - if (cpu != this_cpu()) + if (g_cpu_paused[cpus]) + { + cpus++; + } + else { - up_cpu_pause(cpu); + up_mdelay(1); } } } #endif -/**************************************************************************** - * Public Functions - ****************************************************************************/ +static void dump_running_task(FAR struct tcb_s *rtcb, FAR void *regs) +{ + /* Register dump */ + + up_dump_register(regs); + +#ifdef CONFIG_ARCH_STACKDUMP + dump_stacks(rtcb, up_getusrsp(regs)); +#endif + + /* Show back trace */ + +#ifdef CONFIG_SCHED_BACKTRACE + sched_dumpstack(rtcb->pid); +#endif +} /**************************************************************************** - * Name: _assert + * Name: dump_assert_info + * + * Description: + * Dump basic information of assertion ****************************************************************************/ -void _assert(FAR const char *filename, int linenum, - FAR const char *msg, FAR void *regs) +static void dump_assert_info(FAR struct tcb_s *rtcb, + FAR const char *filename, int linenum, + FAR const char *msg, FAR void *regs) { - const bool os_ready = OSINIT_OS_READY(); - FAR struct tcb_s *rtcb = running_task(); #if CONFIG_TASK_NAME_SIZE > 0 FAR struct tcb_s *ptcb = NULL; #endif - struct panic_notifier_s notifier_data; struct utsname name; - irqstate_t flags; - bool fatal = true; #if CONFIG_TASK_NAME_SIZE > 0 - if (rtcb->group && !(rtcb->flags & TCB_FLAG_TTYPE_KERNEL)) - { - ptcb = nxsched_get_tcb(rtcb->group->tg_pid); - } -#endif - - flags = 0; /* suppress GCC warning */ - if 
(os_ready) - { - flags = enter_critical_section(); - } - - if (g_fatal_assert) - { - goto reset; - } - - if (os_ready && fatal) - { -#ifdef CONFIG_SMP - pause_all_cpu(); -#endif - } - - /* try to save current context if regs is null */ - - if (regs == NULL) - { - up_saveusercontext(g_last_regs); - regs = g_last_regs; - } - else - { - memcpy(g_last_regs, regs, sizeof(g_last_regs)); - } - -#if CONFIG_BOARD_RESET_ON_ASSERT < 2 - if (!up_interrupt_context() && + if (rtcb->group && (rtcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_KERNEL) { - fatal = false; - } - else -#endif - { - g_fatal_assert = true; + ptcb = nxsched_get_tcb(rtcb->group->tg_pid); } - - notifier_data.rtcb = rtcb; - notifier_data.regs = regs; - notifier_data.filename = filename; - notifier_data.linenum = linenum; - notifier_data.msg = msg; - panic_notifier_call_chain(fatal ? PANIC_KERNEL : PANIC_TASK, - ¬ifier_data); -#ifdef CONFIG_ARCH_LEDS - board_autoled_on(LED_ASSERTION); #endif - /* Flush any buffered SYSLOG data (from prior to the assertion) */ - - syslog_flush(); - uname(&name); _alert("Current Version: %s %s %s %s %s\n", name.sysname, name.nodename, @@ -672,42 +671,63 @@ void _assert(FAR const char *filename, int linenum, #endif rtcb->entry.main); - /* Register dump */ + /* Dump current CPU registers, running task stack and backtrace. 
*/ - up_dump_register(regs); + dump_running_task(rtcb, regs); -#ifdef CONFIG_ARCH_STACKDUMP - dump_stacks(rtcb, up_getusrsp(regs)); -#endif + /* Flush any buffered SYSLOG data */ - /* Show back trace */ + syslog_flush(); +} -#ifdef CONFIG_SCHED_BACKTRACE - sched_dumpstack(rtcb->pid); -#endif +/**************************************************************************** + * Name: dump_fatal_info + ****************************************************************************/ - /* Flush any buffered SYSLOG data */ +static void dump_fatal_info(FAR struct tcb_s *rtcb, + FAR const char *filename, int linenum, + FAR const char *msg, FAR void *regs) +{ +#ifdef CONFIG_SMP + int cpu; - syslog_flush(); + /* Dump other CPUs registers, running task stack and backtrace. */ - if (fatal) + for (cpu = 0; cpu < CONFIG_SMP_NCPUS; cpu++) { - dump_tasks(); + if (cpu == this_cpu()) + { + continue; + } + + _alert("Dump CPU%d: %s\n", cpu, + g_cpu_paused[cpu] ? "PAUSED" : "RUNNING"); + + if (g_cpu_paused[cpu]) + { + dump_running_task(g_running_tasks[cpu], g_last_regs[cpu]); + } + } +#endif + + /* Dump backtrace of other tasks. 
*/ + + dump_tasks(); #ifdef CONFIG_ARCH_DEADLOCKDUMP - /* Deadlock Dump */ + /* Deadlock Dump */ - dump_deadlock(); + dump_deadlock(); #endif #ifdef CONFIG_ARCH_USBDUMP - /* Dump USB trace data */ + /* Dump USB trace data */ - usbtrace_enumerate(assert_tracecallback, NULL); + usbtrace_enumerate(assert_tracecallback, NULL); #endif #ifdef CONFIG_BOARD_CRASHDUMP - board_crashdump(up_getsp(), rtcb, filename, linenum, msg, regs); + board_crashdump(up_getsp(), rtcb, filename, linenum, msg, regs); #endif #if defined(CONFIG_BOARD_COREDUMP_SYSLOG) || \ @@ -715,37 +735,139 @@ void _assert(FAR const char *filename, int linenum, /* Dump core information */ # ifdef CONFIG_BOARD_COREDUMP_FULL - coredump_dump(INVALID_PROCESS_ID); + coredump_dump(INVALID_PROCESS_ID); # else - coredump_dump(rtcb->pid); + coredump_dump(rtcb->pid); # endif #endif - /* Flush any buffered SYSLOG data */ + /* Flush any buffered SYSLOG data */ - syslog_flush(); - panic_notifier_call_chain(PANIC_KERNEL_FINAL, ¬ifier_data); + syslog_flush(); +} - reboot_notifier_call_chain(SYS_HALT, NULL); +/**************************************************************************** + * Name: reset_board + * + * Description: + * Reset board or stuck here to flash LED. It should never return. 
+ ****************************************************************************/ -reset: +static void reset_board(void) +{ #if CONFIG_BOARD_RESET_ON_ASSERT >= 1 - board_reset(CONFIG_BOARD_ASSERT_RESET_VALUE); + up_flush_dcache_all(); + board_reset(CONFIG_BOARD_ASSERT_RESET_VALUE); #else - for (; ; ) - { + for (; ; ) + { #ifdef CONFIG_ARCH_LEDS - /* FLASH LEDs a 2Hz */ + /* FLASH LEDs a 2Hz */ - board_autoled_on(LED_PANIC); - up_mdelay(250); - board_autoled_off(LED_PANIC); + board_autoled_on(LED_PANIC); + up_mdelay(250); + board_autoled_off(LED_PANIC); #endif - up_mdelay(250); + up_mdelay(250); + } +#endif +} + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: _assert + ****************************************************************************/ + +void _assert(FAR const char *filename, int linenum, + FAR const char *msg, FAR void *regs) +{ + const bool os_ready = OSINIT_OS_READY(); + FAR struct tcb_s *rtcb = running_task(); + struct panic_notifier_s notifier_data; + irqstate_t flags; + + if (g_nx_initstate == OSINIT_PANIC) + { + /* Already in fatal state, reset board directly. */ + + reset_board(); /* Should not return. */ + } + + flags = 0; /* suppress GCC warning */ + if (os_ready) + { + flags = enter_critical_section(); + } + +#if CONFIG_BOARD_RESET_ON_ASSERT < 2 + if (up_interrupt_context() || + (rtcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_KERNEL) +#endif + { + /* Fatal error, enter panic state. 
*/ + + g_nx_initstate = OSINIT_PANIC; + + /* Disable KASAN to avoid false positive */ + + kasan_stop(); + +#ifdef CONFIG_SMP + if (os_ready) + { + pause_all_cpu(); } #endif } + /* try to save current context if regs is null */ + + if (regs == NULL) + { + up_saveusercontext(g_last_regs[this_cpu()]); + regs = g_last_regs[this_cpu()]; + } + else + { + memcpy(g_last_regs[this_cpu()], regs, sizeof(g_last_regs[0])); + } + + notifier_data.rtcb = rtcb; + notifier_data.regs = regs; + notifier_data.filename = filename; + notifier_data.linenum = linenum; + notifier_data.msg = msg; + panic_notifier_call_chain(g_nx_initstate == OSINIT_PANIC + ? PANIC_KERNEL : PANIC_TASK, + ¬ifier_data); +#ifdef CONFIG_ARCH_LEDS + board_autoled_on(LED_ASSERTION); +#endif + + /* Flush any buffered SYSLOG data (from prior to the assertion) */ + + syslog_flush(); + + /* Dump basic info of assertion. */ + + dump_assert_info(rtcb, filename, linenum, msg, regs); + + if (g_nx_initstate == OSINIT_PANIC) + { + /* Dump fatal info of assertion. */ + + dump_fatal_info(rtcb, filename, linenum, msg, regs); + + panic_notifier_call_chain(PANIC_KERNEL_FINAL, ¬ifier_data); + + reboot_notifier_call_chain(SYS_HALT, NULL); + + reset_board(); /* Should not return. 
*/ + } + if (os_ready) { leave_critical_section(flags); diff --git a/sched/sched/CMakeLists.txt b/sched/sched/CMakeLists.txt index 29bf9c1f59988..6f18fb7cfbab1 100644 --- a/sched/sched/CMakeLists.txt +++ b/sched/sched/CMakeLists.txt @@ -59,7 +59,6 @@ if(CONFIG_SMP) APPEND SRCS sched_cpuselect.c - sched_cpupause.c sched_getcpu.c sched_getaffinity.c sched_setaffinity.c diff --git a/sched/sched/Make.defs b/sched/sched/Make.defs index b6ca4a7131b06..2ce6fe813a44f 100644 --- a/sched/sched/Make.defs +++ b/sched/sched/Make.defs @@ -37,7 +37,7 @@ CSRCS += sched_reprioritize.c endif ifeq ($(CONFIG_SMP),y) -CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c sched_process_delivered.c +CSRCS += sched_cpuselect.c sched_getcpu.c sched_process_delivered.c CSRCS += sched_getaffinity.c sched_setaffinity.c endif diff --git a/sched/sched/sched.h b/sched/sched/sched.h index 47ea3b448bd55..6f1e75860514f 100644 --- a/sched/sched/sched.h +++ b/sched/sched/sched.h @@ -401,11 +401,9 @@ static inline_function FAR struct tcb_s *this_task(void) } int nxsched_select_cpu(cpu_set_t affinity); -int nxsched_pause_cpu(FAR struct tcb_s *tcb); void nxsched_process_delivered(int cpu); #else # define nxsched_select_cpu(a) (0) -# define nxsched_pause_cpu(t) (-38) /* -ENOSYS */ #endif #define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0) diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c index 79c66a991299a..18032dc9b4aab 100644 --- a/sched/sched/sched_addreadytorun.c +++ b/sched/sched/sched_addreadytorun.c @@ -233,7 +233,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) g_delivertasks[cpu] = btcb; btcb->cpu = cpu; btcb->task_state = TSTATE_TASK_ASSIGNED; - up_cpu_pause_async(cpu); + up_send_smp_sched(cpu); } else { diff --git a/sched/sched/sched_process_delivered.c b/sched/sched/sched_process_delivered.c index bb90b0cf0eb4d..b0394354431a8 100644 --- a/sched/sched/sched_process_delivered.c +++ b/sched/sched/sched_process_delivered.c @@ -73,13 +73,7 @@ 
void nxsched_process_delivered(int cpu) if ((g_cpu_irqset & (1 << cpu)) == 0) { - while (!spin_trylock_wo_note(&g_cpu_irqlock)) - { - if (up_cpu_pausereq(cpu)) - { - up_cpu_paused(cpu); - } - } + spin_lock_wo_note(&g_cpu_irqlock); g_cpu_irqset |= (1 << cpu); } diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c index 5ff83b31cee26..e54c69d7b93a5 100644 --- a/sched/sched/sched_removereadytorun.c +++ b/sched/sched/sched_removereadytorun.c @@ -290,19 +290,9 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge) if (tcb->task_state == TSTATE_TASK_RUNNING) { - int me = this_cpu(); - int cpu = tcb->cpu; - if (cpu != me) - { - up_cpu_pause(tcb->cpu); - nxsched_remove_running(tcb); - up_cpu_resume(tcb->cpu); - } - else - { - nxsched_remove_running(tcb); - doswitch = true; - } + DEBUGASSERT(tcb->cpu == this_cpu()); + nxsched_remove_running(tcb); + return true; } else { diff --git a/sched/sched/sched_smp.c b/sched/sched/sched_smp.c index f43cb08ac74a3..ff2607964e473 100644 --- a/sched/sched/sched_smp.c +++ b/sched/sched/sched_smp.c @@ -26,6 +26,7 @@ #include +#include #include #include #include @@ -61,6 +62,7 @@ struct smp_call_data_s static sq_queue_t g_smp_call_queue[CONFIG_SMP_NCPUS]; static struct smp_call_data_s g_smp_call_data; +static spinlock_t g_smp_call_lock; /**************************************************************************** * Private Functions @@ -86,9 +88,9 @@ static void nxsched_smp_call_add(int cpu, { irqstate_t flags; - flags = enter_critical_section(); + flags = spin_lock_irqsave(&g_smp_call_lock); sq_addlast(&call_data->node[cpu], &g_smp_call_queue[cpu]); - leave_critical_section(flags); + spin_unlock_irqrestore(&g_smp_call_lock, flags); } /**************************************************************************** @@ -119,11 +121,10 @@ int nxsched_smp_call_handler(int irq, FAR void *context, FAR sq_entry_t *next; int cpu = this_cpu(); - irqstate_t flags = enter_critical_section(); + irqstate_t 
flags = spin_lock_irqsave(&g_smp_call_lock); call_queue = &g_smp_call_queue[cpu]; - up_cpu_paused_save(); sq_for_every_safe(call_queue, curr, next) { FAR struct smp_call_data_s *call_data = @@ -132,32 +133,31 @@ int nxsched_smp_call_handler(int irq, FAR void *context, sq_rem(&call_data->node[cpu], call_queue); - leave_critical_section(flags); + spin_unlock_irqrestore(&g_smp_call_lock, flags); ret = call_data->func(call_data->arg); - flags = enter_critical_section(); - if (call_data->cookie != NULL) + flags = spin_lock_irqsave(&g_smp_call_lock); + if (spin_is_locked(&call_data->lock)) { - if (ret < 0) + if (--call_data->refcount == 0) { - call_data->cookie->error = ret; + spin_unlock(&call_data->lock); } - - nxsem_post(&call_data->cookie->sem); } - if (spin_is_locked(&call_data->lock)) + if (call_data->cookie != NULL) { - if (--call_data->refcount == 0) + if (ret < 0) { - spin_unlock(&call_data->lock); + call_data->cookie->error = ret; } + + nxsem_post(&call_data->cookie->sem); } } - up_cpu_paused_restore(); - leave_critical_section(flags); + spin_unlock_irqrestore(&g_smp_call_lock, flags); return OK; } @@ -219,13 +219,20 @@ int nxsched_smp_call(cpu_set_t cpuset, nxsched_smp_call_t func, }; FAR struct smp_call_data_s *call_data; - int remote_cpus = 0; + int remote_cpus; int ret = OK; int i; + /* Cannot wait in interrupt context. 
*/ + + DEBUGASSERT(!(wait && up_interrupt_context())); + /* Prevent reschedule on another processor */ - sched_lock(); + if (!up_interrupt_context()) + { + sched_lock(); + } if (CPU_ISSET(this_cpu(), &cpuset)) { @@ -293,6 +300,10 @@ int nxsched_smp_call(cpu_set_t cpuset, nxsched_smp_call_t func, } out: - sched_unlock(); + if (!up_interrupt_context()) + { + sched_unlock(); + } + return ret; } diff --git a/sched/signal/sig_dispatch.c b/sched/signal/sig_dispatch.c index 7e238b8e95f7d..d726d5c3ae486 100644 --- a/sched/signal/sig_dispatch.c +++ b/sched/signal/sig_dispatch.c @@ -46,10 +46,57 @@ #include "signal/signal.h" #include "mqueue/mqueue.h" +/**************************************************************************** + * Private Types + ****************************************************************************/ + +struct sig_arg_s +{ + pid_t pid; + cpu_set_t saved_affinity; + uint16_t saved_flags; + bool need_restore; +}; + /**************************************************************************** * Private Functions ****************************************************************************/ +#ifdef CONFIG_SMP +static int sig_handler(FAR void *cookie) +{ + FAR struct sig_arg_s *arg = cookie; + FAR struct tcb_s *tcb; + irqstate_t flags; + + flags = enter_critical_section(); + tcb = nxsched_get_tcb(arg->pid); + + if (!tcb || tcb->task_state == TSTATE_TASK_INVALID || + (tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0) + { + /* There is no TCB with this pid or, if there is, it is not a task. 
*/ + + leave_critical_section(flags); + return -ESRCH; + } + + if (arg->need_restore) + { + tcb->affinity = arg->saved_affinity; + tcb->flags = arg->saved_flags; + } + + if (tcb->sigdeliver) + { + up_schedule_sigaction(tcb); + } + + leave_critical_section(flags); + return OK; +} +#endif + /**************************************************************************** * Name: nxsig_queue_action * @@ -115,7 +162,43 @@ static int nxsig_queue_action(FAR struct tcb_s *stcb, siginfo_t *info) * up_schedule_sigaction() */ - up_schedule_sigaction(stcb, nxsig_deliver); + if (!stcb->sigdeliver) + { +#ifdef CONFIG_SMP + int cpu = stcb->cpu; + int me = this_cpu(); + + stcb->sigdeliver = nxsig_deliver; + if (cpu != me && stcb->task_state == TSTATE_TASK_RUNNING) + { + struct sig_arg_s arg; + + if ((stcb->flags & TCB_FLAG_CPU_LOCKED) != 0) + { + arg.need_restore = false; + } + else + { + arg.saved_flags = stcb->flags; + arg.saved_affinity = stcb->affinity; + arg.need_restore = true; + + stcb->flags |= TCB_FLAG_CPU_LOCKED; + CPU_SET(stcb->cpu, &stcb->affinity); + } + + arg.pid = stcb->pid; + nxsched_smp_call_single(stcb->cpu, sig_handler, &arg, + true); + } + else +#endif + { + stcb->sigdeliver = nxsig_deliver; + up_schedule_sigaction(stcb); + } + } + leave_critical_section(flags); } } diff --git a/sched/task/task_restart.c b/sched/task/task_restart.c index 51d8020d08059..65c08624a59ae 100644 --- a/sched/task/task_restart.c +++ b/sched/task/task_restart.c @@ -38,136 +38,104 @@ #include "signal/signal.h" #include "task/task.h" +#ifndef CONFIG_BUILD_KERNEL + /**************************************************************************** - * Private Functions + * Private Type Declarations ****************************************************************************/ +#ifdef CONFIG_SMP +struct restart_arg_s +{ + pid_t pid; + cpu_set_t saved_affinity; + uint16_t saved_flags; + bool need_restore; +}; + /**************************************************************************** - * Name: 
nxtask_restart - * - * Description: - * This function "restarts" a task. The task is first terminated and then - * reinitialized with same ID, priority, original entry point, stack size, - * and parameters it had when it was first started. - * - * Input Parameters: - * pid - The task ID of the task to delete. An ID of zero signifies the - * calling task. - * - * Returned Value: - * Zero (OK) on success; or negated errno on failure - * - * This function can fail if: - * (1) A pid of zero or the pid of the calling task is provided - * (functionality not implemented) - * (2) The pid is not associated with any task known to the system. - * + * Private Functions ****************************************************************************/ -#ifndef CONFIG_BUILD_KERNEL -static int nxtask_restart(pid_t pid) +static int restart_handler(FAR void *cookie) { - FAR struct tcb_s *rtcb; - FAR struct task_tcb_s *tcb; - FAR dq_queue_t *tasklist; + FAR struct restart_arg_s *arg = cookie; + FAR struct tcb_s *tcb; irqstate_t flags; - int ret; -#ifdef CONFIG_SMP - int cpu; -#endif - /* Check if the task to restart is the calling task */ + flags = enter_critical_section(); - rtcb = this_task(); - if ((pid == 0) || (pid == rtcb->pid)) + /* tcb that we want restart */ + + tcb = nxsched_get_tcb(arg->pid); + if (!tcb || tcb->task_state == TSTATE_TASK_INVALID || + (tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0) { - /* Not implemented */ + /* There is no TCB with this pid or, if there is, it is not a task. */ - ret = -ENOSYS; - goto errout; + leave_critical_section(flags); + return -ESRCH; } - /* We are restarting some other task than ourselves. Make sure that the - * task does not change its state while we are executing. In the single - * CPU state this could be done by disabling pre-emption. But we will - * a little stronger medicine on the SMP case: The task make be running - * on another CPU. 
- */ + if (arg->need_restore) + { + tcb->affinity = arg->saved_affinity; + tcb->flags = arg->saved_flags; + } - flags = enter_critical_section(); + nxsched_remove_readytorun(tcb, false); - /* Find for the TCB associated with matching pid */ + leave_critical_section(flags); - tcb = (FAR struct task_tcb_s *)nxsched_get_tcb(pid); -#ifndef CONFIG_DISABLE_PTHREAD - if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == - TCB_FLAG_TTYPE_PTHREAD) -#else - if (!tcb) + return OK; +} #endif - { - /* There is no TCB with this pid or, if there is, it is not a task. */ - ret = -ESRCH; - goto errout_with_lock; - } - -#ifdef CONFIG_SMP - /* If the task is running on another CPU, then pause that CPU. We can - * then manipulate the TCB of the restarted task and when we resume the - * that CPU, the restart take effect. - */ - - cpu = nxsched_pause_cpu(&tcb->cmn); -#endif /* CONFIG_SMP */ +/**************************************************************************** + * Name: nxtask_reset_task + * + * Description: + * We use this function to reset tcb + * + ****************************************************************************/ +static void nxtask_reset_task(FAR struct tcb_s *tcb, bool remove) +{ /* Try to recover from any bad states */ - nxtask_recover((FAR struct tcb_s *)tcb); + nxtask_recover(tcb); /* Kill any children of this thread */ #ifdef HAVE_GROUP_MEMBERS - group_kill_children((FAR struct tcb_s *)tcb); + group_kill_children(tcb); #endif /* Remove the TCB from whatever list it is in. 
After this point, the TCB * should no longer be accessible to the system */ -#ifdef CONFIG_SMP - if ((FAR struct tcb_s *)tcb == g_delivertasks[tcb->cmn.cpu]) + if (remove) { - g_delivertasks[tcb->cmn.cpu] = NULL; + nxsched_remove_readytorun(tcb, false); } - else - { - tasklist = TLIST_HEAD(&tcb->cmn, tcb->cmn.cpu); - dq_rem((FAR dq_entry_t *)tcb, tasklist); - } -#else - tasklist = TLIST_HEAD(&tcb->cmn); - dq_rem((FAR dq_entry_t *)tcb, tasklist); -#endif - - tcb->cmn.task_state = TSTATE_TASK_INVALID; /* Deallocate anything left in the TCB's signal queues */ - nxsig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */ - sigemptyset(&tcb->cmn.sigprocmask); /* Reset sigprocmask */ + nxsig_cleanup(tcb); /* Deallocate Signal lists */ + sigemptyset(&tcb->sigprocmask); /* Reset sigprocmask */ /* Reset the current task priority */ - tcb->cmn.sched_priority = tcb->cmn.init_priority; + tcb->sched_priority = tcb->init_priority; /* The task should restart with pre-emption disabled and not in a critical * section. */ - tcb->cmn.lockcount = 0; + tcb->lockcount = 0; #ifdef CONFIG_SMP - tcb->cmn.irqcount = 0; + tcb->irqcount = 0; #endif /* Reset the base task priority and the number of pending @@ -175,44 +143,141 @@ static int nxtask_restart(pid_t pid) */ #ifdef CONFIG_PRIORITY_INHERITANCE - tcb->cmn.base_priority = tcb->cmn.init_priority; - tcb->cmn.boost_priority = 0; + tcb->base_priority = tcb->init_priority; + tcb->boost_priority = 0; #endif /* Re-initialize the processor-specific portion of the TCB. 
This will * reset the entry point and the start-up parameters */ - up_initial_state((FAR struct tcb_s *)tcb); + up_initial_state(tcb); /* Add the task to the inactive task list */ dq_addfirst((FAR dq_entry_t *)tcb, list_inactivetasks()); - tcb->cmn.task_state = TSTATE_TASK_INACTIVE; + tcb->task_state = TSTATE_TASK_INACTIVE; +} -#ifdef CONFIG_SMP - /* Resume the paused CPU (if any) */ +/**************************************************************************** + * Name: nxtask_restart + * + * Description: + * This function "restarts" a task. The task is first terminated and then + * reinitialized with same ID, priority, original entry point, stack size, + * and parameters it had when it was first started. + * + * Input Parameters: + * pid - The task ID of the task to delete. An ID of zero signifies the + * calling task. + * + * Returned Value: + * Zero (OK) on success; or negated errno on failure + * + * This function can fail if: + * (1) A pid of zero or the pid of the calling task is provided + * (functionality not implemented) + * (2) The pid is not associated with any task known to the system. + * + ****************************************************************************/ + +static int nxtask_restart(pid_t pid) +{ + FAR struct tcb_s *rtcb; + FAR struct tcb_s *tcb; + irqstate_t flags; + int ret; + + /* We are restarting some other task than ourselves. Make sure that the + * task does not change its state while we are executing. In the single + * CPU state this could be done by disabling pre-emption. But we will + * a little stronger medicine on the SMP case: The task make be running + * on another CPU. 
+ */ + + flags = enter_critical_section(); + + /* Check if the task to restart is the calling task */ + + rtcb = this_task(); + if (pid == 0 || pid == rtcb->pid) + { + /* Not implemented */ + + ret = -ENOSYS; + goto errout_with_lock; + } + + /* Find for the TCB associated with matching pid */ + + tcb = nxsched_get_tcb(pid); +#ifndef CONFIG_DISABLE_PTHREAD + if (!tcb || (tcb->flags & TCB_FLAG_TTYPE_MASK) == + TCB_FLAG_TTYPE_PTHREAD) +#else + if (!tcb) +#endif + { + /* There is no TCB with this pid or, if there is, it is not a task. */ + + ret = -ESRCH; + goto errout_with_lock; + } - if (cpu >= 0) +#ifdef CONFIG_SMP + if (tcb->task_state == TSTATE_TASK_RUNNING && + tcb->cpu != this_cpu()) { - ret = up_cpu_resume(cpu); - if (ret < 0) + struct restart_arg_s arg; + + if ((tcb->flags & TCB_FLAG_CPU_LOCKED) != 0) + { + arg.pid = tcb->pid; + arg.need_restore = false; + } + else { + arg.pid = tcb->pid; + arg.saved_flags = tcb->flags; + arg.saved_affinity = tcb->affinity; + arg.need_restore = true; + + tcb->flags |= TCB_FLAG_CPU_LOCKED; + CPU_SET(tcb->cpu, &tcb->affinity); + } + + nxsched_smp_call_single(tcb->cpu, restart_handler, &arg, true); + + tcb = nxsched_get_tcb(pid); + if (!tcb || tcb->task_state != TSTATE_TASK_INVALID || + (tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0) + { + ret = -ESRCH; goto errout_with_lock; } + + DEBUGASSERT(tcb->task_state != TSTATE_TASK_RUNNING); + nxtask_reset_task(tcb, false); + leave_critical_section(flags); + + /* Activate the task. */ + + nxtask_activate(tcb); + + return OK; } #endif /* CONFIG_SMP */ + nxtask_reset_task(tcb, true); leave_critical_section(flags); /* Activate the task. */ - nxtask_activate((FAR struct tcb_s *)tcb); + nxtask_activate(tcb); return OK; errout_with_lock: leave_critical_section(flags); -errout: return ret; }