From 0ce3fe55db8c9e492d3446e9d87c5cb17f2945db Mon Sep 17 00:00:00 2001 From: hujun5 Date: Wed, 10 Jan 2024 16:03:08 +0800 Subject: [PATCH 1/2] sched: change the SMP scheduling policy from synchronous to asynchronous reason: Currently, if we need to schedule a task to another CPU, we have to completely halt the other CPU, manipulate the scheduling linked list, and then resume the operation of that CPU. This process is both time-consuming and unnecessary. During this process, both the current CPU and the target CPU are inevitably subjected to busyloop. The improved strategy is to simply send a cross-core interrupt to the target CPU. The current CPU continues to run while the target CPU responds to the interrupt, eliminating the certainty of a busyloop occurring. Signed-off-by: hujun5 --- arch/arm/src/armv7-a/arm_cpupause.c | 32 ++++- arch/arm/src/armv7-r/arm_cpupause.c | 32 ++++- arch/arm/src/cxd56xx/cxd56_cpupause.c | 2 + arch/arm/src/lc823450/lc823450_cpupause.c | 2 + arch/arm/src/rp2040/rp2040_cpupause.c | 2 + arch/arm/src/sam34/sam4cm_cpupause.c | 2 + arch/arm64/src/common/arm64_cpupause.c | 54 +++++--- arch/arm64/src/common/arm64_gic.h | 2 +- arch/arm64/src/common/arm64_gicv2.c | 5 +- arch/arm64/src/common/arm64_gicv3.c | 4 +- arch/risc-v/src/common/riscv_cpupause.c | 35 +++++ arch/sim/src/sim/sim_smpsignal.c | 39 +++++- arch/sparc/src/s698pm/s698pm_cpupause.c | 7 + arch/x86_64/src/intel64/intel64_cpupause.c | 6 +- arch/xtensa/src/common/xtensa_cpupause.c | 63 ++++++--- include/nuttx/arch.h | 25 +++- sched/init/nx_start.c | 1 + sched/sched/CMakeLists.txt | 3 +- sched/sched/Make.defs | 2 +- sched/sched/queue.h | 10 ++ sched/sched/sched.h | 9 ++ sched/sched/sched_addreadytorun.c | 37 +++-- sched/sched/sched_process_delivered.c | 154 +++++++++++++++++++++ sched/sched/sched_removereadytorun.c | 14 ++ sched/task/task_restart.c | 12 +- 25 files changed, 482 insertions(+), 72 deletions(-) create mode 100644 sched/sched/sched_process_delivered.c diff --git 
a/arch/arm/src/armv7-a/arm_cpupause.c b/arch/arm/src/armv7-a/arm_cpupause.c index b36c6de7aa2b9..0a25b9af5d31b 100644 --- a/arch/arm/src/armv7-a/arm_cpupause.c +++ b/arch/arm/src/armv7-a/arm_cpupause.c @@ -256,6 +256,34 @@ int arm_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + return OK; } @@ -303,9 +331,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Execute SGI2 */ - - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/arm/src/armv7-r/arm_cpupause.c b/arch/arm/src/armv7-r/arm_cpupause.c index a466d963eba99..f68f41821697d 100644 --- a/arch/arm/src/armv7-r/arm_cpupause.c +++ b/arch/arm/src/armv7-r/arm_cpupause.c @@ -256,6 +256,34 @@ int arm_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. 
+ * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + return OK; } @@ -303,9 +331,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Execute SGI2 */ - - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/arm/src/cxd56xx/cxd56_cpupause.c b/arch/arm/src/cxd56xx/cxd56_cpupause.c index 3857728f45095..42807545bf909 100644 --- a/arch/arm/src/cxd56xx/cxd56_cpupause.c +++ b/arch/arm/src/cxd56xx/cxd56_cpupause.c @@ -353,6 +353,8 @@ int arm_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + return ret; } diff --git a/arch/arm/src/lc823450/lc823450_cpupause.c b/arch/arm/src/lc823450/lc823450_cpupause.c index c75abbae17b63..b4249b36d9a16 100644 --- a/arch/arm/src/lc823450/lc823450_cpupause.c +++ b/arch/arm/src/lc823450/lc823450_cpupause.c @@ -268,6 +268,8 @@ int lc823450_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + return OK; } diff --git a/arch/arm/src/rp2040/rp2040_cpupause.c b/arch/arm/src/rp2040/rp2040_cpupause.c index b5a4e8f820184..d71a3c5f6508c 100644 --- a/arch/arm/src/rp2040/rp2040_cpupause.c +++ b/arch/arm/src/rp2040/rp2040_cpupause.c @@ -324,6 +324,8 @@ int arm_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + return OK; } diff --git a/arch/arm/src/sam34/sam4cm_cpupause.c b/arch/arm/src/sam34/sam4cm_cpupause.c index 4abd9068de7c1..2ebc5ea2d9f5c 
100644 --- a/arch/arm/src/sam34/sam4cm_cpupause.c +++ b/arch/arm/src/sam34/sam4cm_cpupause.c @@ -259,6 +259,8 @@ int arm_pause_handler(int irq, void *c, void *arg) return up_cpu_paused(cpu); } + nxsched_process_delivered(cpu); + return OK; } diff --git a/arch/arm64/src/common/arm64_cpupause.c b/arch/arm64/src/common/arm64_cpupause.c index 870917ca152c8..f6579ba3b7f04 100644 --- a/arch/arm64/src/common/arm64_cpupause.c +++ b/arch/arm64/src/common/arm64_cpupause.c @@ -259,6 +259,36 @@ int arm64_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + /* Execute SGI2 */ + + arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + return OK; } @@ -284,8 +314,6 @@ int arm64_pause_handler(int irq, void *context, void *arg) int up_cpu_pause(int cpu) { - int ret; - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); #ifdef CONFIG_SCHED_INSTRUMENTATION @@ -308,23 +336,13 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Execute SGI2 */ - - ret = arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); - if (ret < 0) - { - /* What happened? 
Unlock the g_cpu_wait spinlock */ + up_cpu_pause_async(cpu); - spin_unlock(&g_cpu_wait[cpu]); - } - else - { - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ + /* Wait for the other CPU to unlock g_cpu_paused meaning that + * it is fully paused and ready for up_cpu_resume(); + */ - spin_lock(&g_cpu_paused[cpu]); - } + spin_lock(&g_cpu_paused[cpu]); spin_unlock(&g_cpu_paused[cpu]); @@ -333,7 +351,7 @@ int up_cpu_pause(int cpu) * called. g_cpu_paused will be unlocked in any case. */ - return ret; + return OK; } /**************************************************************************** diff --git a/arch/arm64/src/common/arm64_gic.h b/arch/arm64/src/common/arm64_gic.h index cce6eebf3bd57..5b81e6ebdef67 100644 --- a/arch/arm64/src/common/arm64_gic.h +++ b/arch/arm64/src/common/arm64_gic.h @@ -317,7 +317,7 @@ int arm64_gic_irq_trigger(unsigned int intid, uint32_t flags); uint64_t * arm64_decodeirq(uint64_t *regs); -int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list); +void arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list); #ifdef CONFIG_SMP diff --git a/arch/arm64/src/common/arm64_gicv2.c b/arch/arm64/src/common/arm64_gicv2.c index 33a1ced3206b4..3f019bfd9a3c1 100644 --- a/arch/arm64/src/common/arm64_gicv2.c +++ b/arch/arm64/src/common/arm64_gicv2.c @@ -1474,14 +1474,13 @@ void arm64_gic_secondary_init(void) * cpuset - The set of CPUs to receive the SGI * * Returned Value: - * OK is always returned at present. 
+ * None * ****************************************************************************/ -int arm64_gic_raise_sgi(unsigned int sgi, uint16_t cpuset) +void arm64_gic_raise_sgi(unsigned int sgi, uint16_t cpuset) { arm_cpu_sgi(sgi, cpuset); - return 0; } # ifdef CONFIG_SMP diff --git a/arch/arm64/src/common/arm64_gicv3.c b/arch/arm64/src/common/arm64_gicv3.c index 5512ab4c5de97..59b300cbf22ea 100644 --- a/arch/arm64/src/common/arm64_gicv3.c +++ b/arch/arm64/src/common/arm64_gicv3.c @@ -408,7 +408,7 @@ static int arm64_gic_send_sgi(unsigned int sgi_id, uint64_t target_aff, return 0; } -int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list) +void arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list) { uint64_t pre_cluster_id = UINT64_MAX; uint64_t curr_cluster_id; @@ -437,8 +437,6 @@ int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list) } arm64_gic_send_sgi(sgi_id, pre_cluster_id, tlist); - - return 0; } /* Wake up GIC redistributor. diff --git a/arch/risc-v/src/common/riscv_cpupause.c b/arch/risc-v/src/common/riscv_cpupause.c index e883c73a70699..148706ca449da 100644 --- a/arch/risc-v/src/common/riscv_cpupause.c +++ b/arch/risc-v/src/common/riscv_cpupause.c @@ -227,6 +227,7 @@ int up_cpu_paused_restore(void) int riscv_pause_handler(int irq, void *c, void *arg) { + struct tcb_s *tcb; int cpu = this_cpu(); nxsched_smp_call_handler(irq, c, arg); @@ -258,6 +259,40 @@ int riscv_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + tcb = current_task(cpu); + riscv_savecontext(tcb); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + riscv_restorecontext(tcb); + + return OK; +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. 
+ * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + /* Execute Pause IRQ to CPU(cpu) */ + + riscv_ipi_send(cpu); + return OK; } diff --git a/arch/sim/src/sim/sim_smpsignal.c b/arch/sim/src/sim/sim_smpsignal.c index fd06cb2b97301..140adff83437e 100644 --- a/arch/sim/src/sim/sim_smpsignal.c +++ b/arch/sim/src/sim/sim_smpsignal.c @@ -74,6 +74,7 @@ static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; static int sim_cpupause_handler(int irq, void *context, void *arg) { + struct tcb_s *tcb; int cpu = this_cpu(); /* Check for false alarms. Such false could occur as a consequence of @@ -100,6 +101,12 @@ static int sim_cpupause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } + tcb = current_task(cpu); + sim_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + sim_restorestate(tcb->xcp.regs); + return OK; } @@ -339,6 +346,34 @@ int sim_init_ipi(int irq) return irq_attach(irq, sim_cpupause_handler, NULL); } +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + /* Generate IRQ for CPU(cpu) */ + + host_send_ipi(cpu); + + return OK; +} + /**************************************************************************** * Name: up_cpu_pause * @@ -381,9 +416,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Generate IRQ for CPU(cpu) */ - - host_send_ipi(cpu); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/sparc/src/s698pm/s698pm_cpupause.c b/arch/sparc/src/s698pm/s698pm_cpupause.c index 4071ba624d7ea..225ef9ec24132 100644 --- a/arch/sparc/src/s698pm/s698pm_cpupause.c +++ b/arch/sparc/src/s698pm/s698pm_cpupause.c @@ -226,6 +226,7 @@ int up_cpu_paused_restore(void) int s698pm_pause_handler(int irq, void *c, void *arg) { + struct tcb_s *tcb; int cpu = this_cpu(); nxsched_smp_call_handler(irq, c, arg); @@ -257,6 +258,12 @@ int s698pm_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + tcb = current_task(cpu); + sparc_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + sparc_restorestate(tcb->xcp.regs); + return OK; } diff --git a/arch/x86_64/src/intel64/intel64_cpupause.c b/arch/x86_64/src/intel64/intel64_cpupause.c index 9a543a2ddd662..44e3220c2ac2c 100644 --- a/arch/x86_64/src/intel64/intel64_cpupause.c +++ b/arch/x86_64/src/intel64/intel64_cpupause.c @@ -265,7 +265,7 @@ int up_pause_handler(int irq, void *c, void *arg) } /**************************************************************************** - * Name: up_cpu_async_pause + * Name: up_cpu_pause_async * * Description: * pause task execution on the CPU @@ -283,7 +283,7 @@ int up_pause_handler(int irq, void *c, void *arg) * 
****************************************************************************/ -inline_function int up_cpu_async_pause(int cpu) +inline_function int up_cpu_pause_async(int cpu) { cpu_set_t cpuset; @@ -362,7 +362,7 @@ int up_cpu_pause(int cpu) /* Execute Pause IRQ to CPU(cpu) */ - up_cpu_async_pause(cpu); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/xtensa/src/common/xtensa_cpupause.c b/arch/xtensa/src/common/xtensa_cpupause.c index edc9b8bd85b04..37f749c44f24d 100644 --- a/arch/xtensa/src/common/xtensa_cpupause.c +++ b/arch/xtensa/src/common/xtensa_cpupause.c @@ -218,6 +218,7 @@ int up_cpu_paused_restore(void) void xtensa_pause_handler(void) { + struct tcb_s *tcb; int cpu = this_cpu(); /* Check for false alarms. Such false could occur as a consequence of @@ -242,6 +243,40 @@ void xtensa_pause_handler(void) leave_critical_section(flags); } + + tcb = current_task(cpu); + xtensa_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + xtensa_restorestate(tcb->xcp.regs); +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + /* Execute the intercpu interrupt */ + + xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE); + + return OK; } /**************************************************************************** @@ -291,8 +326,6 @@ void up_send_smp_call(cpu_set_t cpuset) int up_cpu_pause(int cpu) { - int ret; - #ifdef CONFIG_SCHED_INSTRUMENTATION /* Notify of the pause event */ @@ -315,23 +348,13 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Execute the intercpu interrupt */ - - ret = xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE); - if (ret < 0) - { - /* What happened? Unlock the g_cpu_wait spinlock */ + up_cpu_pause_async(cpu); - spin_unlock(&g_cpu_wait[cpu]); - } - else - { - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ + /* Wait for the other CPU to unlock g_cpu_paused meaning that + * it is fully paused and ready for up_cpu_resume(); + */ - spin_lock(&g_cpu_paused[cpu]); - } + spin_lock(&g_cpu_paused[cpu]); spin_unlock(&g_cpu_paused[cpu]); @@ -340,7 +363,7 @@ int up_cpu_pause(int cpu) * called. g_cpu_paused will be unlocked in any case. */ - return ret; + return OK; } /**************************************************************************** @@ -351,8 +374,8 @@ int up_cpu_pause(int cpu) * state of the task at the head of the g_assignedtasks[cpu] list, and * resume normal tasking. * - * This function is called after up_cpu_pause in order to resume operation - * of the CPU after modifying its g_assignedtasks[cpu] list. + * This function is called after up_cpu_pause in order resume operation of + * the CPU after modifying its g_assignedtasks[cpu] list. * * Input Parameters: * cpu - The index of the CPU being re-started. 
diff --git a/include/nuttx/arch.h b/include/nuttx/arch.h index a58cf1818d294..a763ad526858d 100644 --- a/include/nuttx/arch.h +++ b/include/nuttx/arch.h @@ -2307,6 +2307,29 @@ int up_cpu_start(int cpu); int up_cpu_pause(int cpu); #endif +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +#ifdef CONFIG_SMP +int up_cpu_pause_async(int cpu); +#endif + /**************************************************************************** * Name: up_cpu_pausereq * @@ -2408,7 +2431,7 @@ int up_cpu_paused_restore(void); * state of the task at the head of the g_assignedtasks[cpu] list, and * resume normal tasking. * - * This function is called after up_cpu_pause in order ot resume operation + * This function is called after up_cpu_pause in order to resume operation * of the CPU after modifying its g_assignedtasks[cpu] list. * * Input Parameters: diff --git a/sched/init/nx_start.c b/sched/init/nx_start.c index 4bf9969d4c599..027b805e2c31d 100644 --- a/sched/init/nx_start.c +++ b/sched/init/nx_start.c @@ -126,6 +126,7 @@ dq_queue_t g_readytorun; #ifdef CONFIG_SMP dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS]; +FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS]; #endif /* g_running_tasks[] holds a references to the running task for each cpu. 
diff --git a/sched/sched/CMakeLists.txt b/sched/sched/CMakeLists.txt index ca4ad08b033d2..29bf9c1f59988 100644 --- a/sched/sched/CMakeLists.txt +++ b/sched/sched/CMakeLists.txt @@ -62,7 +62,8 @@ if(CONFIG_SMP) sched_cpupause.c sched_getcpu.c sched_getaffinity.c - sched_setaffinity.c) + sched_setaffinity.c + sched_process_delivered.c) endif() if(CONFIG_SIG_SIGSTOP_ACTION) diff --git a/sched/sched/Make.defs b/sched/sched/Make.defs index 1103128bc9f18..b6ca4a7131b06 100644 --- a/sched/sched/Make.defs +++ b/sched/sched/Make.defs @@ -37,7 +37,7 @@ CSRCS += sched_reprioritize.c endif ifeq ($(CONFIG_SMP),y) -CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c +CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c sched_process_delivered.c CSRCS += sched_getaffinity.c sched_setaffinity.c endif diff --git a/sched/sched/queue.h b/sched/sched/queue.h index c91a37de7af26..320638b1989c8 100644 --- a/sched/sched/queue.h +++ b/sched/sched/queue.h @@ -63,4 +63,14 @@ } \ while (0) +#define dq_insert_mid(pre, mid, next) \ + do \ + { \ + mid->flink = next; \ + mid->blink = prev; \ + pre->flink = mid; \ + next->blink = mid; \ + } \ + while (0) + #endif /* __INCLUDE_NUTTX_QUEUE_H_ */ diff --git a/sched/sched/sched.h b/sched/sched/sched.h index a68353a9b00a5..26600cae45f3a 100644 --- a/sched/sched/sched.h +++ b/sched/sched/sched.h @@ -190,6 +190,14 @@ extern dq_queue_t g_readytorun; extern dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS]; #endif +/* g_delivertasks is used to record the tcb that needs to be passed to + * another cpu for scheduling. When it is null, it means that there + * is no tcb that needs to be processed. When it is not null, + * it indicates that there is a tcb that needs to be processed. + */ + +extern FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS]; + /* g_running_tasks[] holds a references to the running task for each cpu. * It is valid only when up_interrupt_context() returns true. 
*/ @@ -397,6 +405,7 @@ static inline_function FAR struct tcb_s *this_task(void) int nxsched_select_cpu(cpu_set_t affinity); int nxsched_pause_cpu(FAR struct tcb_s *tcb); +void nxsched_process_delivered(int cpu); # define nxsched_islocked_global() (g_cpu_lockset != 0) # define nxsched_islocked_tcb(tcb) nxsched_islocked_global() diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c index 4c760b7fa1204..ff481ee669ab5 100644 --- a/sched/sched/sched_addreadytorun.c +++ b/sched/sched/sched_addreadytorun.c @@ -222,13 +222,38 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) else /* (task_state == TSTATE_TASK_RUNNING) */ { /* If we are modifying some assigned task list other than our own, we - * will need to stop that CPU. + * will need to switch that CPU. */ me = this_cpu(); if (cpu != me) { - DEBUGVERIFY(up_cpu_pause(cpu)); + if (g_delivertasks[cpu] == NULL) + { + g_delivertasks[cpu] = btcb; + btcb->cpu = cpu; + btcb->task_state = TSTATE_TASK_ASSIGNED; + up_cpu_pause_async(cpu); + } + else + { + rtcb = g_delivertasks[cpu]; + if (rtcb->sched_priority < btcb->sched_priority) + { + g_delivertasks[cpu] = btcb; + btcb->cpu = cpu; + btcb->task_state = TSTATE_TASK_ASSIGNED; + nxsched_add_prioritized(rtcb, &g_readytorun); + rtcb->task_state = TSTATE_TASK_READYTORUN; + } + else + { + nxsched_add_prioritized(btcb, &g_readytorun); + btcb->task_state = TSTATE_TASK_READYTORUN; + } + } + + return false; } tasklist = &g_assignedtasks[cpu]; @@ -258,14 +283,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) { g_cpu_lockset |= (1 << cpu); } - - /* All done, restart the other CPU (if it was paused). 
*/ - - if (cpu != me) - { - DEBUGVERIFY(up_cpu_resume(cpu)); - doswitch = false; - } } return doswitch; diff --git a/sched/sched/sched_process_delivered.c b/sched/sched/sched_process_delivered.c new file mode 100644 index 0000000000000..574e89e0ed776 --- /dev/null +++ b/sched/sched/sched_process_delivered.c @@ -0,0 +1,154 @@ +/**************************************************************************** + * sched/sched/sched_process_delivered.c + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. The + * ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + ****************************************************************************/ + +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include + +#include + +#include "irq/irq.h" +#include "sched/sched.h" +#include "sched/queue.h" + +/**************************************************************************** + * Public Functions + ****************************************************************************/ + +/**************************************************************************** + * Name: nxsched_process_delivered + * + * Description: + * This function is used to process the tcb in g_delivertasks. + * 1 We use direct locking instead of enter_critical_section + * to save processing time + * 2 If there is a higher priority task, we will still perform + * the higher priority task + * 3 If the schedule lock is on, the task will be placed in g_pendingtasks + * + * Input Parameters: + * cpu + * + * Returned Value: + * OK + * + * Assumptions: + * - The caller must be in irq + * - current cpu must not be locked + * + ****************************************************************************/ + +void nxsched_process_delivered(int cpu) +{ + FAR dq_queue_t *tasklist; + FAR struct tcb_s *next; + FAR struct tcb_s *prev; + struct tcb_s *btcb = NULL; + struct tcb_s *tcb; + + DEBUGASSERT(g_cpu_nestcount[cpu] == 0); + DEBUGASSERT(up_interrupt_context()); + + if ((g_cpu_irqset & (1 << cpu)) == 0) + { + while (!spin_trylock_wo_note(&g_cpu_irqlock)) + { + if (up_cpu_pausereq(cpu)) + { + up_cpu_paused(cpu); + } + } + + g_cpu_irqset |= (1 << cpu); + } + + if (g_delivertasks[cpu] == NULL) + { + tcb = current_task(cpu); + if (tcb->irqcount <= 0) + { + cpu_irqlock_clear(); + } + + return; + } + + if (nxsched_islocked_global()) + { + btcb = g_delivertasks[cpu]; + g_delivertasks[cpu] = NULL; + nxsched_add_prioritized(btcb, 
&g_pendingtasks); + btcb->task_state = TSTATE_TASK_PENDING; + tcb = current_task(cpu); + if (tcb->irqcount <= 0) + { + cpu_irqlock_clear(); + } + + return; + } + + btcb = g_delivertasks[cpu]; + tasklist = &g_assignedtasks[cpu]; + + for (next = (FAR struct tcb_s *)tasklist->head; + (next && btcb->sched_priority <= next->sched_priority); + next = next->flink); + + prev = next->blink; + if (prev == NULL) + { + /* Special case: Insert at the head of the list */ + + dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist); + btcb->cpu = cpu; + btcb->task_state = TSTATE_TASK_RUNNING; + + DEBUGASSERT(btcb->flink != NULL); + DEBUGASSERT(next == btcb->flink); + next->task_state = TSTATE_TASK_ASSIGNED; + + if (btcb->lockcount > 0) + { + g_cpu_lockset |= (1 << cpu); + } + } + else + { + /* Insert in the middle of the list */ + + dq_insert_mid(prev, btcb, next); + btcb->cpu = cpu; + btcb->task_state = TSTATE_TASK_ASSIGNED; + } + + g_delivertasks[cpu] = NULL; + tcb = current_task(cpu); + + if (tcb->irqcount <= 0) + { + cpu_irqlock_clear(); + } +} diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c index 923ec59d870d2..e9b26a68d897f 100644 --- a/sched/sched/sched_removereadytorun.c +++ b/sched/sched/sched_removereadytorun.c @@ -324,6 +324,19 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge) else { FAR dq_queue_t *tasklist; + int i; + + /* if tcb == g_delivertasks[i] we set NULL to g_delivertasks[i] */ + + for (i = 0; i < CONFIG_SMP_NCPUS; i++) + { + if (tcb == g_delivertasks[i]) + { + g_delivertasks[i] = NULL; + tcb->task_state = TSTATE_TASK_INVALID; + goto finish; + } + } tasklist = TLIST_HEAD(tcb, tcb->cpu); @@ -341,6 +354,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge) tcb->task_state = TSTATE_TASK_INVALID; } +finish: if (list_pendingtasks()->head && merge) { doswitch |= nxsched_merge_pending(); diff --git a/sched/task/task_restart.c b/sched/task/task_restart.c index f9c70e345a567..51d8020d08059 
100644 --- a/sched/task/task_restart.c +++ b/sched/task/task_restart.c @@ -136,12 +136,20 @@ static int nxtask_restart(pid_t pid) */ #ifdef CONFIG_SMP - tasklist = TLIST_HEAD(&tcb->cmn, tcb->cmn.cpu); + if ((FAR struct tcb_s *)tcb == g_delivertasks[tcb->cmn.cpu]) + { + g_delivertasks[tcb->cmn.cpu] = NULL; + } + else + { + tasklist = TLIST_HEAD(&tcb->cmn, tcb->cmn.cpu); + dq_rem((FAR dq_entry_t *)tcb, tasklist); + } #else tasklist = TLIST_HEAD(&tcb->cmn); + dq_rem((FAR dq_entry_t *)tcb, tasklist); #endif - dq_rem((FAR dq_entry_t *)tcb, tasklist); tcb->cmn.task_state = TSTATE_TASK_INVALID; /* Deallocate anything left in the TCB's signal queues */ From 26ef816602b2c94cf5faca014cb4b070da002916 Mon Sep 17 00:00:00 2001 From: hujun5 Date: Fri, 26 Apr 2024 12:26:53 +0800 Subject: [PATCH 2/2] arch: We can use an independent SIG interrupt to handle async pause, which can save processing time. Signed-off-by: hujun5 --- arch/arm/src/armv7-a/arm_cpupause.c | 34 +++++++++++++++++--- arch/arm/src/armv7-a/arm_gicv2.c | 2 ++ arch/arm/src/armv7-a/gic.h | 25 +++++++++++++++ arch/arm/src/armv7-r/arm_cpupause.c | 34 +++++++++++++++++--- arch/arm/src/armv7-r/arm_gicv2.c | 2 ++ arch/arm/src/armv7-r/gic.h | 26 +++++++++++++++ arch/arm/src/armv8-r/arm_gic.h | 6 ++++ arch/arm/src/armv8-r/arm_gicv3.c | 3 ++ arch/arm64/src/common/arm64_cpupause.c | 35 +++++++++++++++++--- arch/arm64/src/common/arm64_gic.h | 24 ++++++++++++++ arch/arm64/src/common/arm64_gicv2.c | 2 ++ arch/arm64/src/common/arm64_gicv3.c | 3 ++ arch/x86_64/include/intel64/irq.h | 3 +- arch/x86_64/src/intel64/intel64_cpupause.c | 37 ++++++++++++++++++++-- arch/x86_64/src/intel64/intel64_cpustart.c | 3 ++ 15 files changed, 223 insertions(+), 16 deletions(-) diff --git a/arch/arm/src/armv7-a/arm_cpupause.c b/arch/arm/src/armv7-a/arm_cpupause.c index 0a25b9af5d31b..81dc1a534e70d 100644 --- a/arch/arm/src/armv7-a/arm_cpupause.c +++ b/arch/arm/src/armv7-a/arm_cpupause.c @@ -208,6 +208,34 @@ int up_cpu_paused_restore(void) return OK; 
} +/**************************************************************************** + * Name: arm_pause_async_handler + * + * Description: + * This is the handler for async pause. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It porcess g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + ****************************************************************************/ + +int arm_pause_async_handler(int irq, void *context, void *arg) +{ + int cpu = this_cpu(); + + nxsched_process_delivered(cpu); + return OK; +} + /**************************************************************************** * Name: arm_pause_handler * @@ -256,8 +284,6 @@ int arm_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } - nxsched_process_delivered(cpu); - return OK; } @@ -282,7 +308,7 @@ int arm_pause_handler(int irq, void *context, void *arg) inline_function int up_cpu_pause_async(int cpu) { - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + arm_cpu_sgi(GIC_SMP_CPUPAUSE_ASYNC, (1 << cpu)); return OK; } @@ -331,7 +357,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - up_cpu_pause_async(cpu); + arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/arm/src/armv7-a/arm_gicv2.c b/arch/arm/src/armv7-a/arm_gicv2.c index 87c5aa19e3bc0..47a132fbc68d5 100644 --- a/arch/arm/src/armv7-a/arm_gicv2.c +++ b/arch/arm/src/armv7-a/arm_gicv2.c @@ -215,6 +215,8 @@ void arm_gic0_initialize(void) DEBUGVERIFY(irq_attach(GIC_SMP_CPUSTART, arm_start_handler, NULL)); DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm_pause_handler, NULL)); + 
DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, + arm_pause_async_handler, NULL)); DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL)); #endif diff --git a/arch/arm/src/armv7-a/gic.h b/arch/arm/src/armv7-a/gic.h index 29359e12ddf4f..b652dea0a1159 100644 --- a/arch/arm/src/armv7-a/gic.h +++ b/arch/arm/src/armv7-a/gic.h @@ -619,10 +619,12 @@ # define GIC_SMP_CPUSTART GIC_IRQ_SGI9 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 # define GIC_SMP_CPUCALL GIC_IRQ_SGI11 +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI12 #else # define GIC_SMP_CPUSTART GIC_IRQ_SGI1 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 # define GIC_SMP_CPUCALL GIC_IRQ_SGI3 +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI4 #endif /**************************************************************************** @@ -839,6 +841,29 @@ int arm_start_handler(int irq, void *context, void *arg); int arm_pause_handler(int irq, void *context, void *arg); #endif +/**************************************************************************** + * Name: arm_pause_async_handler + * + * Description: + * This is the handler for async pause. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + ****************************************************************************/ + +#ifdef CONFIG_SMP +int arm_pause_async_handler(int irq, void *context, void *arg); +#endif /**************************************************************************** * Name: arm_gic_dump * diff --git a/arch/arm/src/armv7-r/arm_cpupause.c b/arch/arm/src/armv7-r/arm_cpupause.c index f68f41821697d..c7d1ebe38eddb 100644 --- a/arch/arm/src/armv7-r/arm_cpupause.c +++ b/arch/arm/src/armv7-r/arm_cpupause.c @@ -230,8 +230,6 @@ int up_cpu_paused_restore(void) int arm_pause_handler(int irq, void *context, void *arg) { - int cpu = this_cpu(); - /* Check for false alarms. Such false could occur as a consequence of * some deadlock breaking logic that might have already serviced the SG2 * interrupt by calling up_cpu_paused(). If the pause event has already @@ -256,8 +254,34 @@ int arm_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } - nxsched_process_delivered(cpu); + return OK; +} + +/**************************************************************************** + * Name: arm_pause_async_handler + * + * Description: + * This is the handler for async pause. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + ****************************************************************************/ + +int arm_pause_async_handler(int irq, void *context, void *arg) +{ + int cpu = this_cpu(); + nxsched_process_delivered(cpu); return OK; } @@ -282,7 +306,7 @@ int arm_pause_handler(int irq, void *context, void *arg) inline_function int up_cpu_pause_async(int cpu) { - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + arm_cpu_sgi(GIC_SMP_CPUPAUSE_ASYNC, (1 << cpu)); return OK; } @@ -331,7 +355,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - up_cpu_pause_async(cpu); + arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/arm/src/armv7-r/arm_gicv2.c b/arch/arm/src/armv7-r/arm_gicv2.c index d7513d252dce8..431ace37a4cf5 100644 --- a/arch/arm/src/armv7-r/arm_gicv2.c +++ b/arch/arm/src/armv7-r/arm_gicv2.c @@ -161,6 +161,8 @@ void arm_gic0_initialize(void) DEBUGVERIFY(irq_attach(GIC_SMP_CPUSTART, arm_start_handler, NULL)); DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm_pause_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, + arm_pause_async_handler, NULL)); DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL)); #endif diff --git a/arch/arm/src/armv7-r/gic.h b/arch/arm/src/armv7-r/gic.h index ecee23563df01..d8632330564ba 100644 --- a/arch/arm/src/armv7-r/gic.h +++ b/arch/arm/src/armv7-r/gic.h @@ -610,10 +610,12 @@ # define GIC_SMP_CPUSTART GIC_IRQ_SGI9 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 # define GIC_SMP_CPUCALL GIC_IRQ_SGI11 +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI12 #else # define GIC_SMP_CPUSTART GIC_IRQ_SGI1 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 # define GIC_SMP_CPUCALL GIC_IRQ_SGI3 +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI4 #endif /**************************************************************************** @@ -827,6 +829,30 @@ int arm_start_handler(int irq, void *context, void 
*arg); int arm_pause_handler(int irq, void *context, void *arg); #endif +/**************************************************************************** + * Name: arm_pause_async_handler + * + * Description: + * This is the handler for async pause. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + ****************************************************************************/ + +#ifdef CONFIG_SMP +int arm_pause_async_handler(int irq, void *context, void *arg); +#endif + /**************************************************************************** * Name: arm_gic_dump * diff --git a/arch/arm/src/armv8-r/arm_gic.h b/arch/arm/src/armv8-r/arm_gic.h index e43ccff8d0364..717e3660d22e0 100644 --- a/arch/arm/src/armv8-r/arm_gic.h +++ b/arch/arm/src/armv8-r/arm_gic.h @@ -313,10 +313,12 @@ # define GIC_SMP_CPUSTART GIC_IRQ_SGI9 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 # define GIC_SMP_CPUCALL GIC_IRQ_SGI11 +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI12 #else # define GIC_SMP_CPUSTART GIC_IRQ_SGI1 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 # define GIC_SMP_CPUCALL GIC_IRQ_SGI3 +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI4 #endif /**************************************************************************** @@ -355,6 +357,10 @@ int arm_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list); int arm_pause_handler(int irq, void *context, void *arg); +#ifdef CONFIG_SMP +int arm_pause_async_handler(int irq, void *context, void *arg); +#endif + void arm_gic_secondary_init(void); #endif diff --git a/arch/arm/src/armv8-r/arm_gicv3.c b/arch/arm/src/armv8-r/arm_gicv3.c index 7ff1d3558242d..d32db05ac7735 100644 --- a/arch/arm/src/armv8-r/arm_gicv3.c +++ 
b/arch/arm/src/armv8-r/arm_gicv3.c @@ -568,6 +568,8 @@ static void gicv3_dist_init(void) /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. */ DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, + arm64_pause_async_handler, NULL)); DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL)); #endif @@ -814,6 +816,7 @@ static void arm_gic_init(void) #ifdef CONFIG_SMP up_enable_irq(GIC_SMP_CPUPAUSE); + up_enable_irq(GIC_SMP_CPUPAUSE_ASYNC); #endif } diff --git a/arch/arm64/src/common/arm64_cpupause.c b/arch/arm64/src/common/arm64_cpupause.c index f6579ba3b7f04..da81259d8d558 100644 --- a/arch/arm64/src/common/arm64_cpupause.c +++ b/arch/arm64/src/common/arm64_cpupause.c @@ -211,6 +211,35 @@ int up_cpu_paused_restore(void) return OK; } +/**************************************************************************** + * Name: arm64_pause_async_handler + * + * Description: + * This is the handler for async pause. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + ****************************************************************************/ + +int arm64_pause_async_handler(int irq, void *context, void *arg) +{ + int cpu = this_cpu(); + + nxsched_process_delivered(cpu); + + return OK; +} + /**************************************************************************** * Name: arm64_pause_handler * @@ -259,8 +288,6 @@ int arm64_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } - nxsched_process_delivered(cpu); - return OK; } @@ -287,7 +314,7 @@ inline_function int up_cpu_pause_async(int cpu) { /* Execute SGI2 */ - arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE_ASYNC, (1 << cpu)); return OK; } @@ -336,7 +363,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - up_cpu_pause_async(cpu); + arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/arm64/src/common/arm64_gic.h b/arch/arm64/src/common/arm64_gic.h index 5b81e6ebdef67..de66d61696360 100644 --- a/arch/arm64/src/common/arm64_gic.h +++ b/arch/arm64/src/common/arm64_gic.h @@ -281,10 +281,12 @@ #define GIC_IRQ_SGI15 15 #ifdef CONFIG_ARCH_TRUSTZONE_SECURE +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI8 # define GIC_SMP_CPUSTART GIC_IRQ_SGI9 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI10 # define GIC_SMP_CPUCALL GIC_IRQ_SGI11 #else +# define GIC_SMP_CPUPAUSE_ASYNC GIC_IRQ_SGI0 # define GIC_SMP_CPUSTART GIC_IRQ_SGI1 # define GIC_SMP_CPUPAUSE GIC_IRQ_SGI2 # define GIC_SMP_CPUCALL GIC_IRQ_SGI3 @@ -343,6 +345,28 @@ void arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list); int arm64_pause_handler(int irq, void *context, void *arg); +/**************************************************************************** + * Name: arm64_pause_async_handler + * + * Description: + * This is the handler for async pause. + * + * 1. 
It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + ****************************************************************************/ + +int arm64_pause_async_handler(int irq, void *context, void *arg); + void arm64_gic_secondary_init(void); #endif diff --git a/arch/arm64/src/common/arm64_gicv2.c b/arch/arm64/src/common/arm64_gicv2.c index 3f019bfd9a3c1..525298b1857a5 100644 --- a/arch/arm64/src/common/arm64_gicv2.c +++ b/arch/arm64/src/common/arm64_gicv2.c @@ -911,6 +911,8 @@ static void arm_gic0_initialize(void) /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. */ DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, + arm64_pause_async_handler, NULL)); DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL)); #endif diff --git a/arch/arm64/src/common/arm64_gicv3.c b/arch/arm64/src/common/arm64_gicv3.c index 59b300cbf22ea..49b9d513350d3 100644 --- a/arch/arm64/src/common/arm64_gicv3.c +++ b/arch/arm64/src/common/arm64_gicv3.c @@ -654,6 +654,8 @@ static void gicv3_dist_init(void) /* Attach SGI interrupt handlers. This attaches the handler to all CPUs. 
*/ DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL)); + DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC, + arm64_pause_async_handler, NULL)); DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL)); #endif @@ -952,6 +954,7 @@ static void arm64_gic_init(void) #ifdef CONFIG_SMP up_enable_irq(GIC_SMP_CPUPAUSE); + up_enable_irq(GIC_SMP_CPUPAUSE_ASYNC); up_enable_irq(GIC_SMP_CPUCALL); #endif } diff --git a/arch/x86_64/include/intel64/irq.h b/arch/x86_64/include/intel64/irq.h index 676458020d24d..fbcdf54500039 100644 --- a/arch/x86_64/include/intel64/irq.h +++ b/arch/x86_64/include/intel64/irq.h @@ -346,9 +346,10 @@ #define HPET0_IRQ IRQ2 #define HPET1_IRQ IRQ8 -/* Use IRQ15 for SMP */ +/* Use IRQ15 IRQ16 for SMP */ #define SMP_IPI_IRQ IRQ15 +#define SMP_IPI_ASYNC_IRQ IRQ16 /* Common register save structure created by up_saveusercontext() and by * ISR/IRQ interrupt processing. diff --git a/arch/x86_64/src/intel64/intel64_cpupause.c b/arch/x86_64/src/intel64/intel64_cpupause.c index 44e3220c2ac2c..7f6891d24cabb 100644 --- a/arch/x86_64/src/intel64/intel64_cpupause.c +++ b/arch/x86_64/src/intel64/intel64_cpupause.c @@ -264,6 +264,35 @@ int up_pause_handler(int irq, void *c, void *arg) return OK; } +/**************************************************************************** + * Name: up_pause_async_handler + * + * Description: + * This is the handler for async pause. + * + * 1. It saves the current task state at the head of the current assigned + * task list. + * 2. It processes g_delivertasks + * 3. Returns from interrupt, restoring the state of the new task at the + * head of the ready to run list. + * + * Input Parameters: + * Standard interrupt handling + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ * + ****************************************************************************/ + +int up_pause_async_handler(int irq, void *c, void *arg) +{ + int cpu = this_cpu(); + + nxsched_process_delivered(cpu); + + return OK; +} + /**************************************************************************** * Name: up_cpu_pause_async * @@ -290,7 +319,7 @@ inline_function int up_cpu_pause_async(int cpu) CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); - up_trigger_irq(SMP_IPI_IRQ, cpuset); + up_trigger_irq(SMP_IPI_ASYNC_IRQ, cpuset); return OK; } @@ -336,6 +365,7 @@ void up_send_smp_call(cpu_set_t cpuset) int up_cpu_pause(int cpu) { + cpu_set_t cpuset; sinfo("cpu=%d\n", cpu); #ifdef CONFIG_SCHED_INSTRUMENTATION @@ -362,7 +392,10 @@ int up_cpu_pause(int cpu) /* Execute Pause IRQ to CPU(cpu) */ - up_cpu_pause_async(cpu); + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + + up_trigger_irq(SMP_IPI_IRQ, cpuset); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/x86_64/src/intel64/intel64_cpustart.c b/arch/x86_64/src/intel64/intel64_cpustart.c index c766ebfe1c000..2fe404349a032 100644 --- a/arch/x86_64/src/intel64/intel64_cpustart.c +++ b/arch/x86_64/src/intel64/intel64_cpustart.c @@ -47,6 +47,7 @@ extern void __ap_entry(void); extern int up_pause_handler(int irq, void *c, void *arg); +extern int up_pause_async_handler(int irq, void *c, void *arg); /**************************************************************************** * Private Functions @@ -160,7 +161,9 @@ void x86_64_ap_boot(void) /* Connect Pause IRQ to CPU */ irq_attach(SMP_IPI_IRQ, up_pause_handler, NULL); + irq_attach(SMP_IPI_ASYNC_IRQ, up_pause_async_handler, NULL); up_enable_irq(SMP_IPI_IRQ); + up_enable_irq(SMP_IPI_ASYNC_IRQ); /* CPU ready */