From 007f81c2d7fe4fb62566df8c56bdc0a5d980af37 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Sun, 21 Nov 2021 16:46:16 +0000 Subject: [PATCH] arm/gic: Use ci_cc_nintr rather than RAS to restart fast path. The ad hoc RAS mechanism was a little leaky -- it was possible to accidentally invoke the fast path even during an interrupt exception, because ci_intr_depth is incremented only while we're calling the interrupt handlers and goes back down to zero before we block hardware interrupts with DAIF and then ERET. This has slightly less overhead and doesn't require any cooperation by the interrupt handler -- it is even reentrant, so it doesn't penalize splx inside an interrupt or softint handler. The interrupt counter ci_cc_nintr is approximate -- >1 interrupts, a second one interrupting the first before ERET, might be counted as only 1 -- but that's OK because we just need the counter to change if there is an interrupt. WARNING: Not every interrupt controller's irq dispatch function increments ci_cc_nintr when it touches the hardware priority level -- e.g., ep93xx_intr_dispatch does not. But that's OK for now because this logic is specific to GIC, for which armgic_irq_handler and gicv3_irq_handler both always increment ci_cc_nintr (at least, barring a race with a nested interrupt, which is still safe as noted above). 
--- sys/arch/arm/cortex/gic_splfuncs_armv8.S | 27 +++++++++++------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/sys/arch/arm/cortex/gic_splfuncs_armv8.S b/sys/arch/arm/cortex/gic_splfuncs_armv8.S index ac2a987da593..d721f1c33bd9 100644 --- a/sys/arch/arm/cortex/gic_splfuncs_armv8.S +++ b/sys/arch/arm/cortex/gic_splfuncs_armv8.S @@ -90,14 +90,8 @@ ENTRY_NP(gic_splx) cmp w0, w2 b.hs .Ldone - /* Slow path if ci_intr_depth != 0 */ - ldr w2, [x1, #CI_INTR_DEPTH] - cbnz w2, .Lslow - - /* Save newipl and restart address in cpu info */ - str w0, [x1, #CI_SPLX_SAVEDIPL] - adr x2, .Lrestart - str x2, [x1, #CI_SPLX_RESTART] + /* Start the interruptible fast path */ + ldr x3, [x1, #CI_CC_NINTR] /* Slow path if hwpl > newipl */ ldr w2, [x1, #CI_HWPL] @@ -107,8 +101,10 @@ ENTRY_NP(gic_splx) /* Update cpl */ str w0, [x1, #CI_CPL] - /* Clear saved restart address from cpu info */ - str xzr, [x1, #CI_SPLX_RESTART] + /* Verify the interruptible fast path */ + ldr x4, [x1, #CI_CC_NINTR] + cmp x3, x4 + b.ne .Lrestart /* Check for pending softints */ ldr w2, [x1, #CI_SOFTINTS] @@ -120,12 +116,13 @@ ENTRY_NP(gic_splx) ret .Lrestart: - /* Reload registers */ - mrs x3, tpidr_el1 /* get curlwp */ - ldr x1, [x3, #L_CPU] /* get curcpu */ - ldr w0, [x1, #CI_SPLX_SAVEDIPL] /* get newipl */ + /* + * We just branch to .Lagain, using a separate .Lrestart label + * so the conditional branch will be forward and therefore + * statically predicted not-taken. + */ b .Lagain - + .Lrestore: /* Clear saved restart address from cpu info */ str xzr, [x1, #CI_SPLX_RESTART]