Skip to content

Commit 27120e7

Browse files
committed
srcu: Check for consistent per-CPU per-srcu_struct NMI safety
This commit adds runtime checks to verify that a given srcu_struct uses consistent NMI-safe (or not) read-side primitives on a per-CPU basis.

Link: https://lore.kernel.org/all/20220910221947.171557773@linutronix.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Petr Mladek <pmladek@suse.com>
1 parent 2e83b87 commit 27120e7

3 files changed

Lines changed: 43 additions & 12 deletions

File tree

include/linux/srcu.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -65,14 +65,14 @@ unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
6565
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
6666

6767
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
68-
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
69-
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
68+
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe) __acquires(ssp);
69+
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe) __releases(ssp);
7070
#else
71-
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
71+
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
7272
{
7373
return __srcu_read_lock(ssp);
7474
}
75-
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
75+
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
7676
{
7777
__srcu_read_unlock(ssp, idx);
7878
}
@@ -192,7 +192,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
192192
int retval;
193193

194194
if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
195-
retval = __srcu_read_lock_nmisafe(ssp);
195+
retval = __srcu_read_lock_nmisafe(ssp, true);
196196
else
197197
retval = __srcu_read_lock(ssp);
198198
rcu_lock_acquire(&(ssp)->dep_map);
@@ -237,7 +237,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
237237
WARN_ON_ONCE(idx & ~0x1);
238238
rcu_lock_release(&(ssp)->dep_map);
239239
if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
240-
__srcu_read_unlock_nmisafe(ssp, idx);
240+
__srcu_read_unlock_nmisafe(ssp, idx, true);
241241
else
242242
__srcu_read_unlock(ssp, idx);
243243
}

include/linux/srcutree.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ struct srcu_data {
2525
/* Read-side state. */
2626
atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */
2727
atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */
28+
int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */
2829

2930
/* Update-side state. */
3031
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -42,6 +43,10 @@ struct srcu_data {
4243
struct srcu_struct *ssp;
4344
};
4445

46+
#define SRCU_NMI_UNKNOWN 0x0
47+
#define SRCU_NMI_NMI_UNSAFE 0x1
48+
#define SRCU_NMI_NMI_SAFE 0x2
49+
4550
/*
4651
* Node in SRCU combining tree, similar in function to rcu_data.
4752
*/

kernel/rcu/srcutree.c

Lines changed: 32 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -626,6 +626,26 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
626626
}
627627
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
628628

629+
/*
630+
* Check for consistent NMI safety.
631+
*/
632+
static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
633+
{
634+
int nmi_safe_mask = 1 << nmi_safe;
635+
int old_nmi_safe_mask;
636+
struct srcu_data *sdp;
637+
638+
if (!IS_ENABLED(CONFIG_PROVE_RCU))
639+
return;
640+
sdp = raw_cpu_ptr(ssp->sda);
641+
old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
642+
if (!old_nmi_safe_mask) {
643+
WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
644+
return;
645+
}
646+
WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
647+
}
648+
629649
/*
630650
* Counts the new reader in the appropriate per-CPU element of the
631651
* srcu_struct.
@@ -638,6 +658,7 @@ int __srcu_read_lock(struct srcu_struct *ssp)
638658
idx = READ_ONCE(ssp->srcu_idx) & 0x1;
639659
this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
640660
smp_mb(); /* B */ /* Avoid leaking the critical section. */
661+
srcu_check_nmi_safety(ssp, false);
641662
return idx;
642663
}
643664
EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -651,6 +672,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
651672
{
652673
smp_mb(); /* C */ /* Avoid leaking the critical section. */
653674
this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
675+
srcu_check_nmi_safety(ssp, false);
654676
}
655677
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
656678

@@ -661,14 +683,16 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
661683
* srcu_struct, but in an NMI-safe manner using RMW atomics.
662684
* Returns an index that must be passed to the matching srcu_read_unlock().
663685
*/
664-
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
686+
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
665687
{
666688
int idx;
667689
struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
668690

669691
idx = READ_ONCE(ssp->srcu_idx) & 0x1;
670692
atomic_long_inc(&sdp->srcu_lock_count[idx]);
671693
smp_mb__after_atomic(); /* B */ /* Avoid leaking the critical section. */
694+
if (chknmisafe)
695+
srcu_check_nmi_safety(ssp, true);
672696
return idx;
673697
}
674698
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -678,12 +702,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
678702
* element of the srcu_struct. Note that this may well be a different
679703
* CPU than that which was incremented by the corresponding srcu_read_lock().
680704
*/
681-
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
705+
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
682706
{
683707
struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
684708

685709
smp_mb__before_atomic(); /* C */ /* Avoid leaking the critical section. */
686710
atomic_long_inc(&sdp->srcu_unlock_count[idx]);
711+
if (chknmisafe)
712+
srcu_check_nmi_safety(ssp, true);
687713
}
688714
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
689715

@@ -1125,7 +1151,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11251151
int ss_state;
11261152

11271153
check_init_srcu_struct(ssp);
1128-
idx = __srcu_read_lock_nmisafe(ssp);
1154+
idx = __srcu_read_lock_nmisafe(ssp, false);
11291155
ss_state = smp_load_acquire(&ssp->srcu_size_state);
11301156
if (ss_state < SRCU_SIZE_WAIT_CALL)
11311157
sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1158,7 +1184,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11581184
srcu_funnel_gp_start(ssp, sdp, s, do_norm);
11591185
else if (needexp)
11601186
srcu_funnel_exp_start(ssp, sdp_mynode, s);
1161-
__srcu_read_unlock_nmisafe(ssp, idx);
1187+
__srcu_read_unlock_nmisafe(ssp, idx, false);
11621188
return s;
11631189
}
11641190

@@ -1462,13 +1488,13 @@ void srcu_barrier(struct srcu_struct *ssp)
14621488
/* Initial count prevents reaching zero until all CBs are posted. */
14631489
atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
14641490

1465-
idx = __srcu_read_lock_nmisafe(ssp);
1491+
idx = __srcu_read_lock_nmisafe(ssp, false);
14661492
if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
14671493
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
14681494
else
14691495
for_each_possible_cpu(cpu)
14701496
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1471-
__srcu_read_unlock_nmisafe(ssp, idx);
1497+
__srcu_read_unlock_nmisafe(ssp, idx, false);
14721498

14731499
/* Remove the initial count, at which point reaching zero can happen. */
14741500
if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))

0 commit comments

Comments
 (0)