@@ -626,6 +626,26 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
626626}
627627EXPORT_SYMBOL_GPL (cleanup_srcu_struct );
628628
629+ /*
630+ * Check for consistent NMI safety.
631+ */
632+ static void srcu_check_nmi_safety (struct srcu_struct * ssp , bool nmi_safe )
633+ {
634+ int nmi_safe_mask = 1 << nmi_safe ;
635+ int old_nmi_safe_mask ;
636+ struct srcu_data * sdp ;
637+
638+ if (!IS_ENABLED (CONFIG_PROVE_RCU ))
639+ return ;
640+ sdp = raw_cpu_ptr (ssp -> sda );
641+ old_nmi_safe_mask = READ_ONCE (sdp -> srcu_nmi_safety );
642+ if (!old_nmi_safe_mask ) {
643+ WRITE_ONCE (sdp -> srcu_nmi_safety , nmi_safe_mask );
644+ return ;
645+ }
646+ WARN_ONCE (old_nmi_safe_mask != nmi_safe_mask , "CPU %d old state %d new state %d\n" , sdp -> cpu , old_nmi_safe_mask , nmi_safe_mask );
647+ }
648+
629649/*
630650 * Counts the new reader in the appropriate per-CPU element of the
631651 * srcu_struct.
@@ -638,6 +658,7 @@ int __srcu_read_lock(struct srcu_struct *ssp)
638658 idx = READ_ONCE (ssp -> srcu_idx ) & 0x1 ;
639659 this_cpu_inc (ssp -> sda -> srcu_lock_count [idx ].counter );
640660 smp_mb (); /* B */ /* Avoid leaking the critical section. */
661+ srcu_check_nmi_safety (ssp , false);
641662 return idx ;
642663}
643664EXPORT_SYMBOL_GPL (__srcu_read_lock );
@@ -651,6 +672,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
651672{
652673 smp_mb (); /* C */ /* Avoid leaking the critical section. */
653674 this_cpu_inc (ssp -> sda -> srcu_unlock_count [idx ].counter );
675+ srcu_check_nmi_safety (ssp , false);
654676}
655677EXPORT_SYMBOL_GPL (__srcu_read_unlock );
656678
@@ -661,14 +683,16 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
661683 * srcu_struct, but in an NMI-safe manner using RMW atomics.
662684 * Returns an index that must be passed to the matching srcu_read_unlock().
663685 */
664- int __srcu_read_lock_nmisafe (struct srcu_struct * ssp )
686+ int __srcu_read_lock_nmisafe (struct srcu_struct * ssp , bool chknmisafe )
665687{
666688 int idx ;
667689 struct srcu_data * sdp = raw_cpu_ptr (ssp -> sda );
668690
669691 idx = READ_ONCE (ssp -> srcu_idx ) & 0x1 ;
670692 atomic_long_inc (& sdp -> srcu_lock_count [idx ]);
671693 smp_mb__after_atomic (); /* B */ /* Avoid leaking the critical section. */
694+ if (chknmisafe )
695+ srcu_check_nmi_safety (ssp , true);
672696 return idx ;
673697}
674698EXPORT_SYMBOL_GPL (__srcu_read_lock_nmisafe );
@@ -678,12 +702,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
678702 * element of the srcu_struct. Note that this may well be a different
679703 * CPU than that which was incremented by the corresponding srcu_read_lock().
680704 */
681- void __srcu_read_unlock_nmisafe (struct srcu_struct * ssp , int idx )
705+ void __srcu_read_unlock_nmisafe (struct srcu_struct * ssp , int idx , bool chknmisafe )
682706{
683707 struct srcu_data * sdp = raw_cpu_ptr (ssp -> sda );
684708
685709 smp_mb__before_atomic (); /* C */ /* Avoid leaking the critical section. */
686710 atomic_long_inc (& sdp -> srcu_unlock_count [idx ]);
711+ if (chknmisafe )
712+ srcu_check_nmi_safety (ssp , true);
687713}
688714EXPORT_SYMBOL_GPL (__srcu_read_unlock_nmisafe );
689715
@@ -1125,7 +1151,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11251151 int ss_state ;
11261152
11271153 check_init_srcu_struct (ssp );
1128- idx = __srcu_read_lock_nmisafe (ssp );
1154+ idx = __srcu_read_lock_nmisafe (ssp , false );
11291155 ss_state = smp_load_acquire (& ssp -> srcu_size_state );
11301156 if (ss_state < SRCU_SIZE_WAIT_CALL )
11311157 sdp = per_cpu_ptr (ssp -> sda , 0 );
@@ -1158,7 +1184,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11581184 srcu_funnel_gp_start (ssp , sdp , s , do_norm );
11591185 else if (needexp )
11601186 srcu_funnel_exp_start (ssp , sdp_mynode , s );
1161- __srcu_read_unlock_nmisafe (ssp , idx );
1187+ __srcu_read_unlock_nmisafe (ssp , idx , false );
11621188 return s ;
11631189}
11641190
@@ -1462,13 +1488,13 @@ void srcu_barrier(struct srcu_struct *ssp)
14621488 /* Initial count prevents reaching zero until all CBs are posted. */
14631489 atomic_set (& ssp -> srcu_barrier_cpu_cnt , 1 );
14641490
1465- idx = __srcu_read_lock_nmisafe (ssp );
1491+ idx = __srcu_read_lock_nmisafe (ssp , false );
14661492 if (smp_load_acquire (& ssp -> srcu_size_state ) < SRCU_SIZE_WAIT_BARRIER )
14671493 srcu_barrier_one_cpu (ssp , per_cpu_ptr (ssp -> sda , 0 ));
14681494 else
14691495 for_each_possible_cpu (cpu )
14701496 srcu_barrier_one_cpu (ssp , per_cpu_ptr (ssp -> sda , cpu ));
1471- __srcu_read_unlock_nmisafe (ssp , idx );
1497+ __srcu_read_unlock_nmisafe (ssp , idx , false );
14721498
14731499 /* Remove the initial count, at which point reaching zero can happen. */
14741500 if (atomic_dec_and_test (& ssp -> srcu_barrier_cpu_cnt ))