Skip to content

Commit b1e6c41

Browse files
committed
Merge branch 'rework/preempt-legacy-kthread' into for-linus
2 parents 2d786a5 + 1bc9a28 commit b1e6c41

1 file changed

Lines changed: 118 additions & 66 deletions

File tree

kernel/printk/printk.c

Lines changed: 118 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -3134,104 +3134,147 @@ static inline void printk_kthreads_check_locked(void) { }
31343134

31353135
#endif /* CONFIG_PRINTK */
31363136

3137+
31373138
/*
3138-
* Print out all remaining records to all consoles.
3139+
* Print out one record for each console.
31393140
*
31403141
* @do_cond_resched is set by the caller. It can be true only in schedulable
31413142
* context.
31423143
*
31433144
* @next_seq is set to the sequence number after the last available record.
3144-
* The value is valid only when this function returns true. It means that all
3145-
* usable consoles are completely flushed.
3145+
* The value is valid only when all usable consoles were flushed. This is
3146+
* the case when the function returns true (it could do the job) and the @try_again
3147+
* parameter is set to false; see below.
31463148
*
31473149
* @handover will be set to true if a printk waiter has taken over the
31483150
* console_lock, in which case the caller is no longer holding the
31493151
* console_lock. Otherwise it is set to false.
31503152
*
3151-
* Returns true when there was at least one usable console and all messages
3152-
* were flushed to all usable consoles. A returned false informs the caller
3153-
* that everything was not flushed (either there were no usable consoles or
3154-
* another context has taken over printing or it is a panic situation and this
3155-
* is not the panic CPU). Regardless the reason, the caller should assume it
3156-
* is not useful to immediately try again.
3153+
* @try_again will be set to true when it still makes sense to call this
3154+
* function again, i.e. the function could do the job (see the return value)
3155+
* and some consoles are still making progress.
3156+
*
3157+
* Returns true when the function could do the job. Some consoles are usable,
3158+
* and there was no takeover and no panic_on_other_cpu().
31573159
*
31583160
* Requires the console_lock.
31593161
*/
3160-
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
3162+
static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
3163+
bool *try_again)
31613164
{
31623165
struct console_flush_type ft;
31633166
bool any_usable = false;
31643167
struct console *con;
3165-
bool any_progress;
31663168
int cookie;
31673169

3168-
*next_seq = 0;
3169-
*handover = false;
3170+
*try_again = false;
31703171

3171-
do {
3172-
any_progress = false;
3172+
printk_get_console_flush_type(&ft);
31733173

3174-
printk_get_console_flush_type(&ft);
3174+
cookie = console_srcu_read_lock();
3175+
for_each_console_srcu(con) {
3176+
short flags = console_srcu_read_flags(con);
3177+
u64 printk_seq;
3178+
bool progress;
31753179

3176-
cookie = console_srcu_read_lock();
3177-
for_each_console_srcu(con) {
3178-
short flags = console_srcu_read_flags(con);
3179-
u64 printk_seq;
3180-
bool progress;
3180+
/*
3181+
* console_flush_one_record() is only responsible for
3182+
* nbcon consoles when the nbcon consoles cannot print via
3183+
* their atomic or threaded flushing.
3184+
*/
3185+
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3186+
continue;
31813187

3182-
/*
3183-
* console_flush_all() is only responsible for nbcon
3184-
* consoles when the nbcon consoles cannot print via
3185-
* their atomic or threaded flushing.
3186-
*/
3187-
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3188-
continue;
3188+
if (!console_is_usable(con, flags, !do_cond_resched))
3189+
continue;
3190+
any_usable = true;
31893191

3190-
if (!console_is_usable(con, flags, !do_cond_resched))
3191-
continue;
3192-
any_usable = true;
3192+
if (flags & CON_NBCON) {
3193+
progress = nbcon_legacy_emit_next_record(con, handover, cookie,
3194+
!do_cond_resched);
3195+
printk_seq = nbcon_seq_read(con);
3196+
} else {
3197+
progress = console_emit_next_record(con, handover, cookie);
3198+
printk_seq = con->seq;
3199+
}
31933200

3194-
if (flags & CON_NBCON) {
3195-
progress = nbcon_legacy_emit_next_record(con, handover, cookie,
3196-
!do_cond_resched);
3197-
printk_seq = nbcon_seq_read(con);
3198-
} else {
3199-
progress = console_emit_next_record(con, handover, cookie);
3200-
printk_seq = con->seq;
3201-
}
3201+
/*
3202+
* If a handover has occurred, the SRCU read lock
3203+
* is already released.
3204+
*/
3205+
if (*handover)
3206+
goto fail;
32023207

3203-
/*
3204-
* If a handover has occurred, the SRCU read lock
3205-
* is already released.
3206-
*/
3207-
if (*handover)
3208-
return false;
3208+
/* Track the next of the highest seq flushed. */
3209+
if (printk_seq > *next_seq)
3210+
*next_seq = printk_seq;
32093211

3210-
/* Track the next of the highest seq flushed. */
3211-
if (printk_seq > *next_seq)
3212-
*next_seq = printk_seq;
3212+
if (!progress)
3213+
continue;
32133214

3214-
if (!progress)
3215-
continue;
3216-
any_progress = true;
3215+
/*
3216+
* A usable console made progress. There might still be
3217+
* pending messages.
3218+
*/
3219+
*try_again = true;
32173220

3218-
/* Allow panic_cpu to take over the consoles safely. */
3219-
if (panic_on_other_cpu())
3220-
goto abandon;
3221+
/* Allow panic_cpu to take over the consoles safely. */
3222+
if (panic_on_other_cpu())
3223+
goto fail_srcu;
32213224

3222-
if (do_cond_resched)
3223-
cond_resched();
3224-
}
3225-
console_srcu_read_unlock(cookie);
3226-
} while (any_progress);
3225+
if (do_cond_resched)
3226+
cond_resched();
3227+
}
3228+
console_srcu_read_unlock(cookie);
32273229

32283230
return any_usable;
32293231

3230-
abandon:
3232+
fail_srcu:
32313233
console_srcu_read_unlock(cookie);
3234+
fail:
3235+
*try_again = false;
32323236
return false;
32333237
}
32343238

3239+
/*
3240+
* Print out all remaining records to all consoles.
3241+
*
3242+
* @do_cond_resched is set by the caller. It can be true only in schedulable
3243+
* context.
3244+
*
3245+
* @next_seq is set to the sequence number after the last available record.
3246+
* The value is valid only when this function returns true. It means that all
3247+
* usable consoles are completely flushed.
3248+
*
3249+
* @handover will be set to true if a printk waiter has taken over the
3250+
* console_lock, in which case the caller is no longer holding the
3251+
* console_lock. Otherwise it is set to false.
3252+
*
3253+
* Returns true when there was at least one usable console and all messages
3254+
* were flushed to all usable consoles. A returned false informs the caller
3255+
* that everything was not flushed (either there were no usable consoles or
3256+
* another context has taken over printing or it is a panic situation and this
3257+
* is not the panic CPU). Regardless the reason, the caller should assume it
3258+
* is not useful to immediately try again.
3259+
*
3260+
* Requires the console_lock.
3261+
*/
3262+
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
3263+
{
3264+
bool try_again;
3265+
bool ret;
3266+
3267+
*next_seq = 0;
3268+
*handover = false;
3269+
3270+
do {
3271+
ret = console_flush_one_record(do_cond_resched, next_seq,
3272+
handover, &try_again);
3273+
} while (try_again);
3274+
3275+
return ret;
3276+
}
3277+
32353278
static void __console_flush_and_unlock(void)
32363279
{
32373280
bool do_cond_resched;
@@ -3597,17 +3640,26 @@ static bool legacy_kthread_should_wakeup(void)
35973640

35983641
static int legacy_kthread_func(void *unused)
35993642
{
3600-
for (;;) {
3601-
wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
3643+
bool try_again;
3644+
3645+
wait_for_event:
3646+
wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
3647+
3648+
do {
3649+
bool handover = false;
3650+
u64 next_seq = 0;
36023651

36033652
if (kthread_should_stop())
3604-
break;
3653+
return 0;
36053654

36063655
console_lock();
3607-
__console_flush_and_unlock();
3608-
}
3656+
console_flush_one_record(true, &next_seq, &handover, &try_again);
3657+
if (!handover)
3658+
__console_unlock();
36093659

3610-
return 0;
3660+
} while (try_again);
3661+
3662+
goto wait_for_event;
36113663
}
36123664

36133665
static bool legacy_kthread_create(void)

0 commit comments

Comments
 (0)