@@ -707,14 +707,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	/*
 	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
 	 * this case when a previous update_rq_clock() happened inside a
-	 * {soft,}irq region.
+	 * {soft,}IRQ region.
 	 *
 	 * When this happens, we stop ->clock_task and only update the
 	 * prev_irq_time stamp to account for the part that fit, so that a next
 	 * update will consume the rest. This ensures ->clock_task is
 	 * monotonic.
 	 *
-	 * It does however cause some slight miss-attribution of {soft,}irq
+	 * It does however cause some slight miss-attribution of {soft,}IRQ
 	 * time, a more accurate solution would be to update the irq_time using
 	 * the current rq->clock timestamp, except that would require using
 	 * atomic ops.
@@ -827,7 +827,7 @@ static void __hrtick_start(void *arg)
 /*
  * Called to set the hrtick timer state.
  *
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
  */
 void hrtick_start(struct rq *rq, u64 delay)
 {
@@ -851,7 +851,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 /*
  * Called to set the hrtick timer state.
  *
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
  */
 void hrtick_start(struct rq *rq, u64 delay)
 {
@@ -885,7 +885,7 @@ static inline void hrtick_rq_init(struct rq *rq)
 #endif /* CONFIG_SCHED_HRTICK */
 
 /*
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * try_cmpxchg based fetch_or() macro so it works for different integer types:
  */
 #define fetch_or(ptr, mask)						\
 	({								\
@@ -1082,7 +1082,7 @@ void resched_cpu(int cpu)
  *
  * We don't do similar optimization for completely idle system, as
  * selecting an idle CPU will add more delays to the timers than intended
- * (as that CPU's timer base may not be uptodate wrt jiffies etc).
+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
  */
 int get_nohz_timer_target(void)
 {
@@ -1142,7 +1142,7 @@ static void wake_up_idle_cpu(int cpu)
 	 * nohz functions that would need to follow TIF_NR_POLLING
 	 * clearing:
 	 *
-	 * - On most archs, a simple fetch_or on ti::flags with a
+	 * - On most architectures, a simple fetch_or on ti::flags with a
 	 *   "0" value would be enough to know if an IPI needs to be sent.
 	 *
 	 * - x86 needs to perform a last need_resched() check between
@@ -1651,7 +1651,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 	rq_clamp = uclamp_rq_get(rq, clamp_id);
 	/*
 	 * Defensive programming: this should never happen. If it happens,
-	 * e.g. due to future modification, warn and fixup the expected value.
+	 * e.g. due to future modification, warn and fix up the expected value.
 	 */
 	SCHED_WARN_ON(bucket->value > rq_clamp);
 	if (bucket->value >= rq_clamp) {
@@ -2227,7 +2227,7 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 		return;
 
 	/*
-	 * Violates locking rules! see comment in __do_set_cpus_allowed().
+	 * Violates locking rules! See comment in __do_set_cpus_allowed().
 	 */
 	__do_set_cpus_allowed(p, &ac);
 }
@@ -2394,7 +2394,7 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 }
 
 /*
- * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * migration_cpu_stop - this will be executed by a high-prio stopper thread
  * and performs thread migration by bumping thread off CPU then
  * 'pushing' onto another runqueue.
  */
@@ -3694,8 +3694,8 @@ void sched_ttwu_pending(void *arg)
 	 * it is possible for select_idle_siblings() to stack a number
 	 * of tasks on this CPU during that window.
 	 *
-	 * It is ok to clear ttwu_pending when another task pending.
-	 * We will receive IPI after local irq enabled and then enqueue it.
+	 * It is OK to clear ttwu_pending when another task pending.
+	 * We will receive IPI after local IRQ enabled and then enqueue it.
 	 * Since now nr_running > 0, idle_cpu() will always get correct result.
 	 */
 	WRITE_ONCE(rq->ttwu_pending, 0);
@@ -5017,7 +5017,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  *
  * The context switch have flipped the stack from under us and restored the
  * local variables which were saved when this task called schedule() in the
- * past. prev == current is still correct but we need to recalculate this_rq
+ * past. 'prev == current' is still correct but we need to recalculate this_rq
  * because prev may have moved to another CPU.
  */
 static struct rq *finish_task_switch(struct task_struct *prev)
@@ -5363,7 +5363,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	/*
 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
 	 * So we have a optimization chance when the task's delta_exec is 0.
-	 * Reading ->on_cpu is racy, but this is ok.
+	 * Reading ->on_cpu is racy, but this is OK.
 	 *
 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
 	 * If we race with it entering CPU, unaccounted time is 0. This is
@@ -6637,7 +6637,7 @@ void __sched schedule_idle(void)
 {
 	/*
 	 * As this skips calling sched_submit_work(), which the idle task does
-	 * regardless because that function is a nop when the task is in a
+	 * regardless because that function is a NOP when the task is in a
 	 * TASK_RUNNING state, make sure this isn't used someplace that the
 	 * current task can be in any other state. Note, idle is always in the
 	 * TASK_RUNNING state.
@@ -6832,9 +6832,9 @@ EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
 
 /*
  * This is the entry point to schedule() from kernel preemption
- * off of irq context.
- * Note, that this is called and return with irqs disabled. This will
- * protect us against recursive calling from irq.
+ * off of IRQ context.
+ * Note, that this is called and return with IRQs disabled. This will
+ * protect us against recursive calling from IRQ contexts.
  */
 asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
@@ -6953,7 +6953,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 		goto out_unlock;
 
 	/*
-	 * Idle task boosting is a nono in general. There is one
+	 * Idle task boosting is a no-no in general. There is one
 	 * exception, when PREEMPT_RT and NOHZ is active:
 	 *
 	 * The idle task calls get_next_timer_interrupt() and holds
@@ -7356,11 +7356,11 @@ PREEMPT_MODEL_ACCESSOR(none);
 PREEMPT_MODEL_ACCESSOR(voluntary);
 PREEMPT_MODEL_ACCESSOR(full);
 
-#else /* !CONFIG_PREEMPT_DYNAMIC */
+#else /* !CONFIG_PREEMPT_DYNAMIC: */
 
 static inline void preempt_dynamic_init(void) { }
 
-#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 
 int io_schedule_prepare(void)
 {
@@ -7970,7 +7970,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 * Specifically, we rely on ttwu to no longer target this CPU, see
 	 * ttwu_queue_cond() and is_cpu_allowed().
 	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
+	 * Do sync before park smpboot threads to take care the RCU boost case.
 	 */
 	synchronize_rcu();
 
@@ -8045,7 +8045,7 @@ int sched_cpu_wait_empty(unsigned int cpu)
  * Since this CPU is going 'away' for a while, fold any nr_active delta we
  * might have. Called from the CPU stopper task after ensuring that the
  * stopper is the last running task on the CPU, so nr_active count is
- * stable. We need to take the teardown thread which is calling this into
+ * stable. We need to take the tear-down thread which is calling this into
  * account, so we hand in adjust = 1 to the load calculation.
  *
  * Also see the comment "Global load-average calculations".
@@ -8239,7 +8239,7 @@ void __init sched_init(void)
 		/*
 		 * How much CPU bandwidth does root_task_group get?
 		 *
-		 * In case of task-groups formed thr' the cgroup filesystem, it
+		 * In case of task-groups formed through the cgroup filesystem, it
 		 * gets 100% of the CPU resources in the system. This overall
 		 * system CPU resource is divided among the tasks of
 		 * root_task_group and its child task-groups in a fair manner,
@@ -8541,7 +8541,7 @@ void normalize_rt_tasks(void)
 
 #if defined(CONFIG_KGDB_KDB)
 /*
- * These functions are only useful for kdb.
+ * These functions are only useful for KDB.
  *
  * They can only be called when the whole system has been
  * stopped - every CPU needs to be quiescent, and no scheduling
@@ -8649,7 +8649,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 	online_fair_sched_group(tg);
 }
 
-/* rcu callback to free various structures associated with a task group */
+/* RCU callback to free various structures associated with a task group */
 static void sched_unregister_group_rcu(struct rcu_head *rhp)
 {
 	/* Now it should be safe to free those cfs_rqs: */
@@ -9767,10 +9767,10 @@ const int sched_prio_to_weight[40] = {
 };
 
 /*
- * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
+ * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
  *
  * In cases where the weight does not change often, we can use the
- * precalculated inverse to speed up arithmetics by turning divisions
+ * pre-calculated inverse to speed up arithmetics by turning divisions
  * into multiplications:
  */
 const u32 sched_prio_to_wmult[40] = {
@@ -10026,16 +10026,16 @@ void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
 	/*
 	 * Move the src cid if the dst cid is unset. This keeps id
 	 * allocation closest to 0 in cases where few threads migrate around
-	 * many cpus.
+	 * many CPUs.
 	 *
 	 * If destination cid is already set, we may have to just clear
 	 * the src cid to ensure compactness in frequent migrations
 	 * scenarios.
 	 *
 	 * It is not useful to clear the src cid when the number of threads is
-	 * greater or equal to the number of allowed cpus, because user-space
+	 * greater or equal to the number of allowed CPUs, because user-space
 	 * can expect that the number of allowed cids can reach the number of
-	 * allowed cpus.
+	 * allowed CPUs.
 	 */
 	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
 	dst_cid = READ_ONCE(dst_pcpu_cid->cid);