@@ -228,9 +228,10 @@ static u8 shmem_get_epp(struct amd_cpudata *cpudata)
 	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
 }
 
-static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
 			   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value, prev;
 
 	value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -242,6 +243,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = READ_ONCE(cpudata->perf);
+
+		trace_amd_pstate_epp_perf(cpudata->cpu,
+					  perf.highest_perf,
+					  epp,
+					  min_perf,
+					  max_perf,
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -256,31 +269,46 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	}
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
-	WRITE_ONCE(cpudata->epp_cached, epp);
+	if (epp != cpudata->epp_cached)
+		WRITE_ONCE(cpudata->epp_cached, epp);
 
 	return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
 					 u8 min_perf, u8 des_perf,
 					 u8 max_perf, u8 epp,
 					 bool fast_switch)
 {
-	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+	return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
 						   max_perf, epp, fast_switch);
 }
 
-static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value, prev;
 	int ret;
 
 	value = prev = READ_ONCE(cpudata->cppc_req_cached);
 	value &= ~AMD_CPPC_EPP_PERF_MASK;
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+					  epp,
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -299,15 +327,29 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 
 DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
 
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-	return static_call(amd_pstate_set_epp)(cpudata, epp);
+	return static_call(amd_pstate_set_epp)(policy, epp);
 }
 
-static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-	int ret;
+	struct amd_cpudata *cpudata = policy->driver_data;
 	struct cppc_perf_ctrls perf_ctrls;
+	int ret;
+
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+					  epp,
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  policy->boost_enabled,
+					  epp != cpudata->epp_cached);
+	}
 
 	if (epp == cpudata->epp_cached)
 		return 0;
@@ -339,17 +381,7 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
 		return -EBUSY;
 	}
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		union perf_cached perf = READ_ONCE(cpudata->perf);
-
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  epp,
-					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
-					  policy->boost_enabled);
-	}
-
-	return amd_pstate_set_epp(cpudata, epp);
+	return amd_pstate_set_epp(policy, epp);
 }
 
 static inline int msr_cppc_enable(bool enable)
@@ -492,15 +524,16 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
 			     u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	struct cppc_perf_ctrls perf_ctrls;
 	u64 value, prev;
 	int ret;
 
 	if (cppc_state == AMD_PSTATE_ACTIVE) {
-		int ret = shmem_set_epp(cpudata, epp);
+		int ret = shmem_set_epp(policy, epp);
 
 		if (ret)
 			return ret;
@@ -515,6 +548,18 @@ static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = READ_ONCE(cpudata->perf);
+
+		trace_amd_pstate_epp_perf(cpudata->cpu,
+					  perf.highest_perf,
+					  epp,
+					  min_perf,
+					  max_perf,
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -592,7 +637,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
 			cpudata->cpu, fast_switch);
 	}
 
-	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
+	amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
 }
 
 static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -1525,7 +1570,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 			return ret;
 		WRITE_ONCE(cpudata->cppc_req_cached, value);
 	}
-	ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+	ret = amd_pstate_set_epp(policy, cpudata->epp_default);
 	if (ret)
 		return ret;
 
@@ -1566,14 +1611,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 		epp = READ_ONCE(cpudata->epp_cached);
 
 	perf = READ_ONCE(cpudata->perf);
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, epp,
-					  perf.min_limit_perf,
-					  perf.max_limit_perf,
-					  policy->boost_enabled);
-	}
 
-	return amd_pstate_update_perf(cpudata, perf.min_limit_perf, 0U,
+	return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
 				      perf.max_limit_perf, epp, false);
 }
 
@@ -1605,20 +1644,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 
 static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
 {
-	struct amd_cpudata *cpudata = policy->driver_data;
-	union perf_cached perf = READ_ONCE(cpudata->perf);
 	int ret;
 
 	ret = amd_pstate_cppc_enable(true);
 	if (ret)
 		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  cpudata->epp_cached,
-					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-					  perf.highest_perf, policy->boost_enabled);
-	}
 
 	return amd_pstate_epp_update_limit(policy);
 }
@@ -1646,14 +1677,7 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 	if (cpudata->suspended)
 		return 0;
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  AMD_CPPC_EPP_BALANCE_POWERSAVE,
-					  perf.lowest_perf, perf.lowest_perf,
-					  policy->boost_enabled);
-	}
-
-	return amd_pstate_update_perf(cpudata, perf.lowest_perf, 0, perf.lowest_perf,
+	return amd_pstate_update_perf(policy, perf.lowest_perf, 0, perf.lowest_perf,
 				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }
 
0 commit comments