@@ -209,10 +209,14 @@ static int amd_uncore_event_init(struct perf_event *event)
209209{
210210 struct amd_uncore * uncore ;
211211 struct hw_perf_event * hwc = & event -> hw ;
212+ u64 event_mask = AMD64_RAW_EVENT_MASK_NB ;
212213
213214 if (event -> attr .type != event -> pmu -> type )
214215 return - ENOENT ;
215216
217+ if (pmu_version >= 2 && is_nb_event (event ))
218+ event_mask = AMD64_PERFMON_V2_RAW_EVENT_MASK_NB ;
219+
216220 /*
217221 * NB and Last level cache counters (MSRs) are shared across all cores
218222 * that share the same NB / Last level cache. On family 16h and below,
@@ -221,7 +225,7 @@ static int amd_uncore_event_init(struct perf_event *event)
221225 * out. So we do not support sampling and per-thread events via
222226 * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
223227 */
224- hwc -> config = event -> attr .config & AMD64_RAW_EVENT_MASK_NB ;
228+ hwc -> config = event -> attr .config & event_mask ;
225229 hwc -> idx = -1 ;
226230
227231 if (event -> cpu < 0 )
@@ -300,8 +304,10 @@ static struct device_attribute format_attr_##_var = \
300304
301305DEFINE_UNCORE_FORMAT_ATTR (event12 , event , "config:0-7,32-35" );
302306DEFINE_UNCORE_FORMAT_ATTR (event14 , event , "config:0-7,32-35,59-60" ); /* F17h+ DF */
307+ DEFINE_UNCORE_FORMAT_ATTR (event14v2 , event , "config:0-7,32-37" ); /* PerfMonV2 DF */
303308DEFINE_UNCORE_FORMAT_ATTR (event8 , event , "config:0-7" ); /* F17h+ L3 */
304- DEFINE_UNCORE_FORMAT_ATTR (umask , umask , "config:8-15" );
309+ DEFINE_UNCORE_FORMAT_ATTR (umask8 , umask , "config:8-15" );
310+ DEFINE_UNCORE_FORMAT_ATTR (umask12 , umask , "config:8-15,24-27" ); /* PerfMonV2 DF */
305311DEFINE_UNCORE_FORMAT_ATTR (coreid , coreid , "config:42-44" ); /* F19h L3 */
306312DEFINE_UNCORE_FORMAT_ATTR (slicemask , slicemask , "config:48-51" ); /* F17h L3 */
307313DEFINE_UNCORE_FORMAT_ATTR (threadmask8 , threadmask , "config:56-63" ); /* F17h L3 */
@@ -313,14 +319,14 @@ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */
313319/* Common DF and NB attributes */
314320static struct attribute * amd_uncore_df_format_attr [] = {
315321 & format_attr_event12 .attr , /* event */
316- & format_attr_umask .attr , /* umask */
322+ & format_attr_umask8 .attr , /* umask */
317323 NULL ,
318324};
319325
320326/* Common L2 and L3 attributes */
321327static struct attribute * amd_uncore_l3_format_attr [] = {
322328 & format_attr_event12 .attr , /* event */
323- & format_attr_umask .attr , /* umask */
329+ & format_attr_umask8 .attr , /* umask */
324330 NULL , /* threadmask */
325331 NULL ,
326332};
@@ -659,8 +665,12 @@ static int __init amd_uncore_init(void)
659665 }
660666
661667 if (boot_cpu_has (X86_FEATURE_PERFCTR_NB )) {
662- if (boot_cpu_data .x86 >= 0x17 )
668+ if (pmu_version >= 2 ) {
669+ * df_attr ++ = & format_attr_event14v2 .attr ;
670+ * df_attr ++ = & format_attr_umask12 .attr ;
671+ } else if (boot_cpu_data .x86 >= 0x17 ) {
663672 * df_attr = & format_attr_event14 .attr ;
673+ }
664674
665675 amd_uncore_nb = alloc_percpu (struct amd_uncore * );
666676 if (!amd_uncore_nb ) {
@@ -686,11 +696,11 @@ static int __init amd_uncore_init(void)
686696 if (boot_cpu_has (X86_FEATURE_PERFCTR_LLC )) {
687697 if (boot_cpu_data .x86 >= 0x19 ) {
688698 * l3_attr ++ = & format_attr_event8 .attr ;
689- * l3_attr ++ = & format_attr_umask .attr ;
699+ * l3_attr ++ = & format_attr_umask8 .attr ;
690700 * l3_attr ++ = & format_attr_threadmask2 .attr ;
691701 } else if (boot_cpu_data .x86 >= 0x17 ) {
692702 * l3_attr ++ = & format_attr_event8 .attr ;
693- * l3_attr ++ = & format_attr_umask .attr ;
703+ * l3_attr ++ = & format_attr_umask8 .attr ;
694704 * l3_attr ++ = & format_attr_threadmask8 .attr ;
695705 }
696706