Skip to content

Commit 4d13be8

Browse files
Alexander Antonov authored and Peter Zijlstra committed
perf/x86/intel/uncore: Generalize IIO topology support
The current implementation of uncore mapping doesn't support different types of uncore PMUs that have their own topology context. This patch generalizes the Intel uncore topology implementation so that support for new uncore blocks can be introduced easily. Signed-off-by: Alexander Antonov <alexander.antonov@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Kan Liang <kan.liang@linux.intel.com> Link: https://lore.kernel.org/r/20221117122833.3103580-2-alexander.antonov@linux.intel.com
1 parent bf480f9 commit 4d13be8

2 files changed

Lines changed: 122 additions & 44 deletions

File tree

arch/x86/events/intel/uncore.h

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ struct intel_uncore_type {
8888
* to identify which platform component each PMON block of that type is
8989
* supposed to monitor.
9090
*/
91-
struct intel_uncore_topology *topology;
91+
struct intel_uncore_topology **topology;
9292
/*
9393
* Optional callbacks for managing mapping of Uncore units to PMONs
9494
*/
@@ -178,11 +178,19 @@ struct freerunning_counters {
178178
unsigned *box_offsets;
179179
};
180180

181-
/*
 * IIO-specific topology payload: identifies the PCI root bus an IIO
 * stack sits behind.
 */
struct uncore_iio_topology {
	/* PCI bus number of the stack's root bus */
	int pci_bus_no;
	/* PCI segment (domain) the bus belongs to */
	int segment;
};

/*
 * Generic per-PMON-block topology entry.  The anonymous union carries a
 * pointer to the type-specific payload; ->untyped is used for generic
 * allocation/free, the typed members (e.g. ->iio) for access.
 */
struct intel_uncore_topology {
	/* index of the PMON block (PMU) this entry describes */
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
	};
};
193+
186194
struct pci2phy_map {
187195
struct list_head list;
188196
int segment;

arch/x86/events/intel/uncore_snbep.c

Lines changed: 111 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -3699,19 +3699,26 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
36993699
.read_counter = uncore_msr_read_counter,
37003700
};
37013701

3702-
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3702+
static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
37033703
{
3704-
return pmu->type->topology[die].configuration >>
3705-
(pmu->pmu_idx * BUS_NUM_STRIDE);
3704+
int idx;
3705+
3706+
for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3707+
if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3708+
return &pmu->type->topology[die][idx];
3709+
}
3710+
3711+
return NULL;
37063712
}
37073713

37083714
static umode_t
37093715
pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
37103716
int die, int zero_bus_pmu)
37113717
{
37123718
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3719+
struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
37133720

3714-
return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3721+
return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
37153722
}
37163723

37173724
static umode_t
@@ -3727,9 +3734,10 @@ static ssize_t skx_iio_mapping_show(struct device *dev,
37273734
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
37283735
struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
37293736
long die = (long)ea->var;
3737+
struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
37303738

3731-
return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
3732-
skx_iio_stack(pmu, die));
3739+
return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3740+
pmut ? pmut->iio->pci_bus_no : 0);
37333741
}
37343742

37353743
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
@@ -3764,31 +3772,90 @@ static int die_to_cpu(int die)
37643772
return res;
37653773
}
37663774

3775+
enum {
3776+
IIO_TOPOLOGY_TYPE,
3777+
TOPOLOGY_MAX
3778+
};
3779+
3780+
static const size_t topology_size[TOPOLOGY_MAX] = {
3781+
sizeof(*((struct intel_uncore_topology *)NULL)->iio)
3782+
};
3783+
3784+
static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3785+
{
3786+
int die, idx;
3787+
struct intel_uncore_topology **topology;
3788+
3789+
if (!type->num_boxes)
3790+
return -EPERM;
3791+
3792+
topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3793+
if (!topology)
3794+
goto err;
3795+
3796+
for (die = 0; die < uncore_max_dies(); die++) {
3797+
topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3798+
if (!topology[die])
3799+
goto clear;
3800+
for (idx = 0; idx < type->num_boxes; idx++) {
3801+
topology[die][idx].untyped = kcalloc(type->num_boxes,
3802+
topology_size[topology_type],
3803+
GFP_KERNEL);
3804+
if (!topology[die][idx].untyped)
3805+
goto clear;
3806+
}
3807+
}
3808+
3809+
type->topology = topology;
3810+
3811+
return 0;
3812+
clear:
3813+
for (; die >= 0; die--) {
3814+
for (idx = 0; idx < type->num_boxes; idx++)
3815+
kfree(topology[die][idx].untyped);
3816+
kfree(topology[die]);
3817+
}
3818+
kfree(topology);
3819+
err:
3820+
return -ENOMEM;
3821+
}
3822+
3823+
static void pmu_free_topology(struct intel_uncore_type *type)
3824+
{
3825+
int die, idx;
3826+
3827+
if (type->topology) {
3828+
for (die = 0; die < uncore_max_dies(); die++) {
3829+
for (idx = 0; idx < type->num_boxes; idx++)
3830+
kfree(type->topology[die][idx].untyped);
3831+
kfree(type->topology[die]);
3832+
}
3833+
kfree(type->topology);
3834+
type->topology = NULL;
3835+
}
3836+
}
3837+
37673838
static int skx_iio_get_topology(struct intel_uncore_type *type)
37683839
{
37693840
int die, ret = -EPERM;
3770-
3771-
type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3772-
GFP_KERNEL);
3773-
if (!type->topology)
3774-
return -ENOMEM;
3841+
u64 configuration;
3842+
int idx;
37753843

37763844
for (die = 0; die < uncore_max_dies(); die++) {
3777-
ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3778-
&type->topology[die].configuration);
3845+
ret = skx_msr_cpu_bus_read(die_to_cpu(die), &configuration);
37793846
if (ret)
37803847
break;
37813848

37823849
ret = uncore_die_to_segment(die);
37833850
if (ret < 0)
37843851
break;
37853852

3786-
type->topology[die].segment = ret;
3787-
}
3788-
3789-
if (ret < 0) {
3790-
kfree(type->topology);
3791-
type->topology = NULL;
3853+
for (idx = 0; idx < type->num_boxes; idx++) {
3854+
type->topology[die][idx].pmu_idx = idx;
3855+
type->topology[die][idx].iio->segment = ret;
3856+
type->topology[die][idx].iio->pci_bus_no =
3857+
(configuration >> (idx * BUS_NUM_STRIDE)) & 0xff;
3858+
}
37923859
}
37933860

37943861
return ret;
@@ -3804,18 +3871,24 @@ static const struct attribute_group *skx_iio_attr_update[] = {
38043871
};
38053872

38063873
static int
3807-
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3874+
pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
3875+
ssize_t (*show)(struct device*, struct device_attribute*, char*),
3876+
int topology_type)
38083877
{
38093878
char buf[64];
38103879
int ret;
38113880
long die = -1;
38123881
struct attribute **attrs = NULL;
38133882
struct dev_ext_attribute *eas = NULL;
38143883

3815-
ret = type->get_topology(type);
3884+
ret = pmu_alloc_topology(type, topology_type);
38163885
if (ret < 0)
38173886
goto clear_attr_update;
38183887

3888+
ret = type->get_topology(type);
3889+
if (ret < 0)
3890+
goto clear_topology;
3891+
38193892
ret = -ENOMEM;
38203893

38213894
/* One more for NULL. */
@@ -3828,13 +3901,13 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
38283901
goto clear_attrs;
38293902

38303903
for (die = 0; die < uncore_max_dies(); die++) {
3831-
sprintf(buf, "die%ld", die);
3904+
snprintf(buf, sizeof(buf), "die%ld", die);
38323905
sysfs_attr_init(&eas[die].attr.attr);
38333906
eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
38343907
if (!eas[die].attr.attr.name)
38353908
goto err;
38363909
eas[die].attr.attr.mode = 0444;
3837-
eas[die].attr.show = skx_iio_mapping_show;
3910+
eas[die].attr.show = show;
38383911
eas[die].attr.store = NULL;
38393912
eas[die].var = (void *)die;
38403913
attrs[die] = &eas[die].attr.attr;
@@ -3849,14 +3922,14 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
38493922
clear_attrs:
38503923
kfree(attrs);
38513924
clear_topology:
3852-
kfree(type->topology);
3925+
pmu_free_topology(type);
38533926
clear_attr_update:
38543927
type->attr_update = NULL;
38553928
return ret;
38563929
}
38573930

38583931
static void
3859-
pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3932+
pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
38603933
{
38613934
struct attribute **attr = ag->attrs;
38623935

@@ -3868,7 +3941,13 @@ pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *
38683941
kfree(attr_to_ext_attr(*ag->attrs));
38693942
kfree(ag->attrs);
38703943
ag->attrs = NULL;
3871-
kfree(type->topology);
3944+
pmu_free_topology(type);
3945+
}
3946+
3947+
static int
3948+
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3949+
{
3950+
return pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
38723951
}
38733952

38743953
static int skx_iio_set_mapping(struct intel_uncore_type *type)
@@ -3878,7 +3957,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
38783957

38793958
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
38803959
{
3881-
pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
3960+
pmu_cleanup_mapping(type, &skx_iio_mapping_group);
38823961
}
38833962

38843963
static struct intel_uncore_type skx_uncore_iio = {
@@ -4461,11 +4540,6 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map
44614540
int die, stack_id, ret = -EPERM;
44624541
struct pci_dev *dev = NULL;
44634542

4464-
type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
4465-
GFP_KERNEL);
4466-
if (!type->topology)
4467-
return -ENOMEM;
4468-
44694543
while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
44704544
ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
44714545
if (ret) {
@@ -4483,13 +4557,9 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map
44834557
/* Convert stack id from SAD_CONTROL to PMON notation. */
44844558
stack_id = sad_pmon_mapping[stack_id];
44854559

4486-
((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
4487-
type->topology[die].segment = pci_domain_nr(dev->bus);
4488-
}
4489-
4490-
if (ret) {
4491-
kfree(type->topology);
4492-
type->topology = NULL;
4560+
type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4561+
type->topology[die][stack_id].pmu_idx = stack_id;
4562+
type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
44934563
}
44944564

44954565
return ret;
@@ -4526,7 +4596,7 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
45264596

45274597
static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
45284598
{
4529-
pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
4599+
pmu_cleanup_mapping(type, &snr_iio_mapping_group);
45304600
}
45314601

45324602
static struct event_constraint snr_uncore_iio_constraints[] = {
@@ -5144,7 +5214,7 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
51445214

51455215
static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
51465216
{
5147-
pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
5217+
pmu_cleanup_mapping(type, &icx_iio_mapping_group);
51485218
}
51495219

51505220
static struct intel_uncore_type icx_uncore_iio = {

0 commit comments

Comments
 (0)