Upgrade to 4.4.50-rt62
[kvmfornfv.git] / kernel / arch / x86 / kernel / cpu / perf_event_intel_cqm.c
diff --git a/kernel/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/kernel/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index a316ca9..fc704ed 100644
--- a/kernel/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/kernel/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -211,6 +211,20 @@ static void __put_rmid(u32 rmid)
 	list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
 }
 
+static void cqm_cleanup(void)
+{
+	int i;
+
+	if (!cqm_rmid_ptrs)
+		return;
+
+	for (i = 0; i < cqm_max_rmid; i++)
+		kfree(cqm_rmid_ptrs[i]);
+
+	kfree(cqm_rmid_ptrs);
+	cqm_rmid_ptrs = NULL;
+}
+
 static int intel_cqm_setup_rmid_cache(void)
 {
 	struct cqm_rmid_entry *entry;
@@ -218,7 +232,7 @@ static int intel_cqm_setup_rmid_cache(void)
 	int r = 0;
 
 	nr_rmids = cqm_max_rmid + 1;
-	cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
+	cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
 				nr_rmids, GFP_KERNEL);
 	if (!cqm_rmid_ptrs)
 		return -ENOMEM;
@@ -249,11 +263,9 @@ static int intel_cqm_setup_rmid_cache(void)
 	mutex_unlock(&cache_mutex);
 
 	return 0;
-fail:
-	while (r--)
-		kfree(cqm_rmid_ptrs[r]);
 
-	kfree(cqm_rmid_ptrs);
+fail:
+	cqm_cleanup();
 	return -ENOMEM;
 }
@@ -281,9 +293,13 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 	/*
 	 * Events that target same task are placed into the same cache group.
+	 * Mark it as a multi event group, so that we update ->count
+	 * for every event rather than just the group leader later.
 	 */
-	if (a->hw.target == b->hw.target)
+	if (a->hw.target == b->hw.target) {
+		b->hw.is_group_event = true;
 		return true;
+	}
 
 	/*
 	 * Are we an inherited event?
 	 */
@@ -849,6 +865,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 	bool conflict = false;
 	u32 rmid;
 
+	event->hw.is_group_event = false;
 	list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
 		rmid = iter->hw.cqm_rmid;
@@ -940,7 +957,9 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 		return __perf_event_count(event);
 
 	/*
-	 * Only the group leader gets to report values. This stops us
+	 * Only the group leader gets to report values except in case of
+	 * multiple events in the same group, we still need to read the
+	 * other events.This stops us
 	 * reporting duplicate values to userspace, and gives us a clear
 	 * rule for which task gets to report the values.
 	 *
@@ -948,7 +967,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 	 * specific packages - we forfeit that ability when we create
 	 * task events.
 	 */
-	if (!cqm_group_leader(event))
+	if (!cqm_group_leader(event) && !event->hw.is_group_event)
 		return 0;
 
 	/*
@@ -1315,7 +1334,7 @@ static const struct x86_cpu_id intel_cqm_match[] = {
 
 static int __init intel_cqm_init(void)
 {
-	char *str, scale[20];
+	char *str = NULL, scale[20];
 	int i, cpu, ret;
 
 	if (!x86_match_cpu(intel_cqm_match))
@@ -1375,16 +1394,25 @@ static int __init intel_cqm_init(void)
 		cqm_pick_event_reader(i);
 	}
 
-	__perf_cpu_notifier(intel_cqm_cpu_notifier);
-
 	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
-	if (ret)
+	if (ret) {
 		pr_err("Intel CQM perf registration failed: %d\n", ret);
-	else
-		pr_info("Intel CQM monitoring enabled\n");
+		goto out;
+	}
+
+	pr_info("Intel CQM monitoring enabled\n");
 
+	/*
+	 * Register the hot cpu notifier once we are sure cqm
+	 * is enabled to avoid notifier leak.
+	 */
+	__perf_cpu_notifier(intel_cqm_cpu_notifier);
 out:
 	cpu_notifier_register_done();
+	if (ret) {
+		kfree(str);
+		cqm_cleanup();
+	}
 
 	return ret;
 }
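
The error-handling shape these hunks converge on is a common kernel idiom: allocate zero-initialised storage up front, provide one cleanup helper that is safe to call even when the table is only partially populated, and funnel every failure through a single label that runs it. Below is a minimal userspace sketch of that same shape, not kernel code and not part of the patch; names such as table_init, table_cleanup and NR_SLOTS are purely illustrative.

/* Sketch of the allocate / single-cleanup / fail-label pattern (illustrative only). */
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 8

static int **slots;	/* plays the role of cqm_rmid_ptrs */

static void table_cleanup(void)
{
	int i;

	if (!slots)
		return;

	/* free(NULL) is a no-op, so unpopulated slots are harmless. */
	for (i = 0; i < NR_SLOTS; i++)
		free(slots[i]);

	free(slots);
	slots = NULL;
}

static int table_init(void)
{
	int i;

	/* calloc zeroes the pointer array, mirroring the kmalloc->kzalloc
	 * change: table_cleanup() can run safely even if we bail out early. */
	slots = calloc(NR_SLOTS, sizeof(*slots));
	if (!slots)
		return -1;

	for (i = 0; i < NR_SLOTS; i++) {
		slots[i] = malloc(sizeof(**slots));
		if (!slots[i])
			goto fail;
		*slots[i] = i;
	}
	return 0;

fail:
	/* One cleanup path instead of an open-coded while (i--) loop. */
	table_cleanup();
	return -1;
}

int main(void)
{
	if (table_init()) {
		fprintf(stderr, "init failed\n");
		return 1;
	}
	printf("slot 3 holds %d\n", *slots[3]);
	table_cleanup();
	return 0;
}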