// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
26 #define IA32_QM_EVTSEL 0xC8D
27 #define IA32_QM_CTR 0xC8E
28 #define IA32_QM_ASSOC 0xC8F
29 #define IA32_QM_L3CA_START 0xC90
30 #define IA32_QM_L3CA_END 0xD0F
32 #define L3_CACHE_OCCUPANCY 1
33 #define L3_TOTAL_EXTERNAL_BANDWIDTH 2
34 #define L3_LOCAL_EXTERNAL_BANDWIDTH 3
36 static struct rdt_features rdt_features;
37 static int cat_features = 0;
48 static void cpuid(struct reg* r, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
51 : "=a" (r->eax), "=b" (r->ebx), "=c" (r->ecx), "=d" (r->edx)
52 : "a" (a), "b" (b), "c" (c), "d" (d));
55 void read_rdt_info(void)
62 cpuid(&r, 0x7, 0x0, 0x0, 0x0);
63 if ((r.ebx >> 12) & 1) {
64 plog_info("\tRDT-M. Supports Intel RDT Monitoring capability\n");
65 rdt_features.rdtm_supported = 1;
67 plog_info("\tDoes not support Intel RDT Monitoring capability\n");
70 if ((r.ebx >> 15) & 1) {
71 plog_info("\tRDT-A. Supports Intel RDT Allocation capability\n");
72 rdt_features.rdta_supported = 1;
74 plog_info("\tDoes not support Intel RDT Allocation capability\n");
77 cpuid(&r, 0xf, 0x0, 0x0, 0x0);
78 if ((r.edx >> 1) & 1) {
79 plog_info("\tSupports L3 Cache Intel RDT Monitoring\n");
80 rdt_features.cmt_supported = 1;
82 plog_info("\tIntel RDT Monitoring has %d maximum RMID\n", r.ebx);
83 rdt_features.rdtm_max_rmid = r.ebx;
85 cpuid(&r, 0xf, 0x0, 0x1, 0x0);
86 if ((r.edx >> 0) & 1) {
87 plog_info("\tSupports L3 occupancy monitoring\n");
88 rdt_features.cmt_supported = 1;
90 if ((r.edx >> 1) & 1) {
91 plog_info("\tSupports L3 Total bandwidth monitoring\n");
92 rdt_features.mbm_tot_supported = 1;
94 if ((r.edx >> 2) & 1) {
95 plog_info("\tSupports L3 Local bandwidth monitoring\n");
96 rdt_features.mbm_loc_supported = 1;
98 rdt_features.cmt_max_rmid = r.ecx;
99 rdt_features.upscaling_factor = r.ebx;
100 rdt_features.event_types = r.edx;
102 plog_info("\tL3 Cache Intel RDT Monitoring Capability has %d maximum RMID\n", r.ecx);
103 plog_info("\tUpscaling_factor = %d\n", rdt_features.upscaling_factor);
105 cpuid(&r, 0x10, 0x0, 0x0, 0x0);
106 if ((r.ebx >> 1) & 1) {
107 plog_info("\tSupports L3 Cache Allocation Technology\n");
108 rdt_features.l3_cat_supported = 1;
110 if ((r.ebx >> 2) & 1) {
111 plog_info("\tSupports L2 Cache Allocation Technology\n");
112 rdt_features.l2_cat_supported = 1;
114 if ((r.ebx >> 3) & 1) {
115 plog_info("\tSupports MBA Allocation Technology\n");
116 rdt_features.mba_supported = 1;
119 cpuid(&r, 0x10, 0x0, 0x1, 0x0);
120 if ((r.ecx >> 2) & 1)
121 plog_info("\tCode and Data Prioritization Technology supported\n");
122 plog_info("\tL3 Cache Allocation Technology Enumeration Highest COS number = %d\n", r.edx & 0xffff);
123 rdt_features.cat_max_rmid = r.edx & 0xffff;
124 rdt_features.cat_num_ways = r.eax + 1;
126 cpuid(&r, 0x10, 0x0, 0x2, 0x0);
127 plog_info("\tL2 Cache Allocation Technology Enumeration COS number = %d\n", r.edx & 0xffff);
129 cpuid(&r, 0x10, 0x0, 0x3, 0x0);
130 plog_info("\tMemory Bandwidth Allocation Enumeration COS number = %d\n", r.edx & 0xffff);
131 rdt_features.mba_max_rmid = r.ecx;
133 int mbm_is_supported(void)
135 return (rdt_features.rdtm_supported && rdt_features.mbm_tot_supported && rdt_features.mbm_loc_supported);
138 int mba_is_supported(void)
140 return (rdt_features.rdta_supported && rdt_features.mba_supported);
143 int cmt_is_supported(void)
145 if ((rdt_features.rdtm_supported || rdt_features.rdta_supported) && (prox_cfg.flags & DSF_DISABLE_CMT)) {
146 rdt_features.rdtm_supported = rdt_features.rdta_supported = 0;
147 plog_info("cqm and cat features disabled by config file\n");
149 return (rdt_features.rdtm_supported && rdt_features.cmt_supported);
152 int cat_is_supported(void)
154 if ((rdt_features.rdtm_supported || rdt_features.rdta_supported) && (prox_cfg.flags & DSF_DISABLE_CMT)) {
155 rdt_features.rdtm_supported = rdt_features.rdta_supported = 0;
156 plog_info("cqm and cat features disabled by config file\n");
158 return (rdt_features.rdta_supported && rdt_features.l3_cat_supported);
/* True when either monitoring (CMT) or allocation (CAT) can be used. */
int rdt_is_supported(void)
{
	if (cmt_is_supported())
		return 1;
	return cat_is_supported();
}
166 int rdt_get_features(struct rdt_features* feat)
168 if (!cmt_is_supported() && !cat_is_supported())
171 *feat = rdt_features;
175 int cqm_assoc(uint8_t lcore_id, uint64_t rmid)
179 ret = msr_read(&val, lcore_id, IA32_QM_ASSOC);
181 plog_err("Unable to read msr %x on core %u\n", IA32_QM_ASSOC, lcore_id);
184 plog_dbg("core %u, rmid was %lu, now setting to %lu\n", lcore_id, val, rmid);
185 val |= (uint64_t)(rmid & 0x3FFULL);
186 ret = msr_write(lcore_id, rmid, IA32_QM_ASSOC);
188 plog_err("Unable to set msr %x on core %u to value %lx\n", IA32_QM_ASSOC, lcore_id, val);
193 int cqm_assoc_read(uint8_t lcore_id, uint64_t *rmid)
195 return msr_read(rmid, lcore_id, IA32_QM_ASSOC);
198 void rdt_init_stat_core(uint8_t lcore_id)
200 stat_core = lcore_id;
203 /* read a specific rmid value using core 0 */
204 int cmt_read_ctr(uint64_t* ret, uint64_t rmid, uint8_t lcore_id)
206 uint64_t event_id = L3_CACHE_OCCUPANCY;
209 es = (es << 32) | event_id;
211 if (msr_write(lcore_id, es, IA32_QM_EVTSEL) < 0) {
215 if (msr_read(ret, lcore_id, IA32_QM_CTR) < 0) {
222 int mbm_read_tot_bdw(uint64_t* ret, uint64_t rmid, uint8_t lcore_id)
224 uint64_t event_id = L3_TOTAL_EXTERNAL_BANDWIDTH;
227 es = (es << 32) | event_id;
229 if (msr_write(lcore_id, es, IA32_QM_EVTSEL) < 0) {
233 if (msr_read(ret, lcore_id, IA32_QM_CTR) < 0) {
239 int mbm_read_loc_bdw(uint64_t* ret, uint64_t rmid, uint8_t lcore_id)
241 uint64_t event_id = L3_LOCAL_EXTERNAL_BANDWIDTH;
244 es = (es << 32) | event_id;
246 if (msr_write(lcore_id, es, IA32_QM_EVTSEL) < 0) {
250 if (msr_read(ret, lcore_id, IA32_QM_CTR) < 0) {
256 int cat_log_init(uint8_t lcore_id)
260 for (i = 0; i < IA32_QM_L3CA_END - IA32_QM_L3CA_START; i++) {
261 rc = msr_read(&tmp_rmid,lcore_id,IA32_QM_L3CA_START + i);
265 plog_info("\tAt initialization: Cache allocation set %d (msr %x): mask %lx\n", i, IA32_QM_L3CA_START + i, tmp_rmid);
270 int cat_set_class_mask(uint8_t lcore_id, uint32_t set, uint32_t mask)
274 rc = msr_write(lcore_id, mask, IA32_QM_L3CA_START + set);
276 plog_err("Failed to write Cache allocation\n");
282 int cat_get_class_mask(uint8_t lcore_id, uint32_t set, uint32_t *mask)
286 rc = msr_read(&tmp_rmid,lcore_id,IA32_QM_L3CA_START + set);
288 plog_err("Failed to read Cache allocation\n");
291 *mask = tmp_rmid & 0xffffffff;
295 void cat_reset_cache(uint32_t lcore_id)
298 uint32_t mask = (1 << rdt_features.cat_num_ways) -1;
299 for (uint32_t set = 0; set <= rdt_features.cat_max_rmid; set++) {
300 rc = msr_write(lcore_id, mask, IA32_QM_L3CA_START + set);
302 plog_err("Failed to reset Cache allocation\n");
307 int cat_get_num_ways(void)
309 return rdt_features.cat_num_ways;