Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / drivers / hwtracing / coresight / coresight-etm3x.c
1 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/io.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/smp.h>
24 #include <linux/sysfs.h>
25 #include <linux/stat.h>
26 #include <linux/clk.h>
27 #include <linux/cpu.h>
28 #include <linux/of.h>
29 #include <linux/coresight.h>
30 #include <linux/amba/bus.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <asm/sections.h>
34
35 #include "coresight-etm.h"
36
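/*
 * boot_enable is exposed as a read-only module parameter.  When set, the
 * driver enables tracing on its own instead of waiting for a request from
 * user space; for CPUs that come up later this is handled in the
 * CPU_ONLINE branch of etm_cpu_callback() below.
 */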
37 static int boot_enable;
38 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
39
40 /* The number of ETM/PTM currently registered */
41 static int etm_count;
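/*
 * Per-cpu bookkeeping: etmdrvdata[cpu] caches the driver data of the
 * ETM/PTM attached to that cpu so that the CPU hotplug notifier can save
 * or restore the trace configuration from CPU_STARTING/CPU_DYING context
 * (see etm_cpu_callback() below).
 */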
42 static struct etm_drvdata *etmdrvdata[NR_CPUS];
43
44 static inline void etm_writel(struct etm_drvdata *drvdata,
45                               u32 val, u32 off)
46 {
47         if (drvdata->use_cp14) {
48                 if (etm_writel_cp14(off, val)) {
49                         dev_err(drvdata->dev,
50                                 "invalid CP14 access to ETM reg: %#x", off);
51                 }
52         } else {
53                 writel_relaxed(val, drvdata->base + off);
54         }
55 }
56
57 static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
58 {
59         u32 val;
60
61         if (drvdata->use_cp14) {
62                 if (etm_readl_cp14(off, &val)) {
63                         dev_err(drvdata->dev,
64                                 "invalid CP14 access to ETM reg: %#x", off);
65                 }
66         } else {
67                 val = readl_relaxed(drvdata->base + off);
68         }
69
70         return val;
71 }
72
73 /*
74  * Memory mapped writes to clear the OS lock are not supported on some
75  * processors, and on those the OS lock must be cleared before any memory
76  * mapped access; otherwise memory mapped reads/writes will be invalid.
77  */
78 static void etm_os_unlock(void *info)
79 {
80         struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
81         /* Writing any value to ETMOSLAR unlocks the trace registers */
82         etm_writel(drvdata, 0x0, ETMOSLAR);
83         isb();
84 }
85
86 static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
87 {
88         u32 etmcr;
89
90         /* Ensure pending cp14 accesses complete before setting pwrdwn */
91         mb();
92         isb();
93         etmcr = etm_readl(drvdata, ETMCR);
94         etmcr |= ETMCR_PWD_DWN;
95         etm_writel(drvdata, etmcr, ETMCR);
96 }
97
98 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
99 {
100         u32 etmcr;
101
102         etmcr = etm_readl(drvdata, ETMCR);
103         etmcr &= ~ETMCR_PWD_DWN;
104         etm_writel(drvdata, etmcr, ETMCR);
105         /* Ensure pwrup completes before subsequent cp14 accesses */
106         mb();
107         isb();
108 }
109
110 static void etm_set_pwrup(struct etm_drvdata *drvdata)
111 {
112         u32 etmpdcr;
113
114         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
115         etmpdcr |= ETMPDCR_PWD_UP;
116         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
117         /* Ensure pwrup completes before subsequent cp14 accesses */
118         mb();
119         isb();
120 }
121
122 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
123 {
124         u32 etmpdcr;
125
126         /* Ensure pending cp14 accesses complete before clearing pwrup */
127         mb();
128         isb();
129         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
130         etmpdcr &= ~ETMPDCR_PWD_UP;
131         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
132 }
133
134 /**
135  * coresight_timeout_etm - loop until a bit has changed to a specific state.
136  * @drvdata: etm's private data structure.
137  * @offset: offset of the register of interest.
138  * @position: the position of the bit of interest.
139  * @value: the value the bit should have.
140  *
141  * Basically the same as @coresight_timeout except for the register access
142  * method, where we have to account for CP14 configurations.
143  *
144  * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
145  * TIMEOUT_US has elapsed, whichever happens first.
146  */
147
148 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
149                                   int position, int value)
150 {
151         int i;
152         u32 val;
153
154         for (i = TIMEOUT_US; i > 0; i--) {
155                 val = etm_readl(drvdata, offset);
156                 /* Waiting on the bit to go from 0 to 1 */
157                 if (value) {
158                         if (val & BIT(position))
159                                 return 0;
160                 /* Waiting on the bit to go from 1 to 0 */
161                 } else {
162                         if (!(val & BIT(position)))
163                                 return 0;
164                 }
165
166                 /*
167                  * Delay is arbitrary - the specification doesn't say how long
168                  * we are expected to wait.  Extra check required to make sure
169                  * we don't wait needlessly on the last iteration.
170                  */
171                 if (i - 1)
172                         udelay(1);
173         }
174
175         return -EAGAIN;
176 }
177
178
179 static void etm_set_prog(struct etm_drvdata *drvdata)
180 {
181         u32 etmcr;
182
183         etmcr = etm_readl(drvdata, ETMCR);
184         etmcr |= ETMCR_ETM_PRG;
185         etm_writel(drvdata, etmcr, ETMCR);
186         /*
187          * Recommended by spec for cp14 accesses to ensure etmcr write is
188          * complete before polling etmsr
189          */
190         isb();
191         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
192                 dev_err(drvdata->dev,
193                         "timeout observed when probing at offset %#x\n", ETMSR);
194         }
195 }
196
197 static void etm_clr_prog(struct etm_drvdata *drvdata)
198 {
199         u32 etmcr;
200
201         etmcr = etm_readl(drvdata, ETMCR);
202         etmcr &= ~ETMCR_ETM_PRG;
203         etm_writel(drvdata, etmcr, ETMCR);
204         /*
205          * Recommended by spec for cp14 accesses to ensure etmcr write is
206          * complete before polling etmsr
207          */
208         isb();
209         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
210                 dev_err(drvdata->dev,
211                         "timeout observed when probing at offset %#x\n", ETMSR);
212         }
213 }
214
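/*
 * Reset the software view of the configuration to a sane default: the
 * trigger, sequencer, counter and timestamp events are set to
 * ETM_DEFAULT_EVENT_VAL, the enable event falls back to ETM_HARD_WIRE_RES_A,
 * and the counters, sequencer state and context ID comparators are cleared.
 * Nothing is written to the hardware here - that happens in etm_enable_hw().
 */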
215 static void etm_set_default(struct etm_drvdata *drvdata)
216 {
217         int i;
218
219         drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
220         drvdata->enable_event = ETM_HARD_WIRE_RES_A;
221
222         drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
223         drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
224         drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
225         drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
226         drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
227         drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
228         drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
229
230         for (i = 0; i < drvdata->nr_cntr; i++) {
231                 drvdata->cntr_rld_val[i] = 0x0;
232                 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
233                 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
234                 drvdata->cntr_val[i] = 0x0;
235         }
236
237         drvdata->seq_curr_state = 0x0;
238         drvdata->ctxid_idx = 0x0;
239         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
240                 drvdata->ctxid_val[i] = 0x0;
241         drvdata->ctxid_mask = 0x0;
242 }
243
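/*
 * Program the trace unit with the configuration held in drvdata.  This
 * runs on the cpu the ETM/PTM is attached to, either via
 * smp_call_function_single() from etm_enable() or directly from the
 * CPU_STARTING hotplug callback: the unit is powered up and unlocked, put
 * in programming mode, all trace control registers are written, and
 * programming mode is cleared again so that tracing can start.
 */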
244 static void etm_enable_hw(void *info)
245 {
246         int i;
247         u32 etmcr;
248         struct etm_drvdata *drvdata = info;
249
250         CS_UNLOCK(drvdata->base);
251
252         /* Turn engine on */
253         etm_clr_pwrdwn(drvdata);
254         /* Apply power to trace registers */
255         etm_set_pwrup(drvdata);
256         /* Make sure all registers are accessible */
257         etm_os_unlock(drvdata);
258
259         etm_set_prog(drvdata);
260
261         etmcr = etm_readl(drvdata, ETMCR);
262         etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
263         etmcr |= drvdata->port_size;
264         etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
265         etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
266         etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
267         etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
268         etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
269         etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
270         for (i = 0; i < drvdata->nr_addr_cmp; i++) {
271                 etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
272                 etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
273         }
274         for (i = 0; i < drvdata->nr_cntr; i++) {
275                 etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
276                 etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
277                 etm_writel(drvdata, drvdata->cntr_rld_event[i],
278                            ETMCNTRLDEVRn(i));
279                 etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
280         }
281         etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
282         etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
283         etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
284         etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
285         etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
286         etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
287         etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
288         for (i = 0; i < drvdata->nr_ext_out; i++)
289                 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
290         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
291                 etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
292         etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
293         etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
294         /* No external input selected */
295         etm_writel(drvdata, 0x0, ETMEXTINSELR);
296         etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
297         /* No auxiliary control selected */
298         etm_writel(drvdata, 0x0, ETMAUXCR);
299         etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
300         /* No VMID comparator value selected */
301         etm_writel(drvdata, 0x0, ETMVMIDCVR);
302
303         /* Ensures trace output is enabled from this ETM */
304         etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
305
306         etm_clr_prog(drvdata);
307         CS_LOCK(drvdata->base);
308
309         dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
310 }
311
312 static int etm_trace_id_simple(struct etm_drvdata *drvdata)
313 {
314         if (!drvdata->enable)
315                 return drvdata->traceid;
316
317         return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
318 }
319
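/*
 * If the ETM isn't currently enabled, report the cached trace ID.
 * Otherwise read ETMTRACEIDR from the hardware, which requires the clock
 * to be running and the CoreSight registers to be unlocked.
 */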
320 static int etm_trace_id(struct coresight_device *csdev)
321 {
322         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
323         unsigned long flags;
324         int trace_id = -1;
325
326         if (!drvdata->enable)
327                 return drvdata->traceid;
328
329         if (clk_prepare_enable(drvdata->clk))
330                 goto out;
331
332         spin_lock_irqsave(&drvdata->spinlock, flags);
333
334         CS_UNLOCK(drvdata->base);
335         trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
336         CS_LOCK(drvdata->base);
337
338         spin_unlock_irqrestore(&drvdata->spinlock, flags);
339         clk_disable_unprepare(drvdata->clk);
340 out:
341         return trace_id;
342 }
343
344 static int etm_enable(struct coresight_device *csdev)
345 {
346         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
347         int ret;
348
349         ret = clk_prepare_enable(drvdata->clk);
350         if (ret)
351                 goto err_clk;
352
353         spin_lock(&drvdata->spinlock);
354
355         /*
356          * Configure the ETM only if the CPU is online.  If it isn't, the
357          * hw configuration will take place when 'CPU_STARTING' is received
358          * in @etm_cpu_callback.
359          */
360         if (cpu_online(drvdata->cpu)) {
361                 ret = smp_call_function_single(drvdata->cpu,
362                                                etm_enable_hw, drvdata, 1);
363                 if (ret)
364                         goto err;
365         }
366
367         drvdata->enable = true;
368         drvdata->sticky_enable = true;
369
370         spin_unlock(&drvdata->spinlock);
371
372         dev_info(drvdata->dev, "ETM tracing enabled\n");
373         return 0;
374 err:
375         spin_unlock(&drvdata->spinlock);
376         clk_disable_unprepare(drvdata->clk);
377 err_clk:
378         return ret;
379 }
380
381 static void etm_disable_hw(void *info)
382 {
383         int i;
384         struct etm_drvdata *drvdata = info;
385
386         CS_UNLOCK(drvdata->base);
387         etm_set_prog(drvdata);
388
389         /* Program trace enable to low by using always false event */
390         etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
391
392         /* Read back sequencer and counters for post trace analysis */
393         drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
394
395         for (i = 0; i < drvdata->nr_cntr; i++)
396                 drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
397
398         etm_set_pwrdwn(drvdata);
399         CS_LOCK(drvdata->base);
400
401         dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
402 }
403
404 static void etm_disable(struct coresight_device *csdev)
405 {
406         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
407
408         /*
409          * Taking the hotplug lock here protects against the clocks getting
410          * disabled while tracing is left on (crash scenario) if a user disable
411          * occurs after the cpu online mask indicates the cpu is offline but
412          * before the DYING hotplug callback is serviced by the ETM driver.
413          */
414         get_online_cpus();
415         spin_lock(&drvdata->spinlock);
416
417         /*
418          * Executing etm_disable_hw on the cpu whose ETM is being disabled
419          * ensures that register writes occur while the cpu is powered.
420          */
421         smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
422         drvdata->enable = false;
423
424         spin_unlock(&drvdata->spinlock);
425         put_online_cpus();
426
427         clk_disable_unprepare(drvdata->clk);
428
429         dev_info(drvdata->dev, "ETM tracing disabled\n");
430 }
431
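/*
 * Operations handed to the coresight core.  The core calls etm_enable()/
 * etm_disable() when a trace path starting at this source is switched on
 * or off (for instance through coresight_enable(), as used by the hotplug
 * callback further down) and etm_trace_id() when the trace ID of this
 * source is needed.
 */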
432 static const struct coresight_ops_source etm_source_ops = {
433         .trace_id       = etm_trace_id,
434         .enable         = etm_enable,
435         .disable        = etm_disable,
436 };
437
438 static const struct coresight_ops etm_cs_ops = {
439         .source_ops     = &etm_source_ops,
440 };
441
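/*
 * Everything from here down to coresight_etm_attrs[] is the sysfs
 * interface of the driver: each FOO_show()/FOO_store() pair, together
 * with its DEVICE_ATTR_*() declaration, backs a "FOO" file in the
 * device's sysfs directory.  Example usage from user space (the device
 * name is illustrative):
 *
 *   echo 1 > /sys/bus/coresight/devices/<etm-name>/reset
 *   cat /sys/bus/coresight/devices/<etm-name>/traceid
 */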
442 static ssize_t nr_addr_cmp_show(struct device *dev,
443                                 struct device_attribute *attr, char *buf)
444 {
445         unsigned long val;
446         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
447
448         val = drvdata->nr_addr_cmp;
449         return sprintf(buf, "%#lx\n", val);
450 }
451 static DEVICE_ATTR_RO(nr_addr_cmp);
452
453 static ssize_t nr_cntr_show(struct device *dev,
454                             struct device_attribute *attr, char *buf)
455 {
            unsigned long val;
456         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
457
458         val = drvdata->nr_cntr;
459         return sprintf(buf, "%#lx\n", val);
460 }
461 static DEVICE_ATTR_RO(nr_cntr);
462
463 static ssize_t nr_ctxid_cmp_show(struct device *dev,
464                                  struct device_attribute *attr, char *buf)
465 {
466         unsigned long val;
467         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
468
469         val = drvdata->nr_ctxid_cmp;
470         return sprintf(buf, "%#lx\n", val);
471 }
472 static DEVICE_ATTR_RO(nr_ctxid_cmp);
473
474 static ssize_t etmsr_show(struct device *dev,
475                           struct device_attribute *attr, char *buf)
476 {
477         int ret;
478         unsigned long flags, val;
479         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
480
481         ret = clk_prepare_enable(drvdata->clk);
482         if (ret)
483                 return ret;
484
485         spin_lock_irqsave(&drvdata->spinlock, flags);
486         CS_UNLOCK(drvdata->base);
487
488         val = etm_readl(drvdata, ETMSR);
489
490         CS_LOCK(drvdata->base);
491         spin_unlock_irqrestore(&drvdata->spinlock, flags);
492         clk_disable_unprepare(drvdata->clk);
493
494         return sprintf(buf, "%#lx\n", val);
495 }
496 static DEVICE_ATTR_RO(etmsr);
497
498 static ssize_t reset_store(struct device *dev,
499                            struct device_attribute *attr,
500                            const char *buf, size_t size)
501 {
502         int i, ret;
503         unsigned long val;
504         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
505
506         ret = kstrtoul(buf, 16, &val);
507         if (ret)
508                 return ret;
509
510         if (val) {
511                 spin_lock(&drvdata->spinlock);
512                 drvdata->mode = ETM_MODE_EXCLUDE;
513                 drvdata->ctrl = 0x0;
514                 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
515                 drvdata->startstop_ctrl = 0x0;
516                 drvdata->addr_idx = 0x0;
517                 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
518                         drvdata->addr_val[i] = 0x0;
519                         drvdata->addr_acctype[i] = 0x0;
520                         drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
521                 }
522                 drvdata->cntr_idx = 0x0;
523
524                 etm_set_default(drvdata);
525                 spin_unlock(&drvdata->spinlock);
526         }
527
528         return size;
529 }
530 static DEVICE_ATTR_WO(reset);
531
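/*
 * "mode" is a bitmask of ETM_MODE_* flags (address exclusion, cycle
 * accurate tracing, stalling, timestamping and context ID tracing).  Each
 * flag accepted in mode_store() is translated into the matching ETMCR or
 * ETMTECR1 control bit; stall and timestamp requests are rejected when
 * the hardware doesn't advertise support for them.
 */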
532 static ssize_t mode_show(struct device *dev,
533                          struct device_attribute *attr, char *buf)
534 {
535         unsigned long val;
536         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
537
538         val = drvdata->mode;
539         return sprintf(buf, "%#lx\n", val);
540 }
541
542 static ssize_t mode_store(struct device *dev,
543                           struct device_attribute *attr,
544                           const char *buf, size_t size)
545 {
546         int ret;
547         unsigned long val;
548         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
549
550         ret = kstrtoul(buf, 16, &val);
551         if (ret)
552                 return ret;
553
554         spin_lock(&drvdata->spinlock);
555         drvdata->mode = val & ETM_MODE_ALL;
556
557         if (drvdata->mode & ETM_MODE_EXCLUDE)
558                 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
559         else
560                 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
561
562         if (drvdata->mode & ETM_MODE_CYCACC)
563                 drvdata->ctrl |= ETMCR_CYC_ACC;
564         else
565                 drvdata->ctrl &= ~ETMCR_CYC_ACC;
566
567         if (drvdata->mode & ETM_MODE_STALL) {
568                 if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
569                         dev_warn(drvdata->dev, "stall mode not supported\n");
570                         ret = -EINVAL;
571                         goto err_unlock;
572                 }
573                 drvdata->ctrl |= ETMCR_STALL_MODE;
574         } else
575                 drvdata->ctrl &= ~ETMCR_STALL_MODE;
576
577         if (drvdata->mode & ETM_MODE_TIMESTAMP) {
578                 if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
579                         dev_warn(drvdata->dev, "timestamp not supported\n");
580                         ret = -EINVAL;
581                         goto err_unlock;
582                 }
583                 drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
584         } else
585                 drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
586
587         if (drvdata->mode & ETM_MODE_CTXID)
588                 drvdata->ctrl |= ETMCR_CTXID_SIZE;
589         else
590                 drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
591         spin_unlock(&drvdata->spinlock);
592
593         return size;
594
595 err_unlock:
596         spin_unlock(&drvdata->spinlock);
597         return ret;
598 }
599 static DEVICE_ATTR_RW(mode);
600
601 static ssize_t trigger_event_show(struct device *dev,
602                                   struct device_attribute *attr, char *buf)
603 {
604         unsigned long val;
605         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
606
607         val = drvdata->trigger_event;
608         return sprintf(buf, "%#lx\n", val);
609 }
610
611 static ssize_t trigger_event_store(struct device *dev,
612                                    struct device_attribute *attr,
613                                    const char *buf, size_t size)
614 {
615         int ret;
616         unsigned long val;
617         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
618
619         ret = kstrtoul(buf, 16, &val);
620         if (ret)
621                 return ret;
622
623         drvdata->trigger_event = val & ETM_EVENT_MASK;
624
625         return size;
626 }
627 static DEVICE_ATTR_RW(trigger_event);
628
629 static ssize_t enable_event_show(struct device *dev,
630                                  struct device_attribute *attr, char *buf)
631 {
632         unsigned long val;
633         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
634
635         val = drvdata->enable_event;
636         return sprintf(buf, "%#lx\n", val);
637 }
638
639 static ssize_t enable_event_store(struct device *dev,
640                                   struct device_attribute *attr,
641                                   const char *buf, size_t size)
642 {
643         int ret;
644         unsigned long val;
645         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
646
647         ret = kstrtoul(buf, 16, &val);
648         if (ret)
649                 return ret;
650
651         drvdata->enable_event = val & ETM_EVENT_MASK;
652
653         return size;
654 }
655 static DEVICE_ATTR_RW(enable_event);
656
657 static ssize_t fifofull_level_show(struct device *dev,
658                                    struct device_attribute *attr, char *buf)
659 {
660         unsigned long val;
661         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
662
663         val = drvdata->fifofull_level;
664         return sprintf(buf, "%#lx\n", val);
665 }
666
667 static ssize_t fifofull_level_store(struct device *dev,
668                                     struct device_attribute *attr,
669                                     const char *buf, size_t size)
670 {
671         int ret;
672         unsigned long val;
673         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
674
675         ret = kstrtoul(buf, 16, &val);
676         if (ret)
677                 return ret;
678
679         drvdata->fifofull_level = val;
680
681         return size;
682 }
683 static DEVICE_ATTR_RW(fifofull_level);
684
685 static ssize_t addr_idx_show(struct device *dev,
686                              struct device_attribute *attr, char *buf)
687 {
688         unsigned long val;
689         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
690
691         val = drvdata->addr_idx;
692         return sprintf(buf, "%#lx\n", val);
693 }
694
695 static ssize_t addr_idx_store(struct device *dev,
696                               struct device_attribute *attr,
697                               const char *buf, size_t size)
698 {
699         int ret;
700         unsigned long val;
701         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
702
703         ret = kstrtoul(buf, 16, &val);
704         if (ret)
705                 return ret;
706
707         if (val >= drvdata->nr_addr_cmp)
708                 return -EINVAL;
709
710         /*
711          * Use the spinlock to ensure the index doesn't change while it is
712          * used multiple times within a spinlock-protected block elsewhere.
713          */
714         spin_lock(&drvdata->spinlock);
715         drvdata->addr_idx = val;
716         spin_unlock(&drvdata->spinlock);
717
718         return size;
719 }
720 static DEVICE_ATTR_RW(addr_idx);
721
722 static ssize_t addr_single_show(struct device *dev,
723                                 struct device_attribute *attr, char *buf)
724 {
725         u8 idx;
726         unsigned long val;
727         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
728
729         spin_lock(&drvdata->spinlock);
730         idx = drvdata->addr_idx;
731         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
732               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
733                 spin_unlock(&drvdata->spinlock);
734                 return -EINVAL;
735         }
736
737         val = drvdata->addr_val[idx];
738         spin_unlock(&drvdata->spinlock);
739
740         return sprintf(buf, "%#lx\n", val);
741 }
742
743 static ssize_t addr_single_store(struct device *dev,
744                                  struct device_attribute *attr,
745                                  const char *buf, size_t size)
746 {
747         u8 idx;
748         int ret;
749         unsigned long val;
750         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
751
752         ret = kstrtoul(buf, 16, &val);
753         if (ret)
754                 return ret;
755
756         spin_lock(&drvdata->spinlock);
757         idx = drvdata->addr_idx;
758         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
759               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
760                 spin_unlock(&drvdata->spinlock);
761                 return -EINVAL;
762         }
763
764         drvdata->addr_val[idx] = val;
765         drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
766         spin_unlock(&drvdata->spinlock);
767
768         return size;
769 }
770 static DEVICE_ATTR_RW(addr_single);
771
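/*
 * Address range comparators are made of pairs of single address
 * comparators: comparator 2n holds the low end of range n and comparator
 * 2n+1 the high end.  Hence the checks below that addr_idx is even and
 * that both comparators of the pair are free (or already used for a
 * range), and the (1 << (idx / 2)) bit set in enable_ctrl1/ETMTECR1 to
 * include the range in the trace enable logic.
 */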
772 static ssize_t addr_range_show(struct device *dev,
773                                struct device_attribute *attr, char *buf)
774 {
775         u8 idx;
776         unsigned long val1, val2;
777         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
778
779         spin_lock(&drvdata->spinlock);
780         idx = drvdata->addr_idx;
781         if (idx % 2 != 0) {
782                 spin_unlock(&drvdata->spinlock);
783                 return -EPERM;
784         }
785         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
786                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
787               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
788                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
789                 spin_unlock(&drvdata->spinlock);
790                 return -EPERM;
791         }
792
793         val1 = drvdata->addr_val[idx];
794         val2 = drvdata->addr_val[idx + 1];
795         spin_unlock(&drvdata->spinlock);
796
797         return sprintf(buf, "%#lx %#lx\n", val1, val2);
798 }
799
800 static ssize_t addr_range_store(struct device *dev,
801                               struct device_attribute *attr,
802                               const char *buf, size_t size)
803 {
804         u8 idx;
805         unsigned long val1, val2;
806         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
807
808         if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
809                 return -EINVAL;
810         /* Lower address comparator cannot have a higher address value */
811         if (val1 > val2)
812                 return -EINVAL;
813
814         spin_lock(&drvdata->spinlock);
815         idx = drvdata->addr_idx;
816         if (idx % 2 != 0) {
817                 spin_unlock(&drvdata->spinlock);
818                 return -EPERM;
819         }
820         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
821                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
822               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
823                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
824                 spin_unlock(&drvdata->spinlock);
825                 return -EPERM;
826         }
827
828         drvdata->addr_val[idx] = val1;
829         drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
830         drvdata->addr_val[idx + 1] = val2;
831         drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
832         drvdata->enable_ctrl1 |= (1 << (idx/2));
833         spin_unlock(&drvdata->spinlock);
834
835         return size;
836 }
837 static DEVICE_ATTR_RW(addr_range);
838
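/*
 * Single address comparators can also act as trace start/stop resources:
 * bit "idx" of ETMTSSCR selects comparator "idx" as a start address and
 * bit "idx + 16" selects it as a stop address, while ETMTECR1_START_STOP
 * turns on start/stop control of the trace enable logic.
 */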
839 static ssize_t addr_start_show(struct device *dev,
840                                struct device_attribute *attr, char *buf)
841 {
842         u8 idx;
843         unsigned long val;
844         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
845
846         spin_lock(&drvdata->spinlock);
847         idx = drvdata->addr_idx;
848         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
849               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
850                 spin_unlock(&drvdata->spinlock);
851                 return -EPERM;
852         }
853
854         val = drvdata->addr_val[idx];
855         spin_unlock(&drvdata->spinlock);
856
857         return sprintf(buf, "%#lx\n", val);
858 }
859
860 static ssize_t addr_start_store(struct device *dev,
861                                 struct device_attribute *attr,
862                                 const char *buf, size_t size)
863 {
864         u8 idx;
865         int ret;
866         unsigned long val;
867         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
868
869         ret = kstrtoul(buf, 16, &val);
870         if (ret)
871                 return ret;
872
873         spin_lock(&drvdata->spinlock);
874         idx = drvdata->addr_idx;
875         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
876               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
877                 spin_unlock(&drvdata->spinlock);
878                 return -EPERM;
879         }
880
881         drvdata->addr_val[idx] = val;
882         drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
883         drvdata->startstop_ctrl |= (1 << idx);
884         drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
885         spin_unlock(&drvdata->spinlock);
886
887         return size;
888 }
889 static DEVICE_ATTR_RW(addr_start);
890
891 static ssize_t addr_stop_show(struct device *dev,
892                               struct device_attribute *attr, char *buf)
893 {
894         u8 idx;
895         unsigned long val;
896         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
897
898         spin_lock(&drvdata->spinlock);
899         idx = drvdata->addr_idx;
900         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
901               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
902                 spin_unlock(&drvdata->spinlock);
903                 return -EPERM;
904         }
905
906         val = drvdata->addr_val[idx];
907         spin_unlock(&drvdata->spinlock);
908
909         return sprintf(buf, "%#lx\n", val);
910 }
911
912 static ssize_t addr_stop_store(struct device *dev,
913                                struct device_attribute *attr,
914                                const char *buf, size_t size)
915 {
916         u8 idx;
917         int ret;
918         unsigned long val;
919         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
920
921         ret = kstrtoul(buf, 16, &val);
922         if (ret)
923                 return ret;
924
925         spin_lock(&drvdata->spinlock);
926         idx = drvdata->addr_idx;
927         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
928               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
929                 spin_unlock(&drvdata->spinlock);
930                 return -EPERM;
931         }
932
933         drvdata->addr_val[idx] = val;
934         drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
935         drvdata->startstop_ctrl |= (1 << (idx + 16));
936         drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
937         spin_unlock(&drvdata->spinlock);
938
939         return size;
940 }
941 static DEVICE_ATTR_RW(addr_stop);
942
943 static ssize_t addr_acctype_show(struct device *dev,
944                                  struct device_attribute *attr, char *buf)
945 {
946         unsigned long val;
947         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
948
949         spin_lock(&drvdata->spinlock);
950         val = drvdata->addr_acctype[drvdata->addr_idx];
951         spin_unlock(&drvdata->spinlock);
952
953         return sprintf(buf, "%#lx\n", val);
954 }
955
956 static ssize_t addr_acctype_store(struct device *dev,
957                                   struct device_attribute *attr,
958                                   const char *buf, size_t size)
959 {
960         int ret;
961         unsigned long val;
962         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
963
964         ret = kstrtoul(buf, 16, &val);
965         if (ret)
966                 return ret;
967
968         spin_lock(&drvdata->spinlock);
969         drvdata->addr_acctype[drvdata->addr_idx] = val;
970         spin_unlock(&drvdata->spinlock);
971
972         return size;
973 }
974 static DEVICE_ATTR_RW(addr_acctype);
975
976 static ssize_t cntr_idx_show(struct device *dev,
977                              struct device_attribute *attr, char *buf)
978 {
979         unsigned long val;
980         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
981
982         val = drvdata->cntr_idx;
983         return sprintf(buf, "%#lx\n", val);
984 }
985
986 static ssize_t cntr_idx_store(struct device *dev,
987                               struct device_attribute *attr,
988                               const char *buf, size_t size)
989 {
990         int ret;
991         unsigned long val;
992         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
993
994         ret = kstrtoul(buf, 16, &val);
995         if (ret)
996                 return ret;
997
998         if (val >= drvdata->nr_cntr)
999                 return -EINVAL;
1000         /*
1001          * Use the spinlock to ensure the index doesn't change while it is
1002          * used multiple times within a spinlock-protected block elsewhere.
1003          */
1004         spin_lock(&drvdata->spinlock);
1005         drvdata->cntr_idx = val;
1006         spin_unlock(&drvdata->spinlock);
1007
1008         return size;
1009 }
1010 static DEVICE_ATTR_RW(cntr_idx);
1011
1012 static ssize_t cntr_rld_val_show(struct device *dev,
1013                                  struct device_attribute *attr, char *buf)
1014 {
1015         unsigned long val;
1016         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1017
1018         spin_lock(&drvdata->spinlock);
1019         val = drvdata->cntr_rld_val[drvdata->cntr_idx];
1020         spin_unlock(&drvdata->spinlock);
1021
1022         return sprintf(buf, "%#lx\n", val);
1023 }
1024
1025 static ssize_t cntr_rld_val_store(struct device *dev,
1026                                   struct device_attribute *attr,
1027                                   const char *buf, size_t size)
1028 {
1029         int ret;
1030         unsigned long val;
1031         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1032
1033         ret = kstrtoul(buf, 16, &val);
1034         if (ret)
1035                 return ret;
1036
1037         spin_lock(&drvdata->spinlock);
1038         drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
1039         spin_unlock(&drvdata->spinlock);
1040
1041         return size;
1042 }
1043 static DEVICE_ATTR_RW(cntr_rld_val);
1044
1045 static ssize_t cntr_event_show(struct device *dev,
1046                                struct device_attribute *attr, char *buf)
1047 {
1048         unsigned long val;
1049         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1050
1051         spin_lock(&drvdata->spinlock);
1052         val = drvdata->cntr_event[drvdata->cntr_idx];
1053         spin_unlock(&drvdata->spinlock);
1054
1055         return sprintf(buf, "%#lx\n", val);
1056 }
1057
1058 static ssize_t cntr_event_store(struct device *dev,
1059                                 struct device_attribute *attr,
1060                                 const char *buf, size_t size)
1061 {
1062         int ret;
1063         unsigned long val;
1064         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1065
1066         ret = kstrtoul(buf, 16, &val);
1067         if (ret)
1068                 return ret;
1069
1070         spin_lock(&drvdata->spinlock);
1071         drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1072         spin_unlock(&drvdata->spinlock);
1073
1074         return size;
1075 }
1076 static DEVICE_ATTR_RW(cntr_event);
1077
1078 static ssize_t cntr_rld_event_show(struct device *dev,
1079                                    struct device_attribute *attr, char *buf)
1080 {
1081         unsigned long val;
1082         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1083
1084         spin_lock(&drvdata->spinlock);
1085         val = drvdata->cntr_rld_event[drvdata->cntr_idx];
1086         spin_unlock(&drvdata->spinlock);
1087
1088         return sprintf(buf, "%#lx\n", val);
1089 }
1090
1091 static ssize_t cntr_rld_event_store(struct device *dev,
1092                                     struct device_attribute *attr,
1093                                     const char *buf, size_t size)
1094 {
1095         int ret;
1096         unsigned long val;
1097         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1098
1099         ret = kstrtoul(buf, 16, &val);
1100         if (ret)
1101                 return ret;
1102
1103         spin_lock(&drvdata->spinlock);
1104         drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1105         spin_unlock(&drvdata->spinlock);
1106
1107         return size;
1108 }
1109 static DEVICE_ATTR_RW(cntr_rld_event);
1110
1111 static ssize_t cntr_val_show(struct device *dev,
1112                              struct device_attribute *attr, char *buf)
1113 {
1114         int i, ret = 0;
1115         u32 val;
1116         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1117
1118         if (!drvdata->enable) {
1119                 spin_lock(&drvdata->spinlock);
1120                 for (i = 0; i < drvdata->nr_cntr; i++)
1121                         ret += sprintf(buf + ret, "counter %d: %x\n",
1122                                        i, drvdata->cntr_val[i]);
1123                 spin_unlock(&drvdata->spinlock);
1124                 return ret;
1125         }
1126
1127         for (i = 0; i < drvdata->nr_cntr; i++) {
1128                 val = etm_readl(drvdata, ETMCNTVRn(i));
1129                 ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
1130         }
1131
1132         return ret;
1133 }
1134
1135 static ssize_t cntr_val_store(struct device *dev,
1136                               struct device_attribute *attr,
1137                               const char *buf, size_t size)
1138 {
1139         int ret;
1140         unsigned long val;
1141         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1142
1143         ret = kstrtoul(buf, 16, &val);
1144         if (ret)
1145                 return ret;
1146
1147         spin_lock(&drvdata->spinlock);
1148         drvdata->cntr_val[drvdata->cntr_idx] = val;
1149         spin_unlock(&drvdata->spinlock);
1150
1151         return size;
1152 }
1153 static DEVICE_ATTR_RW(cntr_val);
1154
1155 static ssize_t seq_12_event_show(struct device *dev,
1156                                  struct device_attribute *attr, char *buf)
1157 {
1158         unsigned long val;
1159         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1160
1161         val = drvdata->seq_12_event;
1162         return sprintf(buf, "%#lx\n", val);
1163 }
1164
1165 static ssize_t seq_12_event_store(struct device *dev,
1166                                   struct device_attribute *attr,
1167                                   const char *buf, size_t size)
1168 {
1169         int ret;
1170         unsigned long val;
1171         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1172
1173         ret = kstrtoul(buf, 16, &val);
1174         if (ret)
1175                 return ret;
1176
1177         drvdata->seq_12_event = val & ETM_EVENT_MASK;
1178         return size;
1179 }
1180 static DEVICE_ATTR_RW(seq_12_event);
1181
1182 static ssize_t seq_21_event_show(struct device *dev,
1183                                  struct device_attribute *attr, char *buf)
1184 {
1185         unsigned long val;
1186         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1187
1188         val = drvdata->seq_21_event;
1189         return sprintf(buf, "%#lx\n", val);
1190 }
1191
1192 static ssize_t seq_21_event_store(struct device *dev,
1193                                   struct device_attribute *attr,
1194                                   const char *buf, size_t size)
1195 {
1196         int ret;
1197         unsigned long val;
1198         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1199
1200         ret = kstrtoul(buf, 16, &val);
1201         if (ret)
1202                 return ret;
1203
1204         drvdata->seq_21_event = val & ETM_EVENT_MASK;
1205         return size;
1206 }
1207 static DEVICE_ATTR_RW(seq_21_event);
1208
1209 static ssize_t seq_23_event_show(struct device *dev,
1210                                  struct device_attribute *attr, char *buf)
1211 {
1212         unsigned long val;
1213         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1214
1215         val = drvdata->seq_23_event;
1216         return sprintf(buf, "%#lx\n", val);
1217 }
1218
1219 static ssize_t seq_23_event_store(struct device *dev,
1220                                   struct device_attribute *attr,
1221                                   const char *buf, size_t size)
1222 {
1223         int ret;
1224         unsigned long val;
1225         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1226
1227         ret = kstrtoul(buf, 16, &val);
1228         if (ret)
1229                 return ret;
1230
1231         drvdata->seq_23_event = val & ETM_EVENT_MASK;
1232         return size;
1233 }
1234 static DEVICE_ATTR_RW(seq_23_event);
1235
1236 static ssize_t seq_31_event_show(struct device *dev,
1237                                  struct device_attribute *attr, char *buf)
1238 {
1239         unsigned long val;
1240         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1241
1242         val = drvdata->seq_31_event;
1243         return sprintf(buf, "%#lx\n", val);
1244 }
1245
1246 static ssize_t seq_31_event_store(struct device *dev,
1247                                   struct device_attribute *attr,
1248                                   const char *buf, size_t size)
1249 {
1250         int ret;
1251         unsigned long val;
1252         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1253
1254         ret = kstrtoul(buf, 16, &val);
1255         if (ret)
1256                 return ret;
1257
1258         drvdata->seq_31_event = val & ETM_EVENT_MASK;
1259         return size;
1260 }
1261 static DEVICE_ATTR_RW(seq_31_event);
1262
1263 static ssize_t seq_32_event_show(struct device *dev,
1264                                  struct device_attribute *attr, char *buf)
1265 {
1266         unsigned long val;
1267         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1268
1269         val = drvdata->seq_32_event;
1270         return sprintf(buf, "%#lx\n", val);
1271 }
1272
1273 static ssize_t seq_32_event_store(struct device *dev,
1274                                   struct device_attribute *attr,
1275                                   const char *buf, size_t size)
1276 {
1277         int ret;
1278         unsigned long val;
1279         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1280
1281         ret = kstrtoul(buf, 16, &val);
1282         if (ret)
1283                 return ret;
1284
1285         drvdata->seq_32_event = val & ETM_EVENT_MASK;
1286         return size;
1287 }
1288 static DEVICE_ATTR_RW(seq_32_event);
1289
1290 static ssize_t seq_13_event_show(struct device *dev,
1291                                  struct device_attribute *attr, char *buf)
1292 {
1293         unsigned long val;
1294         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1295
1296         val = drvdata->seq_13_event;
1297         return sprintf(buf, "%#lx\n", val);
1298 }
1299
1300 static ssize_t seq_13_event_store(struct device *dev,
1301                                   struct device_attribute *attr,
1302                                   const char *buf, size_t size)
1303 {
1304         int ret;
1305         unsigned long val;
1306         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1307
1308         ret = kstrtoul(buf, 16, &val);
1309         if (ret)
1310                 return ret;
1311
1312         drvdata->seq_13_event = val & ETM_EVENT_MASK;
1313         return size;
1314 }
1315 static DEVICE_ATTR_RW(seq_13_event);
1316
1317 static ssize_t seq_curr_state_show(struct device *dev,
1318                                    struct device_attribute *attr, char *buf)
1319 {
1320         int ret;
1321         unsigned long val, flags;
1322         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1323
1324         if (!drvdata->enable) {
1325                 val = drvdata->seq_curr_state;
1326                 goto out;
1327         }
1328
1329         ret = clk_prepare_enable(drvdata->clk);
1330         if (ret)
1331                 return ret;
1332
1333         spin_lock_irqsave(&drvdata->spinlock, flags);
1334
1335         CS_UNLOCK(drvdata->base);
1336         val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
1337         CS_LOCK(drvdata->base);
1338
1339         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1340         clk_disable_unprepare(drvdata->clk);
1341 out:
1342         return sprintf(buf, "%#lx\n", val);
1343 }
1344
1345 static ssize_t seq_curr_state_store(struct device *dev,
1346                                     struct device_attribute *attr,
1347                                     const char *buf, size_t size)
1348 {
1349         int ret;
1350         unsigned long val;
1351         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1352
1353         ret = kstrtoul(buf, 16, &val);
1354         if (ret)
1355                 return ret;
1356
1357         if (val > ETM_SEQ_STATE_MAX_VAL)
1358                 return -EINVAL;
1359
1360         drvdata->seq_curr_state = val;
1361
1362         return size;
1363 }
1364 static DEVICE_ATTR_RW(seq_curr_state);
1365
1366 static ssize_t ctxid_idx_show(struct device *dev,
1367                               struct device_attribute *attr, char *buf)
1368 {
1369         unsigned long val;
1370         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1371
1372         val = drvdata->ctxid_idx;
1373         return sprintf(buf, "%#lx\n", val);
1374 }
1375
1376 static ssize_t ctxid_idx_store(struct device *dev,
1377                                 struct device_attribute *attr,
1378                                 const char *buf, size_t size)
1379 {
1380         int ret;
1381         unsigned long val;
1382         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1383
1384         ret = kstrtoul(buf, 16, &val);
1385         if (ret)
1386                 return ret;
1387
1388         if (val >= drvdata->nr_ctxid_cmp)
1389                 return -EINVAL;
1390
1391         /*
1392          * Use the spinlock to ensure the index doesn't change while it is
1393          * used multiple times within a spinlock-protected block elsewhere.
1394          */
1395         spin_lock(&drvdata->spinlock);
1396         drvdata->ctxid_idx = val;
1397         spin_unlock(&drvdata->spinlock);
1398
1399         return size;
1400 }
1401 static DEVICE_ATTR_RW(ctxid_idx);
1402
1403 static ssize_t ctxid_val_show(struct device *dev,
1404                               struct device_attribute *attr, char *buf)
1405 {
1406         unsigned long val;
1407         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1408
1409         spin_lock(&drvdata->spinlock);
1410         val = drvdata->ctxid_val[drvdata->ctxid_idx];
1411         spin_unlock(&drvdata->spinlock);
1412
1413         return sprintf(buf, "%#lx\n", val);
1414 }
1415
1416 static ssize_t ctxid_val_store(struct device *dev,
1417                                struct device_attribute *attr,
1418                                const char *buf, size_t size)
1419 {
1420         int ret;
1421         unsigned long val;
1422         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1423
1424         ret = kstrtoul(buf, 16, &val);
1425         if (ret)
1426                 return ret;
1427
1428         spin_lock(&drvdata->spinlock);
1429         drvdata->ctxid_val[drvdata->ctxid_idx] = val;
1430         spin_unlock(&drvdata->spinlock);
1431
1432         return size;
1433 }
1434 static DEVICE_ATTR_RW(ctxid_val);
1435
1436 static ssize_t ctxid_mask_show(struct device *dev,
1437                                struct device_attribute *attr, char *buf)
1438 {
1439         unsigned long val;
1440         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1441
1442         val = drvdata->ctxid_mask;
1443         return sprintf(buf, "%#lx\n", val);
1444 }
1445
1446 static ssize_t ctxid_mask_store(struct device *dev,
1447                                 struct device_attribute *attr,
1448                                 const char *buf, size_t size)
1449 {
1450         int ret;
1451         unsigned long val;
1452         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1453
1454         ret = kstrtoul(buf, 16, &val);
1455         if (ret)
1456                 return ret;
1457
1458         drvdata->ctxid_mask = val;
1459         return size;
1460 }
1461 static DEVICE_ATTR_RW(ctxid_mask);
1462
1463 static ssize_t sync_freq_show(struct device *dev,
1464                               struct device_attribute *attr, char *buf)
1465 {
1466         unsigned long val;
1467         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1468
1469         val = drvdata->sync_freq;
1470         return sprintf(buf, "%#lx\n", val);
1471 }
1472
1473 static ssize_t sync_freq_store(struct device *dev,
1474                                struct device_attribute *attr,
1475                                const char *buf, size_t size)
1476 {
1477         int ret;
1478         unsigned long val;
1479         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1480
1481         ret = kstrtoul(buf, 16, &val);
1482         if (ret)
1483                 return ret;
1484
1485         drvdata->sync_freq = val & ETM_SYNC_MASK;
1486         return size;
1487 }
1488 static DEVICE_ATTR_RW(sync_freq);
1489
1490 static ssize_t timestamp_event_show(struct device *dev,
1491                                     struct device_attribute *attr, char *buf)
1492 {
1493         unsigned long val;
1494         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1495
1496         val = drvdata->timestamp_event;
1497         return sprintf(buf, "%#lx\n", val);
1498 }
1499
1500 static ssize_t timestamp_event_store(struct device *dev,
1501                                      struct device_attribute *attr,
1502                                      const char *buf, size_t size)
1503 {
1504         int ret;
1505         unsigned long val;
1506         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1507
1508         ret = kstrtoul(buf, 16, &val);
1509         if (ret)
1510                 return ret;
1511
1512         drvdata->timestamp_event = val & ETM_EVENT_MASK;
1513         return size;
1514 }
1515 static DEVICE_ATTR_RW(timestamp_event);
1516
1517 static ssize_t status_show(struct device *dev,
1518                            struct device_attribute *attr, char *buf)
1519 {
1520         int ret;
1521         unsigned long flags;
1522         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1523
1524         ret = clk_prepare_enable(drvdata->clk);
1525         if (ret)
1526                 return ret;
1527
1528         spin_lock_irqsave(&drvdata->spinlock, flags);
1529
1530         CS_UNLOCK(drvdata->base);
1531         ret = sprintf(buf,
1532                       "ETMCCR: 0x%08x\n"
1533                       "ETMCCER: 0x%08x\n"
1534                       "ETMSCR: 0x%08x\n"
1535                       "ETMIDR: 0x%08x\n"
1536                       "ETMCR: 0x%08x\n"
1537                       "ETMTRACEIDR: 0x%08x\n"
1538                       "Enable event: 0x%08x\n"
1539                       "Enable start/stop: 0x%08x\n"
1540                       "Enable control: CR1 0x%08x CR2 0x%08x\n"
1541                       "CPU affinity: %d\n",
1542                       drvdata->etmccr, drvdata->etmccer,
1543                       etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
1544                       etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
1545                       etm_readl(drvdata, ETMTEEVR),
1546                       etm_readl(drvdata, ETMTSSCR),
1547                       etm_readl(drvdata, ETMTECR1),
1548                       etm_readl(drvdata, ETMTECR2),
1549                       drvdata->cpu);
1550         CS_LOCK(drvdata->base);
1551
1552         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1553         clk_disable_unprepare(drvdata->clk);
1554
1555         return ret;
1556 }
1557 static DEVICE_ATTR_RO(status);
1558
1559 static ssize_t traceid_show(struct device *dev,
1560                             struct device_attribute *attr, char *buf)
1561 {
1562         int ret;
1563         unsigned long val, flags;
1564         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1565
1566         if (!drvdata->enable) {
1567                 val = drvdata->traceid;
1568                 goto out;
1569         }
1570
1571         ret = clk_prepare_enable(drvdata->clk);
1572         if (ret)
1573                 return ret;
1574
1575         spin_lock_irqsave(&drvdata->spinlock, flags);
1576         CS_UNLOCK(drvdata->base);
1577
1578         val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
1579
1580         CS_LOCK(drvdata->base);
1581         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1582         clk_disable_unprepare(drvdata->clk);
1583 out:
1584         return sprintf(buf, "%#lx\n", val);
1585 }
1586
1587 static ssize_t traceid_store(struct device *dev,
1588                              struct device_attribute *attr,
1589                              const char *buf, size_t size)
1590 {
1591         int ret;
1592         unsigned long val;
1593         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1594
1595         ret = kstrtoul(buf, 16, &val);
1596         if (ret)
1597                 return ret;
1598
1599         drvdata->traceid = val & ETM_TRACEID_MASK;
1600         return size;
1601 }
1602 static DEVICE_ATTR_RW(traceid);
1603
1604 static struct attribute *coresight_etm_attrs[] = {
1605         &dev_attr_nr_addr_cmp.attr,
1606         &dev_attr_nr_cntr.attr,
1607         &dev_attr_nr_ctxid_cmp.attr,
1608         &dev_attr_etmsr.attr,
1609         &dev_attr_reset.attr,
1610         &dev_attr_mode.attr,
1611         &dev_attr_trigger_event.attr,
1612         &dev_attr_enable_event.attr,
1613         &dev_attr_fifofull_level.attr,
1614         &dev_attr_addr_idx.attr,
1615         &dev_attr_addr_single.attr,
1616         &dev_attr_addr_range.attr,
1617         &dev_attr_addr_start.attr,
1618         &dev_attr_addr_stop.attr,
1619         &dev_attr_addr_acctype.attr,
1620         &dev_attr_cntr_idx.attr,
1621         &dev_attr_cntr_rld_val.attr,
1622         &dev_attr_cntr_event.attr,
1623         &dev_attr_cntr_rld_event.attr,
1624         &dev_attr_cntr_val.attr,
1625         &dev_attr_seq_12_event.attr,
1626         &dev_attr_seq_21_event.attr,
1627         &dev_attr_seq_23_event.attr,
1628         &dev_attr_seq_31_event.attr,
1629         &dev_attr_seq_32_event.attr,
1630         &dev_attr_seq_13_event.attr,
1631         &dev_attr_seq_curr_state.attr,
1632         &dev_attr_ctxid_idx.attr,
1633         &dev_attr_ctxid_val.attr,
1634         &dev_attr_ctxid_mask.attr,
1635         &dev_attr_sync_freq.attr,
1636         &dev_attr_timestamp_event.attr,
1637         &dev_attr_status.attr,
1638         &dev_attr_traceid.attr,
1639         NULL,
1640 };
1641 ATTRIBUTE_GROUPS(coresight_etm);
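/*
 * ATTRIBUTE_GROUPS(coresight_etm) generates the coresight_etm_groups
 * array that etm_probe() below hands to the coresight core through
 * desc->groups, which is how every attribute listed above ends up
 * exposed under the registered device.
 */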
1642
1643 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
1644                             void *hcpu)
1645 {
1646         unsigned int cpu = (unsigned long)hcpu;
1647
1648         if (!etmdrvdata[cpu])
1649                 goto out;
1650
1651         switch (action & (~CPU_TASKS_FROZEN)) {
1652         case CPU_STARTING:
1653                 spin_lock(&etmdrvdata[cpu]->spinlock);
1654                 if (!etmdrvdata[cpu]->os_unlock) {
1655                         etm_os_unlock(etmdrvdata[cpu]);
1656                         etmdrvdata[cpu]->os_unlock = true;
1657                 }
1658
1659                 if (etmdrvdata[cpu]->enable)
1660                         etm_enable_hw(etmdrvdata[cpu]);
1661                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1662                 break;
1663
1664         case CPU_ONLINE:
1665                 if (etmdrvdata[cpu]->boot_enable &&
1666                     !etmdrvdata[cpu]->sticky_enable)
1667                         coresight_enable(etmdrvdata[cpu]->csdev);
1668                 break;
1669
1670         case CPU_DYING:
1671                 spin_lock(&etmdrvdata[cpu]->spinlock);
1672                 if (etmdrvdata[cpu]->enable)
1673                         etm_disable_hw(etmdrvdata[cpu]);
1674                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1675                 break;
1676         }
1677 out:
1678         return NOTIFY_OK;
1679 }
1680
1681 static struct notifier_block etm_cpu_notifier = {
1682         .notifier_call = etm_cpu_callback,
1683 };
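/*
 * Note on the hotplug callback above: CPU_STARTING and CPU_DYING are
 * delivered on the CPU that is coming up or going down, which is why
 * etm_enable_hw()/etm_disable_hw() can touch that CPU's ETM registers
 * directly, while CPU_ONLINE runs later and only re-enables
 * boot-enabled sources through the coresight core.
 */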
1684
1685 static bool etm_arch_supported(u8 arch)
1686 {
1687         switch (arch) {
1688         case ETM_ARCH_V3_3:
1689                 break;
1690         case ETM_ARCH_V3_5:
1691                 break;
1692         case PFT_ARCH_V1_0:
1693                 break;
1694         case PFT_ARCH_V1_1:
1695                 break;
1696         default:
1697                 return false;
1698         }
1699         return true;
1700 }
1701
1702 static void etm_init_arch_data(void *info)
1703 {
1704         u32 etmidr;
1705         u32 etmccr;
1706         struct etm_drvdata *drvdata = info;
1707
1708         CS_UNLOCK(drvdata->base);
1709
1710         /* First dummy read */
1711         (void)etm_readl(drvdata, ETMPDSR);
1712         /* Provide power to ETM: ETMPDCR[3] == 1 */
1713         etm_set_pwrup(drvdata);
1714         /*
1715          * Clear the power-down bit since, while it is set, writes to
1716          * certain registers might be ignored.
1717          */
1718         etm_clr_pwrdwn(drvdata);
1719         /*
1720          * Set the programming bit. It comes up set out of reset, but set
1721          * it here as well to be sure.
1722          */
1723         etm_set_prog(drvdata);
1724
1725         /* Find all capabilities */
1726         etmidr = etm_readl(drvdata, ETMIDR);
1727         drvdata->arch = BMVAL(etmidr, 4, 11);
1728         drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
1729
1730         drvdata->etmccer = etm_readl(drvdata, ETMCCER);
1731         etmccr = etm_readl(drvdata, ETMCCR);
1732         drvdata->etmccr = etmccr;
1733         drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
1734         drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
1735         drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
1736         drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
1737         drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
1738
1739         etm_set_pwrdwn(drvdata);
1740         etm_clr_pwrup(drvdata);
1741         CS_LOCK(drvdata->base);
1742 }
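/*
 * For illustration: BMVAL(val, lsb, msb) extracts bits [msb:lsb] of val,
 * so a hypothetical ETMCCR with 0x4 in bits [3:0] would report four
 * address comparator pairs and hence nr_addr_cmp == 8.  The remaining
 * fields decoded above follow the same pattern for counters, external
 * inputs/outputs and context ID comparators.
 */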
1743
1744 static void etm_init_default_data(struct etm_drvdata *drvdata)
1745 {
1746         /*
1747          * A trace ID of value 0 is invalid, so let's start at some
1748          * random value that fits in 7 bits and will be just as good.
1749          */
1750         static int etm3x_traceid = 0x10;
1751
1752         u32 flags = (1 << 0 | /* instruction execute */
1753                      3 << 3 | /* ARM instruction */
1754                      0 << 5 | /* No data value comparison */
1755                      0 << 7 | /* No exact match */
1756                      0 << 8 | /* Ignore context ID */
1757                      0 << 10); /* Security ignored */
1758
1759         /*
1760          * Initial configuration only - this guarantees that sources
1761          * handled by this driver get a unique ID at startup, but not
1762          * with respect to other source types.  For that we lean on the
1763          * core framework.
1764          */
1765         drvdata->traceid = etm3x_traceid++;
1766         drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
1767         drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
1768         if (drvdata->nr_addr_cmp >= 2) {
1769                 drvdata->addr_val[0] = (u32) _stext;
1770                 drvdata->addr_val[1] = (u32) _etext;
1771                 drvdata->addr_acctype[0] = flags;
1772                 drvdata->addr_acctype[1] = flags;
1773                 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
1774                 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
1775         }
1776
1777         etm_set_default(drvdata);
1778 }
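/*
 * In other words, the default configuration above enables cycle-accurate
 * tracing with timestamps and, when at least one address comparator pair
 * is available, restricts instruction tracing to the kernel text range
 * (_stext to _etext) through address range comparator 1.
 */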
1779
1780 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1781 {
1782         int ret;
1783         void __iomem *base;
1784         struct device *dev = &adev->dev;
1785         struct coresight_platform_data *pdata = NULL;
1786         struct etm_drvdata *drvdata;
1787         struct resource *res = &adev->res;
1788         struct coresight_desc *desc;
1789         struct device_node *np = adev->dev.of_node;
1790
1791         desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
1792         if (!desc)
1793                 return -ENOMEM;
1794
1795         drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1796         if (!drvdata)
1797                 return -ENOMEM;
1798
1799         if (np) {
1800                 pdata = of_get_coresight_platform_data(dev, np);
1801                 if (IS_ERR(pdata))
1802                         return PTR_ERR(pdata);
1803
1804                 adev->dev.platform_data = pdata;
1805                 drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
1806         }
1807
1808         drvdata->dev = &adev->dev;
1809         dev_set_drvdata(dev, drvdata);
1810
1811         /* Validity for the resource is already checked by the AMBA core */
1812         base = devm_ioremap_resource(dev, res);
1813         if (IS_ERR(base))
1814                 return PTR_ERR(base);
1815
1816         drvdata->base = base;
1817
1818         spin_lock_init(&drvdata->spinlock);
1819
1820         drvdata->clk = adev->pclk;
1821         ret = clk_prepare_enable(drvdata->clk);
1822         if (ret)
1823                 return ret;
1824
1825         drvdata->cpu = pdata ? pdata->cpu : 0;
1826
1827         get_online_cpus();
1828         etmdrvdata[drvdata->cpu] = drvdata;
1829
1830         if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
1831                 drvdata->os_unlock = true;
1832
1833         if (smp_call_function_single(drvdata->cpu,
1834                                      etm_init_arch_data,  drvdata, 1))
1835                 dev_err(dev, "ETM arch init failed\n");
1836
1837         if (!etm_count++)
1838                 register_hotcpu_notifier(&etm_cpu_notifier);
1839
1840         put_online_cpus();
1841
1842         if (!etm_arch_supported(drvdata->arch)) {
1843                 ret = -EINVAL;
1844                 goto err_arch_supported;
1845         }
1846         etm_init_default_data(drvdata);
1847
1848         clk_disable_unprepare(drvdata->clk);
1849
1850         desc->type = CORESIGHT_DEV_TYPE_SOURCE;
1851         desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1852         desc->ops = &etm_cs_ops;
1853         desc->pdata = pdata;
1854         desc->dev = dev;
1855         desc->groups = coresight_etm_groups;
1856         drvdata->csdev = coresight_register(desc);
1857         if (IS_ERR(drvdata->csdev)) {
1858                 ret = PTR_ERR(drvdata->csdev);
1859                 goto err_arch_supported;
1860         }
1861
1862         dev_info(dev, "ETM initialized\n");
1863
1864         if (boot_enable) {
1865                 coresight_enable(drvdata->csdev);
1866                 drvdata->boot_enable = true;
1867         }
1868
1869         return 0;
1870
1871 err_arch_supported:
1872         clk_disable_unprepare(drvdata->clk);
1873         if (--etm_count == 0)
1874                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1875         return ret;
1876 }
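/*
 * Illustrative usage only: with the boot_enable module parameter set
 * (for a built-in driver, something along the lines of
 * coresight-etm3x.boot_enable=1 on the kernel command line - the exact
 * prefix depends on the module name), the source is enabled here at
 * probe time instead of waiting to be enabled later through the
 * coresight core's sysfs interface.
 */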
1877
1878 static int etm_remove(struct amba_device *adev)
1879 {
1880         struct etm_drvdata *drvdata = amba_get_drvdata(adev);
1881
1882         coresight_unregister(drvdata->csdev);
1883         if (--etm_count == 0)
1884                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1885
1886         return 0;
1887 }
1888
1889 static struct amba_id etm_ids[] = {
1890         {       /* ETM 3.3 */
1891                 .id     = 0x0003b921,
1892                 .mask   = 0x0003ffff,
1893         },
1894         {       /* ETM 3.5 */
1895                 .id     = 0x0003b956,
1896                 .mask   = 0x0003ffff,
1897         },
1898         {       /* PTM 1.0 */
1899                 .id     = 0x0003b950,
1900                 .mask   = 0x0003ffff,
1901         },
1902         {       /* PTM 1.1 */
1903                 .id     = 0x0003b95f,
1904                 .mask   = 0x0003ffff,
1905         },
1906         { 0, 0},
1907 };
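/*
 * The values above match the AMBA/CoreSight peripheral ID of the trace
 * unit: roughly, the low 12 bits carry the part number and the next
 * bits the designer code, while the 0x0003ffff mask leaves the revision
 * field out of the comparison so any revision of a listed ETM/PTM binds
 * to this driver.
 */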
1908
1909 static struct amba_driver etm_driver = {
1910         .drv = {
1911                 .name   = "coresight-etm3x",
1912                 .owner  = THIS_MODULE,
1913         },
1914         .probe          = etm_probe,
1915         .remove         = etm_remove,
1916         .id_table       = etm_ids,
1917 };
1918
1919 int __init etm_init(void)
1920 {
1921         return amba_driver_register(&etm_driver);
1922 }
1923 module_init(etm_init);
1924
1925 void __exit etm_exit(void)
1926 {
1927         amba_driver_unregister(&etm_driver);
1928 }
1929 module_exit(etm_exit);
1930
1931 MODULE_LICENSE("GPL v2");
1932 MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");