Upgrade to 4.4.50-rt62
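Part of the rebase onto the 4.4.50-rt62 patch set: the per-CPU RAPL PMU lock is switched from spinlock_t to raw_spinlock_t. On PREEMPT_RT a spinlock_t becomes a sleeping, rtmutex-based lock, but this lock is taken from the PMU hrtimer handler and from the perf ->add/->start/->stop callbacks, which run with interrupts disabled and must not sleep, so the raw (always spinning) variant is required.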
diff --git a/kernel/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/kernel/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index ed446bd..d2ac364 100644
--- a/kernel/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/kernel/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -117,7 +117,7 @@ static struct perf_pmu_events_attr event_attr_##v = {                       \
 };
 
 struct rapl_pmu {
-       spinlock_t       lock;
+       raw_spinlock_t   lock;
        int              n_active; /* number of active events */
        struct list_head active_list;
        struct pmu       *pmu; /* pointer to rapl_pmu_class */
@@ -220,13 +220,13 @@ static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
        if (!pmu->n_active)
                return HRTIMER_NORESTART;
 
-       spin_lock_irqsave(&pmu->lock, flags);
+       raw_spin_lock_irqsave(&pmu->lock, flags);
 
        list_for_each_entry(event, &pmu->active_list, active_entry) {
                rapl_event_update(event);
        }
 
-       spin_unlock_irqrestore(&pmu->lock, flags);
+       raw_spin_unlock_irqrestore(&pmu->lock, flags);
 
        hrtimer_forward_now(hrtimer, pmu->timer_interval);
 
@@ -263,9 +263,9 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
        struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
        unsigned long flags;
 
-       spin_lock_irqsave(&pmu->lock, flags);
+       raw_spin_lock_irqsave(&pmu->lock, flags);
        __rapl_pmu_event_start(pmu, event);
-       spin_unlock_irqrestore(&pmu->lock, flags);
+       raw_spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
@@ -274,7 +274,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
 
-       spin_lock_irqsave(&pmu->lock, flags);
+       raw_spin_lock_irqsave(&pmu->lock, flags);
 
        /* mark event as deactivated and stopped */
        if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -299,7 +299,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
                hwc->state |= PERF_HES_UPTODATE;
        }
 
-       spin_unlock_irqrestore(&pmu->lock, flags);
+       raw_spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
@@ -308,14 +308,14 @@ static int rapl_pmu_event_add(struct perf_event *event, int mode)
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
 
-       spin_lock_irqsave(&pmu->lock, flags);
+       raw_spin_lock_irqsave(&pmu->lock, flags);
 
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 
        if (mode & PERF_EF_START)
                __rapl_pmu_event_start(pmu, event);
 
-       spin_unlock_irqrestore(&pmu->lock, flags);
+       raw_spin_unlock_irqrestore(&pmu->lock, flags);
 
        return 0;
 }
@@ -603,7 +603,7 @@ static int rapl_cpu_prepare(int cpu)
        pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
        if (!pmu)
                return -1;
-       spin_lock_init(&pmu->lock);
+       raw_spin_lock_init(&pmu->lock);
 
        INIT_LIST_HEAD(&pmu->active_list);
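
For reference, below is a minimal, self-contained sketch of the locking pattern the hunks above converge on. It is not code from the kernel tree; all identifiers (demo_pmu, demo_hrtimer_handle, demo_pmu_init) are hypothetical stand-ins for the real perf_event_intel_rapl.c symbols, and the per-event update is elided to a comment.

/*
 * Sketch of the converted pattern: a per-PMU raw_spinlock_t protecting the
 * list of active events, taken from an hrtimer callback and from code that
 * runs with interrupts disabled.  Hypothetical names, for illustration only.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/ktime.h>

struct demo_pmu {
	raw_spinlock_t   lock;          /* raw: stays a spinning lock on PREEMPT_RT */
	int              n_active;      /* number of active events */
	struct list_head active_list;   /* events currently being counted */
	struct hrtimer   hrtimer;
	ktime_t          timer_interval;
};

/* hrtimer callback: walks the active list under the raw lock. */
static enum hrtimer_restart demo_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct demo_pmu *pmu = container_of(hrtimer, struct demo_pmu, hrtimer);
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	/* ... walk pmu->active_list and update each event ... */
	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);
	return HRTIMER_RESTART;
}

/* Set-up mirroring rapl_cpu_prepare(): the raw variant of the lock init. */
static void demo_pmu_init(struct demo_pmu *pmu)
{
	raw_spin_lock_init(&pmu->lock);
	INIT_LIST_HEAD(&pmu->active_list);
	pmu->n_active = 0;
}

The design point: spin_lock_irqsave() on a spinlock_t no longer disables interrupts and may sleep on PREEMPT_RT, whereas raw_spin_lock_irqsave() always disables interrupts and spins. Since the paths above are entered with interrupts already disabled by the perf core, only the raw variant is correct on an RT kernel.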