kvmfornfv.git: kernel/virt/kvm/arm/arch_timer.c (4.4.50-rt62)
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

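/*
 * Host-wide state: the timecounter backing physical counter reads, the
 * workqueue used to kick vcpus whose background timer has expired, and
 * the host's virtual timer PPI number.
 */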
static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;

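/* Read the raw physical counter through the host's timecounter. */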
static cycle_t kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;

        WARN_ON(!kvm_timer_should_fire(vcpu));

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_kick(vcpu);
}

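/*
 * Return the number of nanoseconds until the vcpu's virtual timer fires,
 * or 0 if CVAL has already been reached.  The per-VM cntvoff converts
 * between the physical counter and the guest's virtual counter.
 */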
static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
{
        cycle_t cval, now;

        cval = vcpu->arch.timer_cpu.cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        if (now < cval) {
                u64 ns;

                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         cval - now,
                                         timecounter->mask,
                                         &timecounter->frac);
                return ns;
        }

        return 0;
}

static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If we should have slept longer, restart it.
         */
        ns = kvm_timer_compute_delta(vcpu);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        queue_work(wqueue, &timer->expired);
        return HRTIMER_NORESTART;
}

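/* The timer can fire only when it is enabled and not masked in CNTV_CTL. */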
static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
                (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
}

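/*
 * True when the guest's compare value has been reached and the timer
 * output line should be asserted.
 */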
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;

        if (!kvm_timer_irq_can_fire(vcpu))
                return false;

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        return cval <= now;
}

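/* Propagate a new timer line level to the VGIC as a mapped interrupt. */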
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
{
        int ret;
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(!vgic_initialized(vcpu->kvm));

        timer->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
                                   timer->irq.level);
        ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
                                         timer->map,
                                         timer->irq.level);
        WARN_ON(ret);
}

/*
 * Check if there was a change in the timer state (should we raise or lower
 * the line level to the GIC).
 */
static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * If userspace modified the timer registers via SET_ONE_REG before
         * the vgic was initialized, we mustn't set the timer->irq.level value
         * because the guest would never see the interrupt.  Instead wait
         * until we call this function from kvm_timer_flush_hwstate.
         */
        if (!vgic_initialized(vcpu->kvm))
                return -ENODEV;

        if (kvm_timer_should_fire(vcpu) != timer->irq.level)
                kvm_timer_update_irq(vcpu, !timer->irq.level);

        return 0;
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(timer_is_armed(timer));

        /*
         * No need to schedule a background timer if the guest timer has
         * already expired, because kvm_vcpu_block will return before putting
         * the thread to sleep.
         */
        if (kvm_timer_should_fire(vcpu))
                return;

        /*
         * If the timer is not capable of raising interrupts (disabled or
         * masked), then there's no more work for us to do.
         */
        if (!kvm_timer_irq_can_fire(vcpu))
                return;

        /* The timer has not yet expired, schedule a background timer */
        timer_arm(timer, kvm_timer_compute_delta(vcpu));
}

void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        timer_disarm(timer);
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the host,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        bool phys_active;
        int ret;

        if (kvm_timer_update_state(vcpu))
                return;

        /*
         * If we enter the guest with the virtual input level to the VGIC
         * asserted, then we have already told the VGIC what we need to, and
         * we don't need to exit from the guest until the guest deactivates
         * the already injected interrupt, so we should set the hardware
         * active state to prevent unnecessary exits from the guest.
         *
         * Also, if we enter the guest with the virtual timer interrupt active,
         * then it must be active on the physical distributor, because we set
         * the HW bit and the guest must be able to deactivate the virtual and
         * physical interrupt at the same time.
         *
         * Conversely, if the virtual input level is deasserted and the virtual
         * interrupt is not active, then always clear the hardware active state
         * to ensure that hardware interrupts from the timer trigger a guest
         * exit.
         */
        if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
                phys_active = true;
        else
                phys_active = false;

        ret = irq_set_irqchip_state(timer->map->irq,
                                    IRQCHIP_STATE_ACTIVE,
                                    phys_active);
        WARN_ON(ret);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(timer_is_armed(timer));

        /*
         * The guest could have modified the timer registers or the timer
         * could have expired, update the timer state.
         */
        kvm_timer_update_state(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct irq_phys_map *map;

        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_timer_vcpu_init() because it is called long before
         * kvm_vcpu_set_target(). To handle this, we determine the
         * vcpu timer irq number when the vcpu is reset.
         */
        timer->irq.irq = irq->irq;

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7.  We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        timer->cntv_ctl = 0;
        kvm_timer_update_state(vcpu);

        /*
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
        map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
        if (WARN_ON(IS_ERR(map)))
                return PTR_ERR(map);

        timer->map = map;
        return 0;
}

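/*
 * Per-vcpu init: set up the work item and the hrtimer that back the
 * background timer used while the vcpu is blocked.
 */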
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
}

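/* Runs on each CPU to enable the host virtual timer PPI. */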
static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, 0);
}

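/*
 * Userspace register access (SET_ONE_REG): writing CNT adjusts the per-VM
 * cntvoff rather than the counter itself.
 */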
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer->cntv_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer->cntv_cval = value;
                break;
        default:
                return -1;
        }

        kvm_timer_update_state(vcpu);
        return 0;
}

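/* Userspace register access (GET_ONE_REG); returns (u64)-1 for unknown ids. */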
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return timer->cntv_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return timer->cntv_cval;
        }
        return (u64)-1;
}

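/* CPU hotplug notifier: enable/disable the timer PPI as CPUs come and go. */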
static int kvm_timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                kvm_timer_init_interrupt(NULL);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                disable_percpu_irq(host_vtimer_irq);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
        .notifier_call = kvm_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] = {
        { .compatible   = "arm,armv7-timer",    },
        { .compatible   = "arm,armv8-timer",    },
        {},
};

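/*
 * Global init: grab the timecounter, find the arch timer DT node, request
 * the per-cpu virtual timer IRQ (the third interrupt listed in the node),
 * register the hotplug notifier and create the expiry workqueue.
 */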
int kvm_timer_hyp_init(void)
{
        struct device_node *np;
        unsigned int ppi;
        int err;

        timecounter = arch_timer_get_timecounter();
        if (!timecounter)
                return -ENODEV;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                kvm_err("kvm_arch_timer: can't find DT node\n");
                return -ENODEV;
        }

        ppi = irq_of_parse_and_map(np, 2);
        if (!ppi) {
                kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
                err = -EINVAL;
                goto out;
        }

        err = request_percpu_irq(ppi, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        ppi, err);
                goto out;
        }

        host_vtimer_irq = ppi;

        err = __register_cpu_notifier(&kvm_timer_cpu_nb);
        if (err) {
                kvm_err("Cannot register timer CPU notifier\n");
                goto out_free;
        }

        wqueue = create_singlethread_workqueue("kvm_arch_timer");
        if (!wqueue) {
                err = -ENOMEM;
                goto out_free;
        }

        kvm_info("%s IRQ%d\n", np->name, ppi);
        on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

        goto out;
out_free:
        free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
        of_node_put(np);
        return err;
}

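/* Per-vcpu teardown: cancel the background timer and unmap the phys IRQ. */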
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
        if (timer->map)
                kvm_vgic_unmap_phys_irq(vcpu, timer->map);
}

void kvm_timer_enable(struct kvm *kvm)
{
        if (kvm->arch.timer.enabled)
                return;

        /*
         * There is a potential race here between VCPUs starting for the first
         * time, each of which may enable the timer.  That doesn't hurt,
         * because we're just setting a variable to the same value it already
         * had.  The important thing is that all VCPUs have the enabled
         * variable set before entering the guest, if the arch timers are
         * enabled.
         */
        if (timecounter && wqueue)
                kvm->arch.timer.enabled = 1;
}

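/*
 * Per-VM init: snapshot the physical counter so that the guest's virtual
 * counter (physical counter minus cntvoff) starts at zero.
 */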
void kvm_timer_init(struct kvm *kvm)
{
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}