put_cpu();
return 0;
}
+EXPORT_SYMBOL_GPL(vcpu_load);
void vcpu_put(struct kvm_vcpu *vcpu)
{
preempt_enable();
mutex_unlock(&vcpu->mutex);
}
+EXPORT_SYMBOL_GPL(vcpu_put);
static void ack_flush(void *_completed)
{
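With both symbols exported via EXPORT_SYMBOL_GPL, GPL modules can bracket vcpu state access the same way the core code does. A minimal sketch of such a caller (the function name is hypothetical; the error check assumes vcpu_load() can fail while acquiring the mutex, as its int return above implies):

static int vcpu_state_access_sketch(struct kvm_vcpu *vcpu)
{
        int r;

        r = vcpu_load(vcpu);    /* acquires vcpu->mutex */
        if (r)
                return r;       /* e.g. the wait for the mutex was killed */

        /* ... read or modify vcpu state here ... */

        vcpu_put(vcpu);         /* re-enables preemption, releases vcpu->mutex */
        return 0;
}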
@@ ... @@ static struct kvm *kvm_create_vm(unsigned long type)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ spin_lock_init(&kvm->mmu_lock);
+ atomic_inc(&current->mm->mm_count);
+ kvm->mm = current->mm;
+ kvm_eventfd_init(kvm);
+ mutex_init(&kvm->lock);
+ mutex_init(&kvm->irq_lock);
+ mutex_init(&kvm->slots_lock);
+ atomic_set(&kvm->users_count, 1);
+ INIT_LIST_HEAD(&kvm->devices);
+
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_no_disable;
@@ ... @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err;
}
- spin_lock_init(&kvm->mmu_lock);
- kvm->mm = current->mm;
- atomic_inc(&kvm->mm->mm_count);
- kvm_eventfd_init(kvm);
- mutex_init(&kvm->lock);
- mutex_init(&kvm->irq_lock);
- mutex_init(&kvm->slots_lock);
- atomic_set(&kvm->users_count, 1);
- INIT_LIST_HEAD(&kvm->devices);
-
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err;
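Taken together, the two kvm_create_vm() hunks move this initialization block ahead of the kvm_arch_init_vm() call, unchanged except that the mm_count reference is now taken through current->mm before kvm->mm is assigned. The point is ordering: the arch hook can end up using fields such as kvm->mmu_lock, which until now were initialized only after it returned. A hypothetical arch hook illustrates the use-before-init hazard (a sketch only, not taken from any architecture's code):

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        /*
         * Before this patch, kvm->mmu_lock was spin_lock_init()ed only
         * after this hook returned, so the locking below would have
         * operated on an uninitialized spinlock.
         */
        spin_lock(&kvm->mmu_lock);
        /* ... architecture-specific VM setup ... */
        spin_unlock(&kvm->mmu_lock);
        return 0;
}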
@@ ... @@ static struct kvm *kvm_create_vm(unsigned long type)
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_free_memslots(kvm, kvm->memslots[i]);
kvm_arch_free_vm(kvm);
+ mmdrop(current->mm);
return ERR_PTR(r);
}
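The one-line addition at the common error exit exists because the mm_count reference is now taken before the first failure point: once atomic_inc(&current->mm->mm_count) has run, every error path out of kvm_create_vm() must undo it, which mmdrop(current->mm) does by dropping mm_count (the mm is freed when the count reaches zero). The matching drop on the success path happens later, when the VM is torn down in kvm_destroy_vm(). Condensed, the pairing looks like this (a sketch of the control flow above, with unrelated steps elided):

        atomic_inc(&current->mm->mm_count);     /* reference taken before any failure point */

        r = kvm_arch_init_vm(kvm, type);
        if (r)
                goto out_err_no_disable;        /* unwinds to the common error exit */
        /* ... */
        kvm_arch_free_vm(kvm);
        mmdrop(current->mm);                    /* balances the atomic_inc() on every error path */
        return ERR_PTR(r);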
@@ ... @@ static long kvm_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
if (copy_from_user(&routing, argp, sizeof(routing)))
goto out;
r = -EINVAL;
- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+ if (routing.nr > KVM_MAX_IRQ_ROUTES)
goto out;
if (routing.flags)
goto out;
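The comparison change is an off-by-one fix: routing.nr counts the entries the caller passes in, and a table of exactly KVM_MAX_IRQ_ROUTES entries is legal, so only values strictly above the limit should be rejected. Spelled out at the boundary (the numeric value is assumed purely for illustration):

/*
 * Assume KVM_MAX_IRQ_ROUTES == 4096 for illustration:
 *
 *   routing.nr == 4095: old ">=" accepts, new ">" accepts
 *   routing.nr == 4096: old ">=" rejects, new ">" accepts   <-- the fix
 *   routing.nr == 4097: old ">=" rejects, new ">" rejects
 */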