X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=kernel%2Fdrivers%2Fdca%2Fdca-core.c;fp=kernel%2Fdrivers%2Fdca%2Fdca-core.c;h=819dfda8823623fb8c7741f4ed299626393db044;hb=9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00;hp=0000000000000000000000000000000000000000;hpb=98260f3884f4a202f9ca5eabed40b1354c489b29;p=kvmfornfv.git
diff --git a/kernel/drivers/dca/dca-core.c b/kernel/drivers/dca/dca-core.c
new file mode 100644
index 000000000..819dfda88
--- /dev/null
+++ b/kernel/drivers/dca/dca-core.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports an interface for DCA clients and providers to meet.
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/dca.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define DCA_VERSION "1.12.1"
+
+MODULE_VERSION(DCA_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static DEFINE_RAW_SPINLOCK(dca_lock);
+
+static LIST_HEAD(dca_domains);
+
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
+static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_bus *bus = pdev->bus;
+
+	while (bus->parent)
+		bus = bus->parent;
+
+	return bus;
+}
+
+static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
+{
+	struct dca_domain *domain;
+
+	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
+	if (!domain)
+		return NULL;
+
+	INIT_LIST_HEAD(&domain->dca_providers);
+	domain->pci_rc = rc;
+
+	return domain;
+}
+
+static void dca_free_domain(struct dca_domain *domain)
+{
+	list_del(&domain->node);
+	kfree(domain);
+}
+
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+	struct dca_provider *dca, *_dca;
+	struct list_head unregistered_providers;
+	struct dca_domain *domain;
+	unsigned long flags;
+
+	blocking_notifier_call_chain(&dca_provider_chain,
+				     DCA_PROVIDER_REMOVE, NULL);
+
+	INIT_LIST_HEAD(&unregistered_providers);
+
+	raw_spin_lock_irqsave(&dca_lock, flags);
+
+	if (list_empty(&dca_domains)) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return;
+	}
+
+	/* at this point only one domain in the list is expected */
+	domain = list_first_entry(&dca_domains, struct dca_domain, node);
+
+	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
+		list_move(&dca->node, &unregistered_providers);
+
+	dca_free_domain(domain);
+
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+		dca_sysfs_remove_provider(dca);
+		list_del(&dca->node);
+	}
+}
+
+static struct dca_domain *dca_find_domain(struct pci_bus *rc)
+{
+	struct dca_domain *domain;
+
+	list_for_each_entry(domain, &dca_domains, node)
+		if (domain->pci_rc == rc)
+			return domain;
+
+	return NULL;
+}
+
+static struct dca_domain *dca_get_domain(struct device *dev)
+{
+	struct pci_bus *rc;
+	struct dca_domain *domain;
+
+	rc = dca_pci_rc_from_dev(dev);
+	domain = dca_find_domain(rc);
+
+	if (!domain) {
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
+			dca_providers_blocked = 1;
+	}
+
+	return domain;
+}
+
+static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
+{
+	struct dca_provider *dca;
+	struct pci_bus *rc;
+	struct dca_domain *domain;
+
+	if (dev) {
+		rc = dca_pci_rc_from_dev(dev);
+		domain = dca_find_domain(rc);
+		if (!domain)
+			return NULL;
+	} else {
+		if (!list_empty(&dca_domains))
+			domain = list_first_entry(&dca_domains,
+						  struct dca_domain,
+						  node);
+		else
+			return NULL;
+	}
+
+	list_for_each_entry(dca, &domain->dca_providers, node)
+		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
+			return dca;
+
+	return NULL;
+}
+
+/**
+ * dca_add_requester - add a dca client to the list
+ * @dev - the device that wants dca service
+ */
+int dca_add_requester(struct device *dev)
+{
+	struct dca_provider *dca;
+	int err, slot = -ENODEV;
+	unsigned long flags;
+	struct pci_bus *pci_rc;
+	struct dca_domain *domain;
+
+	if (!dev)
+		return -EFAULT;
+
+	raw_spin_lock_irqsave(&dca_lock, flags);
+
+	/* check if the requester has not been added already */
+	dca = dca_find_provider_by_dev(dev);
+	if (dca) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return -EEXIST;
+	}
+
+	pci_rc = dca_pci_rc_from_dev(dev);
+	domain = dca_find_domain(pci_rc);
+	if (!domain) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return -ENODEV;
+	}
+
+	list_for_each_entry(dca, &domain->dca_providers, node) {
+		slot = dca->ops->add_requester(dca, dev);
+		if (slot >= 0)
+			break;
+	}
+
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+	if (slot < 0)
+		return slot;
+
+	err = dca_sysfs_add_req(dca, dev, slot);
+	if (err) {
+		raw_spin_lock_irqsave(&dca_lock, flags);
+		if (dca == dca_find_provider_by_dev(dev))
+			dca->ops->remove_requester(dca, dev);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dca_add_requester);
+
+/**
+ * dca_remove_requester - remove a dca client from the list
+ * @dev - the device that wants dca service
+ */
+int dca_remove_requester(struct device *dev)
+{
+	struct dca_provider *dca;
+	int slot;
+	unsigned long flags;
+
+	if (!dev)
+		return -EFAULT;
+
+	raw_spin_lock_irqsave(&dca_lock, flags);
+	dca = dca_find_provider_by_dev(dev);
+	if (!dca) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return -ENODEV;
+	}
+	slot = dca->ops->remove_requester(dca, dev);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+	if (slot < 0)
+		return slot;
+
+	dca_sysfs_remove_req(dca, slot);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dca_remove_requester);
+
+/**
+ * dca_common_get_tag - return the dca tag (serves both new and old api)
+ * @dev - the device that wants dca service
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca_common_get_tag(struct device *dev, int cpu)
+{
+	struct dca_provider *dca;
+	u8 tag;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&dca_lock, flags);
+
+	dca = dca_find_provider_by_dev(dev);
+	if (!dca) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return -ENODEV;
+	}
+	tag = dca->ops->get_tag(dca, dev, cpu);
+
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
+	return tag;
+}
+
+/**
+ * dca3_get_tag - return the dca tag to the requester device
+ *                for the given cpu (new api)
+ * @dev - the device that wants dca service
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca3_get_tag(struct device *dev, int cpu)
+{
+	if (!dev)
+		return -EFAULT;
+
+	return dca_common_get_tag(dev, cpu);
+}
+EXPORT_SYMBOL_GPL(dca3_get_tag);
+
+/**
+ * dca_get_tag - return the dca tag for the given cpu (old api)
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca_get_tag(int cpu)
+{
+	struct device *dev = NULL;
+
+	return dca_common_get_tag(dev, cpu);
+}
+EXPORT_SYMBOL_GPL(dca_get_tag);
+
+/**
+ * alloc_dca_provider - get data struct for describing a dca provider
+ * @ops - pointer to struct of dca operation function pointers
+ * @priv_size - size of extra mem to be added for provider's needs
+ */
+struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
+{
+	struct dca_provider *dca;
+	int alloc_size;
+
+	alloc_size = (sizeof(*dca) + priv_size);
+	dca = kzalloc(alloc_size, GFP_KERNEL);
+	if (!dca)
+		return NULL;
+	dca->ops = ops;
+
+	return dca;
+}
+EXPORT_SYMBOL_GPL(alloc_dca_provider);
+
+/**
+ * free_dca_provider - release the dca provider data struct
+ * @ops - pointer to struct of dca operation function pointers
+ * @priv_size - size of extra mem to be added for provider's needs
+ */
+void free_dca_provider(struct dca_provider *dca)
+{
+	kfree(dca);
+}
+EXPORT_SYMBOL_GPL(free_dca_provider);
+
+/**
+ * register_dca_provider - register a dca provider
+ * @dca - struct created by alloc_dca_provider()
+ * @dev - device providing dca services
+ */
+int register_dca_provider(struct dca_provider *dca, struct device *dev)
+{
+	int err;
+	unsigned long flags;
+	struct dca_domain *domain, *newdomain = NULL;
+
+	raw_spin_lock_irqsave(&dca_lock, flags);
+	if (dca_providers_blocked) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return -ENODEV;
+	}
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+	err = dca_sysfs_add_provider(dca, dev);
+	if (err)
+		return err;
+
+	raw_spin_lock_irqsave(&dca_lock, flags);
+	domain = dca_get_domain(dev);
+	if (!domain) {
+		struct pci_bus *rc;
+
+		if (dca_providers_blocked) {
+			raw_spin_unlock_irqrestore(&dca_lock, flags);
+			dca_sysfs_remove_provider(dca);
+			unregister_dca_providers();
+			return -ENODEV;
+		}
+
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		rc = dca_pci_rc_from_dev(dev);
+		newdomain = dca_allocate_domain(rc);
+		if (!newdomain)
+			return -ENODEV;
+		raw_spin_lock_irqsave(&dca_lock, flags);
+		/* Recheck, we might have raced after dropping the lock */
+		domain = dca_get_domain(dev);
+		if (!domain) {
+			domain = newdomain;
+			newdomain = NULL;
+			list_add(&domain->node, &dca_domains);
+		}
+	}
+	list_add(&dca->node, &domain->dca_providers);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+	blocking_notifier_call_chain(&dca_provider_chain,
+				     DCA_PROVIDER_ADD, NULL);
+	kfree(newdomain);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(register_dca_provider);
+
+/**
+ * unregister_dca_provider - remove a dca provider
+ * @dca - struct created by alloc_dca_provider()
+ */
+void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
+{
+	unsigned long flags;
+	struct pci_bus *pci_rc;
+	struct dca_domain *domain;
+
+	blocking_notifier_call_chain(&dca_provider_chain,
+				     DCA_PROVIDER_REMOVE, NULL);
+
+	raw_spin_lock_irqsave(&dca_lock, flags);
+
+	if (list_empty(&dca_domains)) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return;
+	}
+
+	list_del(&dca->node);
+
+	pci_rc = dca_pci_rc_from_dev(dev);
+	domain = dca_find_domain(pci_rc);
+	if (list_empty(&domain->dca_providers))
+		dca_free_domain(domain);
+
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+	dca_sysfs_remove_provider(dca);
+}
+EXPORT_SYMBOL_GPL(unregister_dca_provider);
+
+/**
+ * dca_register_notify - register a client's notifier callback
+ */
+void dca_register_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&dca_provider_chain, nb);
+}
+EXPORT_SYMBOL_GPL(dca_register_notify);
+
+/**
+ * dca_unregister_notify - remove a client's notifier callback
+ */
+void dca_unregister_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
+}
+EXPORT_SYMBOL_GPL(dca_unregister_notify);
+
+static int __init dca_init(void)
+{
+	pr_info("dca service started, version %s\n", DCA_VERSION);
+	return dca_sysfs_init();
+}
+
+static void __exit dca_exit(void)
+{
+	dca_sysfs_exit();
+}
+
+arch_initcall(dca_init);
+module_exit(dca_exit);
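
Usage sketch: the interface added by this file is consumed from two sides, a DCA provider driver (in-tree, the ioatdma DMA engine driver) and DCA client drivers (NIC drivers such as igb and ixgbe). The sequence below only illustrates the calls exported above; my_dca_ops, struct my_priv and pdev are hypothetical placeholder names, not part of the patch.

	/* Provider side: describe and register a provider for this device. */
	struct dca_provider *prov;

	prov = alloc_dca_provider(&my_dca_ops, sizeof(struct my_priv));
	if (prov && register_dca_provider(prov, &pdev->dev) == 0) {
		/* provider is now visible to clients under the same PCI root complex */
	}

	/* Client side: join the DCA service and fetch a tag for the current CPU. */
	if (dca_add_requester(&pdev->dev) == 0) {
		int cpu = get_cpu();
		u8 tag = dca3_get_tag(&pdev->dev, cpu);

		put_cpu();
		/* program 'tag' into the device's per-queue DCA control registers */
	}

	/* Teardown, in the respective drivers. */
	dca_remove_requester(&pdev->dev);
	unregister_dca_provider(prov, &pdev->dev);
	free_dca_provider(prov);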