X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?p=kvmfornfv.git;a=blobdiff_plain;f=kernel%2Fdrivers%2Fpci%2Fmsi.c;h=7eaa4c87fec71c8dd792ccffc919b322711d91dd;hp=c3e7dfcf9ff53b851a8dff2979b3bddc9c2905a3;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hpb=f93b97fd65072de626c074dbe099a1fff05ce060

diff --git a/kernel/drivers/pci/msi.c b/kernel/drivers/pci/msi.c
index c3e7dfcf9..7eaa4c87f 100644
--- a/kernel/drivers/pci/msi.c
+++ b/kernel/drivers/pci/msi.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 #include "pci.h"
 
@@ -39,14 +40,13 @@ struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
 
 static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
 {
-    struct irq_domain *domain = NULL;
+    struct irq_domain *domain;
 
-    if (dev->bus->msi)
-        domain = dev->bus->msi->domain;
-    if (!domain)
-        domain = arch_get_pci_msi_domain(dev);
+    domain = dev_get_msi_domain(&dev->dev);
+    if (domain)
+        return domain;
 
-    return domain;
+    return arch_get_pci_msi_domain(dev);
 }
 
 static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -54,7 +54,7 @@ static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
     struct irq_domain *domain;
 
     domain = pci_msi_get_domain(dev);
-    if (domain)
+    if (domain && irq_domain_is_hierarchy(domain))
         return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
 
     return arch_setup_msi_irqs(dev, nvec, type);
@@ -65,7 +65,7 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
     struct irq_domain *domain;
 
     domain = pci_msi_get_domain(dev);
-    if (domain)
+    if (domain && irq_domain_is_hierarchy(domain))
         pci_msi_domain_free_irqs(domain, dev);
     else
         arch_teardown_msi_irqs(dev);
@@ -77,24 +77,9 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
 
 /* Arch hooks */
 
-struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
-{
-    return NULL;
-}
-
-static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
-{
-    struct msi_controller *msi_ctrl = dev->bus->msi;
-
-    if (msi_ctrl)
-        return msi_ctrl;
-
-    return pcibios_msi_controller(dev);
-}
-
 int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
-    struct msi_controller *chip = pci_msi_controller(dev);
+    struct msi_controller *chip = dev->bus->msi;
     int err;
 
     if (!chip || !chip->setup_irq)
@@ -121,9 +106,12 @@ void __weak arch_teardown_msi_irq(unsigned int irq)
 
 int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
+    struct msi_controller *chip = dev->bus->msi;
     struct msi_desc *entry;
     int ret;
 
+    if (chip && chip->setup_irqs)
+        return chip->setup_irqs(chip, dev, nvec, type);
     /*
      * If an architecture wants to support multiple MSI, it needs to
      * override arch_setup_msi_irqs()
@@ -131,7 +119,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
     if (type == PCI_CAP_ID_MSI && nvec > 1)
         return 1;
 
-    list_for_each_entry(entry, &dev->msi_list, list) {
+    for_each_pci_msi_entry(entry, dev) {
         ret = arch_setup_msi_irq(dev, entry);
         if (ret < 0)
             return ret;
@@ -151,7 +139,7 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
     int i;
     struct msi_desc *entry;
 
-    list_for_each_entry(entry, &dev->msi_list, list)
+    for_each_pci_msi_entry(entry, dev)
         if (entry->irq)
             for (i = 0; i < entry->nvec_used; i++)
                 arch_teardown_msi_irq(entry->irq + i);
@@ -168,7 +156,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
 
     entry = NULL;
     if (dev->msix_enabled) {
-        list_for_each_entry(entry, &dev->msi_list, list) {
+        for_each_pci_msi_entry(entry, dev) {
             if (irq == entry->irq)
                 break;
         }
@@ -185,27 +173,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev)
     return default_restore_msi_irqs(dev);
 }
 
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
-    u16 control;
-
-    pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-    control &= ~PCI_MSI_FLAGS_ENABLE;
-    if (enable)
-        control |= PCI_MSI_FLAGS_ENABLE;
-    pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
-}
-
-static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
-{
-    u16 ctrl;
-
-    pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
-    ctrl &= ~clear;
-    ctrl |= set;
-    pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
-}
-
 static inline __attribute_const__ u32 msi_mask(unsigned x)
 {
     /* Don't shift by >= width of type */
@@ -229,7 +196,8 @@ u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 
     mask_bits &= ~mask;
     mask_bits |= flag;
-    pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+    pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
+                           mask_bits);
 
     return mask_bits;
 }
@@ -270,7 +238,7 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
 
 static void msi_set_mask_bit(struct irq_data *data, u32 flag)
 {
-    struct msi_desc *desc = irq_data_get_msi(data);
+    struct msi_desc *desc = irq_data_get_msi_desc(data);
 
     if (desc->msi_attrib.is_msix) {
         msix_mask_irq(desc, flag);
@@ -303,13 +271,15 @@ void default_restore_msi_irqs(struct pci_dev *dev)
 {
     struct msi_desc *entry;
 
-    list_for_each_entry(entry, &dev->msi_list, list)
+    for_each_pci_msi_entry(entry, dev)
         default_restore_msi_irq(dev, entry->irq);
 }
 
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-    BUG_ON(entry->dev->current_state != PCI_D0);
+    struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+    BUG_ON(dev->current_state != PCI_D0);
 
     if (entry->msi_attrib.is_msix) {
         void __iomem *base = entry->mask_base +
@@ -319,7 +289,6 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
         msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
         msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
     } else {
-        struct pci_dev *dev = entry->dev;
         int pos = dev->msi_cap;
         u16 data;
 
@@ -339,7 +308,9 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-    if (entry->dev->current_state != PCI_D0) {
+    struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+    if (dev->current_state != PCI_D0) {
         /* Don't touch the hardware now */
     } else if (entry->msi_attrib.is_msix) {
         void __iomem *base;
@@ -350,7 +321,6 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
         writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
         writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
     } else {
-        struct pci_dev *dev = entry->dev;
         int pos = dev->msi_cap;
         u16 msgctl;
 
@@ -384,21 +354,22 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
 
 static void free_msi_irqs(struct pci_dev *dev)
 {
+    struct list_head *msi_list = dev_to_msi_list(&dev->dev);
     struct msi_desc *entry, *tmp;
     struct attribute **msi_attrs;
     struct device_attribute *dev_attr;
     int i, count = 0;
 
-    list_for_each_entry(entry, &dev->msi_list, list)
+    for_each_pci_msi_entry(entry, dev)
         if (entry->irq)
             for (i = 0; i < entry->nvec_used; i++)
                 BUG_ON(irq_has_action(entry->irq + i));
 
     pci_msi_teardown_msi_irqs(dev);
 
-    list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+    list_for_each_entry_safe(entry, tmp, msi_list, list) {
         if (entry->msi_attrib.is_msix) {
-            if (list_is_last(&entry->list, &dev->msi_list))
+            if (list_is_last(&entry->list, msi_list))
                 iounmap(entry->mask_base);
         }
 
@@ -423,18 +394,6 @@ static void free_msi_irqs(struct pci_dev *dev)
     }
 }
 
-static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
-{
-    struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-    if (!desc)
-        return NULL;
-
-    INIT_LIST_HEAD(&desc->list);
-    desc->dev = dev;
-
-    return desc;
-}
-
 static void pci_intx_for_msi(struct pci_dev *dev, int enable)
 {
     if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
@@ -452,7 +411,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
     entry = irq_get_msi_desc(dev->irq);
 
     pci_intx_for_msi(dev, 0);
-    msi_set_enable(dev, 0);
+    pci_msi_set_enable(dev, 0);
     arch_restore_msi_irqs(dev);
 
     pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
@@ -469,18 +428,18 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
     if (!dev->msix_enabled)
         return;
-    BUG_ON(list_empty(&dev->msi_list));
+    BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
 
     /* route the table */
     pci_intx_for_msi(dev, 0);
-    msix_clear_and_set_ctrl(dev, 0,
+    pci_msix_clear_and_set_ctrl(dev, 0,
                 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
     arch_restore_msi_irqs(dev);
-    list_for_each_entry(entry, &dev->msi_list, list)
+    for_each_pci_msi_entry(entry, dev)
         msix_mask_irq(entry, entry->masked);
 
-    msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+    pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
 
 void pci_restore_msi_state(struct pci_dev *dev)
@@ -520,10 +479,11 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
     int ret = -ENOMEM;
     int num_msi = 0;
     int count = 0;
+    int i;
 
     /* Determine how many msi entries we have */
-    list_for_each_entry(entry, &pdev->msi_list, list)
-        ++num_msi;
+    for_each_pci_msi_entry(entry, pdev)
+        num_msi += entry->nvec_used;
     if (!num_msi)
         return 0;
 
@@ -531,20 +491,22 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
     msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
     if (!msi_attrs)
         return -ENOMEM;
-    list_for_each_entry(entry, &pdev->msi_list, list) {
-        msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
-        if (!msi_dev_attr)
-            goto error_attrs;
-        msi_attrs[count] = &msi_dev_attr->attr;
-
-        sysfs_attr_init(&msi_dev_attr->attr);
-        msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
-                                            entry->irq);
-        if (!msi_dev_attr->attr.name)
-            goto error_attrs;
-        msi_dev_attr->attr.mode = S_IRUGO;
-        msi_dev_attr->show = msi_mode_show;
-        ++count;
+    for_each_pci_msi_entry(entry, pdev) {
+        for (i = 0; i < entry->nvec_used; i++) {
+            msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+            if (!msi_dev_attr)
+                goto error_attrs;
+            msi_attrs[count] = &msi_dev_attr->attr;
+
+            sysfs_attr_init(&msi_dev_attr->attr);
+            msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+                                                entry->irq + i);
+            if (!msi_dev_attr->attr.name)
+                goto error_attrs;
+            msi_dev_attr->attr.mode = S_IRUGO;
+            msi_dev_attr->show = msi_mode_show;
+            ++count;
+        }
     }
 
     msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
@@ -589,7 +551,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
     struct msi_desc *entry;
 
     /* MSI Entry Initialization */
-    entry = alloc_msi_entry(dev);
+    entry = alloc_msi_entry(&dev->dev);
     if (!entry)
         return NULL;
 
@@ -620,7 +582,7 @@ static int msi_verify_entries(struct pci_dev *dev)
 {
     struct msi_desc *entry;
 
-    list_for_each_entry(entry, &dev->msi_list, list) {
+    for_each_pci_msi_entry(entry, dev) {
         if (!dev->no_64bit_msi || !entry->msg.address_hi)
             continue;
         dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
@@ -647,7 +609,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
     int ret;
     unsigned mask;
 
-    msi_set_enable(dev, 0);    /* Disable MSI during set up */
+    pci_msi_set_enable(dev, 0);    /* Disable MSI during set up */
 
     entry = msi_setup_entry(dev, nvec);
     if (!entry)
@@ -657,7 +619,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
     mask = msi_mask(entry->msi_attrib.multi_cap);
     msi_mask_irq(entry, mask, mask);
 
-    list_add_tail(&entry->list, &dev->msi_list);
+    list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 
     /* Configure MSI capability structure */
     ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -683,9 +645,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 
     /* Set MSI enabled bits */
     pci_intx_for_msi(dev, 0);
-    msi_set_enable(dev, 1);
+    pci_msi_set_enable(dev, 1);
     dev->msi_enabled = 1;
 
+    pcibios_free_irq(dev);
     dev->irq = entry->irq;
     return 0;
 }
@@ -717,7 +680,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
     int i;
 
     for (i = 0; i < nvec; i++) {
-        entry = alloc_msi_entry(dev);
+        entry = alloc_msi_entry(&dev->dev);
         if (!entry) {
             if (!i)
                 iounmap(base);
@@ -734,7 +697,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
         entry->mask_base = base;
         entry->nvec_used = 1;
 
-        list_add_tail(&entry->list, &dev->msi_list);
+        list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
     }
 
     return 0;
@@ -746,7 +709,7 @@ static void msix_program_entries(struct pci_dev *dev,
     struct msi_desc *entry;
     int i = 0;
 
-    list_for_each_entry(entry, &dev->msi_list, list) {
+    for_each_pci_msi_entry(entry, dev) {
         int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
                         PCI_MSIX_ENTRY_VECTOR_CTRL;
 
@@ -775,7 +738,7 @@ static int msix_capability_init(struct pci_dev *dev,
     void __iomem *base;
 
     /* Ensure MSI-X is disabled while it is set up */
-    msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+    pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 
     pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
     /* Request & Map MSI-X table region */
@@ -801,7 +764,7 @@ static int msix_capability_init(struct pci_dev *dev,
      * MSI-X registers. We need to mask all the vectors to prevent
      * interrupts coming in before they're fully set up.
     */
-    msix_clear_and_set_ctrl(dev, 0,
+    pci_msix_clear_and_set_ctrl(dev, 0,
                 PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
 
     msix_program_entries(dev, entries);
@@ -813,9 +776,9 @@ static int msix_capability_init(struct pci_dev *dev,
     /* Set MSI-X enabled bits and unmask the function */
     pci_intx_for_msi(dev, 0);
     dev->msix_enabled = 1;
+    pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
-    msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
-
+    pcibios_free_irq(dev);
     return 0;
 
 out_avail:
@@ -827,7 +790,7 @@ out_avail:
         struct msi_desc *entry;
         int avail = 0;
 
-        list_for_each_entry(entry, &dev->msi_list, list) {
+        for_each_pci_msi_entry(entry, dev) {
             if (entry->irq != 0)
                 avail++;
         }
@@ -916,10 +879,10 @@ void pci_msi_shutdown(struct pci_dev *dev)
     if (!pci_msi_enable || !dev || !dev->msi_enabled)
         return;
 
-    BUG_ON(list_empty(&dev->msi_list));
-    desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+    BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
+    desc = first_pci_msi_entry(dev);
 
-    msi_set_enable(dev, 0);
+    pci_msi_set_enable(dev, 0);
     pci_intx_for_msi(dev, 1);
     dev->msi_enabled = 0;
 
@@ -930,6 +893,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
 
     /* Restore dev->irq to its default pin-assertion irq */
     dev->irq = desc->msi_attrib.default_irq;
+    pcibios_alloc_irq(dev);
 }
 
 void pci_disable_msi(struct pci_dev *dev)
@@ -1022,14 +986,15 @@ void pci_msix_shutdown(struct pci_dev *dev)
         return;
 
     /* Return the device with MSI-X masked as initial states */
-    list_for_each_entry(entry, &dev->msi_list, list) {
+    for_each_pci_msi_entry(entry, dev) {
         /* Keep cached states to be restored */
         __pci_msix_desc_mask_irq(entry, 1);
     }
 
-    msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+    pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
     pci_intx_for_msi(dev, 1);
     dev->msix_enabled = 0;
+    pcibios_alloc_irq(dev);
 }
 
 void pci_disable_msix(struct pci_dev *dev)
@@ -1061,19 +1026,6 @@ EXPORT_SYMBOL(pci_msi_enabled);
 
 void pci_msi_init_pci_dev(struct pci_dev *dev)
 {
-    INIT_LIST_HEAD(&dev->msi_list);
-
-    /* Disable the msi hardware to avoid screaming interrupts
-     * during boot. This is the power on reset default so
-     * usually this should be a noop.
-     */
-    dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
-    if (dev->msi_cap)
-        msi_set_enable(dev, 0);
-
-    dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-    if (dev->msix_cap)
-        msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 }
 
 /**
@@ -1170,6 +1122,19 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+    return to_pci_dev(desc->dev);
+}
+
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+{
+    struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+    return dev->bus->sysdata;
+}
+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
+
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
 /**
  * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
@@ -1178,7 +1143,7 @@ EXPORT_SYMBOL(pci_enable_msix_range);
  */
 void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
 {
-    struct msi_desc *desc = irq_data->msi_desc;
+    struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
 
     /*
     * For MSI-X desc->irq is always equal to irq_data->irq. For
@@ -1285,11 +1250,15 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
     BUG_ON(!chip);
     if (!chip->irq_write_msi_msg)
         chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+    if (!chip->irq_mask)
+        chip->irq_mask = pci_msi_mask_irq;
+    if (!chip->irq_unmask)
+        chip->irq_unmask = pci_msi_unmask_irq;
 }
 
 /**
- * pci_msi_create_irq_domain - Creat a MSI interrupt domain
- * @node: Optional device-tree node of the interrupt controller
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
  * @info: MSI domain info
  * @parent: Parent irq domain
  *
@@ -1298,16 +1267,23 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
  * Returns:
  * A domain pointer or NULL in case of failure.
  */
-struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
                          struct msi_domain_info *info,
                          struct irq_domain *parent)
 {
+    struct irq_domain *domain;
+
     if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
         pci_msi_domain_update_dom_ops(info);
     if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
         pci_msi_domain_update_chip_ops(info);
 
-    return msi_create_irq_domain(node, info, parent);
+    domain = msi_create_irq_domain(fwnode, info, parent);
+    if (!domain)
+        return NULL;
+
+    domain->bus_token = DOMAIN_BUS_PCI_MSI;
+    return domain;
 }
 
 /**
@@ -1338,14 +1314,14 @@ void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
 
 /**
  * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
- * @node: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional fwnode of the interrupt controller
  * @info: MSI domain info
  * @parent: Parent irq domain
 *
 * Returns: A domain pointer or NULL in case of failure. If successful
 * the default PCI/MSI irqdomain pointer is updated.
 */
-struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
         struct msi_domain_info *info, struct irq_domain *parent)
 {
     struct irq_domain *domain;
@@ -1355,11 +1331,59 @@ struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
         pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
         domain = NULL;
     } else {
-        domain = pci_msi_create_irq_domain(node, info, parent);
+        domain = pci_msi_create_irq_domain(fwnode, info, parent);
         pci_msi_default_domain = domain;
     }
     mutex_unlock(&pci_msi_domain_lock);
 
     return domain;
 }
+
+static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+    u32 *pa = data;
+
+    *pa = alias;
+    return 0;
+}
+/**
+ * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+ * @domain: The interrupt domain
+ * @pdev: The PCI device.
+ *
+ * The RID for a device is formed from the alias, with a firmware
+ * supplied mapping applied
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+{
+    struct device_node *of_node;
+    u32 rid = 0;
+
+    pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+    of_node = irq_domain_get_of_node(domain);
+    if (of_node)
+        rid = of_msi_map_rid(&pdev->dev, of_node, rid);
+
+    return rid;
+}
+
+/**
+ * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+ * @pdev: The PCI device
+ *
+ * Use the firmware data to find a device-specific MSI domain
+ * (i.e. not one that is set as a default).
+ *
+ * Returns: The corresponding MSI domain or NULL if none has been found.
+ */
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+    u32 rid = 0;
+
+    pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+    return of_msi_map_get_device_domain(&pdev->dev, rid);
+}
 #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */