kernel/drivers/iommu/intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  */
19
20 #include <linux/init.h>
21 #include <linux/bitmap.h>
22 #include <linux/debugfs.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/irq.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 #include <linux/dmar.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/mempool.h>
32 #include <linux/memory.h>
33 #include <linux/timer.h>
34 #include <linux/iova.h>
35 #include <linux/iommu.h>
36 #include <linux/intel-iommu.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/pci-ats.h>
41 #include <linux/memblock.h>
42 #include <linux/dma-contiguous.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
46
47 #include "irq_remapping.h"
48
49 #define ROOT_SIZE               VTD_PAGE_SIZE
50 #define CONTEXT_SIZE            VTD_PAGE_SIZE
51
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
54 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
55 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
56
57 #define IOAPIC_RANGE_START      (0xfee00000)
58 #define IOAPIC_RANGE_END        (0xfeefffff)
59 #define IOVA_START_ADDR         (0x1000)
60
61 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62
63 #define MAX_AGAW_WIDTH 64
64 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
65
66 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
67 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
68
69 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
70    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
71 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
72                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
73 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
74
75 /* IO virtual address start page frame number */
76 #define IOVA_START_PFN          (1)
77
78 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
79 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
80 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
81
82 /* page table handling */
83 #define LEVEL_STRIDE            (9)
84 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
85
86 /*
87  * This bitmap is used to advertise the page sizes our hardware supports
88  * to the IOMMU core, which will then use this information to split
89  * physically contiguous memory regions it is mapping into page sizes
90  * that we support.
91  *
92  * Traditionally the IOMMU core just handed us the mappings directly,
93  * after making sure the size is a power-of-two multiple of 4KiB and that the
94  * mapping has natural alignment.
95  *
96  * To retain this behavior, we currently advertise that we support
97  * all page sizes that are an order of 4KiB.
98  *
99  * If at some point we'd like to utilize the IOMMU core's new behavior,
100  * we could change this to advertise the real page sizes we support.
101  */
102 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
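/*
 * ~0xFFFUL clears bits 0-11 and leaves every bit from 12 upwards set,
 * i.e. it advertises 4KiB, 8KiB, 16KiB and every larger power-of-two
 * size, matching the "all orders of 4KiB" behaviour described above.
 */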
103
104 static inline int agaw_to_level(int agaw)
105 {
106         return agaw + 2;
107 }
108
109 static inline int agaw_to_width(int agaw)
110 {
111         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
112 }
113
114 static inline int width_to_agaw(int width)
115 {
116         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
117 }
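/*
 * Worked example: a 48-bit guest address width gives
 * width_to_agaw(48) = DIV_ROUND_UP(18, 9) = 2, which agaw_to_level()
 * turns into a 4-level page table; agaw 1 covers 39 bits with 3 levels
 * and agaw 3 covers 57 bits with 5 levels.
 */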
118
119 static inline unsigned int level_to_offset_bits(int level)
120 {
121         return (level - 1) * LEVEL_STRIDE;
122 }
123
124 static inline int pfn_level_offset(unsigned long pfn, int level)
125 {
126         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
127 }
128
129 static inline unsigned long level_mask(int level)
130 {
131         return -1UL << level_to_offset_bits(level);
132 }
133
134 static inline unsigned long level_size(int level)
135 {
136         return 1UL << level_to_offset_bits(level);
137 }
138
139 static inline unsigned long align_to_level(unsigned long pfn, int level)
140 {
141         return (pfn + level_size(level) - 1) & level_mask(level);
142 }
143
144 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
145 {
146         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
147 }
148
149 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
150    are never going to work. */
151 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
152 {
153         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
154 }
155
156 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
157 {
158         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
159 }
160 static inline unsigned long page_to_dma_pfn(struct page *pg)
161 {
162         return mm_to_dma_pfn(page_to_pfn(pg));
163 }
164 static inline unsigned long virt_to_dma_pfn(void *p)
165 {
166         return page_to_dma_pfn(virt_to_page(p));
167 }
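/*
 * With the usual 4KiB kernel pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12)
 * the shifts above are zero and the MM and DMA pfn spaces coincide; the
 * conversions only change the value when the MM page size exceeds 4KiB.
 */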
168
169 /* global iommu list, set NULL for ignored DMAR units */
170 static struct intel_iommu **g_iommus;
171
172 static void __init check_tylersburg_isoch(void);
173 static int rwbf_quirk;
174
175 /*
176  * set to 1 to panic the kernel if VT-d cannot be enabled successfully
177  * (used when the kernel is launched with TXT)
178  */
179 static int force_on = 0;
180
181 /*
182  * 0: Present
183  * 1-11: Reserved
184  * 12-63: Context Ptr (12 - (haw-1))
185  * 64-127: Reserved
186  */
187 struct root_entry {
188         u64     lo;
189         u64     hi;
190 };
191 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
192
193
194 /*
195  * low 64 bits:
196  * 0: present
197  * 1: fault processing disable
198  * 2-3: translation type
199  * 12-63: address space root
200  * high 64 bits:
201  * 0-2: address width
202  * 3-6: available
203  * 8-23: domain id
204  */
205 struct context_entry {
206         u64 lo;
207         u64 hi;
208 };
209
210 static inline bool context_present(struct context_entry *context)
211 {
212         return (context->lo & 1);
213 }
214 static inline void context_set_present(struct context_entry *context)
215 {
216         context->lo |= 1;
217 }
218
219 static inline void context_set_fault_enable(struct context_entry *context)
220 {
221         context->lo &= (((u64)-1) << 2) | 1;
222 }
223
224 static inline void context_set_translation_type(struct context_entry *context,
225                                                 unsigned long value)
226 {
227         context->lo &= (((u64)-1) << 4) | 3;
228         context->lo |= (value & 3) << 2;
229 }
230
231 static inline void context_set_address_root(struct context_entry *context,
232                                             unsigned long value)
233 {
234         context->lo &= ~VTD_PAGE_MASK;
235         context->lo |= value & VTD_PAGE_MASK;
236 }
237
238 static inline void context_set_address_width(struct context_entry *context,
239                                              unsigned long value)
240 {
241         context->hi |= value & 7;
242 }
243
244 static inline void context_set_domain_id(struct context_entry *context,
245                                          unsigned long value)
246 {
247         context->hi |= (value & ((1 << 16) - 1)) << 8;
248 }
249
250 static inline void context_clear_entry(struct context_entry *context)
251 {
252         context->lo = 0;
253         context->hi = 0;
254 }
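/*
 * Illustrative sketch only (not the call site in this file) of how the
 * helpers above are typically combined when pointing a device's context
 * entry at a domain's page table; CONTEXT_TT_MULTI_LEVEL comes from
 * <linux/intel-iommu.h>, and the real mapping path also chooses other
 * translation types and flushes caches afterwards:
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */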
255
256 /*
257  * 0: readable
258  * 1: writable
259  * 2-6: reserved
260  * 7: super page
261  * 8-10: available
262  * 11: snoop behavior
263  * 12-63: Host physical address
264  */
265 struct dma_pte {
266         u64 val;
267 };
268
269 static inline void dma_clear_pte(struct dma_pte *pte)
270 {
271         pte->val = 0;
272 }
273
274 static inline u64 dma_pte_addr(struct dma_pte *pte)
275 {
276 #ifdef CONFIG_64BIT
277         return pte->val & VTD_PAGE_MASK;
278 #else
279         /* Must have a full atomic 64-bit read */
280         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
281 #endif
282 }
283
284 static inline bool dma_pte_present(struct dma_pte *pte)
285 {
286         return (pte->val & 3) != 0;
287 }
288
289 static inline bool dma_pte_superpage(struct dma_pte *pte)
290 {
291         return (pte->val & DMA_PTE_LARGE_PAGE);
292 }
293
294 static inline int first_pte_in_page(struct dma_pte *pte)
295 {
296         return !((unsigned long)pte & ~VTD_PAGE_MASK);
297 }
298
299 /*
300  * This domain is a statically identity mapping domain.
301  *      1. This domain creates a static 1:1 mapping to all usable memory.
302  *      2. It maps to each iommu if successful.
303  *      3. Each iommu maps to this domain if successful.
304  */
305 static struct dmar_domain *si_domain;
306 static int hw_pass_through = 1;
307
308 /* domain represents a virtual machine; more than one device
309  * across iommus may be owned by one domain, e.g. a kvm guest.
310  */
311 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
312
313 /* si_domain contains multiple devices */
314 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
315
316 struct dmar_domain {
317         int     id;                     /* domain id */
318         int     nid;                    /* node id */
319         DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
320                                         /* bitmap of iommus this domain uses*/
321
322         struct list_head devices;       /* all devices' list */
323         struct iova_domain iovad;       /* iova's that belong to this domain */
324
325         struct dma_pte  *pgd;           /* virtual address */
326         int             gaw;            /* max guest address width */
327
328         /* adjusted guest address width, 0 is level 2 30-bit */
329         int             agaw;
330
331         int             flags;          /* flags to find out type of domain */
332
333         int             iommu_coherency;/* indicate coherency of iommu access */
334         int             iommu_snooping; /* indicate snooping control feature*/
335         int             iommu_count;    /* reference count of iommu */
336         int             iommu_superpage;/* Level of superpages supported:
337                                            0 == 4KiB (no superpages), 1 == 2MiB,
338                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
339         spinlock_t      iommu_lock;     /* protect iommu set in domain */
340         u64             max_addr;       /* maximum mapped address */
341
342         struct iommu_domain domain;     /* generic domain data structure for
343                                            iommu core */
344 };
345
346 /* PCI domain-device relationship */
347 struct device_domain_info {
348         struct list_head link;  /* link to domain siblings */
349         struct list_head global; /* link to global list */
350         u8 bus;                 /* PCI bus number */
351         u8 devfn;               /* PCI devfn number */
352         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
353         struct intel_iommu *iommu; /* IOMMU used by this device */
354         struct dmar_domain *domain; /* pointer to domain */
355 };
356
357 struct dmar_rmrr_unit {
358         struct list_head list;          /* list of rmrr units   */
359         struct acpi_dmar_header *hdr;   /* ACPI header          */
360         u64     base_address;           /* reserved base address*/
361         u64     end_address;            /* reserved end address */
362         struct dmar_dev_scope *devices; /* target devices */
363         int     devices_cnt;            /* target device count */
364 };
365
366 struct dmar_atsr_unit {
367         struct list_head list;          /* list of ATSR units */
368         struct acpi_dmar_header *hdr;   /* ACPI header */
369         struct dmar_dev_scope *devices; /* target devices */
370         int devices_cnt;                /* target device count */
371         u8 include_all:1;               /* include all ports */
372 };
373
374 static LIST_HEAD(dmar_atsr_units);
375 static LIST_HEAD(dmar_rmrr_units);
376
377 #define for_each_rmrr_units(rmrr) \
378         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
379
380 static void flush_unmaps_timeout(unsigned long data);
381
382 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
383
384 #define HIGH_WATER_MARK 250
385 struct deferred_flush_tables {
386         int next;
387         struct iova *iova[HIGH_WATER_MARK];
388         struct dmar_domain *domain[HIGH_WATER_MARK];
389         struct page *freelist[HIGH_WATER_MARK];
390 };
391
392 static struct deferred_flush_tables *deferred_flush;
393
394 /* number of IOMMUs; sizes g_iommus and bounds the per-domain iommu bitmaps */
395 static int g_num_of_iommus;
396
397 static DEFINE_SPINLOCK(async_umap_flush_lock);
398 static LIST_HEAD(unmaps_to_do);
399
400 static int timer_on;
401 static long list_size;
402
403 static void domain_exit(struct dmar_domain *domain);
404 static void domain_remove_dev_info(struct dmar_domain *domain);
405 static void domain_remove_one_dev_info(struct dmar_domain *domain,
406                                        struct device *dev);
407 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
408                                            struct device *dev);
409 static int domain_detach_iommu(struct dmar_domain *domain,
410                                struct intel_iommu *iommu);
411
412 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
413 int dmar_disabled = 0;
414 #else
415 int dmar_disabled = 1;
416 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
417
418 int intel_iommu_enabled = 0;
419 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
420
421 static int dmar_map_gfx = 1;
422 static int dmar_forcedac;
423 static int intel_iommu_strict;
424 static int intel_iommu_superpage = 1;
425 static int intel_iommu_ecs = 1;
426
427 /* We only actually use ECS when PASID support (on the new bit 40)
428  * is also advertised. Some early implementations — the ones with
429  * PASID support on bit 28 — have issues even when we *only* use
430  * extended root/context tables. */
431 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
432                             ecap_pasid(iommu->ecap))
433
434 int intel_iommu_gfx_mapped;
435 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
436
437 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
438 static DEFINE_SPINLOCK(device_domain_lock);
439 static LIST_HEAD(device_domain_list);
440
441 static const struct iommu_ops intel_iommu_ops;
442
443 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
444 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
445 {
446         return container_of(dom, struct dmar_domain, domain);
447 }
448
449 static int __init intel_iommu_setup(char *str)
450 {
451         if (!str)
452                 return -EINVAL;
453         while (*str) {
454                 if (!strncmp(str, "on", 2)) {
455                         dmar_disabled = 0;
456                         printk(KERN_INFO "Intel-IOMMU: enabled\n");
457                 } else if (!strncmp(str, "off", 3)) {
458                         dmar_disabled = 1;
459                         printk(KERN_INFO "Intel-IOMMU: disabled\n");
460                 } else if (!strncmp(str, "igfx_off", 8)) {
461                         dmar_map_gfx = 0;
462                         printk(KERN_INFO
463                                 "Intel-IOMMU: disable GFX device mapping\n");
464                 } else if (!strncmp(str, "forcedac", 8)) {
465                         printk(KERN_INFO
466                                 "Intel-IOMMU: Forcing DAC for PCI devices\n");
467                         dmar_forcedac = 1;
468                 } else if (!strncmp(str, "strict", 6)) {
469                         printk(KERN_INFO
470                                 "Intel-IOMMU: disable batched IOTLB flush\n");
471                         intel_iommu_strict = 1;
472                 } else if (!strncmp(str, "sp_off", 6)) {
473                         printk(KERN_INFO
474                                 "Intel-IOMMU: disable supported super page\n");
475                         intel_iommu_superpage = 0;
476                 } else if (!strncmp(str, "ecs_off", 7)) {
477                         printk(KERN_INFO
478                                 "Intel-IOMMU: disable extended context table support\n");
479                         intel_iommu_ecs = 0;
480                 }
481
482                 str += strcspn(str, ",");
483                 while (*str == ',')
484                         str++;
485         }
486         return 0;
487 }
488 __setup("intel_iommu=", intel_iommu_setup);
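/*
 * Example: booting with "intel_iommu=on,strict,igfx_off" enables the
 * IOMMU, disables batched IOTLB flushing and leaves the integrated
 * graphics device unmapped; the options are comma-separated and parsed
 * by intel_iommu_setup() above.
 */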
489
490 static struct kmem_cache *iommu_domain_cache;
491 static struct kmem_cache *iommu_devinfo_cache;
492
493 static inline void *alloc_pgtable_page(int node)
494 {
495         struct page *page;
496         void *vaddr = NULL;
497
498         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
499         if (page)
500                 vaddr = page_address(page);
501         return vaddr;
502 }
503
504 static inline void free_pgtable_page(void *vaddr)
505 {
506         free_page((unsigned long)vaddr);
507 }
508
509 static inline void *alloc_domain_mem(void)
510 {
511         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
512 }
513
514 static void free_domain_mem(void *vaddr)
515 {
516         kmem_cache_free(iommu_domain_cache, vaddr);
517 }
518
519 static inline void *alloc_devinfo_mem(void)
520 {
521         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
522 }
523
524 static inline void free_devinfo_mem(void *vaddr)
525 {
526         kmem_cache_free(iommu_devinfo_cache, vaddr);
527 }
528
529 static inline int domain_type_is_vm(struct dmar_domain *domain)
530 {
531         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
532 }
533
534 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
535 {
536         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
537                                 DOMAIN_FLAG_STATIC_IDENTITY);
538 }
539
540 static inline int domain_pfn_supported(struct dmar_domain *domain,
541                                        unsigned long pfn)
542 {
543         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
544
545         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
546 }
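/*
 * For example, a domain with agaw 2 has a 48-bit address width, so
 * addr_width is 48 - 12 = 36 and every pfn below 1 << 36 (i.e. any
 * address below 256TiB) is supported.
 */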
547
548 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
549 {
550         unsigned long sagaw;
551         int agaw = -1;
552
553         sagaw = cap_sagaw(iommu->cap);
554         for (agaw = width_to_agaw(max_gaw);
555              agaw >= 0; agaw--) {
556                 if (test_bit(agaw, &sagaw))
557                         break;
558         }
559
560         return agaw;
561 }
562
563 /*
564  * Calculate max SAGAW for each iommu.
565  */
566 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
567 {
568         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
569 }
570
571 /*
572  * Calculate the agaw for each iommu.
573  * "SAGAW" may differ across iommus, so use a default agaw and fall back
574  * to a smaller supported agaw for iommus that don't support the default.
575  */
576 int iommu_calculate_agaw(struct intel_iommu *iommu)
577 {
578         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
579 }
580
581 /* This function only returns a single iommu in a domain */
582 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
583 {
584         int iommu_id;
585
586         /* si_domain and vm domain should not get here. */
587         BUG_ON(domain_type_is_vm_or_si(domain));
588         iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
589         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
590                 return NULL;
591
592         return g_iommus[iommu_id];
593 }
594
595 static void domain_update_iommu_coherency(struct dmar_domain *domain)
596 {
597         struct dmar_drhd_unit *drhd;
598         struct intel_iommu *iommu;
599         bool found = false;
600         int i;
601
602         domain->iommu_coherency = 1;
603
604         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
605                 found = true;
606                 if (!ecap_coherent(g_iommus[i]->ecap)) {
607                         domain->iommu_coherency = 0;
608                         break;
609                 }
610         }
611         if (found)
612                 return;
613
614         /* No hardware attached; use lowest common denominator */
615         rcu_read_lock();
616         for_each_active_iommu(iommu, drhd) {
617                 if (!ecap_coherent(iommu->ecap)) {
618                         domain->iommu_coherency = 0;
619                         break;
620                 }
621         }
622         rcu_read_unlock();
623 }
624
625 static int domain_update_iommu_snooping(struct intel_iommu *skip)
626 {
627         struct dmar_drhd_unit *drhd;
628         struct intel_iommu *iommu;
629         int ret = 1;
630
631         rcu_read_lock();
632         for_each_active_iommu(iommu, drhd) {
633                 if (iommu != skip) {
634                         if (!ecap_sc_support(iommu->ecap)) {
635                                 ret = 0;
636                                 break;
637                         }
638                 }
639         }
640         rcu_read_unlock();
641
642         return ret;
643 }
644
645 static int domain_update_iommu_superpage(struct intel_iommu *skip)
646 {
647         struct dmar_drhd_unit *drhd;
648         struct intel_iommu *iommu;
649         int mask = 0xf;
650
651         if (!intel_iommu_superpage) {
652                 return 0;
653         }
654
655         /* set iommu_superpage to the smallest common denominator */
656         rcu_read_lock();
657         for_each_active_iommu(iommu, drhd) {
658                 if (iommu != skip) {
659                         mask &= cap_super_page_val(iommu->cap);
660                         if (!mask)
661                                 break;
662                 }
663         }
664         rcu_read_unlock();
665
666         return fls(mask);
667 }
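/*
 * cap_super_page_val() is a bitmask in which bit 0 means 2MiB and bit 1
 * means 1GiB support, so fls() of the intersection across all IOMMUs
 * yields the iommu_superpage level used elsewhere in this file:
 * 0 == 4KiB only, 1 == 2MiB, 2 == 1GiB.
 */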
668
669 /* Some capabilities may be different across iommus */
670 static void domain_update_iommu_cap(struct dmar_domain *domain)
671 {
672         domain_update_iommu_coherency(domain);
673         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
674         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
675 }
676
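/*
 * Look up (and optionally allocate) the context entry for bus/devfn.
 * With extended context support (ecs_enabled()) each root entry holds two
 * table pointers: 'lo' covers devfn 0x00-0x7f and 'hi' covers 0x80-0xff,
 * and extended context entries are twice the size of legacy ones, hence
 * the devfn adjustment below.
 */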
677 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
678                                                        u8 bus, u8 devfn, int alloc)
679 {
680         struct root_entry *root = &iommu->root_entry[bus];
681         struct context_entry *context;
682         u64 *entry;
683
684         entry = &root->lo;
685         if (ecs_enabled(iommu)) {
686                 if (devfn >= 0x80) {
687                         devfn -= 0x80;
688                         entry = &root->hi;
689                 }
690                 devfn *= 2;
691         }
692         if (*entry & 1)
693                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
694         else {
695                 unsigned long phy_addr;
696                 if (!alloc)
697                         return NULL;
698
699                 context = alloc_pgtable_page(iommu->node);
700                 if (!context)
701                         return NULL;
702
703                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
704                 phy_addr = virt_to_phys((void *)context);
705                 *entry = phy_addr | 1;
706                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
707         }
708         return &context[devfn];
709 }
710
711 static int iommu_dummy(struct device *dev)
712 {
713         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
714 }
715
716 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
717 {
718         struct dmar_drhd_unit *drhd = NULL;
719         struct intel_iommu *iommu;
720         struct device *tmp;
721         struct pci_dev *ptmp, *pdev = NULL;
722         u16 segment = 0;
723         int i;
724
725         if (iommu_dummy(dev))
726                 return NULL;
727
728         if (dev_is_pci(dev)) {
729                 pdev = to_pci_dev(dev);
730                 segment = pci_domain_nr(pdev->bus);
731         } else if (has_acpi_companion(dev))
732                 dev = &ACPI_COMPANION(dev)->dev;
733
734         rcu_read_lock();
735         for_each_active_iommu(iommu, drhd) {
736                 if (pdev && segment != drhd->segment)
737                         continue;
738
739                 for_each_active_dev_scope(drhd->devices,
740                                           drhd->devices_cnt, i, tmp) {
741                         if (tmp == dev) {
742                                 *bus = drhd->devices[i].bus;
743                                 *devfn = drhd->devices[i].devfn;
744                                 goto out;
745                         }
746
747                         if (!pdev || !dev_is_pci(tmp))
748                                 continue;
749
750                         ptmp = to_pci_dev(tmp);
751                         if (ptmp->subordinate &&
752                             ptmp->subordinate->number <= pdev->bus->number &&
753                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
754                                 goto got_pdev;
755                 }
756
757                 if (pdev && drhd->include_all) {
758                 got_pdev:
759                         *bus = pdev->bus->number;
760                         *devfn = pdev->devfn;
761                         goto out;
762                 }
763         }
764         iommu = NULL;
765  out:
766         rcu_read_unlock();
767
768         return iommu;
769 }
770
771 static void domain_flush_cache(struct dmar_domain *domain,
772                                void *addr, int size)
773 {
774         if (!domain->iommu_coherency)
775                 clflush_cache_range(addr, size);
776 }
777
778 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
779 {
780         struct context_entry *context;
781         int ret = 0;
782         unsigned long flags;
783
784         spin_lock_irqsave(&iommu->lock, flags);
785         context = iommu_context_addr(iommu, bus, devfn, 0);
786         if (context)
787                 ret = context_present(context);
788         spin_unlock_irqrestore(&iommu->lock, flags);
789         return ret;
790 }
791
792 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
793 {
794         struct context_entry *context;
795         unsigned long flags;
796
797         spin_lock_irqsave(&iommu->lock, flags);
798         context = iommu_context_addr(iommu, bus, devfn, 0);
799         if (context) {
800                 context_clear_entry(context);
801                 __iommu_flush_cache(iommu, context, sizeof(*context));
802         }
803         spin_unlock_irqrestore(&iommu->lock, flags);
804 }
805
806 static void free_context_table(struct intel_iommu *iommu)
807 {
808         int i;
809         unsigned long flags;
810         struct context_entry *context;
811
812         spin_lock_irqsave(&iommu->lock, flags);
813         if (!iommu->root_entry) {
814                 goto out;
815         }
816         for (i = 0; i < ROOT_ENTRY_NR; i++) {
817                 context = iommu_context_addr(iommu, i, 0, 0);
818                 if (context)
819                         free_pgtable_page(context);
820
821                 if (!ecs_enabled(iommu))
822                         continue;
823
824                 context = iommu_context_addr(iommu, i, 0x80, 0);
825                 if (context)
826                         free_pgtable_page(context);
827
828         }
829         free_pgtable_page(iommu->root_entry);
830         iommu->root_entry = NULL;
831 out:
832         spin_unlock_irqrestore(&iommu->lock, flags);
833 }
834
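/*
 * Walk (and grow) the page table for 'pfn'.  Called with *target_level == 0
 * this returns whatever leaf currently covers the pfn, stopping early at a
 * superpage or non-present entry and reporting the level it reached; called
 * with a specific level it allocates any missing intermediate tables so the
 * caller can install a PTE (possibly a superpage) at exactly that level.
 */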
835 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
836                                       unsigned long pfn, int *target_level)
837 {
838         struct dma_pte *parent, *pte = NULL;
839         int level = agaw_to_level(domain->agaw);
840         int offset;
841
842         BUG_ON(!domain->pgd);
843
844         if (!domain_pfn_supported(domain, pfn))
845                 /* Address beyond IOMMU's addressing capabilities. */
846                 return NULL;
847
848         parent = domain->pgd;
849
850         while (1) {
851                 void *tmp_page;
852
853                 offset = pfn_level_offset(pfn, level);
854                 pte = &parent[offset];
855                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
856                         break;
857                 if (level == *target_level)
858                         break;
859
860                 if (!dma_pte_present(pte)) {
861                         uint64_t pteval;
862
863                         tmp_page = alloc_pgtable_page(domain->nid);
864
865                         if (!tmp_page)
866                                 return NULL;
867
868                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
869                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
870                         if (cmpxchg64(&pte->val, 0ULL, pteval))
871                                 /* Someone else set it while we were thinking; use theirs. */
872                                 free_pgtable_page(tmp_page);
873                         else
874                                 domain_flush_cache(domain, pte, sizeof(*pte));
875                 }
876                 if (level == 1)
877                         break;
878
879                 parent = phys_to_virt(dma_pte_addr(pte));
880                 level--;
881         }
882
883         if (!*target_level)
884                 *target_level = level;
885
886         return pte;
887 }
888
889
890 /* return the address's pte at a specific level */
891 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
892                                          unsigned long pfn,
893                                          int level, int *large_page)
894 {
895         struct dma_pte *parent, *pte = NULL;
896         int total = agaw_to_level(domain->agaw);
897         int offset;
898
899         parent = domain->pgd;
900         while (level <= total) {
901                 offset = pfn_level_offset(pfn, total);
902                 pte = &parent[offset];
903                 if (level == total)
904                         return pte;
905
906                 if (!dma_pte_present(pte)) {
907                         *large_page = total;
908                         break;
909                 }
910
911                 if (dma_pte_superpage(pte)) {
912                         *large_page = total;
913                         return pte;
914                 }
915
916                 parent = phys_to_virt(dma_pte_addr(pte));
917                 total--;
918         }
919         return NULL;
920 }
921
922 /* clear last-level ptes; a tlb flush should follow */
923 static void dma_pte_clear_range(struct dmar_domain *domain,
924                                 unsigned long start_pfn,
925                                 unsigned long last_pfn)
926 {
927         unsigned int large_page = 1;
928         struct dma_pte *first_pte, *pte;
929
930         BUG_ON(!domain_pfn_supported(domain, start_pfn));
931         BUG_ON(!domain_pfn_supported(domain, last_pfn));
932         BUG_ON(start_pfn > last_pfn);
933
934         /* we don't need a lock here; nobody else touches the iova range */
935         do {
936                 large_page = 1;
937                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
938                 if (!pte) {
939                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
940                         continue;
941                 }
942                 do {
943                         dma_clear_pte(pte);
944                         start_pfn += lvl_to_nr_pages(large_page);
945                         pte++;
946                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
947
948                 domain_flush_cache(domain, first_pte,
949                                    (void *)pte - (void *)first_pte);
950
951         } while (start_pfn && start_pfn <= last_pfn);
952 }
953
954 static void dma_pte_free_level(struct dmar_domain *domain, int level,
955                                struct dma_pte *pte, unsigned long pfn,
956                                unsigned long start_pfn, unsigned long last_pfn)
957 {
958         pfn = max(start_pfn, pfn);
959         pte = &pte[pfn_level_offset(pfn, level)];
960
961         do {
962                 unsigned long level_pfn;
963                 struct dma_pte *level_pte;
964
965                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
966                         goto next;
967
968                 level_pfn = pfn & level_mask(level - 1);
969                 level_pte = phys_to_virt(dma_pte_addr(pte));
970
971                 if (level > 2)
972                         dma_pte_free_level(domain, level - 1, level_pte,
973                                            level_pfn, start_pfn, last_pfn);
974
975                 /* If range covers entire pagetable, free it */
976                 if (!(start_pfn > level_pfn ||
977                       last_pfn < level_pfn + level_size(level) - 1)) {
978                         dma_clear_pte(pte);
979                         domain_flush_cache(domain, pte, sizeof(*pte));
980                         free_pgtable_page(level_pte);
981                 }
982 next:
983                 pfn += level_size(level);
984         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
985 }
986
987 /* free page table pages. last level pte should already be cleared */
988 static void dma_pte_free_pagetable(struct dmar_domain *domain,
989                                    unsigned long start_pfn,
990                                    unsigned long last_pfn)
991 {
992         BUG_ON(!domain_pfn_supported(domain, start_pfn));
993         BUG_ON(!domain_pfn_supported(domain, last_pfn));
994         BUG_ON(start_pfn > last_pfn);
995
996         dma_pte_clear_range(domain, start_pfn, last_pfn);
997
998         /* We don't need a lock here; nobody else touches the iova range */
999         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1000                            domain->pgd, 0, start_pfn, last_pfn);
1001
1002         /* free pgd */
1003         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1004                 free_pgtable_page(domain->pgd);
1005                 domain->pgd = NULL;
1006         }
1007 }
1008
1009 /* When a page at a given level is being unlinked from its parent, we don't
1010    need to *modify* it at all. All we need to do is make a list of all the
1011    pages which can be freed just as soon as we've flushed the IOTLB and we
1012    know the hardware page-walk will no longer touch them.
1013    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1014    be freed. */
1015 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1016                                             int level, struct dma_pte *pte,
1017                                             struct page *freelist)
1018 {
1019         struct page *pg;
1020
1021         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1022         pg->freelist = freelist;
1023         freelist = pg;
1024
1025         if (level == 1)
1026                 return freelist;
1027
1028         pte = page_address(pg);
1029         do {
1030                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1031                         freelist = dma_pte_list_pagetables(domain, level - 1,
1032                                                            pte, freelist);
1033                 pte++;
1034         } while (!first_pte_in_page(pte));
1035
1036         return freelist;
1037 }
1038
1039 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1040                                         struct dma_pte *pte, unsigned long pfn,
1041                                         unsigned long start_pfn,
1042                                         unsigned long last_pfn,
1043                                         struct page *freelist)
1044 {
1045         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1046
1047         pfn = max(start_pfn, pfn);
1048         pte = &pte[pfn_level_offset(pfn, level)];
1049
1050         do {
1051                 unsigned long level_pfn;
1052
1053                 if (!dma_pte_present(pte))
1054                         goto next;
1055
1056                 level_pfn = pfn & level_mask(level);
1057
1058                 /* If range covers entire pagetable, free it */
1059                 if (start_pfn <= level_pfn &&
1060                     last_pfn >= level_pfn + level_size(level) - 1) {
1061                         /* These subordinate page tables are going away entirely. Don't
1062                            bother to clear them; we're just going to *free* them. */
1063                         if (level > 1 && !dma_pte_superpage(pte))
1064                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1065
1066                         dma_clear_pte(pte);
1067                         if (!first_pte)
1068                                 first_pte = pte;
1069                         last_pte = pte;
1070                 } else if (level > 1) {
1071                         /* Recurse down into a level that isn't *entirely* obsolete */
1072                         freelist = dma_pte_clear_level(domain, level - 1,
1073                                                        phys_to_virt(dma_pte_addr(pte)),
1074                                                        level_pfn, start_pfn, last_pfn,
1075                                                        freelist);
1076                 }
1077 next:
1078                 pfn += level_size(level);
1079         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1080
1081         if (first_pte)
1082                 domain_flush_cache(domain, first_pte,
1083                                    (void *)++last_pte - (void *)first_pte);
1084
1085         return freelist;
1086 }
1087
1088 /* We can't just free the pages because the IOMMU may still be walking
1089    the page tables, and may have cached the intermediate levels. The
1090    pages can only be freed after the IOTLB flush has been done. */
1091 struct page *domain_unmap(struct dmar_domain *domain,
1092                           unsigned long start_pfn,
1093                           unsigned long last_pfn)
1094 {
1095         struct page *freelist = NULL;
1096
1097         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1098         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1099         BUG_ON(start_pfn > last_pfn);
1100
1101         /* we don't need a lock here; nobody else touches the iova range */
1102         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1103                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1104
1105         /* free pgd */
1106         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1107                 struct page *pgd_page = virt_to_page(domain->pgd);
1108                 pgd_page->freelist = freelist;
1109                 freelist = pgd_page;
1110
1111                 domain->pgd = NULL;
1112         }
1113
1114         return freelist;
1115 }
1116
1117 void dma_free_pagelist(struct page *freelist)
1118 {
1119         struct page *pg;
1120
1121         while ((pg = freelist)) {
1122                 freelist = pg->freelist;
1123                 free_pgtable_page(page_address(pg));
1124         }
1125 }
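/*
 * Rough usage pattern for the two helpers above: collect the stale tables
 * with domain_unmap(), invalidate the IOTLB for the affected range, and
 * only then pass the list to dma_free_pagelist() so the hardware can never
 * walk a freed page.
 */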
1126
1127 /* iommu handling */
1128 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1129 {
1130         struct root_entry *root;
1131         unsigned long flags;
1132
1133         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1134         if (!root) {
1135                 pr_err("IOMMU: allocating root entry for %s failed\n",
1136                         iommu->name);
1137                 return -ENOMEM;
1138         }
1139
1140         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1141
1142         spin_lock_irqsave(&iommu->lock, flags);
1143         iommu->root_entry = root;
1144         spin_unlock_irqrestore(&iommu->lock, flags);
1145
1146         return 0;
1147 }
1148
1149 static void iommu_set_root_entry(struct intel_iommu *iommu)
1150 {
1151         u64 addr;
1152         u32 sts;
1153         unsigned long flag;
1154
1155         addr = virt_to_phys(iommu->root_entry);
1156         if (ecs_enabled(iommu))
1157                 addr |= DMA_RTADDR_RTT;
1158
1159         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1160         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1161
1162         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1163
1164         /* Make sure the hardware completes it */
1165         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1166                       readl, (sts & DMA_GSTS_RTPS), sts);
1167
1168         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1169 }
1170
1171 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1172 {
1173         u32 val;
1174         unsigned long flag;
1175
1176         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1177                 return;
1178
1179         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1180         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1181
1182         /* Make sure the hardware completes it */
1183         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1184                       readl, (!(val & DMA_GSTS_WBFS)), val);
1185
1186         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1187 }
1188
1189 /* return value determines whether we need a write buffer flush */
1190 static void __iommu_flush_context(struct intel_iommu *iommu,
1191                                   u16 did, u16 source_id, u8 function_mask,
1192                                   u64 type)
1193 {
1194         u64 val = 0;
1195         unsigned long flag;
1196
1197         switch (type) {
1198         case DMA_CCMD_GLOBAL_INVL:
1199                 val = DMA_CCMD_GLOBAL_INVL;
1200                 break;
1201         case DMA_CCMD_DOMAIN_INVL:
1202                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1203                 break;
1204         case DMA_CCMD_DEVICE_INVL:
1205                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1206                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1207                 break;
1208         default:
1209                 BUG();
1210         }
1211         val |= DMA_CCMD_ICC;
1212
1213         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1214         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1215
1216         /* Make sure the hardware completes it */
1217         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1218                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1219
1220         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1221 }
1222
1223 /* return value determines whether we need a write buffer flush */
1224 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1225                                 u64 addr, unsigned int size_order, u64 type)
1226 {
1227         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1228         u64 val = 0, val_iva = 0;
1229         unsigned long flag;
1230
1231         switch (type) {
1232         case DMA_TLB_GLOBAL_FLUSH:
1233                 /* global flush doesn't need to set IVA_REG */
1234                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1235                 break;
1236         case DMA_TLB_DSI_FLUSH:
1237                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1238                 break;
1239         case DMA_TLB_PSI_FLUSH:
1240                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1241                 /* IH bit is passed in as part of address */
1242                 val_iva = size_order | addr;
1243                 break;
1244         default:
1245                 BUG();
1246         }
1247         /* Note: set drain read/write */
1248 #if 0
1249         /*
1250          * This is probably meant to be extra safe; it looks like we can
1251          * ignore it without any impact.
1252          */
1253         if (cap_read_drain(iommu->cap))
1254                 val |= DMA_TLB_READ_DRAIN;
1255 #endif
1256         if (cap_write_drain(iommu->cap))
1257                 val |= DMA_TLB_WRITE_DRAIN;
1258
1259         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1260         /* Note: Only uses first TLB reg currently */
1261         if (val_iva)
1262                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1263         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1264
1265         /* Make sure the hardware completes it */
1266         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1267                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1268
1269         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1270
1271         /* check IOTLB invalidation granularity */
1272         if (DMA_TLB_IAIG(val) == 0)
1273                 printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
1274         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1275                 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1276                         (unsigned long long)DMA_TLB_IIRG(type),
1277                         (unsigned long long)DMA_TLB_IAIG(val));
1278 }
1279
1280 static struct device_domain_info *
1281 iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1282                          u8 bus, u8 devfn)
1283 {
1284         bool found = false;
1285         unsigned long flags;
1286         struct device_domain_info *info;
1287         struct pci_dev *pdev;
1288
1289         if (!ecap_dev_iotlb_support(iommu->ecap))
1290                 return NULL;
1291
1292         if (!iommu->qi)
1293                 return NULL;
1294
1295         spin_lock_irqsave(&device_domain_lock, flags);
1296         list_for_each_entry(info, &domain->devices, link)
1297                 if (info->iommu == iommu && info->bus == bus &&
1298                     info->devfn == devfn) {
1299                         found = true;
1300                         break;
1301                 }
1302         spin_unlock_irqrestore(&device_domain_lock, flags);
1303
1304         if (!found || !info->dev || !dev_is_pci(info->dev))
1305                 return NULL;
1306
1307         pdev = to_pci_dev(info->dev);
1308
1309         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1310                 return NULL;
1311
1312         if (!dmar_find_matched_atsr_unit(pdev))
1313                 return NULL;
1314
1315         return info;
1316 }
1317
1318 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1319 {
1320         if (!info || !dev_is_pci(info->dev))
1321                 return;
1322
1323         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1324 }
1325
1326 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1327 {
1328         if (!info->dev || !dev_is_pci(info->dev) ||
1329             !pci_ats_enabled(to_pci_dev(info->dev)))
1330                 return;
1331
1332         pci_disable_ats(to_pci_dev(info->dev));
1333 }
1334
1335 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1336                                   u64 addr, unsigned mask)
1337 {
1338         u16 sid, qdep;
1339         unsigned long flags;
1340         struct device_domain_info *info;
1341
1342         spin_lock_irqsave(&device_domain_lock, flags);
1343         list_for_each_entry(info, &domain->devices, link) {
1344                 struct pci_dev *pdev;
1345                 if (!info->dev || !dev_is_pci(info->dev))
1346                         continue;
1347
1348                 pdev = to_pci_dev(info->dev);
1349                 if (!pci_ats_enabled(pdev))
1350                         continue;
1351
1352                 sid = info->bus << 8 | info->devfn;
1353                 qdep = pci_ats_queue_depth(pdev);
1354                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1355         }
1356         spin_unlock_irqrestore(&device_domain_lock, flags);
1357 }
1358
1359 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1360                                   unsigned long pfn, unsigned int pages, int ih, int map)
1361 {
1362         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1363         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1364
1365         BUG_ON(pages == 0);
1366
1367         if (ih)
1368                 ih = 1 << 6;
1369         /*
1370          * Fall back to a domain-selective flush if there is no PSI support
1371          * or the size is too big.
1372          * PSI requires the page count to be a power of two and the base
1373          * address to be naturally aligned to the size.
1374          */
1375         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1376                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1377                                                 DMA_TLB_DSI_FLUSH);
1378         else
1379                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1380                                                 DMA_TLB_PSI_FLUSH);
1381
1382         /*
1383          * In caching mode, changes of pages from non-present to present require
1384          * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1385          */
1386         if (!cap_caching_mode(iommu->cap) || !map)
1387                 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1388 }
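/*
 * Example: flushing 9 pages rounds up to 16, so mask is 4 and the PSI
 * invalidation covers a naturally aligned 16-page (64KiB) region
 * containing addr.
 */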
1389
1390 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1391 {
1392         u32 pmen;
1393         unsigned long flags;
1394
1395         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1396         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1397         pmen &= ~DMA_PMEN_EPM;
1398         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1399
1400         /* wait for the protected region status bit to clear */
1401         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1402                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1403
1404         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1405 }
1406
1407 static void iommu_enable_translation(struct intel_iommu *iommu)
1408 {
1409         u32 sts;
1410         unsigned long flags;
1411
1412         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1413         iommu->gcmd |= DMA_GCMD_TE;
1414         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1415
1416         /* Make sure the hardware completes it */
1417         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1418                       readl, (sts & DMA_GSTS_TES), sts);
1419
1420         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1421 }
1422
1423 static void iommu_disable_translation(struct intel_iommu *iommu)
1424 {
1425         u32 sts;
1426         unsigned long flag;
1427
1428         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1429         iommu->gcmd &= ~DMA_GCMD_TE;
1430         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1431
1432         /* Make sure the hardware completes it */
1433         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1434                       readl, (!(sts & DMA_GSTS_TES)), sts);
1435
1436         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1437 }
1438
1439
1440 static int iommu_init_domains(struct intel_iommu *iommu)
1441 {
1442         unsigned long ndomains;
1443         unsigned long nlongs;
1444
1445         ndomains = cap_ndoms(iommu->cap);
1446         pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1447                  iommu->seq_id, ndomains);
1448         nlongs = BITS_TO_LONGS(ndomains);
1449
1450         spin_lock_init(&iommu->lock);
1451
1452         /* TBD: there might be 64K domains,
1453          * consider other allocation for future chip
1454          */
1455         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1456         if (!iommu->domain_ids) {
1457                 pr_err("IOMMU%d: allocating domain id array failed\n",
1458                        iommu->seq_id);
1459                 return -ENOMEM;
1460         }
1461         iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1462                         GFP_KERNEL);
1463         if (!iommu->domains) {
1464                 pr_err("IOMMU%d: allocating domain array failed\n",
1465                        iommu->seq_id);
1466                 kfree(iommu->domain_ids);
1467                 iommu->domain_ids = NULL;
1468                 return -ENOMEM;
1469         }
1470
1471         /*
1472          * If Caching mode is set, then invalid translations are tagged
1473          * with domain id 0. Hence we need to pre-allocate it.
1474          */
1475         if (cap_caching_mode(iommu->cap))
1476                 set_bit(0, iommu->domain_ids);
1477         return 0;
1478 }
1479
1480 static void disable_dmar_iommu(struct intel_iommu *iommu)
1481 {
1482         struct dmar_domain *domain;
1483         int i;
1484
1485         if ((iommu->domains) && (iommu->domain_ids)) {
1486                 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1487                         /*
1488                          * Domain id 0 is reserved for invalid translation
1489                          * if hardware supports caching mode.
1490                          */
1491                         if (cap_caching_mode(iommu->cap) && i == 0)
1492                                 continue;
1493
1494                         domain = iommu->domains[i];
1495                         clear_bit(i, iommu->domain_ids);
1496                         if (domain_detach_iommu(domain, iommu) == 0 &&
1497                             !domain_type_is_vm(domain))
1498                                 domain_exit(domain);
1499                 }
1500         }
1501
1502         if (iommu->gcmd & DMA_GCMD_TE)
1503                 iommu_disable_translation(iommu);
1504 }
1505
1506 static void free_dmar_iommu(struct intel_iommu *iommu)
1507 {
1508         if ((iommu->domains) && (iommu->domain_ids)) {
1509                 kfree(iommu->domains);
1510                 kfree(iommu->domain_ids);
1511                 iommu->domains = NULL;
1512                 iommu->domain_ids = NULL;
1513         }
1514
1515         g_iommus[iommu->seq_id] = NULL;
1516
1517         /* free context mapping */
1518         free_context_table(iommu);
1519 }
1520
1521 static struct dmar_domain *alloc_domain(int flags)
1522 {
1523         /* domain id for a virtual machine; it won't be set in a context entry */
1524         static atomic_t vm_domid = ATOMIC_INIT(0);
1525         struct dmar_domain *domain;
1526
1527         domain = alloc_domain_mem();
1528         if (!domain)
1529                 return NULL;
1530
1531         memset(domain, 0, sizeof(*domain));
1532         domain->nid = -1;
1533         domain->flags = flags;
1534         spin_lock_init(&domain->iommu_lock);
1535         INIT_LIST_HEAD(&domain->devices);
1536         if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1537                 domain->id = atomic_inc_return(&vm_domid);
1538
1539         return domain;
1540 }
1541
1542 static int __iommu_attach_domain(struct dmar_domain *domain,
1543                                  struct intel_iommu *iommu)
1544 {
1545         int num;
1546         unsigned long ndomains;
1547
1548         ndomains = cap_ndoms(iommu->cap);
1549         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1550         if (num < ndomains) {
1551                 set_bit(num, iommu->domain_ids);
1552                 iommu->domains[num] = domain;
1553         } else {
1554                 num = -ENOSPC;
1555         }
1556
1557         return num;
1558 }
1559
1560 static int iommu_attach_domain(struct dmar_domain *domain,
1561                                struct intel_iommu *iommu)
1562 {
1563         int num;
1564         unsigned long flags;
1565
1566         spin_lock_irqsave(&iommu->lock, flags);
1567         num = __iommu_attach_domain(domain, iommu);
1568         spin_unlock_irqrestore(&iommu->lock, flags);
1569         if (num < 0)
1570                 pr_err("IOMMU: no free domain ids\n");
1571
1572         return num;
1573 }
1574
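/*
 * For VM domains the id programmed into context entries is per-IOMMU:
 * reuse the id already allocated for this domain on this IOMMU, or
 * grab a free one via __iommu_attach_domain().
 */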
1575 static int iommu_attach_vm_domain(struct dmar_domain *domain,
1576                                   struct intel_iommu *iommu)
1577 {
1578         int num;
1579         unsigned long ndomains;
1580
1581         ndomains = cap_ndoms(iommu->cap);
1582         for_each_set_bit(num, iommu->domain_ids, ndomains)
1583                 if (iommu->domains[num] == domain)
1584                         return num;
1585
1586         return __iommu_attach_domain(domain, iommu);
1587 }
1588
1589 static void iommu_detach_domain(struct dmar_domain *domain,
1590                                 struct intel_iommu *iommu)
1591 {
1592         unsigned long flags;
1593         int num, ndomains;
1594
1595         spin_lock_irqsave(&iommu->lock, flags);
1596         if (domain_type_is_vm_or_si(domain)) {
1597                 ndomains = cap_ndoms(iommu->cap);
1598                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1599                         if (iommu->domains[num] == domain) {
1600                                 clear_bit(num, iommu->domain_ids);
1601                                 iommu->domains[num] = NULL;
1602                                 break;
1603                         }
1604                 }
1605         } else {
1606                 clear_bit(domain->id, iommu->domain_ids);
1607                 iommu->domains[domain->id] = NULL;
1608         }
1609         spin_unlock_irqrestore(&iommu->lock, flags);
1610 }
1611
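/*
 * Record that @domain now uses @iommu: set the per-IOMMU bit, bump the
 * attachment count on the first attach to this IOMMU and refresh the
 * domain's aggregate capabilities (coherency, snooping, superpage).
 */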
1612 static void domain_attach_iommu(struct dmar_domain *domain,
1613                                struct intel_iommu *iommu)
1614 {
1615         unsigned long flags;
1616
1617         spin_lock_irqsave(&domain->iommu_lock, flags);
1618         if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1619                 domain->iommu_count++;
1620                 if (domain->iommu_count == 1)
1621                         domain->nid = iommu->node;
1622                 domain_update_iommu_cap(domain);
1623         }
1624         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1625 }
1626
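/*
 * Returns the number of IOMMUs the domain is still attached to after
 * the detach, or INT_MAX if this IOMMU was not attached in the first
 * place.
 */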
1627 static int domain_detach_iommu(struct dmar_domain *domain,
1628                                struct intel_iommu *iommu)
1629 {
1630         unsigned long flags;
1631         int count = INT_MAX;
1632
1633         spin_lock_irqsave(&domain->iommu_lock, flags);
1634         if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1635                 count = --domain->iommu_count;
1636                 domain_update_iommu_cap(domain);
1637         }
1638         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1639
1640         return count;
1641 }
1642
1643 static struct iova_domain reserved_iova_list;
1644 static struct lock_class_key reserved_rbtree_key;
1645
1646 static int dmar_init_reserved_ranges(void)
1647 {
1648         struct pci_dev *pdev = NULL;
1649         struct iova *iova;
1650         int i;
1651
1652         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1653                         DMA_32BIT_PFN);
1654
1655         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1656                 &reserved_rbtree_key);
1657
1658         /* IOAPIC ranges shouldn't be accessed by DMA */
1659         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1660                 IOVA_PFN(IOAPIC_RANGE_END));
1661         if (!iova) {
1662                 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1663                 return -ENODEV;
1664         }
1665
1666         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1667         for_each_pci_dev(pdev) {
1668                 struct resource *r;
1669
1670                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1671                         r = &pdev->resource[i];
1672                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1673                                 continue;
1674                         iova = reserve_iova(&reserved_iova_list,
1675                                             IOVA_PFN(r->start),
1676                                             IOVA_PFN(r->end));
1677                         if (!iova) {
1678                                 printk(KERN_ERR "Reserve iova failed\n");
1679                                 return -ENODEV;
1680                         }
1681                 }
1682         }
1683         return 0;
1684 }
1685
1686 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1687 {
1688         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1689 }
1690
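/*
 * Round the guest address width up to the nearest width a page-table
 * walk can express: 12 bits of page offset plus a multiple of 9-bit
 * levels, capped at 64.  For example, gaw 39 and 48 are kept as-is,
 * while gaw 40 is rounded up to 48.
 */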
1691 static inline int guestwidth_to_adjustwidth(int gaw)
1692 {
1693         int agaw;
1694         int r = (gaw - 12) % 9;
1695
1696         if (r == 0)
1697                 agaw = gaw;
1698         else
1699                 agaw = gaw + 9 - r;
1700         if (agaw > 64)
1701                 agaw = 64;
1702         return agaw;
1703 }
1704
1705 static int domain_init(struct dmar_domain *domain, int guest_width)
1706 {
1707         struct intel_iommu *iommu;
1708         int adjust_width, agaw;
1709         unsigned long sagaw;
1710
1711         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1712                         DMA_32BIT_PFN);
1713         domain_reserve_special_ranges(domain);
1714
1715         /* calculate AGAW */
1716         iommu = domain_get_iommu(domain);
1717         if (guest_width > cap_mgaw(iommu->cap))
1718                 guest_width = cap_mgaw(iommu->cap);
1719         domain->gaw = guest_width;
1720         adjust_width = guestwidth_to_adjustwidth(guest_width);
1721         agaw = width_to_agaw(adjust_width);
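        /*
         * cap_sagaw() is a 5-bit mask of the adjusted guest address
         * widths (page-table depths) this IOMMU supports; if the
         * requested depth is missing, fall back to the next deeper one.
         */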
1722         sagaw = cap_sagaw(iommu->cap);
1723         if (!test_bit(agaw, &sagaw)) {
1724                 /* hardware doesn't support it, choose a bigger one */
1725                 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1726                 agaw = find_next_bit(&sagaw, 5, agaw);
1727                 if (agaw >= 5)
1728                         return -ENODEV;
1729         }
1730         domain->agaw = agaw;
1731
1732         if (ecap_coherent(iommu->ecap))
1733                 domain->iommu_coherency = 1;
1734         else
1735                 domain->iommu_coherency = 0;
1736
1737         if (ecap_sc_support(iommu->ecap))
1738                 domain->iommu_snooping = 1;
1739         else
1740                 domain->iommu_snooping = 0;
1741
1742         if (intel_iommu_superpage)
1743                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1744         else
1745                 domain->iommu_superpage = 0;
1746
1747         domain->nid = iommu->node;
1748
1749         /* always allocate the top pgd */
1750         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1751         if (!domain->pgd)
1752                 return -ENOMEM;
1753         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1754         return 0;
1755 }
1756
1757 static void domain_exit(struct dmar_domain *domain)
1758 {
1759         struct page *freelist = NULL;
1760         int i;
1761
1762         /* Domain 0 is reserved, so don't process it */
1763         if (!domain)
1764                 return;
1765
1766         /* Flush any lazy unmaps that may reference this domain */
1767         if (!intel_iommu_strict)
1768                 flush_unmaps_timeout(0);
1769
1770         /* remove associated devices */
1771         domain_remove_dev_info(domain);
1772
1773         /* destroy iovas */
1774         put_iova_domain(&domain->iovad);
1775
1776         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1777
1778         /* clear attached or cached domains */
1779         rcu_read_lock();
1780         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1781                 iommu_detach_domain(domain, g_iommus[i]);
1782         rcu_read_unlock();
1783
1784         dma_free_pagelist(freelist);
1785
1786         free_domain_mem(domain);
1787 }
1788
1789 static int domain_context_mapping_one(struct dmar_domain *domain,
1790                                       struct intel_iommu *iommu,
1791                                       u8 bus, u8 devfn, int translation)
1792 {
1793         struct context_entry *context;
1794         unsigned long flags;
1795         struct dma_pte *pgd;
1796         int id;
1797         int agaw;
1798         struct device_domain_info *info = NULL;
1799
1800         pr_debug("Set context mapping for %02x:%02x.%d\n",
1801                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1802
1803         BUG_ON(!domain->pgd);
1804         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1805                translation != CONTEXT_TT_MULTI_LEVEL);
1806
1807         spin_lock_irqsave(&iommu->lock, flags);
1808         context = iommu_context_addr(iommu, bus, devfn, 1);
1809         spin_unlock_irqrestore(&iommu->lock, flags);
1810         if (!context)
1811                 return -ENOMEM;
1812         spin_lock_irqsave(&iommu->lock, flags);
1813         if (context_present(context)) {
1814                 spin_unlock_irqrestore(&iommu->lock, flags);
1815                 return 0;
1816         }
1817
1818         id = domain->id;
1819         pgd = domain->pgd;
1820
1821         if (domain_type_is_vm_or_si(domain)) {
1822                 if (domain_type_is_vm(domain)) {
1823                         id = iommu_attach_vm_domain(domain, iommu);
1824                         if (id < 0) {
1825                                 spin_unlock_irqrestore(&iommu->lock, flags);
1826                                 pr_err("IOMMU: no free domain ids\n");
1827                                 return -EFAULT;
1828                         }
1829                 }
1830
1831                 /* Skip top levels of page tables for an
1832                  * iommu which has a smaller agaw than the default.
1833                  * Unnecessary for PT mode.
1834                  */
1835                 if (translation != CONTEXT_TT_PASS_THROUGH) {
1836                         for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1837                                 pgd = phys_to_virt(dma_pte_addr(pgd));
1838                                 if (!dma_pte_present(pgd)) {
1839                                         spin_unlock_irqrestore(&iommu->lock, flags);
1840                                         return -ENOMEM;
1841                                 }
1842                         }
1843                 }
1844         }
1845
1846         context_set_domain_id(context, id);
1847
1848         if (translation != CONTEXT_TT_PASS_THROUGH) {
1849                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1850                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1851                                      CONTEXT_TT_MULTI_LEVEL;
1852         }
1853         /*
1854          * In pass through mode, AW must be programmed to indicate the largest
1855          * AGAW value supported by hardware. And ASR is ignored by hardware.
1856          */
1857         if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1858                 context_set_address_width(context, iommu->msagaw);
1859         else {
1860                 context_set_address_root(context, virt_to_phys(pgd));
1861                 context_set_address_width(context, iommu->agaw);
1862         }
1863
1864         context_set_translation_type(context, translation);
1865         context_set_fault_enable(context);
1866         context_set_present(context);
1867         domain_flush_cache(domain, context, sizeof(*context));
1868
1869         /*
1870          * It's a non-present to present mapping. If hardware doesn't cache
1871          * non-present entries we only need to flush the write-buffer. If it
1872          * _does_ cache non-present entries, then it does so in the special
1873          * domain #0, which we have to flush:
1874          */
1875         if (cap_caching_mode(iommu->cap)) {
1876                 iommu->flush.flush_context(iommu, 0,
1877                                            (((u16)bus) << 8) | devfn,
1878                                            DMA_CCMD_MASK_NOBIT,
1879                                            DMA_CCMD_DEVICE_INVL);
1880                 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
1881         } else {
1882                 iommu_flush_write_buffer(iommu);
1883         }
1884         iommu_enable_dev_iotlb(info);
1885         spin_unlock_irqrestore(&iommu->lock, flags);
1886
1887         domain_attach_iommu(domain, iommu);
1888
1889         return 0;
1890 }
1891
1892 struct domain_context_mapping_data {
1893         struct dmar_domain *domain;
1894         struct intel_iommu *iommu;
1895         int translation;
1896 };
1897
1898 static int domain_context_mapping_cb(struct pci_dev *pdev,
1899                                      u16 alias, void *opaque)
1900 {
1901         struct domain_context_mapping_data *data = opaque;
1902
1903         return domain_context_mapping_one(data->domain, data->iommu,
1904                                           PCI_BUS_NUM(alias), alias & 0xff,
1905                                           data->translation);
1906 }
1907
1908 static int
1909 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1910                        int translation)
1911 {
1912         struct intel_iommu *iommu;
1913         u8 bus, devfn;
1914         struct domain_context_mapping_data data;
1915
1916         iommu = device_to_iommu(dev, &bus, &devfn);
1917         if (!iommu)
1918                 return -ENODEV;
1919
1920         if (!dev_is_pci(dev))
1921                 return domain_context_mapping_one(domain, iommu, bus, devfn,
1922                                                   translation);
1923
1924         data.domain = domain;
1925         data.iommu = iommu;
1926         data.translation = translation;
1927
1928         return pci_for_each_dma_alias(to_pci_dev(dev),
1929                                       &domain_context_mapping_cb, &data);
1930 }
1931
1932 static int domain_context_mapped_cb(struct pci_dev *pdev,
1933                                     u16 alias, void *opaque)
1934 {
1935         struct intel_iommu *iommu = opaque;
1936
1937         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
1938 }
1939
1940 static int domain_context_mapped(struct device *dev)
1941 {
1942         struct intel_iommu *iommu;
1943         u8 bus, devfn;
1944
1945         iommu = device_to_iommu(dev, &bus, &devfn);
1946         if (!iommu)
1947                 return -ENODEV;
1948
1949         if (!dev_is_pci(dev))
1950                 return device_context_mapped(iommu, bus, devfn);
1951
1952         return !pci_for_each_dma_alias(to_pci_dev(dev),
1953                                        domain_context_mapped_cb, iommu);
1954 }
1955
1956 /* Returns a number of VTD pages, but aligned to MM page size */
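/*
 * Example: with 4KiB MM and VTD pages, host_addr 0x1234 and size 0x2000
 * span offsets 0x234..0x2233, so PAGE_ALIGN(0x2234) >> VTD_PAGE_SHIFT
 * yields 3 VTD pages.
 */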
1957 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1958                                             size_t size)
1959 {
1960         host_addr &= ~PAGE_MASK;
1961         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1962 }
1963
1964 /* Return largest possible superpage level for a given mapping */
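/*
 * Example: with one superpage level supported (2MiB pages), level 2 is
 * returned only when iov_pfn and phy_pfn are both 512-page (2MiB)
 * aligned and at least 512 pages are being mapped; otherwise level 1
 * (4KiB pages) is used.
 */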
1965 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1966                                           unsigned long iov_pfn,
1967                                           unsigned long phy_pfn,
1968                                           unsigned long pages)
1969 {
1970         int support, level = 1;
1971         unsigned long pfnmerge;
1972
1973         support = domain->iommu_superpage;
1974
1975         /* To use a large page, the virtual *and* physical addresses
1976            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1977            of them will mean we have to use smaller pages. So just
1978            merge them and check both at once. */
1979         pfnmerge = iov_pfn | phy_pfn;
1980
1981         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1982                 pages >>= VTD_STRIDE_SHIFT;
1983                 if (!pages)
1984                         break;
1985                 pfnmerge >>= VTD_STRIDE_SHIFT;
1986                 level++;
1987                 support--;
1988         }
1989         return level;
1990 }
1991
1992 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1993                             struct scatterlist *sg, unsigned long phys_pfn,
1994                             unsigned long nr_pages, int prot)
1995 {
1996         struct dma_pte *first_pte = NULL, *pte = NULL;
1997         phys_addr_t uninitialized_var(pteval);
1998         unsigned long sg_res = 0;
1999         unsigned int largepage_lvl = 0;
2000         unsigned long lvl_pages = 0;
2001
2002         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2003
2004         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2005                 return -EINVAL;
2006
2007         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2008
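        /*
         * Without a scatterlist the caller maps one physically
         * contiguous range starting at phys_pfn, so treat the whole
         * request as a single "segment".
         */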
2009         if (!sg) {
2010                 sg_res = nr_pages;
2011                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2012         }
2013
2014         while (nr_pages > 0) {
2015                 uint64_t tmp;
2016
2017                 if (!sg_res) {
2018                         sg_res = aligned_nrpages(sg->offset, sg->length);
2019                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2020                         sg->dma_length = sg->length;
2021                         pteval = page_to_phys(sg_page(sg)) | prot;
2022                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2023                 }
2024
2025                 if (!pte) {
2026                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2027
2028                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2029                         if (!pte)
2030                                 return -ENOMEM;
2031                         /* It is a large page */
2032                         if (largepage_lvl > 1) {
2033                                 pteval |= DMA_PTE_LARGE_PAGE;
2034                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2035                                 /*
2036                                  * Ensure that old small page tables are
2037                                  * removed to make room for superpage,
2038                                  * if they exist.
2039                                  */
2040                                 dma_pte_free_pagetable(domain, iov_pfn,
2041                                                        iov_pfn + lvl_pages - 1);
2042                         } else {
2043                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2044                         }
2045
2046                 }
2047                 /* We don't need a lock here; nobody else
2048                  * touches the iova range
2049                  */
2050                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2051                 if (tmp) {
2052                         static int dumps = 5;
2053                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2054                                iov_pfn, tmp, (unsigned long long)pteval);
2055                         if (dumps) {
2056                                 dumps--;
2057                                 debug_dma_dump_mappings(NULL);
2058                         }
2059                         WARN_ON(1);
2060                 }
2061
2062                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2063
2064                 BUG_ON(nr_pages < lvl_pages);
2065                 BUG_ON(sg_res < lvl_pages);
2066
2067                 nr_pages -= lvl_pages;
2068                 iov_pfn += lvl_pages;
2069                 phys_pfn += lvl_pages;
2070                 pteval += lvl_pages * VTD_PAGE_SIZE;
2071                 sg_res -= lvl_pages;
2072
2073                 /* If the next PTE would be the first in a new page, then we
2074                    need to flush the cache on the entries we've just written.
2075                    And then we'll need to recalculate 'pte', so clear it and
2076                    let it get set again in the if (!pte) block above.
2077
2078                    If we're done (!nr_pages) we need to flush the cache too.
2079
2080                    Also if we've been setting superpages, we may need to
2081                    recalculate 'pte' and switch back to smaller pages for the
2082                    end of the mapping, if the trailing size is not enough to
2083                    use another superpage (i.e. sg_res < lvl_pages). */
2084                 pte++;
2085                 if (!nr_pages || first_pte_in_page(pte) ||
2086                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2087                         domain_flush_cache(domain, first_pte,
2088                                            (void *)pte - (void *)first_pte);
2089                         pte = NULL;
2090                 }
2091
2092                 if (!sg_res && nr_pages)
2093                         sg = sg_next(sg);
2094         }
2095         return 0;
2096 }
2097
2098 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2099                                     struct scatterlist *sg, unsigned long nr_pages,
2100                                     int prot)
2101 {
2102         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2103 }
2104
2105 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2106                                      unsigned long phys_pfn, unsigned long nr_pages,
2107                                      int prot)
2108 {
2109         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2110 }
2111
2112 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2113 {
2114         if (!iommu)
2115                 return;
2116
2117         clear_context_table(iommu, bus, devfn);
2118         iommu->flush.flush_context(iommu, 0, 0, 0,
2119                                            DMA_CCMD_GLOBAL_INVL);
2120         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2121 }
2122
2123 static inline void unlink_domain_info(struct device_domain_info *info)
2124 {
2125         assert_spin_locked(&device_domain_lock);
2126         list_del(&info->link);
2127         list_del(&info->global);
2128         if (info->dev)
2129                 info->dev->archdata.iommu = NULL;
2130 }
2131
2132 static void domain_remove_dev_info(struct dmar_domain *domain)
2133 {
2134         struct device_domain_info *info, *tmp;
2135         unsigned long flags;
2136
2137         spin_lock_irqsave(&device_domain_lock, flags);
2138         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
2139                 unlink_domain_info(info);
2140                 spin_unlock_irqrestore(&device_domain_lock, flags);
2141
2142                 iommu_disable_dev_iotlb(info);
2143                 iommu_detach_dev(info->iommu, info->bus, info->devfn);
2144
2145                 if (domain_type_is_vm(domain)) {
2146                         iommu_detach_dependent_devices(info->iommu, info->dev);
2147                         domain_detach_iommu(domain, info->iommu);
2148                 }
2149
2150                 free_devinfo_mem(info);
2151                 spin_lock_irqsave(&device_domain_lock, flags);
2152         }
2153         spin_unlock_irqrestore(&device_domain_lock, flags);
2154 }
2155
2156 /*
2157  * find_domain
2158  * Note: we use struct device->archdata.iommu to store the domain info
2159  */
2160 static struct dmar_domain *find_domain(struct device *dev)
2161 {
2162         struct device_domain_info *info;
2163
2164         /* No lock here, assumes no domain exit in normal case */
2165         info = dev->archdata.iommu;
2166         if (info)
2167                 return info->domain;
2168         return NULL;
2169 }
2170
2171 static inline struct device_domain_info *
2172 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2173 {
2174         struct device_domain_info *info;
2175
2176         list_for_each_entry(info, &device_domain_list, global)
2177                 if (info->iommu->segment == segment && info->bus == bus &&
2178                     info->devfn == devfn)
2179                         return info;
2180
2181         return NULL;
2182 }
2183
2184 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2185                                                 int bus, int devfn,
2186                                                 struct device *dev,
2187                                                 struct dmar_domain *domain)
2188 {
2189         struct dmar_domain *found = NULL;
2190         struct device_domain_info *info;
2191         unsigned long flags;
2192
2193         info = alloc_devinfo_mem();
2194         if (!info)
2195                 return NULL;
2196
2197         info->bus = bus;
2198         info->devfn = devfn;
2199         info->dev = dev;
2200         info->domain = domain;
2201         info->iommu = iommu;
2202
2203         spin_lock_irqsave(&device_domain_lock, flags);
2204         if (dev)
2205                 found = find_domain(dev);
2206         else {
2207                 struct device_domain_info *info2;
2208                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2209                 if (info2)
2210                         found = info2->domain;
2211         }
2212         if (found) {
2213                 spin_unlock_irqrestore(&device_domain_lock, flags);
2214                 free_devinfo_mem(info);
2215                 /* Caller must free the original domain */
2216                 return found;
2217         }
2218
2219         list_add(&info->link, &domain->devices);
2220         list_add(&info->global, &device_domain_list);
2221         if (dev)
2222                 dev->archdata.iommu = info;
2223         spin_unlock_irqrestore(&device_domain_lock, flags);
2224
2225         return domain;
2226 }
2227
2228 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2229 {
2230         *(u16 *)opaque = alias;
2231         return 0;
2232 }
2233
2234 /* domain is initialized */
2235 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2236 {
2237         struct dmar_domain *domain, *tmp;
2238         struct intel_iommu *iommu;
2239         struct device_domain_info *info;
2240         u16 dma_alias;
2241         unsigned long flags;
2242         u8 bus, devfn;
2243
2244         domain = find_domain(dev);
2245         if (domain)
2246                 return domain;
2247
2248         iommu = device_to_iommu(dev, &bus, &devfn);
2249         if (!iommu)
2250                 return NULL;
2251
2252         if (dev_is_pci(dev)) {
2253                 struct pci_dev *pdev = to_pci_dev(dev);
2254
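                /*
                 * Record the last alias reported by pci_for_each_dma_alias()
                 * (the one closest to the root bus, e.g. a PCIe-to-PCI
                 * bridge), so devices sharing that alias share one domain.
                 */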
2255                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2256
2257                 spin_lock_irqsave(&device_domain_lock, flags);
2258                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2259                                                       PCI_BUS_NUM(dma_alias),
2260                                                       dma_alias & 0xff);
2261                 if (info) {
2262                         iommu = info->iommu;
2263                         domain = info->domain;
2264                 }
2265                 spin_unlock_irqrestore(&device_domain_lock, flags);
2266
2267                 /* The DMA alias already has a domain, use it */
2268                 if (info)
2269                         goto found_domain;
2270         }
2271
2272         /* Allocate and initialize new domain for the device */
2273         domain = alloc_domain(0);
2274         if (!domain)
2275                 return NULL;
2276         domain->id = iommu_attach_domain(domain, iommu);
2277         if (domain->id < 0) {
2278                 free_domain_mem(domain);
2279                 return NULL;
2280         }
2281         domain_attach_iommu(domain, iommu);
2282         if (domain_init(domain, gaw)) {
2283                 domain_exit(domain);
2284                 return NULL;
2285         }
2286
2287         /* register PCI DMA alias device */
2288         if (dev_is_pci(dev)) {
2289                 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2290                                            dma_alias & 0xff, NULL, domain);
2291
2292                 if (!tmp || tmp != domain) {
2293                         domain_exit(domain);
2294                         domain = tmp;
2295                 }
2296
2297                 if (!domain)
2298                         return NULL;
2299         }
2300
2301 found_domain:
2302         tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2303
2304         if (!tmp || tmp != domain) {
2305                 domain_exit(domain);
2306                 domain = tmp;
2307         }
2308
2309         return domain;
2310 }
2311
2312 static int iommu_identity_mapping;
2313 #define IDENTMAP_ALL            1
2314 #define IDENTMAP_GFX            2
2315 #define IDENTMAP_AZALIA         4
2316
2317 static int iommu_domain_identity_map(struct dmar_domain *domain,
2318                                      unsigned long long start,
2319                                      unsigned long long end)
2320 {
2321         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2322         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2323
2324         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2325                           dma_to_mm_pfn(last_vpfn))) {
2326                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2327                 return -ENOMEM;
2328         }
2329
2330         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2331                  start, end, domain->id);
2332         /*
2333          * RMRR range might have overlap with physical memory range,
2334          * clear it first
2335          */
2336         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2337
2338         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2339                                   last_vpfn - first_vpfn + 1,
2340                                   DMA_PTE_READ|DMA_PTE_WRITE);
2341 }
2342
2343 static int iommu_prepare_identity_map(struct device *dev,
2344                                       unsigned long long start,
2345                                       unsigned long long end)
2346 {
2347         struct dmar_domain *domain;
2348         int ret;
2349
2350         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2351         if (!domain)
2352                 return -ENOMEM;
2353
2354         /* For _hardware_ passthrough, don't bother. But for software
2355            passthrough, we do it anyway -- it may indicate a memory
2356            range which is reserved in E820 and so didn't get set
2357            up to start with in si_domain */
2358         if (domain == si_domain && hw_pass_through) {
2359                 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2360                        dev_name(dev), start, end);
2361                 return 0;
2362         }
2363
2364         printk(KERN_INFO
2365                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2366                dev_name(dev), start, end);
2367
2368         if (end < start) {
2369                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2370                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2371                         dmi_get_system_info(DMI_BIOS_VENDOR),
2372                         dmi_get_system_info(DMI_BIOS_VERSION),
2373                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2374                 ret = -EIO;
2375                 goto error;
2376         }
2377
2378         if (end >> agaw_to_width(domain->agaw)) {
2379                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2380                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2381                      agaw_to_width(domain->agaw),
2382                      dmi_get_system_info(DMI_BIOS_VENDOR),
2383                      dmi_get_system_info(DMI_BIOS_VERSION),
2384                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2385                 ret = -EIO;
2386                 goto error;
2387         }
2388
2389         ret = iommu_domain_identity_map(domain, start, end);
2390         if (ret)
2391                 goto error;
2392
2393         /* context entry init */
2394         ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2395         if (ret)
2396                 goto error;
2397
2398         return 0;
2399
2400  error:
2401         domain_exit(domain);
2402         return ret;
2403 }
2404
2405 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2406                                          struct device *dev)
2407 {
2408         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2409                 return 0;
2410         return iommu_prepare_identity_map(dev, rmrr->base_address,
2411                                           rmrr->end_address);
2412 }
2413
2414 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2415 static inline void iommu_prepare_isa(void)
2416 {
2417         struct pci_dev *pdev;
2418         int ret;
2419
2420         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2421         if (!pdev)
2422                 return;
2423
2424         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2425         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2426
2427         if (ret)
2428                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2429                        "floppy might not work\n");
2430
2431         pci_dev_put(pdev);
2432 }
2433 #else
2434 static inline void iommu_prepare_isa(void)
2435 {
2436         return;
2437 }
2438 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2439
2440 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2441
2442 static int __init si_domain_init(int hw)
2443 {
2444         struct dmar_drhd_unit *drhd;
2445         struct intel_iommu *iommu;
2446         int nid, ret = 0;
2447         bool first = true;
2448
2449         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2450         if (!si_domain)
2451                 return -EFAULT;
2452
2453         for_each_active_iommu(iommu, drhd) {
2454                 ret = iommu_attach_domain(si_domain, iommu);
2455                 if (ret < 0) {
2456                         domain_exit(si_domain);
2457                         return -EFAULT;
2458                 } else if (first) {
2459                         si_domain->id = ret;
2460                         first = false;
2461                 } else if (si_domain->id != ret) {
2462                         domain_exit(si_domain);
2463                         return -EFAULT;
2464                 }
2465                 domain_attach_iommu(si_domain, iommu);
2466         }
2467
2468         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2469                 domain_exit(si_domain);
2470                 return -EFAULT;
2471         }
2472
2473         pr_debug("IOMMU: identity mapping domain is domain %d\n",
2474                  si_domain->id);
2475
2476         if (hw)
2477                 return 0;
2478
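        /*
         * For software identity mapping, 1:1 map every usable physical
         * memory range, node by node, so DMA addresses equal physical
         * addresses.
         */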
2479         for_each_online_node(nid) {
2480                 unsigned long start_pfn, end_pfn;
2481                 int i;
2482
2483                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2484                         ret = iommu_domain_identity_map(si_domain,
2485                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2486                         if (ret)
2487                                 return ret;
2488                 }
2489         }
2490
2491         return 0;
2492 }
2493
2494 static int identity_mapping(struct device *dev)
2495 {
2496         struct device_domain_info *info;
2497
2498         if (likely(!iommu_identity_mapping))
2499                 return 0;
2500
2501         info = dev->archdata.iommu;
2502         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2503                 return (info->domain == si_domain);
2504
2505         return 0;
2506 }
2507
2508 static int domain_add_dev_info(struct dmar_domain *domain,
2509                                struct device *dev, int translation)
2510 {
2511         struct dmar_domain *ndomain;
2512         struct intel_iommu *iommu;
2513         u8 bus, devfn;
2514         int ret;
2515
2516         iommu = device_to_iommu(dev, &bus, &devfn);
2517         if (!iommu)
2518                 return -ENODEV;
2519
2520         ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2521         if (ndomain != domain)
2522                 return -EBUSY;
2523
2524         ret = domain_context_mapping(domain, dev, translation);
2525         if (ret) {
2526                 domain_remove_one_dev_info(domain, dev);
2527                 return ret;
2528         }
2529
2530         return 0;
2531 }
2532
2533 static bool device_has_rmrr(struct device *dev)
2534 {
2535         struct dmar_rmrr_unit *rmrr;
2536         struct device *tmp;
2537         int i;
2538
2539         rcu_read_lock();
2540         for_each_rmrr_units(rmrr) {
2541                 /*
2542                  * Return TRUE if this RMRR contains the device that
2543                  * is passed in.
2544                  */
2545                 for_each_active_dev_scope(rmrr->devices,
2546                                           rmrr->devices_cnt, i, tmp)
2547                         if (tmp == dev) {
2548                                 rcu_read_unlock();
2549                                 return true;
2550                         }
2551         }
2552         rcu_read_unlock();
2553         return false;
2554 }
2555
2556 /*
2557  * There are a couple cases where we need to restrict the functionality of
2558  * devices associated with RMRRs.  The first is when evaluating a device for
2559  * identity mapping because problems exist when devices are moved in and out
2560  * of domains and their respective RMRR information is lost.  This means that
2561  * a device with associated RMRRs will never be in a "passthrough" domain.
2562  * The second is use of the device through the IOMMU API.  This interface
2563  * expects to have full control of the IOVA space for the device.  We cannot
2564  * satisfy both the requirement that RMRR access is maintained and have an
2565  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2566  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2567  * We therefore prevent devices associated with an RMRR from participating in
2568  * the IOMMU API, which eliminates them from device assignment.
2569  *
2570  * In both cases we assume that PCI USB devices with RMRRs have them largely
2571  * for historical reasons and that the RMRR space is not actively used post
2572  * boot.  This exclusion may change if vendors begin to abuse it.
2573  *
2574  * The same exception is made for graphics devices, with the requirement that
2575  * any use of the RMRR regions will be torn down before assigning the device
2576  * to a guest.
2577  */
2578 static bool device_is_rmrr_locked(struct device *dev)
2579 {
2580         if (!device_has_rmrr(dev))
2581                 return false;
2582
2583         if (dev_is_pci(dev)) {
2584                 struct pci_dev *pdev = to_pci_dev(dev);
2585
2586                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2587                         return false;
2588         }
2589
2590         return true;
2591 }
2592
2593 static int iommu_should_identity_map(struct device *dev, int startup)
2594 {
2595
2596         if (dev_is_pci(dev)) {
2597                 struct pci_dev *pdev = to_pci_dev(dev);
2598
2599                 if (device_is_rmrr_locked(dev))
2600                         return 0;
2601
2602                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2603                         return 1;
2604
2605                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2606                         return 1;
2607
2608                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2609                         return 0;
2610
2611                 /*
2612                  * We want to start off with all devices in the 1:1 domain, and
2613                  * take them out later if we find they can't access all of memory.
2614                  *
2615                  * However, we can't do this for PCI devices behind bridges,
2616                  * because all PCI devices behind the same bridge will end up
2617                  * with the same source-id on their transactions.
2618                  *
2619                  * Practically speaking, we can't change things around for these
2620                  * devices at run-time, because we can't be sure there'll be no
2621                  * DMA transactions in flight for any of their siblings.
2622                  *
2623                  * So PCI devices (unless they're on the root bus) as well as
2624                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2625                  * the 1:1 domain, just in _case_ one of their siblings turns out
2626                  * not to be able to map all of memory.
2627                  */
2628                 if (!pci_is_pcie(pdev)) {
2629                         if (!pci_is_root_bus(pdev->bus))
2630                                 return 0;
2631                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2632                                 return 0;
2633                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2634                         return 0;
2635         } else {
2636                 if (device_has_rmrr(dev))
2637                         return 0;
2638         }
2639
2640         /*
2641          * At boot time, we don't yet know if devices will be 64-bit capable.
2642          * Assume that they will — if they turn out not to be, then we can
2643          * take them out of the 1:1 domain later.
2644          */
2645         if (!startup) {
2646                 /*
2647                  * If the device's dma_mask is less than the system's memory
2648                  * size then this is not a candidate for identity mapping.
2649                  */
2650                 u64 dma_mask = *dev->dma_mask;
2651
2652                 if (dev->coherent_dma_mask &&
2653                     dev->coherent_dma_mask < dma_mask)
2654                         dma_mask = dev->coherent_dma_mask;
2655
2656                 return dma_mask >= dma_get_required_mask(dev);
2657         }
2658
2659         return 1;
2660 }
2661
2662 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2663 {
2664         int ret;
2665
2666         if (!iommu_should_identity_map(dev, 1))
2667                 return 0;
2668
2669         ret = domain_add_dev_info(si_domain, dev,
2670                                   hw ? CONTEXT_TT_PASS_THROUGH :
2671                                        CONTEXT_TT_MULTI_LEVEL);
2672         if (!ret)
2673                 pr_info("IOMMU: %s identity mapping for device %s\n",
2674                         hw ? "hardware" : "software", dev_name(dev));
2675         else if (ret == -ENODEV)
2676                 /* device not associated with an iommu */
2677                 ret = 0;
2678
2679         return ret;
2680 }
2681
2682
2683 static int __init iommu_prepare_static_identity_mapping(int hw)
2684 {
2685         struct pci_dev *pdev = NULL;
2686         struct dmar_drhd_unit *drhd;
2687         struct intel_iommu *iommu;
2688         struct device *dev;
2689         int i;
2690         int ret = 0;
2691
2692         ret = si_domain_init(hw);
2693         if (ret)
2694                 return -EFAULT;
2695
2696         for_each_pci_dev(pdev) {
2697                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2698                 if (ret)
2699                         return ret;
2700         }
2701
2702         for_each_active_iommu(iommu, drhd)
2703                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2704                         struct acpi_device_physical_node *pn;
2705                         struct acpi_device *adev;
2706
2707                         if (dev->bus != &acpi_bus_type)
2708                                 continue;
2709
2710                         adev = to_acpi_device(dev);
2711                         mutex_lock(&adev->physical_node_lock);
2712                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2713                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2714                                 if (ret)
2715                                         break;
2716                         }
2717                         mutex_unlock(&adev->physical_node_lock);
2718                         if (ret)
2719                                 return ret;
2720                 }
2721
2722         return 0;
2723 }
2724
2725 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2726 {
2727         /*
2728          * Start from a sane iommu hardware state.
2729          * If queued invalidation was already initialized by us
2730          * (for example, while enabling interrupt remapping) then
2731          * things are already rolling from a sane state.
2732          */
2733         if (!iommu->qi) {
2734                 /*
2735                  * Clear any previous faults.
2736                  */
2737                 dmar_fault(-1, iommu);
2738                 /*
2739                  * Disable queued invalidation if supported and already enabled
2740                  * before OS handover.
2741                  */
2742                 dmar_disable_qi(iommu);
2743         }
2744
2745         if (dmar_enable_qi(iommu)) {
2746                 /*
2747                  * Queued Invalidate not enabled, use Register Based Invalidate
2748                  */
2749                 iommu->flush.flush_context = __iommu_flush_context;
2750                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2751                 pr_info("IOMMU: %s using Register based invalidation\n",
2752                         iommu->name);
2753         } else {
2754                 iommu->flush.flush_context = qi_flush_context;
2755                 iommu->flush.flush_iotlb = qi_flush_iotlb;
2756                 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2757         }
2758 }
2759
2760 static int __init init_dmars(void)
2761 {
2762         struct dmar_drhd_unit *drhd;
2763         struct dmar_rmrr_unit *rmrr;
2764         struct device *dev;
2765         struct intel_iommu *iommu;
2766         int i, ret;
2767
2768         /*
2769          * for each drhd
2770          *    allocate root
2771          *    initialize and program root entry to not present
2772          * endfor
2773          */
2774         for_each_drhd_unit(drhd) {
2775                 /*
2776                  * lock not needed as this is only incremented in the single
2777                  * threaded kernel __init code path; all other accesses are
2778                  * read only
2779                  */
2780                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
2781                         g_num_of_iommus++;
2782                         continue;
2783                 }
2784                 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2785                           DMAR_UNITS_SUPPORTED);
2786         }
2787
2788         /* Preallocate enough resources for IOMMU hot-addition */
2789         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2790                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2791
2792         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2793                         GFP_KERNEL);
2794         if (!g_iommus) {
2795                 printk(KERN_ERR "Allocating global iommu array failed\n");
2796                 ret = -ENOMEM;
2797                 goto error;
2798         }
2799
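        /*
         * Per-IOMMU tables used to batch deferred IOTLB flushes of
         * lazily unmapped IOVAs.
         */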
2800         deferred_flush = kzalloc(g_num_of_iommus *
2801                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2802         if (!deferred_flush) {
2803                 ret = -ENOMEM;
2804                 goto free_g_iommus;
2805         }
2806
2807         for_each_active_iommu(iommu, drhd) {
2808                 g_iommus[iommu->seq_id] = iommu;
2809
2810                 ret = iommu_init_domains(iommu);
2811                 if (ret)
2812                         goto free_iommu;
2813
2814                 /*
2815                  * TBD:
2816                  * we could share the same root & context tables
2817                  * among all IOMMUs. Needs to be split out later.
2818                  */
2819                 ret = iommu_alloc_root_entry(iommu);
2820                 if (ret)
2821                         goto free_iommu;
2822                 if (!ecap_pass_through(iommu->ecap))
2823                         hw_pass_through = 0;
2824         }
2825
2826         for_each_active_iommu(iommu, drhd)
2827                 intel_iommu_init_qi(iommu);
2828
2829         if (iommu_pass_through)
2830                 iommu_identity_mapping |= IDENTMAP_ALL;
2831
2832 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2833         iommu_identity_mapping |= IDENTMAP_GFX;
2834 #endif
2835
2836         check_tylersburg_isoch();
2837
2838         /*
2839          * If pass through is not set or not enabled, setup context entries for
2840          * identity mappings for rmrr, gfx, and isa and may fall back to static
2841          * identity mapping if iommu_identity_mapping is set.
2842          */
2843         if (iommu_identity_mapping) {
2844                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2845                 if (ret) {
2846                         printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2847                         goto free_iommu;
2848                 }
2849         }
2850         /*
2851          * For each rmrr
2852          *   for each dev attached to rmrr
2853          *   do
2854          *     locate drhd for dev, alloc domain for dev
2855          *     allocate free domain
2856          *     allocate page table entries for rmrr
2857          *     if context not allocated for bus
2858          *           allocate and init context
2859          *           set present in root table for this bus
2860          *     init context with domain, translation etc
2861          *    endfor
2862          * endfor
2863          */
2864         printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2865         for_each_rmrr_units(rmrr) {
2866                 /* Some BIOSes list non-existent devices in the DMAR table. */
2867                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2868                                           i, dev) {
2869                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
2870                         if (ret)
2871                                 printk(KERN_ERR
2872                                        "IOMMU: mapping reserved region failed\n");
2873                 }
2874         }
2875
2876         iommu_prepare_isa();
2877
2878         /*
2879          * for each drhd
2880          *   enable fault log
2881          *   global invalidate context cache
2882          *   global invalidate iotlb
2883          *   enable translation
2884          */
2885         for_each_iommu(iommu, drhd) {
2886                 if (drhd->ignored) {
2887                         /*
2888                          * we always have to disable PMRs or DMA may fail on
2889                          * this device
2890                          */
2891                         if (force_on)
2892                                 iommu_disable_protect_mem_regions(iommu);
2893                         continue;
2894                 }
2895
2896                 iommu_flush_write_buffer(iommu);
2897
2898                 ret = dmar_set_interrupt(iommu);
2899                 if (ret)
2900                         goto free_iommu;
2901
2902                 iommu_set_root_entry(iommu);
2903
2904                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2905                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2906                 iommu_enable_translation(iommu);
2907                 iommu_disable_protect_mem_regions(iommu);
2908         }
2909
2910         return 0;
2911
2912 free_iommu:
2913         for_each_active_iommu(iommu, drhd) {
2914                 disable_dmar_iommu(iommu);
2915                 free_dmar_iommu(iommu);
2916         }
2917         kfree(deferred_flush);
2918 free_g_iommus:
2919         kfree(g_iommus);
2920 error:
2921         return ret;
2922 }
2923
2924 /* This takes a number of _MM_ pages, not VTD pages */
2925 static struct iova *intel_alloc_iova(struct device *dev,
2926                                      struct dmar_domain *domain,
2927                                      unsigned long nrpages, uint64_t dma_mask)
2928 {
2929         struct iova *iova = NULL;
2930
2931         /* Restrict dma_mask to the width that the iommu can handle */
2932         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2933
2934         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2935                 /*
2936                  * First try to allocate an io virtual address in
2937                  * DMA_BIT_MASK(32) and if that fails then try allocating
2938                  * from higher range
2939                  */
2940                 iova = alloc_iova(&domain->iovad, nrpages,
2941                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2942                 if (iova)
2943                         return iova;
2944         }
2945         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2946         if (unlikely(!iova)) {
2947                 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2948                        nrpages, dev_name(dev));
2949                 return NULL;
2950         }
2951
2952         return iova;
2953 }
2954
2955 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2956 {
2957         struct dmar_domain *domain;
2958         int ret;
2959
2960         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2961         if (!domain) {
2962                 printk(KERN_ERR "Allocating domain for %s failed\n",
2963                        dev_name(dev));
2964                 return NULL;
2965         }
2966
2967         /* make sure context mapping is ok */
2968         if (unlikely(!domain_context_mapped(dev))) {
2969                 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2970                 if (ret) {
2971                         printk(KERN_ERR "Domain context map for %s failed\n",
2972                                dev_name(dev));
2973                         return NULL;
2974                 }
2975         }
2976
2977         return domain;
2978 }
2979
2980 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2981 {
2982         struct device_domain_info *info;
2983
2984         /* No lock here, assumes no domain exit in normal case */
2985         info = dev->archdata.iommu;
2986         if (likely(info))
2987                 return info->domain;
2988
2989         return __get_valid_domain_for_dev(dev);
2990 }
2991
2992 /* Check if the dev needs to go through the non-identity map and unmap process. */
2993 static int iommu_no_mapping(struct device *dev)
2994 {
2995         int found;
2996
2997         if (iommu_dummy(dev))
2998                 return 1;
2999
3000         if (!iommu_identity_mapping)
3001                 return 0;
3002
3003         found = identity_mapping(dev);
3004         if (found) {
3005                 if (iommu_should_identity_map(dev, 0))
3006                         return 1;
3007                 else {
3008                         /*
3009                          * 32 bit DMA devices are removed from si_domain and
3010                          * fall back to non-identity mapping.
3011                          */
3012                         domain_remove_one_dev_info(si_domain, dev);
3013                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
3014                                dev_name(dev));
3015                         return 0;
3016                 }
3017         } else {
3018                 /*
3019                  * If a 64 bit DMA device has been detached from a VM, the device
3020                  * is put back into si_domain for identity mapping.
3021                  */
3022                 if (iommu_should_identity_map(dev, 0)) {
3023                         int ret;
3024                         ret = domain_add_dev_info(si_domain, dev,
3025                                                   hw_pass_through ?
3026                                                   CONTEXT_TT_PASS_THROUGH :
3027                                                   CONTEXT_TT_MULTI_LEVEL);
3028                         if (!ret) {
3029                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
3030                                        dev_name(dev));
3031                                 return 1;
3032                         }
3033                 }
3034         }
3035
3036         return 0;
3037 }
3038
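/*
 * Map a single physically contiguous buffer for DMA: look up (or create)
 * the device's domain, allocate an IOVA range below the device's DMA mask,
 * install page-table entries with the read/write permissions implied by the
 * DMA direction, then flush either the IOTLB (caching mode) or the write
 * buffer before returning the bus address, including the page offset.
 */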
3039 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3040                                      size_t size, int dir, u64 dma_mask)
3041 {
3042         struct dmar_domain *domain;
3043         phys_addr_t start_paddr;
3044         struct iova *iova;
3045         int prot = 0;
3046         int ret;
3047         struct intel_iommu *iommu;
3048         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3049
3050         BUG_ON(dir == DMA_NONE);
3051
3052         if (iommu_no_mapping(dev))
3053                 return paddr;
3054
3055         domain = get_valid_domain_for_dev(dev);
3056         if (!domain)
3057                 return 0;
3058
3059         iommu = domain_get_iommu(domain);
3060         size = aligned_nrpages(paddr, size);
3061
3062         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3063         if (!iova)
3064                 goto error;
3065
3066         /*
3067          * Check if DMAR supports zero-length reads on write only
3068          * mappings.
3069          */
3070         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3071                         !cap_zlr(iommu->cap))
3072                 prot |= DMA_PTE_READ;
3073         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3074                 prot |= DMA_PTE_WRITE;
3075         /*
3076          * paddr .. paddr + size might span a partial page, so we map the
3077          * whole page.  Note: if two parts of one page are mapped separately,
3078          * we might end up with two guest addresses mapping to the same host
3079          * paddr, but this is not a big problem.
3080          */
3081         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3082                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3083         if (ret)
3084                 goto error;
3085
3086         /* it's a non-present to present mapping. Only flush if caching mode */
3087         if (cap_caching_mode(iommu->cap))
3088                 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3089         else
3090                 iommu_flush_write_buffer(iommu);
3091
3092         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3093         start_paddr += paddr & ~PAGE_MASK;
3094         return start_paddr;
3095
3096 error:
3097         if (iova)
3098                 __free_iova(&domain->iovad, iova);
3099         printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
3100                 dev_name(dev), size, (unsigned long long)paddr, dir);
3101         return 0;
3102 }
3103
3104 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3105                                  unsigned long offset, size_t size,
3106                                  enum dma_data_direction dir,
3107                                  struct dma_attrs *attrs)
3108 {
3109         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3110                                   dir, *dev->dma_mask);
3111 }
3112
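/*
 * Drain the per-IOMMU deferred-unmap queues: do one IOTLB flush per IOMMU
 * (global on real hardware, page-selective in caching mode), then release
 * the queued IOVAs and page-table freelists.  Called with
 * async_umap_flush_lock held.
 */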
3113 static void flush_unmaps(void)
3114 {
3115         int i, j;
3116
3117         timer_on = 0;
3118
3119         /* just flush them all */
3120         for (i = 0; i < g_num_of_iommus; i++) {
3121                 struct intel_iommu *iommu = g_iommus[i];
3122                 if (!iommu)
3123                         continue;
3124
3125                 if (!deferred_flush[i].next)
3126                         continue;
3127
3128                 /* In caching mode, global flushes make emulation expensive */
3129                 if (!cap_caching_mode(iommu->cap))
3130                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3131                                          DMA_TLB_GLOBAL_FLUSH);
3132                 for (j = 0; j < deferred_flush[i].next; j++) {
3133                         unsigned long mask;
3134                         struct iova *iova = deferred_flush[i].iova[j];
3135                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3136
3137                         /* On real hardware multiple invalidations are expensive */
3138                         if (cap_caching_mode(iommu->cap))
3139                                 iommu_flush_iotlb_psi(iommu, domain->id,
3140                                         iova->pfn_lo, iova_size(iova),
3141                                         !deferred_flush[i].freelist[j], 0);
3142                         else {
3143                                 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3144                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3145                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3146                         }
3147                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3148                         if (deferred_flush[i].freelist[j])
3149                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3150                 }
3151                 deferred_flush[i].next = 0;
3152         }
3153
3154         list_size = 0;
3155 }
3156
3157 static void flush_unmaps_timeout(unsigned long data)
3158 {
3159         unsigned long flags;
3160
3161         spin_lock_irqsave(&async_umap_flush_lock, flags);
3162         flush_unmaps();
3163         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3164 }
3165
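/*
 * Queue an IOVA (and its page-table freelist) for deferred release.  The
 * queue is drained when it reaches HIGH_WATER_MARK entries or when the
 * 10ms unmap_timer fires, batching the expensive IOTLB flushes.
 */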
3166 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3167 {
3168         unsigned long flags;
3169         int next, iommu_id;
3170         struct intel_iommu *iommu;
3171
3172         spin_lock_irqsave(&async_umap_flush_lock, flags);
3173         if (list_size == HIGH_WATER_MARK)
3174                 flush_unmaps();
3175
3176         iommu = domain_get_iommu(dom);
3177         iommu_id = iommu->seq_id;
3178
3179         next = deferred_flush[iommu_id].next;
3180         deferred_flush[iommu_id].domain[next] = dom;
3181         deferred_flush[iommu_id].iova[next] = iova;
3182         deferred_flush[iommu_id].freelist[next] = freelist;
3183         deferred_flush[iommu_id].next++;
3184
3185         if (!timer_on) {
3186                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3187                 timer_on = 1;
3188         }
3189         list_size++;
3190         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3191 }
3192
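/*
 * Tear down the translation behind a DMA handle.  In strict mode the IOTLB
 * is flushed and the IOVA freed immediately; otherwise the release is
 * handed to the deferred-flush machinery above.
 */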
3193 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3194 {
3195         struct dmar_domain *domain;
3196         unsigned long start_pfn, last_pfn;
3197         struct iova *iova;
3198         struct intel_iommu *iommu;
3199         struct page *freelist;
3200
3201         if (iommu_no_mapping(dev))
3202                 return;
3203
3204         domain = find_domain(dev);
3205         BUG_ON(!domain);
3206
3207         iommu = domain_get_iommu(domain);
3208
3209         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3210         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3211                       (unsigned long long)dev_addr))
3212                 return;
3213
3214         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3215         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3216
3217         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3218                  dev_name(dev), start_pfn, last_pfn);
3219
3220         freelist = domain_unmap(domain, start_pfn, last_pfn);
3221
3222         if (intel_iommu_strict) {
3223                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3224                                       last_pfn - start_pfn + 1, !freelist, 0);
3225                 /* free iova */
3226                 __free_iova(&domain->iovad, iova);
3227                 dma_free_pagelist(freelist);
3228         } else {
3229                 add_unmap(domain, iova, freelist);
3230                 /*
3231                  * queue up the release of the unmap to save roughly 1/6th of
3232                  * the cpu time otherwise spent on the iotlb flush operation...
3233                  */
3234         }
3235 }
3236
3237 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3238                              size_t size, enum dma_data_direction dir,
3239                              struct dma_attrs *attrs)
3240 {
3241         intel_unmap(dev, dev_addr);
3242 }
3243
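/*
 * Allocate a coherent buffer: try the contiguous (CMA) allocator first for
 * blocking allocations, fall back to alloc_pages(), zero the memory and map
 * it bidirectionally via __intel_map_single().  GFP_DMA/GFP_DMA32 are only
 * applied for devices that bypass the IOMMU, based on the coherent mask.
 */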
3244 static void *intel_alloc_coherent(struct device *dev, size_t size,
3245                                   dma_addr_t *dma_handle, gfp_t flags,
3246                                   struct dma_attrs *attrs)
3247 {
3248         struct page *page = NULL;
3249         int order;
3250
3251         size = PAGE_ALIGN(size);
3252         order = get_order(size);
3253
3254         if (!iommu_no_mapping(dev))
3255                 flags &= ~(GFP_DMA | GFP_DMA32);
3256         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3257                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3258                         flags |= GFP_DMA;
3259                 else
3260                         flags |= GFP_DMA32;
3261         }
3262
3263         if (flags & __GFP_WAIT) {
3264                 unsigned int count = size >> PAGE_SHIFT;
3265
3266                 page = dma_alloc_from_contiguous(dev, count, order);
3267                 if (page && iommu_no_mapping(dev) &&
3268                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3269                         dma_release_from_contiguous(dev, page, count);
3270                         page = NULL;
3271                 }
3272         }
3273
3274         if (!page)
3275                 page = alloc_pages(flags, order);
3276         if (!page)
3277                 return NULL;
3278         memset(page_address(page), 0, size);
3279
3280         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3281                                          DMA_BIDIRECTIONAL,
3282                                          dev->coherent_dma_mask);
3283         if (*dma_handle)
3284                 return page_address(page);
3285         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3286                 __free_pages(page, order);
3287
3288         return NULL;
3289 }
3290
3291 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3292                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3293 {
3294         int order;
3295         struct page *page = virt_to_page(vaddr);
3296
3297         size = PAGE_ALIGN(size);
3298         order = get_order(size);
3299
3300         intel_unmap(dev, dma_handle);
3301         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3302                 __free_pages(page, order);
3303 }
3304
3305 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3306                            int nelems, enum dma_data_direction dir,
3307                            struct dma_attrs *attrs)
3308 {
3309         intel_unmap(dev, sglist[0].dma_address);
3310 }
3311
3312 static int intel_nontranslate_map_sg(struct device *hddev,
3313         struct scatterlist *sglist, int nelems, int dir)
3314 {
3315         int i;
3316         struct scatterlist *sg;
3317
3318         for_each_sg(sglist, sg, nelems, i) {
3319                 BUG_ON(!sg_page(sg));
3320                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3321                 sg->dma_length = sg->length;
3322         }
3323         return nelems;
3324 }
3325
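/*
 * Map a scatterlist: devices that bypass the IOMMU get a 1:1 translation;
 * otherwise a single IOVA range covering the whole list is allocated and
 * the entries are mapped contiguously into it.
 */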
3326 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3327                         enum dma_data_direction dir, struct dma_attrs *attrs)
3328 {
3329         int i;
3330         struct dmar_domain *domain;
3331         size_t size = 0;
3332         int prot = 0;
3333         struct iova *iova = NULL;
3334         int ret;
3335         struct scatterlist *sg;
3336         unsigned long start_vpfn;
3337         struct intel_iommu *iommu;
3338
3339         BUG_ON(dir == DMA_NONE);
3340         if (iommu_no_mapping(dev))
3341                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3342
3343         domain = get_valid_domain_for_dev(dev);
3344         if (!domain)
3345                 return 0;
3346
3347         iommu = domain_get_iommu(domain);
3348
3349         for_each_sg(sglist, sg, nelems, i)
3350                 size += aligned_nrpages(sg->offset, sg->length);
3351
3352         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3353                                 *dev->dma_mask);
3354         if (!iova) {
3355                 sglist->dma_length = 0;
3356                 return 0;
3357         }
3358
3359         /*
3360          * Check if DMAR supports zero-length reads on write only
3361          * mappings.
3362          */
3363         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3364                         !cap_zlr(iommu->cap))
3365                 prot |= DMA_PTE_READ;
3366         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3367                 prot |= DMA_PTE_WRITE;
3368
3369         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3370
3371         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3372         if (unlikely(ret)) {
3373                 dma_pte_free_pagetable(domain, start_vpfn,
3374                                        start_vpfn + size - 1);
3375                 __free_iova(&domain->iovad, iova);
3376                 return 0;
3377         }
3378
3379         /* it's a non-present to present mapping. Only flush if caching mode */
3380         if (cap_caching_mode(iommu->cap))
3381                 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3382         else
3383                 iommu_flush_write_buffer(iommu);
3384
3385         return nelems;
3386 }
3387
3388 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3389 {
3390         return !dma_addr;
3391 }
3392
3393 struct dma_map_ops intel_dma_ops = {
3394         .alloc = intel_alloc_coherent,
3395         .free = intel_free_coherent,
3396         .map_sg = intel_map_sg,
3397         .unmap_sg = intel_unmap_sg,
3398         .map_page = intel_map_page,
3399         .unmap_page = intel_unmap_page,
3400         .mapping_error = intel_mapping_error,
3401 };
3402
3403 static inline int iommu_domain_cache_init(void)
3404 {
3405         int ret = 0;
3406
3407         iommu_domain_cache = kmem_cache_create("iommu_domain",
3408                                          sizeof(struct dmar_domain),
3409                                          0,
3410                                          SLAB_HWCACHE_ALIGN,
3412                                          NULL);
3413         if (!iommu_domain_cache) {
3414                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3415                 ret = -ENOMEM;
3416         }
3417
3418         return ret;
3419 }
3420
3421 static inline int iommu_devinfo_cache_init(void)
3422 {
3423         int ret = 0;
3424
3425         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3426                                          sizeof(struct device_domain_info),
3427                                          0,
3428                                          SLAB_HWCACHE_ALIGN,
3429                                          NULL);
3430         if (!iommu_devinfo_cache) {
3431                 printk(KERN_ERR "Couldn't create devinfo cache\n");
3432                 ret = -ENOMEM;
3433         }
3434
3435         return ret;
3436 }
3437
3438 static int __init iommu_init_mempool(void)
3439 {
3440         int ret;
3441         ret = iommu_iova_cache_init();
3442         if (ret)
3443                 return ret;
3444
3445         ret = iommu_domain_cache_init();
3446         if (ret)
3447                 goto domain_error;
3448
3449         ret = iommu_devinfo_cache_init();
3450         if (!ret)
3451                 return ret;
3452
3453         kmem_cache_destroy(iommu_domain_cache);
3454 domain_error:
3455         iommu_iova_cache_destroy();
3456
3457         return -ENOMEM;
3458 }
3459
3460 static void __init iommu_exit_mempool(void)
3461 {
3462         kmem_cache_destroy(iommu_devinfo_cache);
3463         kmem_cache_destroy(iommu_domain_cache);
3464         iommu_iova_cache_destroy();
3465 }
3466
3467 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3468 {
3469         struct dmar_drhd_unit *drhd;
3470         u32 vtbar;
3471         int rc;
3472
3473         /* We know that this device on this chipset has its own IOMMU.
3474          * If we find it under a different IOMMU, then the BIOS is lying
3475          * to us. Hope that the IOMMU for this device is actually
3476          * disabled, and it needs no translation...
3477          */
3478         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3479         if (rc) {
3480                 /* "can't" happen */
3481                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3482                 return;
3483         }
3484         vtbar &= 0xffff0000;
3485
3486         /* we know that this iommu should be at offset 0xa000 from vtbar */
3487         drhd = dmar_find_matched_drhd_unit(pdev);
3488         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3489                             TAINT_FIRMWARE_WORKAROUND,
3490                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3491                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3492 }
3493 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3494
3495 static void __init init_no_remapping_devices(void)
3496 {
3497         struct dmar_drhd_unit *drhd;
3498         struct device *dev;
3499         int i;
3500
3501         for_each_drhd_unit(drhd) {
3502                 if (!drhd->include_all) {
3503                         for_each_active_dev_scope(drhd->devices,
3504                                                   drhd->devices_cnt, i, dev)
3505                                 break;
3506                         /* ignore DMAR unit if no devices exist */
3507                         if (i == drhd->devices_cnt)
3508                                 drhd->ignored = 1;
3509                 }
3510         }
3511
3512         for_each_active_drhd_unit(drhd) {
3513                 if (drhd->include_all)
3514                         continue;
3515
3516                 for_each_active_dev_scope(drhd->devices,
3517                                           drhd->devices_cnt, i, dev)
3518                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3519                                 break;
3520                 if (i < drhd->devices_cnt)
3521                         continue;
3522
3523                 /* This IOMMU has *only* gfx devices. Either bypass it or
3524                    set the gfx_mapped flag, as appropriate */
3525                 if (dmar_map_gfx) {
3526                         intel_iommu_gfx_mapped = 1;
3527                 } else {
3528                         drhd->ignored = 1;
3529                         for_each_active_dev_scope(drhd->devices,
3530                                                   drhd->devices_cnt, i, dev)
3531                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3532                 }
3533         }
3534 }
3535
3536 #ifdef CONFIG_SUSPEND
3537 static int init_iommu_hw(void)
3538 {
3539         struct dmar_drhd_unit *drhd;
3540         struct intel_iommu *iommu = NULL;
3541
3542         for_each_active_iommu(iommu, drhd)
3543                 if (iommu->qi)
3544                         dmar_reenable_qi(iommu);
3545
3546         for_each_iommu(iommu, drhd) {
3547                 if (drhd->ignored) {
3548                         /*
3549                          * we always have to disable PMRs or DMA may fail on
3550                          * this device
3551                          */
3552                         if (force_on)
3553                                 iommu_disable_protect_mem_regions(iommu);
3554                         continue;
3555                 }
3556
3557                 iommu_flush_write_buffer(iommu);
3558
3559                 iommu_set_root_entry(iommu);
3560
3561                 iommu->flush.flush_context(iommu, 0, 0, 0,
3562                                            DMA_CCMD_GLOBAL_INVL);
3563                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3564                 iommu_enable_translation(iommu);
3565                 iommu_disable_protect_mem_regions(iommu);
3566         }
3567
3568         return 0;
3569 }
3570
3571 static void iommu_flush_all(void)
3572 {
3573         struct dmar_drhd_unit *drhd;
3574         struct intel_iommu *iommu;
3575
3576         for_each_active_iommu(iommu, drhd) {
3577                 iommu->flush.flush_context(iommu, 0, 0, 0,
3578                                            DMA_CCMD_GLOBAL_INVL);
3579                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3580                                          DMA_TLB_GLOBAL_FLUSH);
3581         }
3582 }
3583
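/*
 * On suspend, save each IOMMU's fault-event registers and disable
 * translation; iommu_resume() restores the registers after init_iommu_hw()
 * has re-enabled queued invalidation and translation.
 */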
3584 static int iommu_suspend(void)
3585 {
3586         struct dmar_drhd_unit *drhd;
3587         struct intel_iommu *iommu = NULL;
3588         unsigned long flag;
3589
3590         for_each_active_iommu(iommu, drhd) {
3591                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3592                                                  GFP_ATOMIC);
3593                 if (!iommu->iommu_state)
3594                         goto nomem;
3595         }
3596
3597         iommu_flush_all();
3598
3599         for_each_active_iommu(iommu, drhd) {
3600                 iommu_disable_translation(iommu);
3601
3602                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3603
3604                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3605                         readl(iommu->reg + DMAR_FECTL_REG);
3606                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3607                         readl(iommu->reg + DMAR_FEDATA_REG);
3608                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3609                         readl(iommu->reg + DMAR_FEADDR_REG);
3610                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3611                         readl(iommu->reg + DMAR_FEUADDR_REG);
3612
3613                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3614         }
3615         return 0;
3616
3617 nomem:
3618         for_each_active_iommu(iommu, drhd)
3619                 kfree(iommu->iommu_state);
3620
3621         return -ENOMEM;
3622 }
3623
3624 static void iommu_resume(void)
3625 {
3626         struct dmar_drhd_unit *drhd;
3627         struct intel_iommu *iommu = NULL;
3628         unsigned long flag;
3629
3630         if (init_iommu_hw()) {
3631                 if (force_on)
3632                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3633                 else
3634                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3635                 return;
3636         }
3637
3638         for_each_active_iommu(iommu, drhd) {
3639
3640                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3641
3642                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3643                         iommu->reg + DMAR_FECTL_REG);
3644                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3645                         iommu->reg + DMAR_FEDATA_REG);
3646                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3647                         iommu->reg + DMAR_FEADDR_REG);
3648                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3649                         iommu->reg + DMAR_FEUADDR_REG);
3650
3651                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3652         }
3653
3654         for_each_active_iommu(iommu, drhd)
3655                 kfree(iommu->iommu_state);
3656 }
3657
3658 static struct syscore_ops iommu_syscore_ops = {
3659         .resume         = iommu_resume,
3660         .suspend        = iommu_suspend,
3661 };
3662
3663 static void __init init_iommu_pm_ops(void)
3664 {
3665         register_syscore_ops(&iommu_syscore_ops);
3666 }
3667
3668 #else
3669 static inline void init_iommu_pm_ops(void) {}
3670 #endif  /* CONFIG_SUSPEND */
3671
3672
3673 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3674 {
3675         struct acpi_dmar_reserved_memory *rmrr;
3676         struct dmar_rmrr_unit *rmrru;
3677
3678         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3679         if (!rmrru)
3680                 return -ENOMEM;
3681
3682         rmrru->hdr = header;
3683         rmrr = (struct acpi_dmar_reserved_memory *)header;
3684         rmrru->base_address = rmrr->base_address;
3685         rmrru->end_address = rmrr->end_address;
3686         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3687                                 ((void *)rmrr) + rmrr->header.length,
3688                                 &rmrru->devices_cnt);
3689         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3690                 kfree(rmrru);
3691                 return -ENOMEM;
3692         }
3693
3694         list_add(&rmrru->list, &dmar_rmrr_units);
3695
3696         return 0;
3697 }
3698
3699 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3700 {
3701         struct dmar_atsr_unit *atsru;
3702         struct acpi_dmar_atsr *tmp;
3703
3704         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3705                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3706                 if (atsr->segment != tmp->segment)
3707                         continue;
3708                 if (atsr->header.length != tmp->header.length)
3709                         continue;
3710                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3711                         return atsru;
3712         }
3713
3714         return NULL;
3715 }
3716
3717 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3718 {
3719         struct acpi_dmar_atsr *atsr;
3720         struct dmar_atsr_unit *atsru;
3721
3722         if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3723                 return 0;
3724
3725         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3726         atsru = dmar_find_atsr(atsr);
3727         if (atsru)
3728                 return 0;
3729
3730         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3731         if (!atsru)
3732                 return -ENOMEM;
3733
3734         /*
3735          * If memory is allocated from slab by ACPI _DSM method, we need to
3736          * copy the memory content because the memory buffer will be freed
3737          * on return.
3738          */
3739         atsru->hdr = (void *)(atsru + 1);
3740         memcpy(atsru->hdr, hdr, hdr->length);
3741         atsru->include_all = atsr->flags & 0x1;
3742         if (!atsru->include_all) {
3743                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3744                                 (void *)atsr + atsr->header.length,
3745                                 &atsru->devices_cnt);
3746                 if (atsru->devices_cnt && atsru->devices == NULL) {
3747                         kfree(atsru);
3748                         return -ENOMEM;
3749                 }
3750         }
3751
3752         list_add_rcu(&atsru->list, &dmar_atsr_units);
3753
3754         return 0;
3755 }
3756
3757 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3758 {
3759         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3760         kfree(atsru);
3761 }
3762
3763 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3764 {
3765         struct acpi_dmar_atsr *atsr;
3766         struct dmar_atsr_unit *atsru;
3767
3768         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3769         atsru = dmar_find_atsr(atsr);
3770         if (atsru) {
3771                 list_del_rcu(&atsru->list);
3772                 synchronize_rcu();
3773                 intel_iommu_free_atsr(atsru);
3774         }
3775
3776         return 0;
3777 }
3778
3779 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3780 {
3781         int i;
3782         struct device *dev;
3783         struct acpi_dmar_atsr *atsr;
3784         struct dmar_atsr_unit *atsru;
3785
3786         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3787         atsru = dmar_find_atsr(atsr);
3788         if (!atsru)
3789                 return 0;
3790
3791         if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3792                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3793                                           i, dev)
3794                         return -EBUSY;
3795
3796         return 0;
3797 }
3798
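/*
 * Bring up a hot-added DMAR unit: verify it supports the features the
 * running configuration relies on (pass-through, snooping, superpages),
 * allocate its domain IDs and root entry, then walk the same enable
 * sequence as boot-time init: interrupts, root entry, global flushes,
 * translation enable, and attaching si_domain if one exists.
 */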
3799 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3800 {
3801         int sp, ret = 0;
3802         struct intel_iommu *iommu = dmaru->iommu;
3803
3804         if (g_iommus[iommu->seq_id])
3805                 return 0;
3806
3807         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3808                 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3809                         iommu->name);
3810                 return -ENXIO;
3811         }
3812         if (!ecap_sc_support(iommu->ecap) &&
3813             domain_update_iommu_snooping(iommu)) {
3814                 pr_warn("IOMMU: %s doesn't support snooping.\n",
3815                         iommu->name);
3816                 return -ENXIO;
3817         }
3818         sp = domain_update_iommu_superpage(iommu) - 1;
3819         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3820                 pr_warn("IOMMU: %s doesn't support large page.\n",
3821                         iommu->name);
3822                 return -ENXIO;
3823         }
3824
3825         /*
3826          * Disable translation if already enabled prior to OS handover.
3827          */
3828         if (iommu->gcmd & DMA_GCMD_TE)
3829                 iommu_disable_translation(iommu);
3830
3831         g_iommus[iommu->seq_id] = iommu;
3832         ret = iommu_init_domains(iommu);
3833         if (ret == 0)
3834                 ret = iommu_alloc_root_entry(iommu);
3835         if (ret)
3836                 goto out;
3837
3838         if (dmaru->ignored) {
3839                 /*
3840                  * we always have to disable PMRs or DMA may fail on this device
3841                  */
3842                 if (force_on)
3843                         iommu_disable_protect_mem_regions(iommu);
3844                 return 0;
3845         }
3846
3847         intel_iommu_init_qi(iommu);
3848         iommu_flush_write_buffer(iommu);
3849         ret = dmar_set_interrupt(iommu);
3850         if (ret)
3851                 goto disable_iommu;
3852
3853         iommu_set_root_entry(iommu);
3854         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3855         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3856         iommu_enable_translation(iommu);
3857
3858         if (si_domain) {
3859                 ret = iommu_attach_domain(si_domain, iommu);
3860                 if (ret < 0 || si_domain->id != ret)
3861                         goto disable_iommu;
3862                 domain_attach_iommu(si_domain, iommu);
3863         }
3864
3865         iommu_disable_protect_mem_regions(iommu);
3866         return 0;
3867
3868 disable_iommu:
3869         disable_dmar_iommu(iommu);
3870 out:
3871         free_dmar_iommu(iommu);
3872         return ret;
3873 }
3874
3875 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3876 {
3877         int ret = 0;
3878         struct intel_iommu *iommu = dmaru->iommu;
3879
3880         if (!intel_iommu_enabled)
3881                 return 0;
3882         if (iommu == NULL)
3883                 return -EINVAL;
3884
3885         if (insert) {
3886                 ret = intel_iommu_add(dmaru);
3887         } else {
3888                 disable_dmar_iommu(iommu);
3889                 free_dmar_iommu(iommu);
3890         }
3891
3892         return ret;
3893 }
3894
3895 static void intel_iommu_free_dmars(void)
3896 {
3897         struct dmar_rmrr_unit *rmrru, *rmrr_n;
3898         struct dmar_atsr_unit *atsru, *atsr_n;
3899
3900         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3901                 list_del(&rmrru->list);
3902                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3903                 kfree(rmrru);
3904         }
3905
3906         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3907                 list_del(&atsru->list);
3908                 intel_iommu_free_atsr(atsru);
3909         }
3910 }
3911
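/*
 * Decide whether ATS is usable for @dev by walking up to its PCIe root
 * port and checking whether that port appears in any ATSR device scope,
 * or whether an include-all ATSR covers the segment.
 */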
3912 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3913 {
3914         int i, ret = 1;
3915         struct pci_bus *bus;
3916         struct pci_dev *bridge = NULL;
3917         struct device *tmp;
3918         struct acpi_dmar_atsr *atsr;
3919         struct dmar_atsr_unit *atsru;
3920
3921         dev = pci_physfn(dev);
3922         for (bus = dev->bus; bus; bus = bus->parent) {
3923                 bridge = bus->self;
3924                 if (!bridge || !pci_is_pcie(bridge) ||
3925                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3926                         return 0;
3927                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3928                         break;
3929         }
3930         if (!bridge)
3931                 return 0;
3932
3933         rcu_read_lock();
3934         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3935                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3936                 if (atsr->segment != pci_domain_nr(dev->bus))
3937                         continue;
3938
3939                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3940                         if (tmp == &bridge->dev)
3941                                 goto out;
3942
3943                 if (atsru->include_all)
3944                         goto out;
3945         }
3946         ret = 0;
3947 out:
3948         rcu_read_unlock();
3949
3950         return ret;
3951 }
3952
3953 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3954 {
3955         int ret = 0;
3956         struct dmar_rmrr_unit *rmrru;
3957         struct dmar_atsr_unit *atsru;
3958         struct acpi_dmar_atsr *atsr;
3959         struct acpi_dmar_reserved_memory *rmrr;
3960
3961         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3962                 return 0;
3963
3964         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3965                 rmrr = container_of(rmrru->hdr,
3966                                     struct acpi_dmar_reserved_memory, header);
3967                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3968                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3969                                 ((void *)rmrr) + rmrr->header.length,
3970                                 rmrr->segment, rmrru->devices,
3971                                 rmrru->devices_cnt);
3972                         if (ret < 0)
3973                                 return ret;
3974                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3975                         dmar_remove_dev_scope(info, rmrr->segment,
3976                                 rmrru->devices, rmrru->devices_cnt);
3977                 }
3978         }
3979
3980         list_for_each_entry(atsru, &dmar_atsr_units, list) {
3981                 if (atsru->include_all)
3982                         continue;
3983
3984                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3985                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3986                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3987                                         (void *)atsr + atsr->header.length,
3988                                         atsr->segment, atsru->devices,
3989                                         atsru->devices_cnt);
3990                         if (ret > 0)
3991                                 break;
3992                         else if (ret < 0)
3993                                 return ret;
3994                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3995                         if (dmar_remove_dev_scope(info, atsr->segment,
3996                                         atsru->devices, atsru->devices_cnt))
3997                                 break;
3998                 }
3999         }
4000
4001         return 0;
4002 }
4003
4004 /*
4005  * Here we only respond to a device being unbound from its driver.
4006  *
4007  * A newly added device is not attached to its DMAR domain here yet; that
4008  * will happen when the device is first mapped to an iova.
4009  */
4010 static int device_notifier(struct notifier_block *nb,
4011                                   unsigned long action, void *data)
4012 {
4013         struct device *dev = data;
4014         struct dmar_domain *domain;
4015
4016         if (iommu_dummy(dev))
4017                 return 0;
4018
4019         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4020                 return 0;
4021
4022         domain = find_domain(dev);
4023         if (!domain)
4024                 return 0;
4025
4026         down_read(&dmar_global_lock);
4027         domain_remove_one_dev_info(domain, dev);
4028         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4029                 domain_exit(domain);
4030         up_read(&dmar_global_lock);
4031
4032         return 0;
4033 }
4034
4035 static struct notifier_block device_nb = {
4036         .notifier_call = device_notifier,
4037 };
4038
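/*
 * Memory hotplug notifier for the static identity (si) domain: extend the
 * identity map when memory goes online, and unmap and flush the
 * corresponding IOVA range when it goes offline or onlining is cancelled.
 */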
4039 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4040                                        unsigned long val, void *v)
4041 {
4042         struct memory_notify *mhp = v;
4043         unsigned long long start, end;
4044         unsigned long start_vpfn, last_vpfn;
4045
4046         switch (val) {
4047         case MEM_GOING_ONLINE:
4048                 start = mhp->start_pfn << PAGE_SHIFT;
4049                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4050                 if (iommu_domain_identity_map(si_domain, start, end)) {
4051                         pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4052                                 start, end);
4053                         return NOTIFY_BAD;
4054                 }
4055                 break;
4056
4057         case MEM_OFFLINE:
4058         case MEM_CANCEL_ONLINE:
4059                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4060                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4061                 while (start_vpfn <= last_vpfn) {
4062                         struct iova *iova;
4063                         struct dmar_drhd_unit *drhd;
4064                         struct intel_iommu *iommu;
4065                         struct page *freelist;
4066
4067                         iova = find_iova(&si_domain->iovad, start_vpfn);
4068                         if (iova == NULL) {
4069                                 pr_debug("dmar: failed to get IOVA for PFN %lx\n",
4070                                          start_vpfn);
4071                                 break;
4072                         }
4073
4074                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4075                                                      start_vpfn, last_vpfn);
4076                         if (iova == NULL) {
4077                                 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4078                                         start_vpfn, last_vpfn);
4079                                 return NOTIFY_BAD;
4080                         }
4081
4082                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4083                                                iova->pfn_hi);
4084
4085                         rcu_read_lock();
4086                         for_each_active_iommu(iommu, drhd)
4087                                 iommu_flush_iotlb_psi(iommu, si_domain->id,
4088                                         iova->pfn_lo, iova_size(iova),
4089                                         !freelist, 0);
4090                         rcu_read_unlock();
4091                         dma_free_pagelist(freelist);
4092
4093                         start_vpfn = iova->pfn_hi + 1;
4094                         free_iova_mem(iova);
4095                 }
4096                 break;
4097         }
4098
4099         return NOTIFY_OK;
4100 }
4101
4102 static struct notifier_block intel_iommu_memory_nb = {
4103         .notifier_call = intel_iommu_memory_notifier,
4104         .priority = 0
4105 };
4106
4107
4108 static ssize_t intel_iommu_show_version(struct device *dev,
4109                                         struct device_attribute *attr,
4110                                         char *buf)
4111 {
4112         struct intel_iommu *iommu = dev_get_drvdata(dev);
4113         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4114         return sprintf(buf, "%d:%d\n",
4115                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4116 }
4117 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4118
4119 static ssize_t intel_iommu_show_address(struct device *dev,
4120                                         struct device_attribute *attr,
4121                                         char *buf)
4122 {
4123         struct intel_iommu *iommu = dev_get_drvdata(dev);
4124         return sprintf(buf, "%llx\n", iommu->reg_phys);
4125 }
4126 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4127
4128 static ssize_t intel_iommu_show_cap(struct device *dev,
4129                                     struct device_attribute *attr,
4130                                     char *buf)
4131 {
4132         struct intel_iommu *iommu = dev_get_drvdata(dev);
4133         return sprintf(buf, "%llx\n", iommu->cap);
4134 }
4135 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4136
4137 static ssize_t intel_iommu_show_ecap(struct device *dev,
4138                                     struct device_attribute *attr,
4139                                     char *buf)
4140 {
4141         struct intel_iommu *iommu = dev_get_drvdata(dev);
4142         return sprintf(buf, "%llx\n", iommu->ecap);
4143 }
4144 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4145
4146 static struct attribute *intel_iommu_attrs[] = {
4147         &dev_attr_version.attr,
4148         &dev_attr_address.attr,
4149         &dev_attr_cap.attr,
4150         &dev_attr_ecap.attr,
4151         NULL,
4152 };
4153
4154 static struct attribute_group intel_iommu_group = {
4155         .name = "intel-iommu",
4156         .attrs = intel_iommu_attrs,
4157 };
4158
4159 const struct attribute_group *intel_iommu_groups[] = {
4160         &intel_iommu_group,
4161         NULL,
4162 };
4163
4164 int __init intel_iommu_init(void)
4165 {
4166         int ret = -ENODEV;
4167         struct dmar_drhd_unit *drhd;
4168         struct intel_iommu *iommu;
4169
4170         /* VT-d is required for a TXT/tboot launch, so enforce that */
4171         force_on = tboot_force_iommu();
4172
4173         if (iommu_init_mempool()) {
4174                 if (force_on)
4175                         panic("tboot: Failed to initialize iommu memory\n");
4176                 return -ENOMEM;
4177         }
4178
4179         down_write(&dmar_global_lock);
4180         if (dmar_table_init()) {
4181                 if (force_on)
4182                         panic("tboot: Failed to initialize DMAR table\n");
4183                 goto out_free_dmar;
4184         }
4185
4186         /*
4187          * Disable translation if already enabled prior to OS handover.
4188          */
4189         for_each_active_iommu(iommu, drhd)
4190                 if (iommu->gcmd & DMA_GCMD_TE)
4191                         iommu_disable_translation(iommu);
4192
4193         if (dmar_dev_scope_init() < 0) {
4194                 if (force_on)
4195                         panic("tboot: Failed to initialize DMAR device scope\n");
4196                 goto out_free_dmar;
4197         }
4198
4199         if (no_iommu || dmar_disabled)
4200                 goto out_free_dmar;
4201
4202         if (list_empty(&dmar_rmrr_units))
4203                 printk(KERN_INFO "DMAR: No RMRR found\n");
4204
4205         if (list_empty(&dmar_atsr_units))
4206                 printk(KERN_INFO "DMAR: No ATSR found\n");
4207
4208         if (dmar_init_reserved_ranges()) {
4209                 if (force_on)
4210                         panic("tboot: Failed to reserve iommu ranges\n");
4211                 goto out_free_reserved_range;
4212         }
4213
4214         init_no_remapping_devices();
4215
4216         ret = init_dmars();
4217         if (ret) {
4218                 if (force_on)
4219                         panic("tboot: Failed to initialize DMARs\n");
4220                 printk(KERN_ERR "IOMMU: dmar init failed\n");
4221                 goto out_free_reserved_range;
4222         }
4223         up_write(&dmar_global_lock);
4224         printk(KERN_INFO
4225         "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4226
4227         init_timer(&unmap_timer);
4228 #ifdef CONFIG_SWIOTLB
4229         swiotlb = 0;
4230 #endif
4231         dma_ops = &intel_dma_ops;
4232
4233         init_iommu_pm_ops();
4234
4235         for_each_active_iommu(iommu, drhd)
4236                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4237                                                        intel_iommu_groups,
4238                                                        iommu->name);
4239
4240         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4241         bus_register_notifier(&pci_bus_type, &device_nb);
4242         if (si_domain && !hw_pass_through)
4243                 register_memory_notifier(&intel_iommu_memory_nb);
4244
4245         intel_iommu_enabled = 1;
4246
4247         return 0;
4248
4249 out_free_reserved_range:
4250         put_iova_domain(&reserved_iova_list);
4251 out_free_dmar:
4252         intel_iommu_free_dmars();
4253         up_write(&dmar_global_lock);
4254         iommu_exit_mempool();
4255         return ret;
4256 }
4257
4258 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4259 {
4260         struct intel_iommu *iommu = opaque;
4261
4262         iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4263         return 0;
4264 }
4265
4266 /*
4267  * NB - intel-iommu lacks any sort of reference counting for the users of
4268  * dependent devices.  If multiple endpoints have intersecting dependent
4269  * devices, unbinding the driver from any one of them will possibly leave
4270  * the others unable to operate.
4271  */
4272 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4273                                            struct device *dev)
4274 {
4275         if (!iommu || !dev || !dev_is_pci(dev))
4276                 return;
4277
4278         pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4279 }
4280
4281 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4282                                        struct device *dev)
4283 {
4284         struct device_domain_info *info, *tmp;
4285         struct intel_iommu *iommu;
4286         unsigned long flags;
4287         bool found = false;
4288         u8 bus, devfn;
4289
4290         iommu = device_to_iommu(dev, &bus, &devfn);
4291         if (!iommu)
4292                 return;
4293
4294         spin_lock_irqsave(&device_domain_lock, flags);
4295         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4296                 if (info->iommu == iommu && info->bus == bus &&
4297                     info->devfn == devfn) {
4298                         unlink_domain_info(info);
4299                         spin_unlock_irqrestore(&device_domain_lock, flags);
4300
4301                         iommu_disable_dev_iotlb(info);
4302                         iommu_detach_dev(iommu, info->bus, info->devfn);
4303                         iommu_detach_dependent_devices(iommu, dev);
4304                         free_devinfo_mem(info);
4305
4306                         spin_lock_irqsave(&device_domain_lock, flags);
4307
4308                         if (found)
4309                                 break;
4310                         else
4311                                 continue;
4312                 }
4313
4314                 /* If there are no other devices under the same iommu
4315                  * owned by this domain, clear this iommu in iommu_bmp
4316                  * and update the iommu count and coherency.
4317                  */
4318                 if (info->iommu == iommu)
4319                         found = true;
4320         }
4321
4322         spin_unlock_irqrestore(&device_domain_lock, flags);
4323
4324         if (!found) {
4325                 domain_detach_iommu(domain, iommu);
4326                 if (!domain_type_is_vm_or_si(domain))
4327                         iommu_detach_domain(domain, iommu);
4328         }
4329 }
4330
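/*
 * Minimal domain init used for iommu-API domains: set up the IOVA
 * allocator and address widths and allocate the top-level page directory,
 * without binding the domain to any specific hardware unit yet.
 */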
4331 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4332 {
4333         int adjust_width;
4334
4335         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4336                         DMA_32BIT_PFN);
4337         domain_reserve_special_ranges(domain);
4338
4339         /* calculate AGAW */
4340         domain->gaw = guest_width;
4341         adjust_width = guestwidth_to_adjustwidth(guest_width);
4342         domain->agaw = width_to_agaw(adjust_width);
4343
4344         domain->iommu_coherency = 0;
4345         domain->iommu_snooping = 0;
4346         domain->iommu_superpage = 0;
4347         domain->max_addr = 0;
4348
4349         /* always allocate the top pgd */
4350         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4351         if (!domain->pgd)
4352                 return -ENOMEM;
4353         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4354         return 0;
4355 }
4356
4357 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4358 {
4359         struct dmar_domain *dmar_domain;
4360         struct iommu_domain *domain;
4361
4362         if (type != IOMMU_DOMAIN_UNMANAGED)
4363                 return NULL;
4364
4365         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4366         if (!dmar_domain) {
4367                 printk(KERN_ERR
4368                         "intel_iommu_domain_init: dmar_domain == NULL\n");
4369                 return NULL;
4370         }
4371         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4372                 printk(KERN_ERR
4373                         "intel_iommu_domain_init() failed\n");
4374                 domain_exit(dmar_domain);
4375                 return NULL;
4376         }
4377         domain_update_iommu_cap(dmar_domain);
4378
4379         domain = &dmar_domain->domain;
4380         domain->geometry.aperture_start = 0;
4381         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4382         domain->geometry.force_aperture = true;
4383
4384         return domain;
4385 }
4386
4387 static void intel_iommu_domain_free(struct iommu_domain *domain)
4388 {
4389         domain_exit(to_dmar_domain(domain));
4390 }
4391
4392 static int intel_iommu_attach_device(struct iommu_domain *domain,
4393                                      struct device *dev)
4394 {
4395         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4396         struct intel_iommu *iommu;
4397         int addr_width;
4398         u8 bus, devfn;
4399
4400         if (device_is_rmrr_locked(dev)) {
4401                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4402                 return -EPERM;
4403         }
4404
4405         /* normally dev is not mapped */
4406         if (unlikely(domain_context_mapped(dev))) {
4407                 struct dmar_domain *old_domain;
4408
4409                 old_domain = find_domain(dev);
4410                 if (old_domain) {
4411                         if (domain_type_is_vm_or_si(dmar_domain))
4412                                 domain_remove_one_dev_info(old_domain, dev);
4413                         else
4414                                 domain_remove_dev_info(old_domain);
4415
4416                         if (!domain_type_is_vm_or_si(old_domain) &&
4417                              list_empty(&old_domain->devices))
4418                                 domain_exit(old_domain);
4419                 }
4420         }
4421
4422         iommu = device_to_iommu(dev, &bus, &devfn);
4423         if (!iommu)
4424                 return -ENODEV;
4425
4426         /* check if this iommu agaw is sufficient for max mapped address */
4427         addr_width = agaw_to_width(iommu->agaw);
4428         if (addr_width > cap_mgaw(iommu->cap))
4429                 addr_width = cap_mgaw(iommu->cap);
4430
4431         if (dmar_domain->max_addr > (1LL << addr_width)) {
4432                 printk(KERN_ERR "%s: iommu width (%d) is not "
4433                        "sufficient for the mapped address (%llx)\n",
4434                        __func__, addr_width, dmar_domain->max_addr);
4435                 return -EFAULT;
4436         }
4437         dmar_domain->gaw = addr_width;
4438
4439         /*
4440          * Knock out extra levels of page tables if necessary
4441          */
4442         while (iommu->agaw < dmar_domain->agaw) {
4443                 struct dma_pte *pte;
4444
4445                 pte = dmar_domain->pgd;
4446                 if (dma_pte_present(pte)) {
4447                         dmar_domain->pgd = (struct dma_pte *)
4448                                 phys_to_virt(dma_pte_addr(pte));
4449                         free_pgtable_page(pte);
4450                 }
4451                 dmar_domain->agaw--;
4452         }
4453
4454         return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
4455 }
4456
4457 static void intel_iommu_detach_device(struct iommu_domain *domain,
4458                                       struct device *dev)
4459 {
4460         domain_remove_one_dev_info(to_dmar_domain(domain), dev);
4461 }
4462
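/*
 * Map @size bytes of physical memory at @hpa to @iova. IOMMU_READ,
 * IOMMU_WRITE and IOMMU_CACHE are translated to DMA_PTE_READ, _WRITE
 * and _SNP (the latter only if the domain supports snoop control),
 * and the request fails with -EFAULT if it would extend past the
 * address width the domain was sized for.
 */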
4463 static int intel_iommu_map(struct iommu_domain *domain,
4464                            unsigned long iova, phys_addr_t hpa,
4465                            size_t size, int iommu_prot)
4466 {
4467         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4468         u64 max_addr;
4469         int prot = 0;
4470         int ret;
4471
4472         if (iommu_prot & IOMMU_READ)
4473                 prot |= DMA_PTE_READ;
4474         if (iommu_prot & IOMMU_WRITE)
4475                 prot |= DMA_PTE_WRITE;
4476         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4477                 prot |= DMA_PTE_SNP;
4478
4479         max_addr = iova + size;
4480         if (dmar_domain->max_addr < max_addr) {
4481                 u64 end;
4482
4483                 /* check if minimum agaw is sufficient for mapped address */
4484                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4485                 if (end < max_addr) {
4486                         printk(KERN_ERR "%s: iommu width (%d) is not "
4487                                "sufficient for the mapped address (%llx)\n",
4488                                __func__, dmar_domain->gaw, max_addr);
4489                         return -EFAULT;
4490                 }
4491                 dmar_domain->max_addr = max_addr;
4492         }
4493         /* Round up size to next multiple of PAGE_SIZE, if it and
4494            the low bits of hpa would take us onto the next page */
4495         size = aligned_nrpages(hpa, size);
4496         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4497                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4498         return ret;
4499 }
4500
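/*
 * Unmap at least @size bytes at @iova. If the IOVA falls inside a
 * large-page mapping, the whole superpage is unmapped and the larger
 * size is returned; the IOTLB is then flushed on every IOMMU this
 * domain is currently attached to.
 */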
4501 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4502                                 unsigned long iova, size_t size)
4503 {
4504         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4505         struct page *freelist = NULL;
4506         struct intel_iommu *iommu;
4507         unsigned long start_pfn, last_pfn;
4508         unsigned int npages;
4509         int iommu_id, num, ndomains, level = 0;
4510
4511         /* Cope with horrid API which requires us to unmap more than the
4512            size argument if it happens to be a large-page mapping. */
4513         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4514                 BUG();
4515
4516         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4517                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4518
4519         start_pfn = iova >> VTD_PAGE_SHIFT;
4520         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4521
4522         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4523
4524         npages = last_pfn - start_pfn + 1;
4525
4526         for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4527                 iommu = g_iommus[iommu_id];
4528
4529                 /*
4530                  * find bit position of dmar_domain
4531                  */
4532                 ndomains = cap_ndoms(iommu->cap);
4533                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4534                         if (iommu->domains[num] == dmar_domain)
4535                                 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4536                                                       npages, !freelist, 0);
4537                 }
4538
4539         }
4540
4541         dma_free_pagelist(freelist);
4542
4543         if (dmar_domain->max_addr == iova + size)
4544                 dmar_domain->max_addr = iova;
4545
4546         return size;
4547 }
4548
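/*
 * Look up the leaf PTE for @iova and return the physical address it
 * holds, or 0 if nothing is mapped there.
 */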
4549 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4550                                             dma_addr_t iova)
4551 {
4552         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4553         struct dma_pte *pte;
4554         int level = 0;
4555         u64 phys = 0;
4556
4557         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4558         if (pte)
4559                 phys = dma_pte_addr(pte);
4560
4561         return phys;
4562 }
4563
4564 static bool intel_iommu_capable(enum iommu_cap cap)
4565 {
4566         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4567                 return domain_update_iommu_snooping(NULL) == 1;
4568         if (cap == IOMMU_CAP_INTR_REMAP)
4569                 return irq_remapping_enabled == 1;
4570
4571         return false;
4572 }
4573
4574 static int intel_iommu_add_device(struct device *dev)
4575 {
4576         struct intel_iommu *iommu;
4577         struct iommu_group *group;
4578         u8 bus, devfn;
4579
4580         iommu = device_to_iommu(dev, &bus, &devfn);
4581         if (!iommu)
4582                 return -ENODEV;
4583
4584         iommu_device_link(iommu->iommu_dev, dev);
4585
4586         group = iommu_group_get_for_dev(dev);
4587
4588         if (IS_ERR(group))
4589                 return PTR_ERR(group);
4590
4591         iommu_group_put(group);
4592         return 0;
4593 }
4594
4595 static void intel_iommu_remove_device(struct device *dev)
4596 {
4597         struct intel_iommu *iommu;
4598         u8 bus, devfn;
4599
4600         iommu = device_to_iommu(dev, &bus, &devfn);
4601         if (!iommu)
4602                 return;
4603
4604         iommu_group_remove_device(dev);
4605
4606         iommu_device_unlink(iommu->iommu_dev, dev);
4607 }
4608
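/*
 * Callback table registered with the generic IOMMU core; the domain,
 * mapping and device add/remove handlers above are wired in here.
 */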
4609 static const struct iommu_ops intel_iommu_ops = {
4610         .capable        = intel_iommu_capable,
4611         .domain_alloc   = intel_iommu_domain_alloc,
4612         .domain_free    = intel_iommu_domain_free,
4613         .attach_dev     = intel_iommu_attach_device,
4614         .detach_dev     = intel_iommu_detach_device,
4615         .map            = intel_iommu_map,
4616         .unmap          = intel_iommu_unmap,
4617         .map_sg         = default_iommu_map_sg,
4618         .iova_to_phys   = intel_iommu_iova_to_phys,
4619         .add_device     = intel_iommu_add_device,
4620         .remove_device  = intel_iommu_remove_device,
4621         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4622 };
4623
4624 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4625 {
4626         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4627         printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4628         dmar_map_gfx = 0;
4629 }
4630
4631 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4632 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4633 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4634 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4635 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4636 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4637 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4638
4639 static void quirk_iommu_rwbf(struct pci_dev *dev)
4640 {
4641         /*
4642          * Mobile 4 Series Chipset neglects to set RWBF capability,
4643          * but needs it. Same seems to hold for the desktop versions.
4644          */
4645         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4646         rwbf_quirk = 1;
4647 }
4648
4649 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4650 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4651 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4652 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4653 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4654 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4655 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4656
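/*
 * Graphics Control (GGC) register fields as interpreted by
 * quirk_calpella_no_shadow_gtt() below: with GGC_MEMORY_VT_ENABLED
 * clear, the BIOS allocated no shadow GTT and graphics DMAR must be
 * disabled.
 */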
4657 #define GGC 0x52
4658 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4659 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4660 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4661 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4662 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4663 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4664 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4665 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4666
4667 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4668 {
4669         unsigned short ggc;
4670
4671         if (pci_read_config_word(dev, GGC, &ggc))
4672                 return;
4673
4674         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4675                 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4676                 dmar_map_gfx = 0;
4677         } else if (dmar_map_gfx) {
4678                 /* we have to ensure the gfx device is idle before we flush */
4679                 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4680                 intel_iommu_strict = 1;
4681         }
4682 }
4683 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4684 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4685 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4686 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4687
4688 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4689    ISOCH DMAR unit for the Azalia sound device, but not give it any
4690    TLB entries, which causes it to deadlock. Check for that.  We do
4691    this in a function called from init_dmars(), instead of in a PCI
4692    quirk, because we don't want to print the obnoxious "BIOS broken"
4693    message if VT-d is actually disabled.
4694 */
4695 static void __init check_tylersburg_isoch(void)
4696 {
4697         struct pci_dev *pdev;
4698         uint32_t vtisochctrl;
4699
4700         /* If there's no Azalia in the system anyway, forget it. */
4701         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4702         if (!pdev)
4703                 return;
4704         pci_dev_put(pdev);
4705
4706         /* System Management Registers. Might be hidden, in which case
4707            we can't do the sanity check. But that's OK, because the
4708            known-broken BIOSes _don't_ actually hide it, so far. */
4709         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4710         if (!pdev)
4711                 return;
4712
4713         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4714                 pci_dev_put(pdev);
4715                 return;
4716         }
4717
4718         pci_dev_put(pdev);
4719
4720         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4721         if (vtisochctrl & 1)
4722                 return;
4723
4724         /* Drop all bits other than the number of TLB entries */
4725         vtisochctrl &= 0x1c;
4726
4727         /* If we have the recommended number of TLB entries (16), fine. */
4728         if (vtisochctrl == 0x10)
4729                 return;
4730
4731         /* Zero TLB entries? Warn and force Azalia identity mapping. */
4732         if (!vtisochctrl) {
4733                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4734                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4735                      dmi_get_system_info(DMI_BIOS_VENDOR),
4736                      dmi_get_system_info(DMI_BIOS_VERSION),
4737                      dmi_get_system_info(DMI_PRODUCT_VERSION));
4738                 iommu_identity_mapping |= IDENTMAP_AZALIA;
4739                 return;
4740         }
4741
4742         printk(KERN_WARNING "DMAR: Recommended number of TLB entries for the ISOCH unit is 16; your BIOS set %d\n",
4743                vtisochctrl);
4744 }