/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include "internal.h"

#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
        struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);

extern char line_buf[80];
#endif                          /* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
                                      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
                                      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
        struct list_head list;
        void __iomem *virt;
        acpi_physical_address phys;
        acpi_size size;
        unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
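/*
 * Locking model for the list above: readers walk acpi_ioremaps under
 * rcu_read_lock(), while insertions and removals are serialized by
 * acpi_ioremap_lock; acpi_os_map_cleanup() below waits for an RCU grace
 * period (synchronize_rcu_expedited()) before a dropped mapping is
 * unmapped and freed.
 */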

static void __init acpi_osi_setup_late(void);

/*
 * The story of _OSI(Linux)
 *
 * From pre-history through Linux-2.6.22,
 * Linux responded TRUE upon a BIOS OSI(Linux) query.
 *
 * Unfortunately, reference BIOS writers got wind of this
 * and put OSI(Linux) in their example code, quickly exposing
 * this string as ill-conceived and opening the door to
 * an unbounded number of BIOS incompatibilities.
 *
 * For example, OSI(Linux) was used on resume to re-POST a
 * video card on one system, because Linux at that time
 * could not do a speedy restore in its native driver.
 * But then upon gaining quick native restore capability,
 * Linux has no way to tell the BIOS to skip the time-consuming
 * POST -- putting Linux at a permanent performance disadvantage.
 * On another system, the BIOS writer used OSI(Linux)
 * to infer native OS support for IPMI!  On other systems,
 * OSI(Linux) simply got in the way of Linux claiming to
 * be compatible with other operating systems, exposing
 * BIOS issues such as skipped device initialization.
 *
 * So "Linux" turned out to be a really poor choice of
 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.
 * Linux will complain on the console when it sees it, and return FALSE.
 * Getting Linux to return TRUE for your system requires either
 * a kernel source update to add a DMI entry,
 * or booting with "acpi_osi=Linux".
 */
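/*
 * Illustrative sketch only (the board strings are hypothetical): a DMI
 * quirk that re-enables OSI(Linux) for one machine conventionally lives
 * in a dmi_system_id table (see drivers/acpi/blacklist.c) and calls
 * acpi_dmi_osi_linux() from its callback:
 *
 *      static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
 *      {
 *              acpi_dmi_osi_linux(1, d);       // enable _OSI(Linux)
 *              return 0;
 *      }
 *      ...
 *      {
 *       .callback = dmi_enable_osi_linux,
 *       .ident = "Example Laptop",
 *       .matches = {
 *                   DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
 *                   DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
 *                   },
 *      },
 *
 * Without such a quirk, booting with "acpi_osi=Linux" has the same effect.
 */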

static struct osi_linux {
        unsigned int    enable:1;
        unsigned int    dmi:1;
        unsigned int    cmdline:1;
        unsigned int    default_disabling:1;
} osi_linux = {0, 0, 0, 0};

static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
        if (!strcmp("Linux", interface)) {
                printk_once(KERN_NOTICE FW_BUG PREFIX
                        "BIOS _OSI(Linux) query %s%s\n",
                        osi_linux.enable ? "honored" : "ignored",
                        osi_linux.cmdline ? " via cmdline" :
                        osi_linux.dmi ? " via DMI" : "");
        }

        if (!strcmp("Darwin", interface)) {
                /*
                 * Apple firmware will behave poorly if it receives positive
                 * answers to "Darwin" and any other OS. Respond positively
                 * to Darwin and then disable all other vendor strings.
                 */
                acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
                supported = ACPI_UINT32_MAX;
        }

        return supported;
}

static void __init acpi_request_region(struct acpi_generic_address *gas,
        unsigned int length, char *desc)
{
        u64 addr;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !length)
                return;

        acpi_reserve_region(addr, length, gas->space_id, 0, desc);
}

static void __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1b_CNT_BLK");

        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
                "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                               acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
}

void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}

void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        vsnprintf(buffer, sizeof(buffer), fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                printk(KERN_CONT "%s", buffer);
        }
#else
        printk(KERN_CONT "%s", buffer);
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
        if (kstrtoul(arg, 16, &acpi_rsdp))
                return -EINVAL;
        return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
        if (acpi_rsdp)
                return acpi_rsdp;
#endif

        if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                else {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return 0;
                }
        } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
                acpi_physical_address pa = 0;

                acpi_find_root_pointer(&pa);
                return pa;
        }

        return 0;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->phys <= phys &&
                    phys + size <= map->phys + map->size)
                        return map;

        return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;

        map = acpi_map_lookup(phys, size);
        if (map)
                return map->virt + (phys - map->phys);

        return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;
        void __iomem *virt = NULL;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(phys, size);
        if (map) {
                virt = map->virt + (phys - map->phys);
                map->refcount++;
        }
        mutex_unlock(&acpi_ioremap_lock);
        return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->virt <= virt &&
                    virt + size <= map->virt + map->size)
                        return map;

        return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif
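/*
 * Note on the choice above: kmap() can only map a single page at a time,
 * which is why acpi_map() below refuses kmap-based mappings larger than
 * PAGE_SIZE; on IA64 and ARM64 plain ioremap() is always used instead.
 */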

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn)) {
                if (pg_sz > PAGE_SIZE)
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
                return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn))
                kunmap(pfn_to_page(pfn));
        else
                iounmap(vaddr);
}

void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;
        void __iomem *virt;
        acpi_physical_address pg_off;
        acpi_size pg_sz;

        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }

        if (!acpi_gbl_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);

        mutex_lock(&acpi_ioremap_lock);
        /* Check if there's a suitable mapping already. */
        map = acpi_map_lookup(phys, size);
        if (map) {
                map->refcount++;
                goto out;
        }

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return NULL;
        }

        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
        virt = acpi_map(pg_off, pg_sz);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
                return NULL;
        }

        INIT_LIST_HEAD(&map->list);
        map->virt = virt;
        map->phys = pg_off;
        map->size = pg_sz;
        map->refcount = 1;

        list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
        if (!--map->refcount)
                list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
        if (!map->refcount) {
                synchronize_rcu_expedited();
                acpi_unmap(map->phys, map->virt);
                kfree(map);
        }
}

void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        if (!acpi_gbl_permanent_mmap) {
                __acpi_unmap_table(virt, size);
                return;
        }

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup_virt(virt, size);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
        return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
        if (!acpi_gbl_permanent_mmap)
                __acpi_unmap_table(virt, size);
}

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        void __iomem *virt;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return 0;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return -EINVAL;

        virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
        if (!virt)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        struct acpi_ioremap *map;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(addr, gas->bit_width / 8);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string *new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        return AE_OK;
}

#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

static u64 acpi_tables_addr;
static int all_tables_size;

/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
        u8 sum = 0;
        u8 *end = buffer + length;

        while (buffer < end)
                sum = (u8) (sum + *(buffer++));
        return sum;
}

/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
        ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
        ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
        ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
        ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
        ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
        ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
        ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
        ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
        ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];

#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)

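/*
 * A minimal sketch of how override tables reach this function (the layout
 * follows Documentation/acpi/initrd_table_override.txt; only the
 * "kernel/firmware/acpi/" prefix is required by the code below):
 *
 *      mkdir -p kernel/firmware/acpi
 *      cp DSDT.aml kernel/firmware/acpi/
 *      find kernel | cpio -H newc --create > instrumented_initrd
 *      cat instrumented_initrd /boot/initrd > /boot/instrumented_initrd
 *
 * i.e. an uncompressed cpio archive holding the AML files is prepended to
 * the regular initrd.  On x86, MAP_CHUNK_SIZE evaluates to
 * NR_FIX_BTMAPS (64) << PAGE_SHIFT (12) = 256 KB, the early_ioremap()
 * limit honored by the copy loop below.
 */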
void __init acpi_initrd_override(void *data, size_t size)
{
        int sig, no, table_nr = 0, total_offset = 0;
        long offset = 0;
        struct acpi_table_header *table;
        char cpio_path[32] = "kernel/firmware/acpi/";
        struct cpio_data file;

        if (data == NULL || size == 0)
                return;

        for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
                file = find_cpio_data(cpio_path, data, size, &offset);
                if (!file.data)
                        break;

                data += offset;
                size -= offset;

                if (file.size < sizeof(struct acpi_table_header)) {
                        pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }

                table = file.data;

                for (sig = 0; table_sigs[sig]; sig++)
                        if (!memcmp(table->signature, table_sigs[sig], 4))
                                break;

                if (!table_sigs[sig]) {
                        pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }
                if (file.size != table->length) {
                        pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }
                if (acpi_table_checksum(file.data, table->length)) {
                        pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }

                pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
                        table->signature, cpio_path, file.name, table->length);

                all_tables_size += table->length;
                acpi_initrd_files[table_nr].data = file.data;
                acpi_initrd_files[table_nr].size = file.size;
                table_nr++;
        }
        if (table_nr == 0)
                return;

        acpi_tables_addr =
                memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
                                       all_tables_size, PAGE_SIZE);
        if (!acpi_tables_addr) {
                WARN_ON(1);
                return;
        }
        /*
         * Calling e820_add_region alone does not work: the tables end up
         * invalid later because the memory gets reused.
         * memblock_reserve works as expected and the tables won't get
         * modified, but it's not enough on x86, because ioremap (used by
         * acpi_os_map_memory) will later complain that the pages to be
         * mapped are not marked "reserved".
         * Using both memblock_reserve and e820_add_region (via
         * arch_reserve_mem_area) works fine.
         */
        memblock_reserve(acpi_tables_addr, all_tables_size);
        arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

        /*
         * early_ioremap can only remap 256 KB at a time. If we mapped all
         * tables at once, we would hit that limit, so map and copy the
         * tables chunk by chunk, as relocate_initrd() does.
         */
        for (no = 0; no < table_nr; no++) {
                unsigned char *src_p = acpi_initrd_files[no].data;
                phys_addr_t size = acpi_initrd_files[no].size;
                phys_addr_t dest_addr = acpi_tables_addr + total_offset;
                phys_addr_t slop, clen;
                char *dest_p;

                total_offset += size;

                while (size) {
                        slop = dest_addr & ~PAGE_MASK;
                        clen = size;
                        if (clen > MAP_CHUNK_SIZE - slop)
                                clen = MAP_CHUNK_SIZE - slop;
                        dest_p = early_ioremap(dest_addr & PAGE_MASK,
                                                 clen + slop);
                        memcpy(dest_p + slop, src_p, clen);
                        early_iounmap(dest_p, clen + slop);
                        src_p += clen;
                        dest_addr += clen;
                        size -= clen;
                }
        }
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */

static void acpi_table_taint(struct acpi_table_header *table)
{
        pr_warn(PREFIX
                "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
                table->signature, table->oem_table_id);
        add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}

acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
                       struct acpi_table_header **new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

        *new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
        if (strncmp(existing_table->signature, "DSDT", 4) == 0)
                *new_table = (struct acpi_table_header *)AmlCode;
#endif
        if (*new_table != NULL)
                acpi_table_taint(existing_table);
        return AE_OK;
}

acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
                                acpi_physical_address *address,
                                u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
        *table_length = 0;
        *address = 0;
        return AE_OK;
#else
        int table_offset = 0;
        struct acpi_table_header *table;

        *table_length = 0;
        *address = 0;

        if (!acpi_tables_addr)
                return AE_OK;

        do {
                if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
                        WARN_ON(1);
                        return AE_OK;
                }

                table = acpi_os_map_memory(acpi_tables_addr + table_offset,
                                           ACPI_HEADER_SIZE);

                if (table_offset + table->length > all_tables_size) {
                        acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
                        WARN_ON(1);
                        return AE_OK;
                }

                table_offset += table->length;

                if (memcmp(existing_table->signature, table->signature, 4)) {
                        acpi_os_unmap_memory(table,
                                     ACPI_HEADER_SIZE);
                        continue;
                }

                /* Only override tables with matching oem id */
                if (memcmp(table->oem_table_id, existing_table->oem_table_id,
                           ACPI_OEM_TABLE_ID_SIZE)) {
                        acpi_os_unmap_memory(table,
                                     ACPI_HEADER_SIZE);
                        continue;
                }

                table_offset -= table->length;
                *table_length = table->length;
                acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
                *address = acpi_tables_addr + table_offset;
                break;
        } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

        if (*address != 0)
                acpi_table_taint(existing_table);
        return AE_OK;
#endif
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
        u32 handled;

        handled = (*acpi_irq_handler) (acpi_irq_context);

        if (handled) {
                acpi_irq_handled++;
                return IRQ_HANDLED;
        } else {
                acpi_irq_not_handled++;
                return IRQ_NONE;
        }
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
                                  void *context)
{
        unsigned int irq;

        acpi_irq_stats_init();

        /*
         * ACPI interrupts different from the SCI in our copy of the FADT are
         * not supported.
         */
        if (gsi != acpi_gbl_FADT.sci_interrupt)
                return AE_BAD_PARAMETER;

        if (acpi_irq_handler)
                return AE_ALREADY_ACQUIRED;

        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
                       gsi);
                return AE_OK;
        }

        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                acpi_irq_handler = NULL;
                return AE_NOT_ACQUIRED;
        }

        return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
        if (irq != acpi_gbl_FADT.sci_interrupt)
                return AE_BAD_PARAMETER;

        free_irq(irq, acpi_irq);
        acpi_irq_handler = NULL;

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
        msleep(ms);
}

void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns a 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
        u64 time_ns = ktime_to_ns(ktime_get());
        do_div(time_ns, 100);
        return time_ns;
}
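/*
 * Worked example for the granularity above: a 250 us interval from
 * ktime_get() is 250,000 ns, which the division by 100 turns into
 * 2,500 timer ticks of 100 ns each.
 */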

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        *value = 0;
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_write_port);

#ifdef readq
static inline u64 read64(const volatile void __iomem *addr)
{
        return readq(addr);
}
#else
static inline u64 read64(const volatile void __iomem *addr)
{
        u64 l, h;

        l = readl(addr);
        h = readl(addr + 4);
        return l | (h << 32);
}
#endif
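/*
 * Note: without a native readq, the fallback above issues two 32-bit
 * reads (low word first), so a 64-bit read is not atomic with respect
 * to a concurrently changing register.
 */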

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;
        u64 dummy;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8 *) value = readb(virt_addr);
                break;
        case 16:
                *(u16 *) value = readw(virt_addr);
                break;
        case 32:
                *(u32 *) value = readl(virt_addr);
                break;
        case 64:
                *(u64 *) value = read64(virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

#ifdef writeq
static inline void write64(u64 val, volatile void __iomem *addr)
{
        writeq(val, addr);
}
#else
static inline void write64(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}
#endif
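/*
 * As with read64() above, the non-writeq fallback performs two 32-bit
 * writes (low word first), so 64-bit stores are likewise not atomic.
 */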

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        switch (width) {
        case 8:
                writeb(value, virt_addr);
                break;
        case 16:
                writew(value, virt_addr);
                break;
        case 32:
                writel(value, virt_addr);
                break;
        case 64:
                write64(value, virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
                               u64 *value, u32 width)
{
        int result, size;
        u32 value32;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_read(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, &value32);
        *value = value32;

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
                                u64 value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_write(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

        dpc->function(dpc->context);
        kfree(dpc);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/
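/*
 * Hedged usage sketch (my_notify_fn and ctx are hypothetical): an ACPICA
 * caller defers work with
 *
 *      status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_fn, ctx);
 *
 * my_notify_fn(ctx) then runs later on kacpi_notify_wq; any other type
 * lands on kacpid_wq, and both are queued on CPU 0 (see the comment in
 * the function body).
 */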

acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct workqueue_struct *queue;
        int ret;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return AE_NO_MEMORY;

        dpc->function = function;
        dpc->context = context;

        /*
         * To prevent lockdep from complaining unnecessarily, make sure that
         * there is a different static lockdep key for each workqueue by using
         * INIT_WORK() for each of them separately.
         */
        if (type == OSL_NOTIFY_HANDLER) {
                queue = kacpi_notify_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else {
                queue = kacpid_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        }

        /*
         * On some machines, a software-initiated SMI causes corruption unless
         * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
         * typically it's done in GPE-related methods that are run via
         * workqueues, so we can avoid the known corruption cases by always
         * queueing on CPU 0.
         */
        ret = queue_work_on(0, queue, &dpc->work);
        if (!ret) {
                printk(KERN_ERR PREFIX
                          "Call to queue_work() failed.\n");
                status = AE_ERROR;
                kfree(dpc);
        }
        return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
        /*
         * Make sure the GPE handler or the fixed event handler is not used
         * on another CPU after removal.
         */
        if (acpi_irq_handler)
                synchronize_hardirq(acpi_gbl_FADT.sci_interrupt);
        flush_workqueue(kacpid_wq);
        flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
        struct work_struct work;
        struct acpi_device *adev;
        u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
        struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

        acpi_os_wait_events_complete();
        acpi_device_hotplug(hpw->adev, hpw->src);
        kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
        struct acpi_hp_work *hpw;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
                  adev, src));

        hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
        if (!hpw)
                return AE_NO_MEMORY;

        INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
        hpw->adev = adev;
        hpw->src = src;
        /*
         * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
         * the hotplug code may call driver .remove() functions, which may
         * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
         * these workqueues.
         */
        if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
                kfree(hpw);
                return AE_ERROR;
        }
        return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
        return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
        struct semaphore *sem = NULL;

        sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        BUG_ON(!list_empty(&sem->wait_list));
        kfree(sem);
        sem = NULL;

        return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        long jiffies;
        int ret = 0;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        if (timeout == ACPI_WAIT_FOREVER)
                jiffies = MAX_SCHEDULE_TIMEOUT;
        else
                jiffies = msecs_to_jiffies(timeout);

        ret = down_timeout(sem, jiffies);
        if (ret)
                status = AE_TIME;

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}

#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{
#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, sizeof(line_buf));

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#endif

        return 0;
}
#endif                          /* ACPI_FUTURE_USAGE */

acpi_status acpi_os_signal(u32 function, void *info)
{
        switch (function) {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                /*
                 * AML Breakpoint
                 * ACPI spec. says to treat it as a NOP unless
                 * you are debugging.  So if/when we integrate
                 * AML debugger into the kernel debugger its
                 * hook will go here.  But until then it is
                 * not useful to print anything on breakpoints.
                 */
                break;
        default:
                break;
        }

        return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN - 1;

        if (!str || !*str)
                return 0;

        for (; count-- && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

#define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
#define OSI_STRING_ENTRIES_MAX 16       /* arbitrary */

struct osi_setup_entry {
        char string[OSI_STRING_LENGTH_MAX];
        bool enable;
};

static struct osi_setup_entry
                osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
        {"Module Device", true},
        {"Processor Device", true},
        {"3.0 _SCP Extensions", true},
        {"Processor Aggregator Device", true},
};

void __init acpi_osi_setup(char *str)
{
        struct osi_setup_entry *osi;
        bool enable = true;
        int i;

        if (!acpi_gbl_create_osi_method)
                return;

        if (str == NULL || *str == '\0') {
                printk(KERN_INFO PREFIX "_OSI method disabled\n");
                acpi_gbl_create_osi_method = FALSE;
                return;
        }

        if (*str == '!') {
                str++;
                if (*str == '\0') {
                        osi_linux.default_disabling = 1;
                        return;
                } else if (*str == '*') {
                        acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
                        for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
                                osi = &osi_setup_entries[i];
                                osi->enable = false;
                        }
                        return;
                }
                enable = false;
        }

        for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
                osi = &osi_setup_entries[i];
                if (!strcmp(osi->string, str)) {
                        osi->enable = enable;
                        break;
                } else if (osi->string[0] == '\0') {
                        osi->enable = enable;
                        strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
                        break;
                }
        }
}

static void __init set_osi_linux(unsigned int enable)
{
        if (osi_linux.enable != enable)
                osi_linux.enable = enable;

        if (osi_linux.enable)
                acpi_osi_setup("Linux");
        else
                acpi_osi_setup("!Linux");
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
        osi_linux.cmdline = 1;  /* cmdline sets the default and overrides DMI */
        osi_linux.dmi = 0;
        set_osi_linux(enable);
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
        printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

        if (enable == -1)
                return;

        osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
        set_osi_linux(enable);
}

/*
 * Modify the list of "OS Interfaces" reported to the BIOS via _OSI:
 *
 * an empty string disables _OSI entirely;
 * a string starting with '!' removes that string;
 * any other string is added to the list, augmenting the built-in strings.
 */
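/*
 * Examples (kernel command line):
 *
 *      acpi_osi=                  disable the _OSI method entirely
 *      acpi_osi=!                 disable all built-in OS vendor strings
 *      acpi_osi=!*                remove all strings, built-in and added
 *      acpi_osi=!Linux            remove the "Linux" string
 *      acpi_osi="Windows 2006"    answer TRUE to the quoted string
 */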
static void __init acpi_osi_setup_late(void)
{
        struct osi_setup_entry *osi;
        char *str;
        int i;
        acpi_status status;

        if (osi_linux.default_disabling) {
                status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);

                if (ACPI_SUCCESS(status))
                        printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
        }

        for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
                osi = &osi_setup_entries[i];
                str = osi->string;

                if (*str == '\0')
                        break;
                if (osi->enable) {
                        status = acpi_install_interface(str);

                        if (ACPI_SUCCESS(status))
                                printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
                } else {
                        status = acpi_remove_interface(str);

                        if (ACPI_SUCCESS(status))
                                printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
                }
        }
}

static int __init osi_setup(char *str)
{
        if (str && !strcmp("Linux", str))
                acpi_cmdline_osi_linux(1);
        else if (str && !strcmp("!Linux", str))
                acpi_cmdline_osi_linux(0);
        else
                acpi_osi_setup(str);

        return 1;
}

__setup("acpi_osi=", osi_setup);

/*
 * Disable the auto-serialization of named-object creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
        acpi_gbl_auto_serialize_methods = FALSE;
        pr_info("ACPI: auto-serialization disabled\n");

        return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further drivers trying to access the resources will not load
 *   - lax              (1)
 *     -> further drivers trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
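/*
 * Example: booting with "acpi_enforce_resources=lax" selects the
 * warn-but-allow behavior described above.
 */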
1596 #define ENFORCE_RESOURCES_STRICT 2
1597 #define ENFORCE_RESOURCES_LAX    1
1598 #define ENFORCE_RESOURCES_NO     0
1599
1600 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1601
1602 static int __init acpi_enforce_resources_setup(char *str)
1603 {
1604         if (str == NULL || *str == '\0')
1605                 return 0;
1606
1607         if (!strcmp("strict", str))
1608                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1609         else if (!strcmp("lax", str))
1610                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1611         else if (!strcmp("no", str))
1612                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1613
1614         return 1;
1615 }
1616
1617 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1618
1619 /* Check for resource conflicts between ACPI OperationRegions and native
1620  * drivers */
1621 int acpi_check_resource_conflict(const struct resource *res)
1622 {
1623         acpi_adr_space_type space_id;
1624         acpi_size length;
1625         u8 warn = 0;
1626         int clash = 0;
1627
1628         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1629                 return 0;
1630         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1631                 return 0;
1632
1633         if (res->flags & IORESOURCE_IO)
1634                 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1635         else
1636                 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1637
1638         length = resource_size(res);
1639         if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1640                 warn = 1;
1641         clash = acpi_check_address_range(space_id, res->start, length, warn);
1642
1643         if (clash) {
1644                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1645                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1646                                 printk(KERN_NOTICE "ACPI: This conflict may"
1647                                        " cause random problems and system"
1648                                        " instability\n");
1649                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1650                                " for this device, you should use it instead of"
1651                                " the native driver\n");
1652                 }
1653                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1654                         return -EBUSY;
1655         }
1656         return 0;
1657 }
1658 EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end   = start + n - 1,
		.name  = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
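
/*
 * Illustrative sketch, not part of this file: acpi_check_region() is
 * the I/O-port convenience wrapper around the same check.  A hardware
 * monitoring driver, say, might guard a two-byte port window before
 * probing it (the address and name are made up):
 */
#if 0
	/* inside a driver's probe path: */
	if (acpi_check_region(0x295, 2, "example_hwmon"))
		return -EBUSY;
#endif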

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
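
/*
 * Illustrative sketch, not part of this file: a driver that shares a
 * register block with AML might only take a shortcut when strict
 * checking guarantees that a conflicting device would not have bound
 * at all ("use_direct_access" is hypothetical):
 */
#if 0
	if (acpi_resources_are_enforced())
		use_direct_access = true;
#endif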

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * lockp is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}
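
/*
 * Illustrative sketch, not part of this file: the expected caller
 * pattern, as ACPICA uses it ("lock" stands for a previously created
 * acpi_spinlock):
 */
#if 0
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(lock);
	/* ... touch data shared with interrupt context ... */
	acpi_os_release_lock(lock, flags);
#endif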

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  cache       - Handle to cache object
 *              object      - The object to be released
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif
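
/*
 * Illustrative sketch, not part of this file: the lifecycle ACPICA
 * drives through the helpers above.  With ACPI_USE_LOCAL_CACHE unset,
 * acpi_cache_t is a struct kmem_cache, so the allocation side (which is
 * not defined here) boils down to kmem_cache_zalloc().  The cache name
 * and object size are made up.
 */
#if 0
static void example_cache_lifecycle(void)
{
	acpi_cache_t *cache;
	void *obj;

	if (ACPI_FAILURE(acpi_os_create_cache("example", 64, 16, &cache)))
		return;

	obj = kmem_cache_zalloc(cache, GFP_KERNEL);	/* allocate */
	if (obj)
		acpi_os_release_object(cache, obj);	/* give it back */

	acpi_os_purge_cache(cache);		/* free cached objects */
	acpi_os_delete_cache(cache);		/* destroy the cache itself */
}
#endif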

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);
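
/*
 * Illustrative only, not part of this file: both switches above are
 * debugging aids passed on the kernel command line, e.g.
 *
 *	acpi_no_static_ssdt acpica_no_return_repair
 *
 * The first keeps SSDTs listed in the RSDT/XSDT from being installed at
 * table-load time; the second turns off ACPICA's automatic repair of
 * out-of-spec return values from predefined methods.
 */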

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	acpi_reserve_resources();
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
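
/*
 * Illustrative sketch, not part of this file: how a hypervisor layer
 * can hook the sleep path (upstream, the Xen guest code registers its
 * handler through acpi_os_set_prepare_sleep()).  Returning a positive
 * value maps to AE_CTRL_SKIP above, telling ACPICA to skip its own PM1
 * control writes; "forward_s3_to_hypervisor" is hypothetical.
 */
#if 0
static int example_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)
{
	if (sleep_state == ACPI_STATE_S3 &&
	    forward_s3_to_hypervisor(pm1a_ctrl, pm1b_ctrl) == 0)
		return 1;	/* handled elsewhere, skip register writes */

	return 0;		/* fall through to ACPICA's own writes */
}

	/* then, from the consumer's init path: */
	acpi_os_set_prepare_sleep(example_prepare_sleep);
#endif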

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
					u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}
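
/*
 * The extended variant mirrors acpi_os_prepare_sleep() but covers the
 * ACPI 5.0 hardware-reduced sleep path, where entry to the sleep state
 * goes through the FADT sleep control/status registers rather than the
 * PM1 control registers.
 */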