1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23  *
24  */
25
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/highmem.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/kmod.h>
34 #include <linux/delay.h>
35 #include <linux/workqueue.h>
36 #include <linux/nmi.h>
37 #include <linux/acpi.h>
38 #include <linux/efi.h>
39 #include <linux/ioport.h>
40 #include <linux/list.h>
41 #include <linux/jiffies.h>
42 #include <linux/semaphore.h>
43
44 #include <asm/io.h>
45 #include <linux/uaccess.h>
46 #include <linux/io-64-nonatomic-lo-hi.h>
47
48 #include "internal.h"
49
50 #define _COMPONENT              ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
52
53 struct acpi_os_dpc {
54         acpi_osd_exec_callback function;
55         void *context;
56         struct work_struct work;
57 };
58
59 #ifdef ENABLE_DEBUGGER
60 #include <linux/kdb.h>
61
62 /* stuff for debugger support */
63 int acpi_in_debugger;
64 EXPORT_SYMBOL(acpi_in_debugger);
65 #endif                          /*ENABLE_DEBUGGER */
66
67 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
68                                       u32 pm1b_ctrl);
69 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
70                                       u32 val_b);
71
72 static acpi_osd_handler acpi_irq_handler;
73 static void *acpi_irq_context;
74 static struct workqueue_struct *kacpid_wq;
75 static struct workqueue_struct *kacpi_notify_wq;
76 static struct workqueue_struct *kacpi_hotplug_wq;
77 static bool acpi_os_initialized;
78 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
79 bool acpi_permanent_mmap = false;
80
81 /*
82  * This list of permanent mappings is for memory that may be accessed from
83  * interrupt context, where we can't do the ioremap().
84  */
85 struct acpi_ioremap {
86         struct list_head list;
87         void __iomem *virt;
88         acpi_physical_address phys;
89         acpi_size size;
90         unsigned long refcount;
91 };
92
93 static LIST_HEAD(acpi_ioremaps);
94 static DEFINE_MUTEX(acpi_ioremap_lock);
95
96 static void __init acpi_request_region (struct acpi_generic_address *gas,
97         unsigned int length, char *desc)
98 {
99         u64 addr;
100
101         /* Handle possible alignment issues */
102         memcpy(&addr, &gas->address, sizeof(addr));
103         if (!addr || !length)
104                 return;
105
106         /* Resources are never freed */
107         if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
108                 request_region(addr, length, desc);
109         else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
110                 request_mem_region(addr, length, desc);
111 }
112
113 static int __init acpi_reserve_resources(void)
114 {
115         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
116                 "ACPI PM1a_EVT_BLK");
117
118         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
119                 "ACPI PM1b_EVT_BLK");
120
121         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
122                 "ACPI PM1a_CNT_BLK");
123
124         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
125                 "ACPI PM1b_CNT_BLK");
126
127         if (acpi_gbl_FADT.pm_timer_length == 4)
128                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
129
130         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
131                 "ACPI PM2_CNT_BLK");
132
133         /* Length of GPE blocks must be a multiple of 2 */
134
135         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
136                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
137                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
138
139         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
140                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
141                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
142
143         return 0;
144 }
145 fs_initcall_sync(acpi_reserve_resources);
146
147 void acpi_os_printf(const char *fmt, ...)
148 {
149         va_list args;
150         va_start(args, fmt);
151         acpi_os_vprintf(fmt, args);
152         va_end(args);
153 }
154 EXPORT_SYMBOL(acpi_os_printf);
155
156 void acpi_os_vprintf(const char *fmt, va_list args)
157 {
158         static char buffer[512];
159
160         vsprintf(buffer, fmt, args);
161
162 #ifdef ENABLE_DEBUGGER
163         if (acpi_in_debugger) {
164                 kdb_printf("%s", buffer);
165         } else {
166                 if (printk_get_level(buffer))
167                         printk("%s", buffer);
168                 else
169                         printk(KERN_CONT "%s", buffer);
170         }
171 #else
172         if (acpi_debugger_write_log(buffer) < 0) {
173                 if (printk_get_level(buffer))
174                         printk("%s", buffer);
175                 else
176                         printk(KERN_CONT "%s", buffer);
177         }
178 #endif
179 }
180
181 #ifdef CONFIG_KEXEC
182 static unsigned long acpi_rsdp;
183 static int __init setup_acpi_rsdp(char *arg)
184 {
185         return kstrtoul(arg, 16, &acpi_rsdp);
186 }
187 early_param("acpi_rsdp", setup_acpi_rsdp);
188 #endif
189
190 acpi_physical_address __init acpi_os_get_root_pointer(void)
191 {
192         acpi_physical_address pa = 0;
193
194 #ifdef CONFIG_KEXEC
195         if (acpi_rsdp)
196                 return acpi_rsdp;
197 #endif
198
199         if (efi_enabled(EFI_CONFIG_TABLES)) {
200                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
201                         return efi.acpi20;
202                 if (efi.acpi != EFI_INVALID_TABLE_ADDR)
203                         return efi.acpi;
204                 pr_err(PREFIX "System description tables not found\n");
205         } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
206                 acpi_find_root_pointer(&pa);
207         }
208
209         return pa;
210 }
211
212 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
213 static struct acpi_ioremap *
214 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
215 {
216         struct acpi_ioremap *map;
217
218         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
219                 if (map->phys <= phys &&
220                     phys + size <= map->phys + map->size)
221                         return map;
222
223         return NULL;
224 }
225
226 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
227 static void __iomem *
228 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
229 {
230         struct acpi_ioremap *map;
231
232         map = acpi_map_lookup(phys, size);
233         if (map)
234                 return map->virt + (phys - map->phys);
235
236         return NULL;
237 }
238
239 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
240 {
241         struct acpi_ioremap *map;
242         void __iomem *virt = NULL;
243
244         mutex_lock(&acpi_ioremap_lock);
245         map = acpi_map_lookup(phys, size);
246         if (map) {
247                 virt = map->virt + (phys - map->phys);
248                 map->refcount++;
249         }
250         mutex_unlock(&acpi_ioremap_lock);
251         return virt;
252 }
253 EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
254
255 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
256 static struct acpi_ioremap *
257 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
258 {
259         struct acpi_ioremap *map;
260
261         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
262                 if (map->virt <= virt &&
263                     virt + size <= map->virt + map->size)
264                         return map;
265
266         return NULL;
267 }
268
269 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
270 /* ioremap will take care of cache attributes */
271 #define should_use_kmap(pfn)   0
272 #else
273 #define should_use_kmap(pfn)   page_is_ram(pfn)
274 #endif
275
276 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
277 {
278         unsigned long pfn;
279
280         pfn = pg_off >> PAGE_SHIFT;
281         if (should_use_kmap(pfn)) {
282                 if (pg_sz > PAGE_SIZE)
283                         return NULL;
284                 return (void __iomem __force *)kmap(pfn_to_page(pfn));
285         } else
286                 return acpi_os_ioremap(pg_off, pg_sz);
287 }
288
289 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
290 {
291         unsigned long pfn;
292
293         pfn = pg_off >> PAGE_SHIFT;
294         if (should_use_kmap(pfn))
295                 kunmap(pfn_to_page(pfn));
296         else
297                 iounmap(vaddr);
298 }
299
300 /**
301  * acpi_os_map_iomem - Get a virtual address for a given physical address range.
302  * @phys: Start of the physical address range to map.
303  * @size: Size of the physical address range to map.
304  *
305  * Look up the given physical address range in the list of existing ACPI memory
306  * mappings.  If found, get a reference to it and return a pointer to it (its
307  * virtual address).  If not found, map it, add it to that list and return a
308  * pointer to it.
309  *
310  * During early init (when acpi_permanent_mmap has not been set yet) this
311  * routine simply calls __acpi_map_table() to get the job done.
312  */
313 void __iomem *__ref
314 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
315 {
316         struct acpi_ioremap *map;
317         void __iomem *virt;
318         acpi_physical_address pg_off;
319         acpi_size pg_sz;
320
321         if (phys > ULONG_MAX) {
322                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
323                 return NULL;
324         }
325
326         if (!acpi_permanent_mmap)
327                 return __acpi_map_table((unsigned long)phys, size);
328
329         mutex_lock(&acpi_ioremap_lock);
330         /* Check if there's a suitable mapping already. */
331         map = acpi_map_lookup(phys, size);
332         if (map) {
333                 map->refcount++;
334                 goto out;
335         }
336
337         map = kzalloc(sizeof(*map), GFP_KERNEL);
338         if (!map) {
339                 mutex_unlock(&acpi_ioremap_lock);
340                 return NULL;
341         }
342
343         pg_off = round_down(phys, PAGE_SIZE);
344         pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
345         virt = acpi_map(pg_off, pg_sz);
346         if (!virt) {
347                 mutex_unlock(&acpi_ioremap_lock);
348                 kfree(map);
349                 return NULL;
350         }
351
352         INIT_LIST_HEAD(&map->list);
353         map->virt = virt;
354         map->phys = pg_off;
355         map->size = pg_sz;
356         map->refcount = 1;
357
358         list_add_tail_rcu(&map->list, &acpi_ioremaps);
359
360 out:
361         mutex_unlock(&acpi_ioremap_lock);
362         return map->virt + (phys - map->phys);
363 }
364 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
365
366 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
367 {
368         return (void *)acpi_os_map_iomem(phys, size);
369 }
370 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
371
372 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
373 {
374         if (!--map->refcount)
375                 list_del_rcu(&map->list);
376 }
377
378 static void acpi_os_map_cleanup(struct acpi_ioremap *map)
379 {
380         if (!map->refcount) {
381                 synchronize_rcu_expedited();
382                 acpi_unmap(map->phys, map->virt);
383                 kfree(map);
384         }
385 }
386
387 /**
388  * acpi_os_unmap_iomem - Drop a memory mapping reference.
389  * @virt: Start of the address range to drop a reference to.
390  * @size: Size of the address range to drop a reference to.
391  *
392  * Look up the given virtual address range in the list of existing ACPI memory
393  * mappings, drop a reference to it and unmap it if there are no more active
394  * references to it.
395  *
396  * During early init (when acpi_permanent_mmap has not been set yet) this
397  * routine simply calls __acpi_unmap_table() to get the job done.  Since
398  * __acpi_unmap_table() is an __init function, the __ref annotation is needed
399  * here.
400  */
401 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
402 {
403         struct acpi_ioremap *map;
404
405         if (!acpi_permanent_mmap) {
406                 __acpi_unmap_table(virt, size);
407                 return;
408         }
409
410         mutex_lock(&acpi_ioremap_lock);
411         map = acpi_map_lookup_virt(virt, size);
412         if (!map) {
413                 mutex_unlock(&acpi_ioremap_lock);
414                 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
415                 return;
416         }
417         acpi_os_drop_map_ref(map);
418         mutex_unlock(&acpi_ioremap_lock);
419
420         acpi_os_map_cleanup(map);
421 }
422 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
423
424 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
425 {
426         return acpi_os_unmap_iomem((void __iomem *)virt, size);
427 }
428 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
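
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * the usual pairing of acpi_os_map_iomem() and acpi_os_unmap_iomem()
 * described in the comments above.  The physical address and size are
 * made-up placeholders; a real caller would take them from a firmware
 * table or a GAS structure.
 */
static void __maybe_unused acpi_osl_example_map_unmap(void)
{
        acpi_physical_address phys = 0xfed40000;       /* hypothetical */
        acpi_size size = 8;
        void __iomem *virt;
        u32 val;

        virt = acpi_os_map_iomem(phys, size);   /* takes a reference */
        if (!virt)
                return;

        val = readl(virt);                      /* access through the mapping */
        pr_debug(PREFIX "example read 0x%x\n", val);

        acpi_os_unmap_iomem(virt, size);        /* drops it; unmapped at zero */
}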
429
430 int acpi_os_map_generic_address(struct acpi_generic_address *gas)
431 {
432         u64 addr;
433         void __iomem *virt;
434
435         if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
436                 return 0;
437
438         /* Handle possible alignment issues */
439         memcpy(&addr, &gas->address, sizeof(addr));
440         if (!addr || !gas->bit_width)
441                 return -EINVAL;
442
443         virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
444         if (!virt)
445                 return -EIO;
446
447         return 0;
448 }
449 EXPORT_SYMBOL(acpi_os_map_generic_address);
450
451 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
452 {
453         u64 addr;
454         struct acpi_ioremap *map;
455
456         if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
457                 return;
458
459         /* Handle possible alignment issues */
460         memcpy(&addr, &gas->address, sizeof(addr));
461         if (!addr || !gas->bit_width)
462                 return;
463
464         mutex_lock(&acpi_ioremap_lock);
465         map = acpi_map_lookup(addr, gas->bit_width / 8);
466         if (!map) {
467                 mutex_unlock(&acpi_ioremap_lock);
468                 return;
469         }
470         acpi_os_drop_map_ref(map);
471         mutex_unlock(&acpi_ioremap_lock);
472
473         acpi_os_map_cleanup(map);
474 }
475 EXPORT_SYMBOL(acpi_os_unmap_generic_address);
476
477 #ifdef ACPI_FUTURE_USAGE
478 acpi_status
479 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
480 {
481         if (!phys || !virt)
482                 return AE_BAD_PARAMETER;
483
484         *phys = virt_to_phys(virt);
485
486         return AE_OK;
487 }
488 #endif
489
490 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
491 static bool acpi_rev_override;
492
493 int __init acpi_rev_override_setup(char *str)
494 {
495         acpi_rev_override = true;
496         return 1;
497 }
498 __setup("acpi_rev_override", acpi_rev_override_setup);
499 #else
500 #define acpi_rev_override       false
501 #endif
502
503 #define ACPI_MAX_OVERRIDE_LEN 100
504
505 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
506
507 acpi_status
508 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
509                             acpi_string *new_val)
510 {
511         if (!init_val || !new_val)
512                 return AE_BAD_PARAMETER;
513
514         *new_val = NULL;
515         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
516                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
517                        acpi_os_name);
518                 *new_val = acpi_os_name;
519         }
520
521         if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
522                 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
523                 *new_val = (char *)5;
524         }
525
526         return AE_OK;
527 }
528
529 static irqreturn_t acpi_irq(int irq, void *dev_id)
530 {
531         u32 handled;
532
533         handled = (*acpi_irq_handler) (acpi_irq_context);
534
535         if (handled) {
536                 acpi_irq_handled++;
537                 return IRQ_HANDLED;
538         } else {
539                 acpi_irq_not_handled++;
540                 return IRQ_NONE;
541         }
542 }
543
544 acpi_status
545 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
546                                   void *context)
547 {
548         unsigned int irq;
549
550         acpi_irq_stats_init();
551
552         /*
553          * ACPI interrupts different from the SCI in our copy of the FADT are
554          * not supported.
555          */
556         if (gsi != acpi_gbl_FADT.sci_interrupt)
557                 return AE_BAD_PARAMETER;
558
559         if (acpi_irq_handler)
560                 return AE_ALREADY_ACQUIRED;
561
562         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
563                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
564                        gsi);
565                 return AE_OK;
566         }
567
568         acpi_irq_handler = handler;
569         acpi_irq_context = context;
570         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
571                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
572                 acpi_irq_handler = NULL;
573                 return AE_NOT_ACQUIRED;
574         }
575         acpi_sci_irq = irq;
576
577         return AE_OK;
578 }
579
580 acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
581 {
582         if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
583                 return AE_BAD_PARAMETER;
584
585         free_irq(acpi_sci_irq, acpi_irq);
586         acpi_irq_handler = NULL;
587         acpi_sci_irq = INVALID_ACPI_IRQ;
588
589         return AE_OK;
590 }
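
/*
 * Illustrative sketch (editor's addition): the registration pattern the
 * two functions above implement.  The handler below is a hypothetical
 * stub; the real caller is the ACPICA event code, which installs its SCI
 * handler for acpi_gbl_FADT.sci_interrupt.
 */
static u32 __maybe_unused acpi_osl_example_sci_handler(void *context)
{
        /* Claim the interrupt so the shared IRQ core treats it as handled. */
        return ACPI_INTERRUPT_HANDLED;
}

static void __maybe_unused acpi_osl_example_hook_sci(void)
{
        acpi_status status;

        status = acpi_os_install_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                                   acpi_osl_example_sci_handler,
                                                   NULL);
        if (ACPI_SUCCESS(status))
                acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                                 acpi_osl_example_sci_handler);
}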
591
592 /*
593  * Running in interpreter thread context, safe to sleep
594  */
595
596 void acpi_os_sleep(u64 ms)
597 {
598         msleep(ms);
599 }
600
601 void acpi_os_stall(u32 us)
602 {
603         while (us) {
604                 u32 delay = 1000;
605
606                 if (delay > us)
607                         delay = us;
608                 udelay(delay);
609                 touch_nmi_watchdog();
610                 us -= delay;
611         }
612 }
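
/*
 * Illustrative sketch (editor's addition): the split the two helpers
 * above encode.  AML Sleep() maps to acpi_os_sleep(), which runs in
 * process context and may schedule; AML Stall() maps to acpi_os_stall(),
 * which busy-waits and is only meant for short, sub-millisecond delays.
 */
static void __maybe_unused acpi_osl_example_delays(void)
{
        acpi_os_sleep(10);      /* 10 ms; may sleep */
        acpi_os_stall(50);      /* 50 us busy-wait; never sleeps */
}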
613
614 /*
615  * Support ACPI 3.0 AML Timer operand
616  * Returns 64-bit free-running, monotonically increasing timer
617  * with 100ns granularity
618  */
619 u64 acpi_os_get_timer(void)
620 {
621         u64 time_ns = ktime_to_ns(ktime_get());
622         do_div(time_ns, 100);
623         return time_ns;
624 }
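
/*
 * Illustrative sketch (editor's addition): converting the 100ns-granular
 * value returned above into microseconds for a simple elapsed-time check.
 */
static void __maybe_unused acpi_osl_example_timer(void)
{
        u64 start = acpi_os_get_timer();
        u64 delta;

        acpi_os_stall(100);                     /* something to time */

        delta = acpi_os_get_timer() - start;
        do_div(delta, 10);                      /* 100ns ticks -> microseconds */
        pr_debug(PREFIX "example elapsed ~%llu us\n",
                 (unsigned long long)delta);
}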
625
626 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
627 {
628         u32 dummy;
629
630         if (!value)
631                 value = &dummy;
632
633         *value = 0;
634         if (width <= 8) {
635                 *(u8 *) value = inb(port);
636         } else if (width <= 16) {
637                 *(u16 *) value = inw(port);
638         } else if (width <= 32) {
639                 *(u32 *) value = inl(port);
640         } else {
641                 BUG();
642         }
643
644         return AE_OK;
645 }
646
647 EXPORT_SYMBOL(acpi_os_read_port);
648
649 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
650 {
651         if (width <= 8) {
652                 outb(value, port);
653         } else if (width <= 16) {
654                 outw(value, port);
655         } else if (width <= 32) {
656                 outl(value, port);
657         } else {
658                 BUG();
659         }
660
661         return AE_OK;
662 }
663
664 EXPORT_SYMBOL(acpi_os_write_port);
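
/*
 * Illustrative sketch (editor's addition): an 8-bit read and write of an
 * I/O port through the two helpers above.  The port number is only a
 * placeholder.
 */
static void __maybe_unused acpi_osl_example_port_io(void)
{
        u32 val = 0;

        acpi_os_read_port(0x80, &val, 8);       /* 8-bit read */
        acpi_os_write_port(0x80, val, 8);       /* 8-bit write */
}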
665
666 int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
667 {
668
669         switch (width) {
670         case 8:
671                 *(u8 *) value = readb(virt_addr);
672                 break;
673         case 16:
674                 *(u16 *) value = readw(virt_addr);
675                 break;
676         case 32:
677                 *(u32 *) value = readl(virt_addr);
678                 break;
679         case 64:
680                 *(u64 *) value = readq(virt_addr);
681                 break;
682         default:
683                 return -EINVAL;
684         }
685
686         return 0;
687 }
688
689 acpi_status
690 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
691 {
692         void __iomem *virt_addr;
693         unsigned int size = width / 8;
694         bool unmap = false;
695         u64 dummy;
696         int error;
697
698         rcu_read_lock();
699         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
700         if (!virt_addr) {
701                 rcu_read_unlock();
702                 virt_addr = acpi_os_ioremap(phys_addr, size);
703                 if (!virt_addr)
704                         return AE_BAD_ADDRESS;
705                 unmap = true;
706         }
707
708         if (!value)
709                 value = &dummy;
710
711         error = acpi_os_read_iomem(virt_addr, value, width);
712         BUG_ON(error);
713
714         if (unmap)
715                 iounmap(virt_addr);
716         else
717                 rcu_read_unlock();
718
719         return AE_OK;
720 }
721
722 acpi_status
723 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
724 {
725         void __iomem *virt_addr;
726         unsigned int size = width / 8;
727         bool unmap = false;
728
729         rcu_read_lock();
730         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
731         if (!virt_addr) {
732                 rcu_read_unlock();
733                 virt_addr = acpi_os_ioremap(phys_addr, size);
734                 if (!virt_addr)
735                         return AE_BAD_ADDRESS;
736                 unmap = true;
737         }
738
739         switch (width) {
740         case 8:
741                 writeb(value, virt_addr);
742                 break;
743         case 16:
744                 writew(value, virt_addr);
745                 break;
746         case 32:
747                 writel(value, virt_addr);
748                 break;
749         case 64:
750                 writeq(value, virt_addr);
751                 break;
752         default:
753                 BUG();
754         }
755
756         if (unmap)
757                 iounmap(virt_addr);
758         else
759                 rcu_read_unlock();
760
761         return AE_OK;
762 }
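
/*
 * Illustrative sketch (editor's addition): a 32-bit read-modify-write of
 * a memory-mapped register through the two helpers above.  The physical
 * address is a made-up placeholder.
 */
static void __maybe_unused acpi_osl_example_mmio_rmw(void)
{
        acpi_physical_address reg = 0xfed40030; /* hypothetical */
        u64 val = 0;

        if (ACPI_SUCCESS(acpi_os_read_memory(reg, &val, 32)))
                acpi_os_write_memory(reg, val | 0x1, 32);
}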
763
764 acpi_status
765 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
766                                u64 *value, u32 width)
767 {
768         int result, size;
769         u32 value32;
770
771         if (!value)
772                 return AE_BAD_PARAMETER;
773
774         switch (width) {
775         case 8:
776                 size = 1;
777                 break;
778         case 16:
779                 size = 2;
780                 break;
781         case 32:
782                 size = 4;
783                 break;
784         default:
785                 return AE_ERROR;
786         }
787
788         result = raw_pci_read(pci_id->segment, pci_id->bus,
789                                 PCI_DEVFN(pci_id->device, pci_id->function),
790                                 reg, size, &value32);
791         *value = value32;
792
793         return (result ? AE_ERROR : AE_OK);
794 }
795
796 acpi_status
797 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
798                                 u64 value, u32 width)
799 {
800         int result, size;
801
802         switch (width) {
803         case 8:
804                 size = 1;
805                 break;
806         case 16:
807                 size = 2;
808                 break;
809         case 32:
810                 size = 4;
811                 break;
812         default:
813                 return AE_ERROR;
814         }
815
816         result = raw_pci_write(pci_id->segment, pci_id->bus,
817                                 PCI_DEVFN(pci_id->device, pci_id->function),
818                                 reg, size, value);
819
820         return (result ? AE_ERROR : AE_OK);
821 }
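
/*
 * Illustrative sketch (editor's addition): reading a device's vendor ID
 * through the helper above.  The segment/bus/device/function values are
 * placeholders for whatever _SEG/_BBN/_ADR evaluation produced.
 */
static void __maybe_unused acpi_osl_example_pci_read(void)
{
        struct acpi_pci_id id = { .segment = 0, .bus = 0,
                                  .device = 0, .function = 0 };
        u64 vendor = 0;

        if (ACPI_SUCCESS(acpi_os_read_pci_configuration(&id, PCI_VENDOR_ID,
                                                        &vendor, 16)))
                pr_debug(PREFIX "example PCI vendor 0x%04llx\n",
                         (unsigned long long)vendor);
}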
822
823 static void acpi_os_execute_deferred(struct work_struct *work)
824 {
825         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
826
827         dpc->function(dpc->context);
828         kfree(dpc);
829 }
830
831 #ifdef CONFIG_ACPI_DEBUGGER
832 static struct acpi_debugger acpi_debugger;
833 static bool acpi_debugger_initialized;
834
835 int acpi_register_debugger(struct module *owner,
836                            const struct acpi_debugger_ops *ops)
837 {
838         int ret = 0;
839
840         mutex_lock(&acpi_debugger.lock);
841         if (acpi_debugger.ops) {
842                 ret = -EBUSY;
843                 goto err_lock;
844         }
845
846         acpi_debugger.owner = owner;
847         acpi_debugger.ops = ops;
848
849 err_lock:
850         mutex_unlock(&acpi_debugger.lock);
851         return ret;
852 }
853 EXPORT_SYMBOL(acpi_register_debugger);
854
855 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
856 {
857         mutex_lock(&acpi_debugger.lock);
858         if (ops == acpi_debugger.ops) {
859                 acpi_debugger.ops = NULL;
860                 acpi_debugger.owner = NULL;
861         }
862         mutex_unlock(&acpi_debugger.lock);
863 }
864 EXPORT_SYMBOL(acpi_unregister_debugger);
865
866 int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
867 {
868         int ret;
869         int (*func)(acpi_osd_exec_callback, void *);
870         struct module *owner;
871
872         if (!acpi_debugger_initialized)
873                 return -ENODEV;
874         mutex_lock(&acpi_debugger.lock);
875         if (!acpi_debugger.ops) {
876                 ret = -ENODEV;
877                 goto err_lock;
878         }
879         if (!try_module_get(acpi_debugger.owner)) {
880                 ret = -ENODEV;
881                 goto err_lock;
882         }
883         func = acpi_debugger.ops->create_thread;
884         owner = acpi_debugger.owner;
885         mutex_unlock(&acpi_debugger.lock);
886
887         ret = func(function, context);
888
889         mutex_lock(&acpi_debugger.lock);
890         module_put(owner);
891 err_lock:
892         mutex_unlock(&acpi_debugger.lock);
893         return ret;
894 }
895
896 ssize_t acpi_debugger_write_log(const char *msg)
897 {
898         ssize_t ret;
899         ssize_t (*func)(const char *);
900         struct module *owner;
901
902         if (!acpi_debugger_initialized)
903                 return -ENODEV;
904         mutex_lock(&acpi_debugger.lock);
905         if (!acpi_debugger.ops) {
906                 ret = -ENODEV;
907                 goto err_lock;
908         }
909         if (!try_module_get(acpi_debugger.owner)) {
910                 ret = -ENODEV;
911                 goto err_lock;
912         }
913         func = acpi_debugger.ops->write_log;
914         owner = acpi_debugger.owner;
915         mutex_unlock(&acpi_debugger.lock);
916
917         ret = func(msg);
918
919         mutex_lock(&acpi_debugger.lock);
920         module_put(owner);
921 err_lock:
922         mutex_unlock(&acpi_debugger.lock);
923         return ret;
924 }
925
926 ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
927 {
928         ssize_t ret;
929         ssize_t (*func)(char *, size_t);
930         struct module *owner;
931
932         if (!acpi_debugger_initialized)
933                 return -ENODEV;
934         mutex_lock(&acpi_debugger.lock);
935         if (!acpi_debugger.ops) {
936                 ret = -ENODEV;
937                 goto err_lock;
938         }
939         if (!try_module_get(acpi_debugger.owner)) {
940                 ret = -ENODEV;
941                 goto err_lock;
942         }
943         func = acpi_debugger.ops->read_cmd;
944         owner = acpi_debugger.owner;
945         mutex_unlock(&acpi_debugger.lock);
946
947         ret = func(buffer, buffer_length);
948
949         mutex_lock(&acpi_debugger.lock);
950         module_put(owner);
951 err_lock:
952         mutex_unlock(&acpi_debugger.lock);
953         return ret;
954 }
955
956 int acpi_debugger_wait_command_ready(void)
957 {
958         int ret;
959         int (*func)(bool, char *, size_t);
960         struct module *owner;
961
962         if (!acpi_debugger_initialized)
963                 return -ENODEV;
964         mutex_lock(&acpi_debugger.lock);
965         if (!acpi_debugger.ops) {
966                 ret = -ENODEV;
967                 goto err_lock;
968         }
969         if (!try_module_get(acpi_debugger.owner)) {
970                 ret = -ENODEV;
971                 goto err_lock;
972         }
973         func = acpi_debugger.ops->wait_command_ready;
974         owner = acpi_debugger.owner;
975         mutex_unlock(&acpi_debugger.lock);
976
977         ret = func(acpi_gbl_method_executing,
978                    acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);
979
980         mutex_lock(&acpi_debugger.lock);
981         module_put(owner);
982 err_lock:
983         mutex_unlock(&acpi_debugger.lock);
984         return ret;
985 }
986
987 int acpi_debugger_notify_command_complete(void)
988 {
989         int ret;
990         int (*func)(void);
991         struct module *owner;
992
993         if (!acpi_debugger_initialized)
994                 return -ENODEV;
995         mutex_lock(&acpi_debugger.lock);
996         if (!acpi_debugger.ops) {
997                 ret = -ENODEV;
998                 goto err_lock;
999         }
1000         if (!try_module_get(acpi_debugger.owner)) {
1001                 ret = -ENODEV;
1002                 goto err_lock;
1003         }
1004         func = acpi_debugger.ops->notify_command_complete;
1005         owner = acpi_debugger.owner;
1006         mutex_unlock(&acpi_debugger.lock);
1007
1008         ret = func();
1009
1010         mutex_lock(&acpi_debugger.lock);
1011         module_put(owner);
1012 err_lock:
1013         mutex_unlock(&acpi_debugger.lock);
1014         return ret;
1015 }
1016
1017 int __init acpi_debugger_init(void)
1018 {
1019         mutex_init(&acpi_debugger.lock);
1020         acpi_debugger_initialized = true;
1021         return 0;
1022 }
1023 #endif
1024
1025 /*******************************************************************************
1026  *
1027  * FUNCTION:    acpi_os_execute
1028  *
1029  * PARAMETERS:  Type               - Type of the callback
1030  *              Function           - Function to be executed
1031  *              Context            - Function parameters
1032  *
1033  * RETURN:      Status
1034  *
1035  * DESCRIPTION: Depending on type, either queues function for deferred execution or
1036  *              immediately executes function on a separate thread.
1037  *
1038  ******************************************************************************/
1039
1040 acpi_status acpi_os_execute(acpi_execute_type type,
1041                             acpi_osd_exec_callback function, void *context)
1042 {
1043         acpi_status status = AE_OK;
1044         struct acpi_os_dpc *dpc;
1045         struct workqueue_struct *queue;
1046         int ret;
1047         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1048                           "Scheduling function [%p(%p)] for deferred execution.\n",
1049                           function, context));
1050
1051         if (type == OSL_DEBUGGER_MAIN_THREAD) {
1052                 ret = acpi_debugger_create_thread(function, context);
1053                 if (ret) {
1054                         pr_err("Call to kthread_create() failed.\n");
1055                         status = AE_ERROR;
1056                 }
1057                 goto out_thread;
1058         }
1059
1060         /*
1061          * Allocate/initialize DPC structure.  Note that this memory will be
1062          * freed by the callee.  The kernel handles the work_struct list in a
1063          * way that allows us to also free its memory inside the callee.
1064          * Because we may want to schedule several tasks with different
1065          * parameters, we can't use the approach some kernel code uses of
1066          * having a static work_struct.
1067          */
1068
1069         dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
1070         if (!dpc)
1071                 return AE_NO_MEMORY;
1072
1073         dpc->function = function;
1074         dpc->context = context;
1075
1076         /*
1077          * To prevent lockdep from complaining unnecessarily, make sure that
1078          * there is a different static lockdep key for each workqueue by using
1079          * INIT_WORK() for each of them separately.
1080          */
1081         if (type == OSL_NOTIFY_HANDLER) {
1082                 queue = kacpi_notify_wq;
1083                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1084         } else if (type == OSL_GPE_HANDLER) {
1085                 queue = kacpid_wq;
1086                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1087         } else {
1088                 pr_err("Unsupported os_execute type %d.\n", type);
1089                 status = AE_ERROR;
1090         }
1091
1092         if (ACPI_FAILURE(status))
1093                 goto err_workqueue;
1094
1095         /*
1096          * On some machines, a software-initiated SMI causes corruption unless
1097          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
1098          * typically it's done in GPE-related methods that are run via
1099          * workqueues, so we can avoid the known corruption cases by always
1100          * queueing on CPU 0.
1101          */
1102         ret = queue_work_on(0, queue, &dpc->work);
1103         if (!ret) {
1104                 printk(KERN_ERR PREFIX
1105                           "Call to queue_work() failed.\n");
1106                 status = AE_ERROR;
1107         }
1108 err_workqueue:
1109         if (ACPI_FAILURE(status))
1110                 kfree(dpc);
1111 out_thread:
1112         return status;
1113 }
1114 EXPORT_SYMBOL(acpi_os_execute);
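
/*
 * Illustrative sketch (editor's addition): deferring a callback through
 * acpi_os_execute() as described in the block comment above.  The
 * callback and its context are hypothetical; the real callers are the
 * ACPICA GPE and Notify dispatch paths.
 */
static void __maybe_unused acpi_osl_example_deferred_fn(void *context)
{
        pr_debug(PREFIX "example deferred work ran, context %p\n", context);
}

static void __maybe_unused acpi_osl_example_defer(void)
{
        acpi_status status;

        status = acpi_os_execute(OSL_NOTIFY_HANDLER,
                                 acpi_osl_example_deferred_fn, NULL);
        if (ACPI_FAILURE(status))
                pr_debug(PREFIX "example work could not be queued\n");
}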
1115
1116 void acpi_os_wait_events_complete(void)
1117 {
1118         /*
1119          * Make sure the GPE handler or the fixed event handler is not used
1120          * on another CPU after removal.
1121          */
1122         if (acpi_sci_irq_valid())
1123                 synchronize_hardirq(acpi_sci_irq);
1124         flush_workqueue(kacpid_wq);
1125         flush_workqueue(kacpi_notify_wq);
1126 }
1127
1128 struct acpi_hp_work {
1129         struct work_struct work;
1130         struct acpi_device *adev;
1131         u32 src;
1132 };
1133
1134 static void acpi_hotplug_work_fn(struct work_struct *work)
1135 {
1136         struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
1137
1138         acpi_os_wait_events_complete();
1139         acpi_device_hotplug(hpw->adev, hpw->src);
1140         kfree(hpw);
1141 }
1142
1143 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
1144 {
1145         struct acpi_hp_work *hpw;
1146
1147         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1148                   "Scheduling hotplug event (%p, %u) for deferred execution.\n",
1149                   adev, src));
1150
1151         hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
1152         if (!hpw)
1153                 return AE_NO_MEMORY;
1154
1155         INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
1156         hpw->adev = adev;
1157         hpw->src = src;
1158         /*
1159          * We can't run hotplug code in kacpid_wq/kacpi_notify_wq etc., because
1160          * the hotplug code may call driver .remove() functions, which may
1161          * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
1162          * these workqueues.
1163          */
1164         if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
1165                 kfree(hpw);
1166                 return AE_ERROR;
1167         }
1168         return AE_OK;
1169 }
1170
1171 bool acpi_queue_hotplug_work(struct work_struct *work)
1172 {
1173         return queue_work(kacpi_hotplug_wq, work);
1174 }
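
/*
 * Illustrative sketch (editor's addition): queuing work on the dedicated
 * ordered hotplug workqueue via acpi_queue_hotplug_work(), which exists
 * for the reason given in the comment in acpi_hotplug_schedule() above.
 * The work item and handler are hypothetical.
 */
static void __maybe_unused acpi_osl_example_hp_fn(struct work_struct *work)
{
        pr_debug(PREFIX "example hotplug work ran\n");
}

static DECLARE_WORK(acpi_osl_example_hp_work, acpi_osl_example_hp_fn);

static void __maybe_unused acpi_osl_example_queue_hotplug(void)
{
        if (!acpi_queue_hotplug_work(&acpi_osl_example_hp_work))
                pr_debug(PREFIX "example hotplug work was already queued\n");
}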
1175
1176 acpi_status
1177 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1178 {
1179         struct semaphore *sem = NULL;
1180
1181         sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1182         if (!sem)
1183                 return AE_NO_MEMORY;
1184
1185         sema_init(sem, initial_units);
1186
1187         *handle = (acpi_handle *) sem;
1188
1189         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1190                           *handle, initial_units));
1191
1192         return AE_OK;
1193 }
1194
1195 /*
1196  * TODO: A better way to delete semaphores?  Linux doesn't have a
1197  * 'delete_semaphore()' function -- may result in an invalid
1198  * pointer dereference for non-synchronized consumers.  Should
1199  * we at least check for blocked threads and signal/cancel them?
1200  */
1201
1202 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1203 {
1204         struct semaphore *sem = (struct semaphore *)handle;
1205
1206         if (!sem)
1207                 return AE_BAD_PARAMETER;
1208
1209         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
1210
1211         BUG_ON(!list_empty(&sem->wait_list));
1212         kfree(sem);
1213         sem = NULL;
1214
1215         return AE_OK;
1216 }
1217
1218 /*
1219  * TODO: Support for units > 1?
1220  */
1221 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1222 {
1223         acpi_status status = AE_OK;
1224         struct semaphore *sem = (struct semaphore *)handle;
1225         long jiffies;
1226         int ret = 0;
1227
1228         if (!acpi_os_initialized)
1229                 return AE_OK;
1230
1231         if (!sem || (units < 1))
1232                 return AE_BAD_PARAMETER;
1233
1234         if (units > 1)
1235                 return AE_SUPPORT;
1236
1237         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1238                           handle, units, timeout));
1239
1240         if (timeout == ACPI_WAIT_FOREVER)
1241                 jiffies = MAX_SCHEDULE_TIMEOUT;
1242         else
1243                 jiffies = msecs_to_jiffies(timeout);
1244
1245         ret = down_timeout(sem, jiffies);
1246         if (ret)
1247                 status = AE_TIME;
1248
1249         if (ACPI_FAILURE(status)) {
1250                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1251                                   "Failed to acquire semaphore[%p|%d|%d], %s",
1252                                   handle, units, timeout,
1253                                   acpi_format_exception(status)));
1254         } else {
1255                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1256                                   "Acquired semaphore[%p|%d|%d]", handle,
1257                                   units, timeout));
1258         }
1259
1260         return status;
1261 }
1262
1263 /*
1264  * TODO: Support for units > 1?
1265  */
1266 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1267 {
1268         struct semaphore *sem = (struct semaphore *)handle;
1269
1270         if (!acpi_os_initialized)
1271                 return AE_OK;
1272
1273         if (!sem || (units < 1))
1274                 return AE_BAD_PARAMETER;
1275
1276         if (units > 1)
1277                 return AE_SUPPORT;
1278
1279         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1280                           units));
1281
1282         up(sem);
1283
1284         return AE_OK;
1285 }
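
/*
 * Illustrative sketch (editor's addition): the create/wait/signal/delete
 * life cycle of an OSL semaphore, i.e. the way the ACPICA core drives the
 * four routines above.  The timeout value is arbitrary.
 */
static void __maybe_unused acpi_osl_example_semaphore(void)
{
        acpi_handle sem;

        if (ACPI_FAILURE(acpi_os_create_semaphore(1, 1, &sem)))
                return;

        if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100)))  /* 100 ms */
                acpi_os_signal_semaphore(sem, 1);

        acpi_os_delete_semaphore(sem);
}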
1286
1287 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
1288 {
1289 #ifdef ENABLE_DEBUGGER
1290         if (acpi_in_debugger) {
1291                 u32 chars;
1292
1293                 kdb_read(buffer, buffer_length);
1294
1295                 /* remove the CR kdb includes */
1296                 chars = strlen(buffer) - 1;
1297                 buffer[chars] = '\0';
1298         }
1299 #else
1300         int ret;
1301
1302         ret = acpi_debugger_read_cmd(buffer, buffer_length);
1303         if (ret < 0)
1304                 return AE_ERROR;
1305         if (bytes_read)
1306                 *bytes_read = ret;
1307 #endif
1308
1309         return AE_OK;
1310 }
1311 EXPORT_SYMBOL(acpi_os_get_line);
1312
1313 acpi_status acpi_os_wait_command_ready(void)
1314 {
1315         int ret;
1316
1317         ret = acpi_debugger_wait_command_ready();
1318         if (ret < 0)
1319                 return AE_ERROR;
1320         return AE_OK;
1321 }
1322
1323 acpi_status acpi_os_notify_command_complete(void)
1324 {
1325         int ret;
1326
1327         ret = acpi_debugger_notify_command_complete();
1328         if (ret < 0)
1329                 return AE_ERROR;
1330         return AE_OK;
1331 }
1332
1333 acpi_status acpi_os_signal(u32 function, void *info)
1334 {
1335         switch (function) {
1336         case ACPI_SIGNAL_FATAL:
1337                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1338                 break;
1339         case ACPI_SIGNAL_BREAKPOINT:
1340                 /*
1341                  * AML Breakpoint
1342                  * The ACPI spec says to treat it as a NOP unless
1343                  * you are debugging.  So if/when we integrate the
1344                  * AML debugger into the kernel debugger, its
1345                  * hook will go here.  But until then it is
1346                  * not useful to print anything on breakpoints.
1347                  */
1348                 break;
1349         default:
1350                 break;
1351         }
1352
1353         return AE_OK;
1354 }
1355
1356 static int __init acpi_os_name_setup(char *str)
1357 {
1358         char *p = acpi_os_name;
1359         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1360
1361         if (!str || !*str)
1362                 return 0;
1363
1364         for (; count-- && *str; str++) {
1365                 if (isalnum(*str) || *str == ' ' || *str == ':')
1366                         *p++ = *str;
1367                 else if (*str == '\'' || *str == '"')
1368                         continue;
1369                 else
1370                         break;
1371         }
1372         *p = 0;
1373
1374         return 1;
1375
1376 }
1377
1378 __setup("acpi_os_name=", acpi_os_name_setup);
1379
1380 /*
1381  * Disable the auto-serialization of named object creation methods.
1382  *
1383  * This feature is enabled by default.  It marks the AML control methods
1384  * that contain the opcodes to create named objects as "Serialized".
1385  */
1386 static int __init acpi_no_auto_serialize_setup(char *str)
1387 {
1388         acpi_gbl_auto_serialize_methods = FALSE;
1389         pr_info("ACPI: auto-serialization disabled\n");
1390
1391         return 1;
1392 }
1393
1394 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1395
1396 /* Check for resource interference between native drivers and ACPI
1397  * OperationRegions (SystemIO and SystemMemory only).
1398  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1399  * in arbitrary AML code and can interfere with legacy drivers.
1400  * acpi_enforce_resources= can be set to:
1401  *
1402  *   - strict (default) (2)
1403  *     -> a driver that later tries to access the resources will not load
1404  *   - lax              (1)
1405  *     -> a driver that later tries to access the resources will load, but
1406  *        you get a warning that something might go wrong...
1407  *
1408  *   - no               (0)
1409  *     -> ACPI OperationRegion resources will not be registered
1410  *
1411  */
1412 #define ENFORCE_RESOURCES_STRICT 2
1413 #define ENFORCE_RESOURCES_LAX    1
1414 #define ENFORCE_RESOURCES_NO     0
1415
1416 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1417
1418 static int __init acpi_enforce_resources_setup(char *str)
1419 {
1420         if (str == NULL || *str == '\0')
1421                 return 0;
1422
1423         if (!strcmp("strict", str))
1424                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1425         else if (!strcmp("lax", str))
1426                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1427         else if (!strcmp("no", str))
1428                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1429
1430         return 1;
1431 }
1432
1433 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1434
1435 /* Check for resource conflicts between ACPI OperationRegions and native
1436  * drivers */
1437 int acpi_check_resource_conflict(const struct resource *res)
1438 {
1439         acpi_adr_space_type space_id;
1440         acpi_size length;
1441         u8 warn = 0;
1442         int clash = 0;
1443
1444         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1445                 return 0;
1446         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1447                 return 0;
1448
1449         if (res->flags & IORESOURCE_IO)
1450                 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1451         else
1452                 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1453
1454         length = resource_size(res);
1455         if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1456                 warn = 1;
1457         clash = acpi_check_address_range(space_id, res->start, length, warn);
1458
1459         if (clash) {
1460                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1461                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1462                                 printk(KERN_NOTICE "ACPI: This conflict may"
1463                                        " cause random problems and system"
1464                                        " instability\n");
1465                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1466                                " for this device, you should use it instead of"
1467                                " the native driver\n");
1468                 }
1469                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1470                         return -EBUSY;
1471         }
1472         return 0;
1473 }
1474 EXPORT_SYMBOL(acpi_check_resource_conflict);
1475
1476 int acpi_check_region(resource_size_t start, resource_size_t n,
1477                       const char *name)
1478 {
1479         struct resource res = {
1480                 .start = start,
1481                 .end   = start + n - 1,
1482                 .name  = name,
1483                 .flags = IORESOURCE_IO,
1484         };
1485
1486         return acpi_check_resource_conflict(&res);
1487 }
1488 EXPORT_SYMBOL(acpi_check_region);
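
/*
 * Illustrative sketch (editor's addition): how a native driver is expected
 * to use the check above before claiming a range that AML may also touch,
 * per the acpi_enforce_resources= policy described earlier.  The port
 * range and name are placeholders.
 */
static int __maybe_unused acpi_osl_example_claim_ports(void)
{
        resource_size_t base = 0x0295;  /* hypothetical Super-I/O ports */
        resource_size_t len = 2;

        if (acpi_check_region(base, len, "example-driver"))
                return -EBUSY;  /* conflicts with an ACPI OperationRegion */

        if (!request_region(base, len, "example-driver"))
                return -EBUSY;  /* claimed by someone else */

        release_region(base, len);
        return 0;
}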
1489
1490 /*
1491  * Let drivers know whether the resource checks are effective
1492  */
1493 int acpi_resources_are_enforced(void)
1494 {
1495         return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1496 }
1497 EXPORT_SYMBOL(acpi_resources_are_enforced);
1498
1499 /*
1500  * Deallocate the memory for a spinlock.
1501  */
1502 void acpi_os_delete_lock(acpi_spinlock handle)
1503 {
1504         ACPI_FREE(handle);
1505 }
1506
1507 /*
1508  * Acquire a spinlock.
1509  *
1510  * lockp is a pointer to the spinlock_t.
1511  */
1512
1513 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1514 {
1515         acpi_cpu_flags flags;
1516         spin_lock_irqsave(lockp, flags);
1517         return flags;
1518 }
1519
1520 /*
1521  * Release a spinlock. See above.
1522  */
1523
1524 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1525 {
1526         spin_unlock_irqrestore(lockp, flags);
1527 }
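
/*
 * Illustrative sketch (editor's addition): the acquire/release pairing
 * ACPICA expects from the two helpers above.  The lock here is a local
 * hypothetical one; ACPICA normally passes in locks it obtained from
 * acpi_os_create_lock().
 */
static void __maybe_unused acpi_osl_example_spinlock(void)
{
        static DEFINE_SPINLOCK(example_lock);
        acpi_cpu_flags flags;

        flags = acpi_os_acquire_lock(&example_lock);
        /* ... critical section, local interrupts disabled ... */
        acpi_os_release_lock(&example_lock, flags);
}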
1528
1529 #ifndef ACPI_USE_LOCAL_CACHE
1530
1531 /*******************************************************************************
1532  *
1533  * FUNCTION:    acpi_os_create_cache
1534  *
1535  * PARAMETERS:  name      - Ascii name for the cache
1536  *              size      - Size of each cached object
1537  *              depth     - Maximum depth of the cache (in objects) <ignored>
1538  *              cache     - Where the new cache object is returned
1539  *
1540  * RETURN:      status
1541  *
1542  * DESCRIPTION: Create a cache object
1543  *
1544  ******************************************************************************/
1545
1546 acpi_status
1547 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1548 {
1549         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1550         if (*cache == NULL)
1551                 return AE_ERROR;
1552         else
1553                 return AE_OK;
1554 }
1555
1556 /*******************************************************************************
1557  *
1558  * FUNCTION:    acpi_os_purge_cache
1559  *
1560  * PARAMETERS:  Cache           - Handle to cache object
1561  *
1562  * RETURN:      Status
1563  *
1564  * DESCRIPTION: Free all objects within the requested cache.
1565  *
1566  ******************************************************************************/
1567
1568 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1569 {
1570         kmem_cache_shrink(cache);
1571         return (AE_OK);
1572 }
1573
1574 /*******************************************************************************
1575  *
1576  * FUNCTION:    acpi_os_delete_cache
1577  *
1578  * PARAMETERS:  Cache           - Handle to cache object
1579  *
1580  * RETURN:      Status
1581  *
1582  * DESCRIPTION: Free all objects within the requested cache and delete the
1583  *              cache object.
1584  *
1585  ******************************************************************************/
1586
1587 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1588 {
1589         kmem_cache_destroy(cache);
1590         return (AE_OK);
1591 }
1592
1593 /*******************************************************************************
1594  *
1595  * FUNCTION:    acpi_os_release_object
1596  *
1597  * PARAMETERS:  Cache       - Handle to cache object
1598  *              Object      - The object to be released
1599  *
1600  * RETURN:      None
1601  *
1602  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1603  *              the object is deleted.
1604  *
1605  ******************************************************************************/
1606
1607 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1608 {
1609         kmem_cache_free(cache, object);
1610         return (AE_OK);
1611 }
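
/*
 * Illustrative sketch (editor's addition): the cache life cycle ACPICA
 * drives through the four wrappers above in this (!ACPI_USE_LOCAL_CACHE)
 * configuration, where the cache is a plain kmem_cache.  The name, object
 * size and depth are placeholders.
 */
static void __maybe_unused acpi_osl_example_cache(void)
{
        acpi_cache_t *cache;
        void *obj;

        if (ACPI_FAILURE(acpi_os_create_cache("Acpi-Example", 64, 16, &cache)))
                return;

        obj = kmem_cache_zalloc(cache, GFP_KERNEL);     /* allocation side */
        if (obj)
                acpi_os_release_object(cache, obj);     /* give it back */

        acpi_os_purge_cache(cache);
        acpi_os_delete_cache(cache);
}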
1612 #endif
1613
1614 static int __init acpi_no_static_ssdt_setup(char *s)
1615 {
1616         acpi_gbl_disable_ssdt_table_install = TRUE;
1617         pr_info("ACPI: static SSDT installation disabled\n");
1618
1619         return 0;
1620 }
1621
1622 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
1623
1624 static int __init acpi_disable_return_repair(char *s)
1625 {
1626         printk(KERN_NOTICE PREFIX
1627                "ACPI: Predefined validation mechanism disabled\n");
1628         acpi_gbl_disable_auto_repair = TRUE;
1629
1630         return 1;
1631 }
1632
1633 __setup("acpica_no_return_repair", acpi_disable_return_repair);
1634
1635 acpi_status __init acpi_os_initialize(void)
1636 {
1637         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1638         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1639         acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
1640         acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
1641         if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
1642                 /*
1643                  * Use acpi_os_map_generic_address to pre-map the reset
1644                  * register if it's in system memory.
1645                  */
1646                 int rv;
1647
1648                 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
1649                 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
1650         }
1651         acpi_os_initialized = true;
1652
1653         return AE_OK;
1654 }
1655
1656 acpi_status __init acpi_os_initialize1(void)
1657 {
1658         kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1659         kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1660         kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
1661         BUG_ON(!kacpid_wq);
1662         BUG_ON(!kacpi_notify_wq);
1663         BUG_ON(!kacpi_hotplug_wq);
1664         acpi_osi_init();
1665         return AE_OK;
1666 }
1667
1668 acpi_status acpi_os_terminate(void)
1669 {
1670         if (acpi_irq_handler) {
1671                 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
1672                                                  acpi_irq_handler);
1673         }
1674
1675         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1676         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
1677         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1678         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1679         if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
1680                 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
1681
1682         destroy_workqueue(kacpid_wq);
1683         destroy_workqueue(kacpi_notify_wq);
1684         destroy_workqueue(kacpi_hotplug_wq);
1685
1686         return AE_OK;
1687 }
1688
1689 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
1690                                   u32 pm1b_control)
1691 {
1692         int rc = 0;
1693         if (__acpi_os_prepare_sleep)
1694                 rc = __acpi_os_prepare_sleep(sleep_state,
1695                                              pm1a_control, pm1b_control);
1696         if (rc < 0)
1697                 return AE_ERROR;
1698         else if (rc > 0)
1699                 return AE_CTRL_TERMINATE;
1700
1701         return AE_OK;
1702 }
1703
1704 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
1705                                u32 pm1a_ctrl, u32 pm1b_ctrl))
1706 {
1707         __acpi_os_prepare_sleep = func;
1708 }
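
/*
 * Illustrative sketch (editor's addition): hooking the sleep preparation
 * path with acpi_os_set_prepare_sleep().  The callback is a hypothetical
 * stub; per acpi_os_prepare_sleep() above, returning 0 lets the normal
 * register writes proceed, a positive value terminates them early, and a
 * negative value aborts the transition.
 */
static int __maybe_unused acpi_osl_example_prepare_sleep(u8 sleep_state,
                                                         u32 pm1a_ctrl,
                                                         u32 pm1b_ctrl)
{
        pr_debug(PREFIX "example prepare for S%d\n", sleep_state);
        return 0;
}

static void __maybe_unused acpi_osl_example_hook_sleep(void)
{
        acpi_os_set_prepare_sleep(acpi_osl_example_prepare_sleep);
}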
1709
1710 #if (ACPI_REDUCED_HARDWARE)
1711 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1712                                   u32 val_b)
1713 {
1714         int rc = 0;
1715         if (__acpi_os_prepare_extended_sleep)
1716                 rc = __acpi_os_prepare_extended_sleep(sleep_state,
1717                                              val_a, val_b);
1718         if (rc < 0)
1719                 return AE_ERROR;
1720         else if (rc > 0)
1721                 return AE_CTRL_TERMINATE;
1722
1723         return AE_OK;
1724 }
1725 #else
1726 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1727                                   u32 val_b)
1728 {
1729         return AE_OK;
1730 }
1731 #endif
1732
1733 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
1734                                u32 val_a, u32 val_b))
1735 {
1736         __acpi_os_prepare_extended_sleep = func;
1737 }
1738
1739 acpi_status acpi_os_enter_sleep(u8 sleep_state,
1740                                 u32 reg_a_value, u32 reg_b_value)
1741 {
1742         acpi_status status;
1743
1744         if (acpi_gbl_reduced_hardware)
1745                 status = acpi_os_prepare_extended_sleep(sleep_state,
1746                                                         reg_a_value,
1747                                                         reg_b_value);
1748         else
1749                 status = acpi_os_prepare_sleep(sleep_state,
1750                                                reg_a_value, reg_b_value);
1751         return status;
1752 }