/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64

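/*
 * Translate a kernel virtual address back to a physical address: addresses
 * in the kernel text mapping (at or above __START_KERNEL_map) are offset by
 * phys_base, everything else is assumed to lie in the direct mapping at
 * PAGE_OFFSET. Illustrative only: with phys_base == 0, the text address
 * __START_KERNEL_map + 0x100000 resolves to physical 0x100000.
 */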
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                               pgprot_t prot)
{
        unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
        int err, level;

        /* No change for pages after the last mapping */
        if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
                return 0;

        npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        vaddr = (unsigned long) __va(phys_addr);

        /*
         * If there is no identity map for this address,
         * change_page_attr_addr is unnecessary
         */
        if (!lookup_address(vaddr, &level))
                return 0;
        /*
         * Must use an address here and not a struct page because the
         * phys addr can be in a hole between nodes and not have a
         * memmap entry.
         */
        err = change_page_attr_addr(vaddr, npages, prot);

        if (!err)
                global_flush_tlb();

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
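/*
 * Worked example of the offset handling (illustrative, not from the
 * original source): a hypothetical call __ioremap(0xfec00040, 0x20, 0) is
 * page-aligned below, so the single page at 0xfec00000 gets mapped and the
 * returned pointer is the new virtual base plus the 0x40 offset.
 */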
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                        unsigned long flags)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
        pgprot_t pgprot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_X86_32
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr <= virt_to_phys(high_memory - 1)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr);
                     page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }
#endif

        pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
                remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
                return NULL;
        }

        if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
                vunmap(addr);
                return NULL;
        }

        return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);

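/*
 * Sketch of typical driver usage of the helpers above (illustrative only;
 * "pdev" is an assumed struct pci_dev pointer and 0x10 a made-up register
 * offset):
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + 0x10);
 *      iounmap(regs);
 */
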
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Reset the direct mapping. Can block */
        ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
                                __attribute__((aligned(PAGE_SIZE)));

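/*
 * bm_pte above is a single boot-time page table. The two helpers below
 * assume the 2-level (non-PAE) 32-bit layout: the top 10 bits of an
 * address (addr >> 22) index the page directory, and the next 10 bits
 * ((addr >> PAGE_SHIFT) & 1023) index entries within a page table.
 */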
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
        return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
        return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
        unsigned long *pgd;

        if (early_ioremap_debug)
                printk(KERN_DEBUG "early_ioremap_init()\n");

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = __pa(bm_pte) | _PAGE_TABLE;
        memset(bm_pte, 0, sizeof(bm_pte));
        /*
         * The boot-ioremap range spans multiple pgds, for which
         * we are not prepared:
         */
        if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pgd %p != %p\n",
                       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        unsigned long *pgd;

        if (early_ioremap_debug)
                printk(KERN_DEBUG "early_ioremap_clear()\n");

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = 0;
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long *pte, phys, addr;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                /* Re-establish only those slots that are actually mapped */
                if (*pte & _PAGE_PRESENT) {
                        phys = *pte & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long *pte, addr = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                *pte = (phys & PAGE_MASK) | pgprot_val(flags);
        else
                *pte = 0;
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        /* nesting is unsigned, so check the signed counter for underflow */
        WARN_ON(early_ioremap_nested < 0);

        if (early_ioremap_debug) {
                printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
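
/*
 * Illustrative boot-time usage of the pair above (a sketch, not code from
 * this file): early code that needs to look at a firmware table before the
 * normal ioremap() is available typically does something like
 *
 *      hdr = early_ioremap(phys, sizeof(*hdr));     (map a few bytes)
 *      len = hdr->length;                           (read what is needed)
 *      early_iounmap(hdr, sizeof(*hdr));            (release the slot)
 *
 * keeping each mapping short-lived so the small FIX_BTMAP window and the
 * nesting counter above are not exhausted.
 */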

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */