/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

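/*
 * Record a range of machine memory as "extra" memory: stash it in
 * xen_extra_mem[] (coalescing with an adjacent region where possible),
 * reserve it in memblock so early allocations stay out of it, and mark
 * its P2M entries invalid until the memory is actually populated.
 */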
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);

	for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}

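/*
 * Hand the PFN range [start, end) back to the hypervisor, one extent
 * at a time, via XENMEM_decrease_reservation, invalidating the P2M
 * entry for each page released.  Returns the number of pages actually
 * freed.
 */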
static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	if (len)
		printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
		       start, end, len);

	return len;
}

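/*
 * Release any pages of the initial allocation that fall within non-RAM
 * e820 regions or gaps, and install 1:1 (identity) P2M entries for
 * those ranges.  Returns the number of pages released; the identity
 * count is only reported via printk.
 */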
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn) {
				if (start_pfn < nr_pages)
					released += xen_release_chunk(
						start_pfn, min(end_pfn, nr_pages));

				identity += set_phys_range_identity(
					start_pfn, end_pfn);
			}
			start = end;
		}
	}

	if (released)
		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	if (identity)
		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

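/*
 * Add a region to the kernel e820 map, trimming RAM regions inward to
 * page boundaries so that no partial page is ever treated as usable
 * RAM; non-RAM regions are added unmodified.
 */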
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);
	extra_pages += xen_released_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

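	/*
	 * Walk the Xen-supplied map, clipping RAM to the initial
	 * allocation (mem_end).  RAM clipped off the end is re-added
	 * as extra memory while extra_pages last, and marked unusable
	 * after that.  Entries are consumed piecewise, so one map
	 * entry may be split across several e820 regions.
	 */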
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;
	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

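/*
 * Register an entry point with the hypervisor for the given callback
 * type (event, failsafe, sysenter, syscall, ...), with event delivery
 * masked while the callback runs.
 */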
static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

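/*
 * Route SYSENTER through Xen if the CPU supports it (X86_FEATURE_SEP
 * on 32-bit, X86_FEATURE_SYSENTER32 for compat tasks on 64-bit).  If
 * the feature is absent or registration fails, clear the feature bit
 * so userspace uses the int $0x80 path instead.
 */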
void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

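/*
 * 64-bit only: register Xen entry points for SYSCALL from 64-bit and,
 * if available, 32-bit compat userspace.  Failure of the 64-bit
 * callback is only logged, since 64-bit userspace has no alternative
 * syscall mechanism to fall back on.
 */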
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

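/*
 * Early Xen-specific arch setup: install the panic notifier, enable
 * the vm_assist modes the kernel relies on, register the event and
 * failsafe callbacks (fatal if either fails), wire up the fast
 * syscall paths, disable ACPI in unprivileged domains, copy the Xen
 * command line, and force the idle loop onto the default (safe_halt)
 * implementation.
 */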
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(set_pm_idle_to_default());
	fiddle_vdso();
}