blob: a5b80e1351dd737e70007e7fb26be5469403c76e [file] [log] [blame]
Paul Brook1ad21342009-05-19 16:17:58 +01001#ifndef CPU_COMMON_H
2#define CPU_COMMON_H 1
3
/* CPU interfaces that are target independent. */
5
Aurelien Jarno477ba622010-03-29 02:12:51 +02006#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
Paul Brook1ad21342009-05-19 16:17:58 +01007#define WORDS_ALIGNED
8#endif
9
Paolo Bonzini37b76cf2010-04-01 19:57:10 +020010#ifdef TARGET_PHYS_ADDR_BITS
11#include "targphys.h"
12#endif
13
14#ifndef NEED_CPU_H
15#include "poison.h"
16#endif
17
Paul Brook1ad21342009-05-19 16:17:58 +010018#include "bswap.h"
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +020019#include "qemu-queue.h"
Paul Brook1ad21342009-05-19 16:17:58 +010020
Paul Brookb3755a92010-03-12 16:54:58 +000021#if !defined(CONFIG_USER_ONLY)
22
/* Byte order of a memory-mapped device region, used to decide whether an
 * access must be byte-swapped relative to the target CPU's endianness.
 * Passed to cpu_register_io_memory() when registering MMIO handlers. */
enum device_endian {
    DEVICE_NATIVE_ENDIAN,   /* same byte order as the target CPU */
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};
28
/* address in the RAM (different from a physical address) */
typedef unsigned long ram_addr_t;

/* memory API */

/* Callback signatures for MMIO handlers: invoked with the opaque pointer
 * supplied at registration time, the guest physical address, and (for
 * writes) the value being stored. */
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

/* Map [start_addr, start_addr + size) of guest physical address space to
 * phys_offset.  log_dirty requests dirty logging for the range.
 * NOTE(review): the exact semantics of region_offset are defined in
 * exec.c and are not visible in this header -- confirm there. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty);
42
43static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
44 ram_addr_t size,
45 ram_addr_t phys_offset,
46 ram_addr_t region_offset)
47{
48 cpu_register_physical_memory_log(start_addr, size, phys_offset,
49 region_offset, false);
50}
51
Anthony Liguoric227f092009-10-01 16:12:16 -050052static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
53 ram_addr_t size,
54 ram_addr_t phys_offset)
Paul Brook1ad21342009-05-19 16:17:58 +010055{
56 cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
57}
58
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
/* Register a RAM block backed by a caller-provided host buffer. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host);
/* Allocate a new RAM block of the given size; returns its ram_addr_t
 * handle.  dev/name identify the block (e.g. for migration). */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
/* Like qemu_get_ram_ptr(), but also reports via *size how many bytes are
 * accessible at the returned pointer -- presumably clamped to the block;
 * confirm against the definition in exec.c. */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
/* As above, but with no error return -- the _nofail suffix suggests it
 * aborts on an unknown host pointer; confirm in exec.c. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
Paul Brook1ad21342009-05-19 16:17:58 +010076
/* Register MMIO read/write handler tables with the given endianness;
 * returns an io-memory index, released with cpu_unregister_io_memory(). */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian);
void cpu_unregister_io_memory(int table_address);

/* Copy between a host buffer and guest physical memory; is_write selects
 * the direction (non-zero: buf -> guest, zero: guest -> buf, as used by
 * the read/write wrappers below). */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
Anthony Liguoric227f092009-10-01 16:12:16 -050084static inline void cpu_physical_memory_read(target_phys_addr_t addr,
Stefan Weil3bad9812011-04-10 17:28:56 +020085 void *buf, int len)
Paul Brook1ad21342009-05-19 16:17:58 +010086{
87 cpu_physical_memory_rw(addr, buf, len, 0);
88}
Anthony Liguoric227f092009-10-01 16:12:16 -050089static inline void cpu_physical_memory_write(target_phys_addr_t addr,
Stefan Weil3bad9812011-04-10 17:28:56 +020090 const void *buf, int len)
Paul Brook1ad21342009-05-19 16:17:58 +010091{
Stefan Weil3bad9812011-04-10 17:28:56 +020092 cpu_physical_memory_rw(addr, (void *)buf, len, 1);
Paul Brook1ad21342009-05-19 16:17:58 +010093}
/* Map a range of guest physical memory into the host address space.
 * *plen is in/out -- presumably updated with the length actually mapped
 * (confirm in exec.c).  Release with cpu_physical_memory_unmap(),
 * reporting in access_len how many bytes were actually accessed. */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
/* Register a callback invoked with opaque when mapping becomes possible
 * again after a failed cpu_physical_memory_map() -- NOTE(review): inferred
 * from naming; confirm in exec.c.  Returns a cookie accepted by
 * cpu_unregister_map_client(). */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
101
/* Observer interface for changes to the guest physical memory map.
 * Registered clients are kept on an internal list (see the QLIST_ENTRY
 * below) and called back through the function pointers they provide. */
struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    /* Called when [start_addr, start_addr + size) is (re)mapped to
     * phys_offset; log_dirty mirrors the flag given at registration. */
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    /* Synchronize the dirty bitmap for the given range; returns an
     * error/status int. */
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    /* Enable or disable migration logging. */
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    /* Start/stop dirty logging on a specific range. */
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    /* Linkage for the global client list; owned by the core, not the
     * client. */
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};

void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
124
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/* Flush any writes batched for coalesced MMIO regions so the device sees
 * them in order. */
void qemu_flush_coalesced_mmio_buffer(void);
135
/* Load/store helpers operating on guest physical addresses.  The
 * unsuffixed forms use the target's default byte order; _le/_be force
 * little-/big-endian accesses.  Widths: b = 8, w = 16, l = 32, q = 64
 * bits.  NOTE(review): the _notdirty stores presumably skip dirty-bitmap
 * updates -- confirm against the definitions in exec.c. */
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t lduw_le_phys(target_phys_addr_t addr);
uint32_t lduw_be_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint32_t ldl_le_phys(target_phys_addr_t addr);
uint32_t ldl_be_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
uint64_t ldq_le_phys(target_phys_addr_t addr);
uint64_t ldq_be_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stw_le_phys(target_phys_addr_t addr, uint32_t val);
void stw_be_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stl_le_phys(target_phys_addr_t addr, uint32_t val);
void stl_be_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);
void stq_le_phys(target_phys_addr_t addr, uint64_t val);
void stq_be_phys(target_phys_addr_t addr, uint64_t val);
Paul Brook1ad21342009-05-19 16:17:58 +0100158
/* Like a write to guest memory, but permitted to land in ROM regions. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);

/* phys_offset encoding: the io-memory index is shifted left by
 * IO_MEM_SHIFT, leaving the low bits for flags -- IO_MEM_ROMD (1) and
 * IO_MEM_SUBPAGE (2) both fit below 1 << IO_MEM_SHIFT. */
#define IO_MEM_SHIFT 3

#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
Paul Brook1ad21342009-05-19 16:17:58 +0100172
Paul Brookb3755a92010-03-12 16:54:58 +0000173#endif
174
Paul Brook1ad21342009-05-19 16:17:58 +0100175#endif /* !CPU_COMMON_H */