Diffstat (limited to 'include')
-rw-r--r--include/authz/base.h101
-rw-r--r--include/authz/list.h94
-rw-r--r--include/authz/listfile.h97
-rw-r--r--include/authz/pamacct.h88
-rw-r--r--include/authz/simple.h72
-rw-r--r--include/block/accounting.h9
-rw-r--r--include/block/aio-wait.h49
-rw-r--r--include/block/aio.h327
-rw-r--r--include/block/aio_task.h52
-rw-r--r--include/block/block-common.h565
-rw-r--r--include/block/block-copy.h101
-rw-r--r--include/block/block-global-state.h305
-rw-r--r--include/block/block-hmp-cmds.h56
-rw-r--r--include/block/block-io.h455
-rw-r--r--include/block/block.h731
-rw-r--r--include/block/block_backup.h2
-rw-r--r--include/block/block_int-common.h1321
-rw-r--r--include/block/block_int-global-state.h326
-rw-r--r--include/block/block_int-io.h194
-rw-r--r--include/block/block_int.h1153
-rw-r--r--include/block/blockjob.h111
-rw-r--r--include/block/blockjob_int.h73
-rw-r--r--include/block/dirty-bitmap.h92
-rw-r--r--include/block/export.h91
-rw-r--r--include/block/fuse.h30
-rw-r--r--include/block/graph-lock.h278
-rw-r--r--include/block/nbd.h337
-rw-r--r--include/block/nvme.h1385
-rw-r--r--include/block/qapi.h36
-rw-r--r--include/block/qdict.h5
-rw-r--r--include/block/raw-aio.h35
-rw-r--r--include/block/replication.h175
-rw-r--r--include/block/reqlist.h75
-rw-r--r--include/block/snapshot.h66
-rw-r--r--include/block/thread-pool.h20
-rw-r--r--include/block/throttle-groups.h18
-rw-r--r--include/block/ufs.h1090
-rw-r--r--include/block/write-threshold.h28
-rw-r--r--include/chardev/char-fd.h9
-rw-r--r--include/chardev/char-fe.h71
-rw-r--r--include/chardev/char-io.h2
-rw-r--r--include/chardev/char-mux.h62
-rw-r--r--include/chardev/char-socket.h87
-rw-r--r--include/chardev/char-win.h9
-rw-r--r--include/chardev/char.h78
-rw-r--r--include/chardev/spice.h26
-rw-r--r--include/crypto/aes-round.h164
-rw-r--r--include/crypto/aes.h37
-rw-r--r--include/crypto/afsplit.h11
-rw-r--r--include/crypto/akcipher.h179
-rw-r--r--include/crypto/block.h94
-rw-r--r--include/crypto/cipher.h8
-rw-r--r--include/crypto/clmul.h83
-rw-r--r--include/crypto/desrfb.h16
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/hmac.h2
-rw-r--r--include/crypto/init.h2
-rw-r--r--include/crypto/ivgen.h8
-rw-r--r--include/crypto/pbkdf.h2
-rw-r--r--include/crypto/random.h5
-rw-r--r--include/crypto/secret.h28
-rw-r--r--include/crypto/secret_common.h58
-rw-r--r--include/crypto/secret_keyring.h40
-rw-r--r--include/crypto/sm4.h15
-rw-r--r--include/crypto/tls-cipher-suites.h34
-rw-r--r--include/crypto/tlscreds.h40
-rw-r--r--include/crypto/tlscredsanon.h21
-rw-r--r--include/crypto/tlscredspsk.h21
-rw-r--r--include/crypto/tlscredsx509.h19
-rw-r--r--include/crypto/tlssession.h17
-rw-r--r--include/crypto/xts.h3
-rw-r--r--include/disas/capstone.h2
-rw-r--r--include/disas/dis-asm.h (renamed from include/disas/bfd.h)236
-rw-r--r--include/disas/disas.h25
-rw-r--r--include/elf.h1901
-rw-r--r--include/exec/address-spaces.h2
-rw-r--r--include/exec/confidential-guest-support.h94
-rw-r--r--include/exec/cpu-all.h325
-rw-r--r--include/exec/cpu-common.h225
-rw-r--r--include/exec/cpu-defs.h202
-rw-r--r--include/exec/cpu_ldst.h720
-rw-r--r--include/exec/cpu_ldst_template.h210
-rw-r--r--include/exec/cpu_ldst_useronly_template.h143
-rw-r--r--include/exec/cputlb.h6
-rw-r--r--include/exec/exec-all.h606
-rw-r--r--include/exec/gdbstub.h215
-rw-r--r--include/exec/gen-icount.h72
-rw-r--r--include/exec/helper-gen-common.h18
-rw-r--r--include/exec/helper-gen.h85
-rw-r--r--include/exec/helper-gen.h.inc102
-rw-r--r--include/exec/helper-head.h88
-rw-r--r--include/exec/helper-info.c.inc96
-rw-r--r--include/exec/helper-proto-common.h20
-rw-r--r--include/exec/helper-proto.h47
-rw-r--r--include/exec/helper-proto.h.inc68
-rw-r--r--include/exec/helper-tcg.h68
-rw-r--r--include/exec/hwaddr.h7
-rw-r--r--include/exec/ioport.h8
-rw-r--r--include/exec/log.h30
-rw-r--r--include/exec/memattrs.h18
-rw-r--r--include/exec/memop.h173
-rw-r--r--include/exec/memopidx.h55
-rw-r--r--include/exec/memory-internal.h74
-rw-r--r--include/exec/memory.h1459
-rw-r--r--include/exec/memory_ldst.h.inc (renamed from include/exec/memory_ldst.inc.h)52
-rw-r--r--include/exec/memory_ldst_cached.h.inc (renamed from include/exec/memory_ldst_cached.inc.h)45
-rw-r--r--include/exec/memory_ldst_phys.h.inc (renamed from include/exec/memory_ldst_phys.inc.h)74
-rw-r--r--include/exec/page-vary.h52
-rw-r--r--include/exec/plugin-gen.h57
-rw-r--r--include/exec/poison.h30
-rw-r--r--include/exec/ram_addr.h333
-rw-r--r--include/exec/ramblock.h94
-rw-r--r--include/exec/ramlist.h17
-rw-r--r--include/exec/replay-core.h80
-rw-r--r--include/exec/softmmu-semi.h99
-rw-r--r--include/exec/target_long.h44
-rw-r--r--include/exec/target_page.h2
-rw-r--r--include/exec/tb-flush.h28
-rw-r--r--include/exec/tb-hash.h67
-rw-r--r--include/exec/tb-lookup.h47
-rw-r--r--include/exec/tlb-common.h56
-rw-r--r--include/exec/translate-all.h33
-rw-r--r--include/exec/translation-block.h154
-rw-r--r--include/exec/translator.h147
-rw-r--r--include/exec/tswap.h72
-rw-r--r--include/exec/user/abitypes.h12
-rw-r--r--include/exec/user/guest-base.h12
-rw-r--r--include/exec/user/thunk.h26
-rw-r--r--include/exec/vaddr.h18
-rw-r--r--include/fpu/softfloat-helpers.h144
-rw-r--r--include/fpu/softfloat-macros.h360
-rw-r--r--include/fpu/softfloat-types.h84
-rw-r--r--include/fpu/softfloat.h738
-rw-r--r--include/gdbstub/helpers.h103
-rw-r--r--include/gdbstub/syscalls.h122
-rw-r--r--include/gdbstub/user.h67
-rw-r--r--include/glib-compat.h168
-rw-r--r--include/hw/acpi/acpi-defs.h599
-rw-r--r--include/hw/acpi/acpi.h15
-rw-r--r--include/hw/acpi/acpi_aml_interface.h52
-rw-r--r--include/hw/acpi/acpi_dev_interface.h19
-rw-r--r--include/hw/acpi/acpi_generic_initiator.h47
-rw-r--r--include/hw/acpi/aml-build.h119
-rw-r--r--include/hw/acpi/cpu.h14
-rw-r--r--include/hw/acpi/cxl.h30
-rw-r--r--include/hw/acpi/erst.h27
-rw-r--r--include/hw/acpi/generic_event_device.h119
-rw-r--r--include/hw/acpi/ghes.h84
-rw-r--r--include/hw/acpi/ich9.h22
-rw-r--r--include/hw/acpi/ich9_tco.h (renamed from include/hw/acpi/tco.h)6
-rw-r--r--include/hw/acpi/ipmi.h9
-rw-r--r--include/hw/acpi/memory_hotplug.h10
-rw-r--r--include/hw/acpi/pc-hotplug.h2
-rw-r--r--include/hw/acpi/pci.h43
-rw-r--r--include/hw/acpi/pcihp.h17
-rw-r--r--include/hw/acpi/piix4.h69
-rw-r--r--include/hw/acpi/tpm.h66
-rw-r--r--include/hw/acpi/utils.h8
-rw-r--r--include/hw/acpi/vmgenid.h19
-rw-r--r--include/hw/adc/aspeed_adc.h56
-rw-r--r--include/hw/adc/max111x.h56
-rw-r--r--include/hw/adc/npcm7xx_adc.h68
-rw-r--r--include/hw/adc/stm32f2xx_adc.h10
-rw-r--r--include/hw/adc/zynq-xadc.h (renamed from include/hw/misc/zynq-xadc.h)13
-rw-r--r--include/hw/arm/allwinner-a10.h66
-rw-r--r--include/hw/arm/allwinner-h3.h172
-rw-r--r--include/hw/arm/allwinner-r40.h157
-rw-r--r--include/hw/arm/armsse-version.h42
-rw-r--r--include/hw/arm/armsse.h241
-rw-r--r--include/hw/arm/armv7m.h59
-rw-r--r--include/hw/arm/aspeed.h43
-rw-r--r--include/hw/arm/aspeed_soc.h231
-rw-r--r--include/hw/arm/bcm2835_peripherals.h68
-rw-r--r--include/hw/arm/bcm2836.h49
-rw-r--r--include/hw/arm/bcm2838.h31
-rw-r--r--include/hw/arm/bcm2838_peripherals.h84
-rw-r--r--include/hw/arm/boot.h (renamed from include/hw/arm/arm.h)91
-rw-r--r--include/hw/arm/bsa.h35
-rw-r--r--include/hw/arm/digic.h7
-rw-r--r--include/hw/arm/exynos4210.h73
-rw-r--r--include/hw/arm/fsl-imx25.h42
-rw-r--r--include/hw/arm/fsl-imx31.h15
-rw-r--r--include/hw/arm/fsl-imx6.h56
-rw-r--r--include/hw/arm/fsl-imx6ul.h168
-rw-r--r--include/hw/arm/fsl-imx7.h372
-rw-r--r--include/hw/arm/iotkit.h136
-rw-r--r--include/hw/arm/linux-boot-if.h11
-rw-r--r--include/hw/arm/msf2-soc.h20
-rw-r--r--include/hw/arm/npcm7xx.h139
-rw-r--r--include/hw/arm/nrf51.h3
-rw-r--r--include/hw/arm/nrf51_soc.h14
-rw-r--r--include/hw/arm/omap.h79
-rw-r--r--include/hw/arm/pxa.h29
-rw-r--r--include/hw/arm/raspberrypi-fw-defs.h173
-rw-r--r--include/hw/arm/raspi_platform.h127
-rw-r--r--include/hw/arm/sharpsl.h4
-rw-r--r--include/hw/arm/smmu-common.h92
-rw-r--r--include/hw/arm/smmuv3.h24
-rw-r--r--include/hw/arm/soc_dma.h3
-rw-r--r--include/hw/arm/stm32f100_soc.h (renamed from include/qemu/acl.h)63
-rw-r--r--include/hw/arm/stm32f205_soc.h22
-rw-r--r--include/hw/arm/stm32f405_soc.h75
-rw-r--r--include/hw/arm/stm32l4x5_soc.h74
-rw-r--r--include/hw/arm/virt.h136
-rw-r--r--include/hw/arm/xen_arch_hvm.h9
-rw-r--r--include/hw/arm/xlnx-versal.h231
-rw-r--r--include/hw/arm/xlnx-zynqmp.h62
-rw-r--r--include/hw/audio/asc.h85
-rw-r--r--include/hw/audio/pcspk.h17
-rw-r--r--include/hw/audio/soundhw.h14
-rw-r--r--include/hw/audio/virtio-snd.h250
-rw-r--r--include/hw/audio/wm8750.h1
-rw-r--r--include/hw/block/block.h59
-rw-r--r--include/hw/block/fdc.h13
-rw-r--r--include/hw/block/flash.h86
-rw-r--r--include/hw/block/swim.h72
-rw-r--r--include/hw/boards.h329
-rw-r--r--include/hw/bt.h2178
-rw-r--r--include/hw/char/avr_usart.h92
-rw-r--r--include/hw/char/bcm2835_aux.h10
-rw-r--r--include/hw/char/cadence_uart.h30
-rw-r--r--include/hw/char/cmsdk-apb-uart.h40
-rw-r--r--include/hw/char/digic-uart.h8
-rw-r--r--include/hw/char/escc.h10
-rw-r--r--include/hw/char/goldfish_tty.h36
-rw-r--r--include/hw/char/grlib_uart.h32
-rw-r--r--include/hw/char/ibex_uart.h73
-rw-r--r--include/hw/char/imx_serial.h28
-rw-r--r--include/hw/char/lm32_juart.h13
-rw-r--r--include/hw/char/mchp_pfsoc_mmuart.h68
-rw-r--r--include/hw/char/nrf51_uart.h7
-rw-r--r--include/hw/char/parallel-isa.h35
-rw-r--r--include/hw/char/parallel.h18
-rw-r--r--include/hw/char/pl011.h74
-rw-r--r--include/hw/char/renesas_sci.h54
-rw-r--r--include/hw/char/riscv_htif.h (renamed from include/hw/riscv/riscv_htif.h)19
-rw-r--r--include/hw/char/serial.h42
-rw-r--r--include/hw/char/shakti_uart.h74
-rw-r--r--include/hw/char/sifive_uart.h (renamed from include/hw/riscv/sifive_uart.h)18
-rw-r--r--include/hw/char/stm32f2xx_usart.h19
-rw-r--r--include/hw/char/stm32l4x5_usart.h67
-rw-r--r--include/hw/char/xilinx_uartlite.h18
-rw-r--r--include/hw/clock.h381
-rw-r--r--include/hw/core/accel-cpu.h38
-rw-r--r--include/hw/core/cpu.h (renamed from include/qom/cpu.h)806
-rw-r--r--include/hw/core/generic-loader.h9
-rw-r--r--include/hw/core/resetcontainer.h48
-rw-r--r--include/hw/core/split-irq.h4
-rw-r--r--include/hw/core/sysbus-fdt.h (renamed from include/hw/arm/sysbus-fdt.h)0
-rw-r--r--include/hw/core/sysemu-cpu-ops.h92
-rw-r--r--include/hw/core/tcg-cpu-ops.h224
-rw-r--r--include/hw/cpu/a15mpcore.h8
-rw-r--r--include/hw/cpu/a9mpcore.h8
-rw-r--r--include/hw/cpu/arm11mpcore.h8
-rw-r--r--include/hw/cpu/cluster.h35
-rw-r--r--include/hw/cpu/core.h10
-rw-r--r--include/hw/cris/etraxfs.h27
-rw-r--r--include/hw/cris/etraxfs_dma.h2
-rw-r--r--include/hw/cxl/cxl.h70
-rw-r--r--include/hw/cxl/cxl_cdat.h172
-rw-r--r--include/hw/cxl/cxl_component.h280
-rw-r--r--include/hw/cxl/cxl_device.h506
-rw-r--r--include/hw/cxl/cxl_events.h169
-rw-r--r--include/hw/cxl/cxl_host.h22
-rw-r--r--include/hw/cxl/cxl_pci.h194
-rw-r--r--include/hw/devices.h62
-rw-r--r--include/hw/display/bcm2835_fb.h12
-rw-r--r--include/hw/display/blizzard.h21
-rw-r--r--include/hw/display/dpcd.h4
-rw-r--r--include/hw/display/edid.h17
-rw-r--r--include/hw/display/i2c-ddc.h (renamed from include/hw/i2c/i2c-ddc.h)5
-rw-r--r--include/hw/display/macfb.h101
-rw-r--r--include/hw/display/ramfb.h4
-rw-r--r--include/hw/display/tc6393xb.h21
-rw-r--r--include/hw/display/vga.h10
-rw-r--r--include/hw/display/xlnx_dp.h30
-rw-r--r--include/hw/dma/bcm2835_dma.h13
-rw-r--r--include/hw/dma/i8257.h9
-rw-r--r--include/hw/dma/pl080.h14
-rw-r--r--include/hw/dma/sifive_pdma.h59
-rw-r--r--include/hw/dma/xlnx-zdma.h10
-rw-r--r--include/hw/dma/xlnx-zynq-devcfg.h10
-rw-r--r--include/hw/dma/xlnx_csu_dma.h72
-rw-r--r--include/hw/dma/xlnx_dpdma.h4
-rw-r--r--include/hw/elf_ops.h310
-rw-r--r--include/hw/empty_slot.h7
-rw-r--r--include/hw/firmware/smbios.h96
-rw-r--r--include/hw/fsi/aspeed_apb2opb.h46
-rw-r--r--include/hw/fsi/cfam.h34
-rw-r--r--include/hw/fsi/fsi-master.h32
-rw-r--r--include/hw/fsi/fsi.h37
-rw-r--r--include/hw/fsi/lbus.h43
-rw-r--r--include/hw/fw-path-provider.h12
-rw-r--r--include/hw/gpio/aspeed_gpio.h110
-rw-r--r--include/hw/gpio/bcm2835_gpio.h9
-rw-r--r--include/hw/gpio/bcm2838_gpio.h45
-rw-r--r--include/hw/gpio/imx_gpio.h7
-rw-r--r--include/hw/gpio/npcm7xx_gpio.h55
-rw-r--r--include/hw/gpio/nrf51_gpio.h10
-rw-r--r--include/hw/gpio/pca9552.h38
-rw-r--r--include/hw/gpio/pca9552_regs.h (renamed from include/hw/misc/pca9552_regs.h)0
-rw-r--r--include/hw/gpio/pca9554.h36
-rw-r--r--include/hw/gpio/pca9554_regs.h19
-rw-r--r--include/hw/gpio/pcf8574.h15
-rw-r--r--include/hw/gpio/sifive_gpio.h79
-rw-r--r--include/hw/gpio/stm32l4x5_gpio.h71
-rw-r--r--include/hw/hotplug.h13
-rw-r--r--include/hw/hw.h12
-rw-r--r--include/hw/hyperv/dynmem-proto.h430
-rw-r--r--include/hw/hyperv/hv-balloon.h18
-rw-r--r--include/hw/hyperv/hyperv-proto.h52
-rw-r--r--include/hw/hyperv/hyperv.h63
-rw-r--r--include/hw/hyperv/vmbus-bridge.h34
-rw-r--r--include/hw/hyperv/vmbus-proto.h222
-rw-r--r--include/hw/hyperv/vmbus.h226
-rw-r--r--include/hw/i2c/allwinner-i2c.h61
-rw-r--r--include/hw/i2c/arm_sbcon_i2c.h36
-rw-r--r--include/hw/i2c/aspeed_i2c.h355
-rw-r--r--include/hw/i2c/bcm2835_i2c.h80
-rw-r--r--include/hw/i2c/bitbang_i2c.h52
-rw-r--r--include/hw/i2c/i2c.h180
-rw-r--r--include/hw/i2c/i2c_mux_pca954x.h19
-rw-r--r--include/hw/i2c/imx_i2c.h7
-rw-r--r--include/hw/i2c/microbit_i2c.h42
-rw-r--r--include/hw/i2c/npcm7xx_smbus.h112
-rw-r--r--include/hw/i2c/pm_smbus.h18
-rw-r--r--include/hw/i2c/pmbus_device.h564
-rw-r--r--include/hw/i2c/pnv_i2c_regs.h143
-rw-r--r--include/hw/i2c/ppc4xx_i2c.h12
-rw-r--r--include/hw/i2c/smbus_eeprom.h36
-rw-r--r--include/hw/i2c/smbus_master.h (renamed from include/hw/i2c/smbus.h)49
-rw-r--r--include/hw/i2c/smbus_slave.h95
-rw-r--r--include/hw/i386/apic.h12
-rw-r--r--include/hw/i386/apic_internal.h33
-rw-r--r--include/hw/i386/hostmem-epc.h28
-rw-r--r--include/hw/i386/intel_iommu.h160
-rw-r--r--include/hw/i386/ioapic_internal.h121
-rw-r--r--include/hw/i386/microvm.h111
-rw-r--r--include/hw/i386/pc.h301
-rw-r--r--include/hw/i386/sgx-epc.h71
-rw-r--r--include/hw/i386/topology.h131
-rw-r--r--include/hw/i386/vmport.h28
-rw-r--r--include/hw/i386/x86-iommu.h81
-rw-r--r--include/hw/i386/x86.h148
-rw-r--r--include/hw/i386/xen_arch_hvm.h11
-rw-r--r--include/hw/ide.h33
-rw-r--r--include/hw/ide/ahci-pci.h22
-rw-r--r--include/hw/ide/ahci-sysbus.h35
-rw-r--r--include/hw/ide/ahci.h37
-rw-r--r--include/hw/ide/ide-bus.h42
-rw-r--r--include/hw/ide/ide-dev.h186
-rw-r--r--include/hw/ide/ide-dma.h37
-rw-r--r--include/hw/ide/internal.h649
-rw-r--r--include/hw/ide/isa.h20
-rw-r--r--include/hw/ide/mmio.h26
-rw-r--r--include/hw/ide/pci.h35
-rw-r--r--include/hw/ide/piix.h7
-rw-r--r--include/hw/input/adb.h42
-rw-r--r--include/hw/input/hid.h1
-rw-r--r--include/hw/input/i8042.h96
-rw-r--r--include/hw/input/lasips2.h80
-rw-r--r--include/hw/input/lm832x.h28
-rw-r--r--include/hw/input/pl050.h58
-rw-r--r--include/hw/input/ps2.h78
-rw-r--r--include/hw/input/stellaris_gamepad.h37
-rw-r--r--include/hw/input/tsc2xxx.h41
-rw-r--r--include/hw/intc/allwinner-a10-pic.h9
-rw-r--r--include/hw/intc/arm_gic.h19
-rw-r--r--include/hw/intc/arm_gic_common.h24
-rw-r--r--include/hw/intc/arm_gicv3.h14
-rw-r--r--include/hw/intc/arm_gicv3_common.h89
-rw-r--r--include/hw/intc/arm_gicv3_its_common.h65
-rw-r--r--include/hw/intc/armv7m_nvic.h140
-rw-r--r--include/hw/intc/aspeed_vic.h7
-rw-r--r--include/hw/intc/bcm2835_ic.h11
-rw-r--r--include/hw/intc/bcm2836_control.h20
-rw-r--r--include/hw/intc/exynos4210_combiner.h57
-rw-r--r--include/hw/intc/exynos4210_gic.h43
-rw-r--r--include/hw/intc/goldfish_pic.h35
-rw-r--r--include/hw/intc/grlib_irqmp.h41
-rw-r--r--include/hw/intc/heathrow_pic.h15
-rw-r--r--include/hw/intc/i8259.h20
-rw-r--r--include/hw/intc/imx_avic.h7
-rw-r--r--include/hw/intc/imx_gpcv2.h7
-rw-r--r--include/hw/intc/intc.h11
-rw-r--r--include/hw/intc/ioapic.h (renamed from include/hw/i386/ioapic.h)10
-rw-r--r--include/hw/intc/kvm_irqcount.h10
-rw-r--r--include/hw/intc/loongarch_extioi.h68
-rw-r--r--include/hw/intc/loongarch_ipi.h54
-rw-r--r--include/hw/intc/loongarch_pch_msi.h25
-rw-r--r--include/hw/intc/loongarch_pch_pic.h69
-rw-r--r--include/hw/intc/loongson_liointc.h22
-rw-r--r--include/hw/intc/m68k_irqc.h42
-rw-r--r--include/hw/intc/mips_gic.h9
-rw-r--r--include/hw/intc/ppc-uic.h78
-rw-r--r--include/hw/intc/realview_gic.h8
-rw-r--r--include/hw/intc/riscv_aclint.h83
-rw-r--r--include/hw/intc/riscv_aplic.h79
-rw-r--r--include/hw/intc/riscv_imsic.h68
-rw-r--r--include/hw/intc/rx_icu.h76
-rw-r--r--include/hw/intc/sifive_plic.h (renamed from include/hw/riscv/sifive_plic.h)32
-rw-r--r--include/hw/intc/xlnx-pmu-iomod-intc.h14
-rw-r--r--include/hw/intc/xlnx-zynqmp-ipi.h8
-rw-r--r--include/hw/ipack/ipack.h24
-rw-r--r--include/hw/ipmi/ipmi.h81
-rw-r--r--include/hw/ipmi/ipmi_bt.h73
-rw-r--r--include/hw/ipmi/ipmi_kcs.h76
-rw-r--r--include/hw/irq.h32
-rw-r--r--include/hw/isa/apm.h2
-rw-r--r--include/hw/isa/i8259_internal.h23
-rw-r--r--include/hw/isa/isa.h92
-rw-r--r--include/hw/isa/pc87312.h9
-rw-r--r--include/hw/isa/superio.h24
-rw-r--r--include/hw/isa/vt82c686.h42
-rw-r--r--include/hw/kvm/clock.h24
-rw-r--r--include/hw/lm32/lm32_pic.h11
-rw-r--r--include/hw/loader-fit.h2
-rw-r--r--include/hw/loader.h188
-rw-r--r--include/hw/loongarch/virt.h64
-rw-r--r--include/hw/m68k/mcf.h8
-rw-r--r--include/hw/m68k/mcf_fec.h8
-rw-r--r--include/hw/m68k/next-cube.h56
-rw-r--r--include/hw/m68k/q800-glue.h51
-rw-r--r--include/hw/m68k/q800.h78
-rw-r--r--include/hw/mem/memory-device.h87
-rw-r--r--include/hw/mem/npcm7xx_mc.h36
-rw-r--r--include/hw/mem/nvdimm.h48
-rw-r--r--include/hw/mem/pc-dimm.h29
-rw-r--r--include/hw/mem/sparse-mem.h19
-rw-r--r--include/hw/mips/bios.h9
-rw-r--r--include/hw/mips/bootloader.h26
-rw-r--r--include/hw/mips/cps.h12
-rw-r--r--include/hw/mips/cpudevs.h21
-rw-r--r--include/hw/mips/mips.h8
-rw-r--r--include/hw/misc/a9scu.h7
-rw-r--r--include/hw/misc/allwinner-a10-ccm.h67
-rw-r--r--include/hw/misc/allwinner-a10-dramc.h68
-rw-r--r--include/hw/misc/allwinner-cpucfg.h51
-rw-r--r--include/hw/misc/allwinner-h3-ccu.h65
-rw-r--r--include/hw/misc/allwinner-h3-dramc.h105
-rw-r--r--include/hw/misc/allwinner-h3-sysctrl.h66
-rw-r--r--include/hw/misc/allwinner-r40-ccu.h65
-rw-r--r--include/hw/misc/allwinner-r40-dramc.h108
-rw-r--r--include/hw/misc/allwinner-sid.h59
-rw-r--r--include/hw/misc/allwinner-sramc.h69
-rw-r--r--include/hw/misc/arm11scu.h7
-rw-r--r--include/hw/misc/arm_integrator_debug.h2
-rw-r--r--include/hw/misc/armsse-cpu-pwrctrl.h40
-rw-r--r--include/hw/misc/armsse-cpuid.h42
-rw-r--r--include/hw/misc/armsse-mhu.h45
-rw-r--r--include/hw/misc/armv7m_ras.h37
-rw-r--r--include/hw/misc/aspeed_hace.h50
-rw-r--r--include/hw/misc/aspeed_i3c.h48
-rw-r--r--include/hw/misc/aspeed_lpc.h45
-rw-r--r--include/hw/misc/aspeed_peci.h29
-rw-r--r--include/hw/misc/aspeed_sbc.h45
-rw-r--r--include/hw/misc/aspeed_scu.h82
-rw-r--r--include/hw/misc/aspeed_sdmc.h35
-rw-r--r--include/hw/misc/aspeed_xdma.h46
-rw-r--r--include/hw/misc/auxbus.h34
-rw-r--r--include/hw/misc/avr_power.h46
-rw-r--r--include/hw/misc/bcm2835_cprman.h210
-rw-r--r--include/hw/misc/bcm2835_cprman_internals.h1019
-rw-r--r--include/hw/misc/bcm2835_mbox.h12
-rw-r--r--include/hw/misc/bcm2835_mbox_defs.h4
-rw-r--r--include/hw/misc/bcm2835_mphi.h44
-rw-r--r--include/hw/misc/bcm2835_powermgt.h29
-rw-r--r--include/hw/misc/bcm2835_property.h13
-rw-r--r--include/hw/misc/bcm2835_rng.h8
-rw-r--r--include/hw/misc/bcm2835_thermal.h27
-rw-r--r--include/hw/misc/cbus.h31
-rw-r--r--include/hw/misc/djmemc.h30
-rw-r--r--include/hw/misc/empty_slot.h19
-rw-r--r--include/hw/misc/grlib_ahb_apb_pnp.h57
-rw-r--r--include/hw/misc/imx25_ccm.h7
-rw-r--r--include/hw/misc/imx2_wdt.h33
-rw-r--r--include/hw/misc/imx31_ccm.h7
-rw-r--r--include/hw/misc/imx6_ccm.h7
-rw-r--r--include/hw/misc/imx6_src.h7
-rw-r--r--include/hw/misc/imx6ul_ccm.h7
-rw-r--r--include/hw/misc/imx7_ccm.h13
-rw-r--r--include/hw/misc/imx7_gpr.h7
-rw-r--r--include/hw/misc/imx7_snvs.h14
-rw-r--r--include/hw/misc/imx7_src.h66
-rw-r--r--include/hw/misc/imx_ccm.h16
-rw-r--r--include/hw/misc/imx_rngc.h36
-rw-r--r--include/hw/misc/iosb.h25
-rw-r--r--include/hw/misc/iotkit-secctl.h14
-rw-r--r--include/hw/misc/iotkit-sysctl.h36
-rw-r--r--include/hw/misc/iotkit-sysinfo.h18
-rw-r--r--include/hw/misc/lasi.h79
-rw-r--r--include/hw/misc/led.h98
-rw-r--r--include/hw/misc/mac_via.h115
-rw-r--r--include/hw/misc/macio/cuda.h21
-rw-r--r--include/hw/misc/macio/gpio.h12
-rw-r--r--include/hw/misc/macio/macio.h61
-rw-r--r--include/hw/misc/macio/pmu.h26
-rw-r--r--include/hw/misc/mchp_pfsoc_dmc.h58
-rw-r--r--include/hw/misc/mchp_pfsoc_ioscb.h56
-rw-r--r--include/hw/misc/mchp_pfsoc_sysreg.h42
-rw-r--r--include/hw/misc/mips_cmgcr.h8
-rw-r--r--include/hw/misc/mips_cpc.h11
-rw-r--r--include/hw/misc/mips_itu.h16
-rw-r--r--include/hw/misc/mos6522.h68
-rw-r--r--include/hw/misc/mps2-fpgaio.h17
-rw-r--r--include/hw/misc/mps2-scc.h41
-rw-r--r--include/hw/misc/msf2-sysreg.h7
-rw-r--r--include/hw/misc/npcm7xx_clk.h180
-rw-r--r--include/hw/misc/npcm7xx_gcr.h73
-rw-r--r--include/hw/misc/npcm7xx_mft.h69
-rw-r--r--include/hw/misc/npcm7xx_pwm.h106
-rw-r--r--include/hw/misc/npcm7xx_rng.h34
-rw-r--r--include/hw/misc/nrf51_rng.h10
-rw-r--r--include/hw/misc/pca9552.h32
-rw-r--r--include/hw/misc/pvpanic.h25
-rw-r--r--include/hw/misc/sifive_e_aon.h60
-rw-r--r--include/hw/misc/sifive_e_prci.h74
-rw-r--r--include/hw/misc/sifive_test.h (renamed from include/hw/riscv/sifive_test.h)15
-rw-r--r--include/hw/misc/sifive_u_otp.h88
-rw-r--r--include/hw/misc/sifive_u_prci.h94
-rw-r--r--include/hw/misc/stm32f2xx_syscfg.h11
-rw-r--r--include/hw/misc/stm32f4xx_exti.h59
-rw-r--r--include/hw/misc/stm32f4xx_syscfg.h60
-rw-r--r--include/hw/misc/stm32l4x5_exti.h51
-rw-r--r--include/hw/misc/stm32l4x5_rcc.h239
-rw-r--r--include/hw/misc/stm32l4x5_rcc_internals.h1042
-rw-r--r--include/hw/misc/stm32l4x5_syscfg.h53
-rw-r--r--include/hw/misc/tz-mpc.h4
-rw-r--r--include/hw/misc/tz-msc.h7
-rw-r--r--include/hw/misc/tz-ppc.h12
-rw-r--r--include/hw/misc/unimp.h15
-rw-r--r--include/hw/misc/virt_ctrl.h24
-rw-r--r--include/hw/misc/vmcoreinfo.h13
-rw-r--r--include/hw/misc/xlnx-cfi-if.h59
-rw-r--r--include/hw/misc/xlnx-versal-cframe-reg.h303
-rw-r--r--include/hw/misc/xlnx-versal-cfu.h258
-rw-r--r--include/hw/misc/xlnx-versal-crl.h235
-rw-r--r--include/hw/misc/xlnx-versal-pmc-iou-slcr.h79
-rw-r--r--include/hw/misc/xlnx-versal-trng.h58
-rw-r--r--include/hw/misc/xlnx-versal-xramc.h97
-rw-r--r--include/hw/misc/xlnx-zynqmp-apu-ctrl.h93
-rw-r--r--include/hw/misc/xlnx-zynqmp-crf.h211
-rw-r--r--include/hw/net/allwinner-sun8i-emac.h104
-rw-r--r--include/hw/net/allwinner_emac.h8
-rw-r--r--include/hw/net/cadence_gem.h17
-rw-r--r--include/hw/net/dp8393x.h60
-rw-r--r--include/hw/net/ftgmac100.h24
-rw-r--r--include/hw/net/imx_fec.h10
-rw-r--r--include/hw/net/lan9118.h20
-rw-r--r--include/hw/net/lance.h11
-rw-r--r--include/hw/net/lasi_82596.h31
-rw-r--r--include/hw/net/mii.h16
-rw-r--r--include/hw/net/msf2-emac.h53
-rw-r--r--include/hw/net/mv88w8618_eth.h13
-rw-r--r--include/hw/net/ne2000-isa.h16
-rw-r--r--include/hw/net/npcm7xx_emc.h283
-rw-r--r--include/hw/net/npcm_gmac.h343
-rw-r--r--include/hw/net/smc91c111.h18
-rw-r--r--include/hw/net/xlnx-versal-canfd.h87
-rw-r--r--include/hw/net/xlnx-zynqmp-can.h79
-rw-r--r--include/hw/nmi.h14
-rw-r--r--include/hw/nubus/mac-nubus-bridge.h29
-rw-r--r--include/hw/nubus/nubus-virtio-mmio.h36
-rw-r--r--include/hw/nubus/nubus.h75
-rw-r--r--include/hw/nvram/chrp_nvram.h5
-rw-r--r--include/hw/nvram/eeprom_at24c.h39
-rw-r--r--include/hw/nvram/fw_cfg.h143
-rw-r--r--include/hw/nvram/fw_cfg_acpi.h14
-rw-r--r--include/hw/nvram/mac_nvram.h52
-rw-r--r--include/hw/nvram/npcm7xx_otp.h79
-rw-r--r--include/hw/nvram/nrf51_nvm.h65
-rw-r--r--include/hw/nvram/xlnx-bbram.h54
-rw-r--r--include/hw/nvram/xlnx-efuse.h132
-rw-r--r--include/hw/nvram/xlnx-versal-efuse.h68
-rw-r--r--include/hw/nvram/xlnx-zynqmp-efuse.h44
-rw-r--r--include/hw/openrisc/boot.h34
-rw-r--r--include/hw/or-irq.h7
-rw-r--r--include/hw/pci-bridge/cxl_upstream_port.h19
-rw-r--r--include/hw/pci-bridge/pci_expander_bridge.h12
-rw-r--r--include/hw/pci-bridge/simba.h13
-rw-r--r--include/hw/pci-bridge/xio3130_downstream.h15
-rw-r--r--include/hw/pci-host/articia.h17
-rw-r--r--include/hw/pci-host/astro.h94
-rw-r--r--include/hw/pci-host/bonito.h18
-rw-r--r--include/hw/pci-host/designware.h20
-rw-r--r--include/hw/pci-host/dino.h146
-rw-r--r--include/hw/pci-host/gpex.h50
-rw-r--r--include/hw/pci-host/grackle.h44
-rw-r--r--include/hw/pci-host/i440fx.h37
-rw-r--r--include/hw/pci-host/ls7a.h50
-rw-r--r--include/hw/pci-host/mv64361.h8
-rw-r--r--include/hw/pci-host/pam.h8
-rw-r--r--include/hw/pci-host/pnv_phb3.h170
-rw-r--r--include/hw/pci-host/pnv_phb3_regs.h434
-rw-r--r--include/hw/pci-host/pnv_phb4.h226
-rw-r--r--include/hw/pci-host/pnv_phb4_regs.h558
-rw-r--r--include/hw/pci-host/ppc4xx.h17
-rw-r--r--include/hw/pci-host/q35.h49
-rw-r--r--include/hw/pci-host/remote.h30
-rw-r--r--include/hw/pci-host/sabre.h21
-rw-r--r--include/hw/pci-host/spapr.h80
-rw-r--r--include/hw/pci-host/uninorth.h34
-rw-r--r--include/hw/pci-host/xilinx-pcie.h19
-rw-r--r--include/hw/pci/msi.h6
-rw-r--r--include/hw/pci/msix.h6
-rw-r--r--include/hw/pci/pci.h453
-rw-r--r--include/hw/pci/pci_bridge.h63
-rw-r--r--include/hw/pci/pci_bus.h34
-rw-r--r--include/hw/pci/pci_device.h350
-rw-r--r--include/hw/pci/pci_host.h20
-rw-r--r--include/hw/pci/pci_ids.h25
-rw-r--r--include/hw/pci/pci_regs.h6
-rw-r--r--include/hw/pci/pcie.h33
-rw-r--r--include/hw/pci/pcie_aer.h5
-rw-r--r--include/hw/pci/pcie_doe.h122
-rw-r--r--include/hw/pci/pcie_host.h10
-rw-r--r--include/hw/pci/pcie_port.h32
-rw-r--r--include/hw/pci/pcie_regs.h27
-rw-r--r--include/hw/pci/pcie_sriov.h82
-rw-r--r--include/hw/pci/shpc.h6
-rw-r--r--include/hw/pci/slotid_cap.h1
-rw-r--r--include/hw/pcmcia.h46
-rw-r--r--include/hw/platform-bus.h11
-rw-r--r--include/hw/ppc/fdt.h8
-rw-r--r--include/hw/ppc/mac_dbdma.h8
-rw-r--r--include/hw/ppc/openpic.h40
-rw-r--r--include/hw/ppc/pnv.h275
-rw-r--r--include/hw/ppc/pnv_chip.h162
-rw-r--r--include/hw/ppc/pnv_chiptod.h53
-rw-r--r--include/hw/ppc/pnv_core.h69
-rw-r--r--include/hw/ppc/pnv_homer.h59
-rw-r--r--include/hw/ppc/pnv_i2c.h38
-rw-r--r--include/hw/ppc/pnv_lpc.h48
-rw-r--r--include/hw/ppc/pnv_n1_chiplet.h32
-rw-r--r--include/hw/ppc/pnv_nest_pervasive.h32
-rw-r--r--include/hw/ppc/pnv_occ.h45
-rw-r--r--include/hw/ppc/pnv_pnor.h33
-rw-r--r--include/hw/ppc/pnv_psi.h75
-rw-r--r--include/hw/ppc/pnv_sbe.h56
-rw-r--r--include/hw/ppc/pnv_xive.h168
-rw-r--r--include/hw/ppc/pnv_xscom.h181
-rw-r--r--include/hw/ppc/ppc.h31
-rw-r--r--include/hw/ppc/ppc4xx.h144
-rw-r--r--include/hw/ppc/spapr.h453
-rw-r--r--include/hw/ppc/spapr_cpu_core.h42
-rw-r--r--include/hw/ppc/spapr_drc.h135
-rw-r--r--include/hw/ppc/spapr_irq.h147
-rw-r--r--include/hw/ppc/spapr_nested.h524
-rw-r--r--include/hw/ppc/spapr_numa.h37
-rw-r--r--include/hw/ppc/spapr_nvdimm.h26
-rw-r--r--include/hw/ppc/spapr_ovec.h40
-rw-r--r--include/hw/ppc/spapr_rtas.h10
-rw-r--r--include/hw/ppc/spapr_tpm_proxy.h30
-rw-r--r--include/hw/ppc/spapr_vio.h118
-rw-r--r--include/hw/ppc/spapr_xive.h71
-rw-r--r--include/hw/ppc/vof.h65
-rw-r--r--include/hw/ppc/xics.h97
-rw-r--r--include/hw/ppc/xics_spapr.h43
-rw-r--r--include/hw/ppc/xive.h269
-rw-r--r--include/hw/ppc/xive2.h111
-rw-r--r--include/hw/ppc/xive2_regs.h212
-rw-r--r--include/hw/ppc/xive_regs.h90
-rw-r--r--include/hw/ptimer.h130
-rw-r--r--include/hw/qdev-clock.h164
-rw-r--r--include/hw/qdev-core.h966
-rw-r--r--include/hw/qdev-dma.h6
-rw-r--r--include/hw/qdev-properties-system.h97
-rw-r--r--include/hw/qdev-properties.h310
-rw-r--r--include/hw/qdev.h8
-rw-r--r--include/hw/register.h23
-rw-r--r--include/hw/registerfields.h176
-rw-r--r--include/hw/remote/iohub.h42
-rw-r--r--include/hw/remote/iommu.h40
-rw-r--r--include/hw/remote/machine.h42
-rw-r--r--include/hw/remote/memory.h19
-rw-r--r--include/hw/remote/mpqemu-link.h99
-rw-r--r--include/hw/remote/proxy-memory-listener.h28
-rw-r--r--include/hw/remote/proxy.h48
-rw-r--r--include/hw/remote/vfio-user-obj.h6
-rw-r--r--include/hw/resettable.h247
-rw-r--r--include/hw/riscv/boot.h66
-rw-r--r--include/hw/riscv/boot_opensbi.h61
-rw-r--r--include/hw/riscv/microchip_pfsoc.h168
-rw-r--r--include/hw/riscv/numa.h114
-rw-r--r--include/hw/riscv/opentitan.h122
-rw-r--r--include/hw/riscv/riscv_hart.h15
-rw-r--r--include/hw/riscv/shakti_c.h75
-rw-r--r--include/hw/riscv/sifive_clint.h54
-rw-r--r--include/hw/riscv/sifive_cpu.h (renamed from include/hw/riscv/sifive_prci.h)30
-rw-r--r--include/hw/riscv/sifive_e.h78
-rw-r--r--include/hw/riscv/sifive_u.h134
-rw-r--r--include/hw/riscv/spike.h35
-rw-r--r--include/hw/riscv/virt.h114
-rw-r--r--include/hw/rtc/allwinner-rtc.h129
-rw-r--r--include/hw/rtc/aspeed_rtc.h28
-rw-r--r--include/hw/rtc/goldfish_rtc.h49
-rw-r--r--include/hw/rtc/m48t59.h50
-rw-r--r--include/hw/rtc/mc146818rtc.h60
-rw-r--r--include/hw/rtc/mc146818rtc_regs.h (renamed from include/hw/timer/mc146818rtc_regs.h)7
-rw-r--r--include/hw/rtc/pl031.h48
-rw-r--r--include/hw/rtc/sun4v-rtc.h19
-rw-r--r--include/hw/rtc/xlnx-zynqmp-rtc.h (renamed from include/hw/timer/xlnx-zynqmp-rtc.h)16
-rw-r--r--include/hw/rx/rx62n.h77
-rw-r--r--include/hw/s390x/3270-ccw.h16
-rw-r--r--include/hw/s390x/ap-device.h15
-rw-r--r--include/hw/s390x/cpu-topology.h83
-rw-r--r--include/hw/s390x/css-bridge.h17
-rw-r--r--include/hw/s390x/css.h28
-rw-r--r--include/hw/s390x/event-facility.h39
-rw-r--r--include/hw/s390x/ioinst.h14
-rw-r--r--include/hw/s390x/s390-ccw.h25
-rw-r--r--include/hw/s390x/s390-pci-bus.h406
-rw-r--r--include/hw/s390x/s390-pci-clp.h216
-rw-r--r--include/hw/s390x/s390-pci-inst.h119
-rw-r--r--include/hw/s390x/s390-pci-kvm.h38
-rw-r--r--include/hw/s390x/s390-pci-vfio.h44
-rw-r--r--include/hw/s390x/s390-virtio-ccw.h22
-rw-r--r--include/hw/s390x/s390_flic.h32
-rw-r--r--include/hw/s390x/sclp.h51
-rw-r--r--include/hw/s390x/storage-attributes.h38
-rw-r--r--include/hw/s390x/storage-keys.h92
-rw-r--r--include/hw/s390x/tod.h33
-rw-r--r--include/hw/s390x/vfio-ccw.h25
-rw-r--r--include/hw/scsi/emulation.h2
-rw-r--r--include/hw/scsi/esp.h55
-rw-r--r--include/hw/scsi/scsi.h114
-rw-r--r--include/hw/sd/allwinner-sdhost.h146
-rw-r--r--include/hw/sd/aspeed_sdhci.h35
-rw-r--r--include/hw/sd/bcm2835_sdhost.h8
-rw-r--r--include/hw/sd/cadence_sdhci.h47
-rw-r--r--include/hw/sd/npcm7xx_sdhci.h65
-rw-r--r--include/hw/sd/sd.h156
-rw-r--r--include/hw/sd/sdcard_legacy.h50
-rw-r--r--include/hw/sd/sdhci.h24
-rw-r--r--include/hw/sensor/emc141x_regs.h37
-rw-r--r--include/hw/sensor/isl_pmbus_vr.h57
-rw-r--r--include/hw/sensor/tmp105.h55
-rw-r--r--include/hw/sensor/tmp105_regs.h (renamed from include/hw/misc/tmp105_regs.h)0
-rw-r--r--include/hw/sh4/sh.h62
-rw-r--r--include/hw/sh4/sh_intc.h5
-rw-r--r--include/hw/southbridge/ich9.h (renamed from include/hw/i386/ich9.h)60
-rw-r--r--include/hw/southbridge/piix.h84
-rw-r--r--include/hw/sparc/grlib.h120
-rw-r--r--include/hw/sparc/sparc32_dma.h34
-rw-r--r--include/hw/sparc/sparc64.h6
-rw-r--r--include/hw/sparc/sun4m_iommu.h10
-rw-r--r--include/hw/sparc/sun4u_iommu.h10
-rw-r--r--include/hw/ssi/aspeed_smc.h99
-rw-r--r--include/hw/ssi/bcm2835_spi.h81
-rw-r--r--include/hw/ssi/ibex_spi_host.h92
-rw-r--r--include/hw/ssi/imx_spi.h12
-rw-r--r--include/hw/ssi/mss-spi.h7
-rw-r--r--include/hw/ssi/npcm7xx_fiu.h73
-rw-r--r--include/hw/ssi/npcm_pspi.h53
-rw-r--r--include/hw/ssi/pl022.h12
-rw-r--r--include/hw/ssi/sifive_spi.h50
-rw-r--r--include/hw/ssi/ssi.h99
-rw-r--r--include/hw/ssi/stm32f2xx_spi.h9
-rw-r--r--include/hw/ssi/xilinx_spips.h37
-rw-r--r--include/hw/ssi/xlnx-versal-ospi.h111
-rw-r--r--include/hw/stream.h46
-rw-r--r--include/hw/sysbus.h55
-rw-r--r--include/hw/timer/a9gtimer.h4
-rw-r--r--include/hw/timer/allwinner-a10-pit.h5
-rw-r--r--include/hw/timer/arm_mptimer.h8
-rw-r--r--include/hw/timer/armv7m_systick.h24
-rw-r--r--include/hw/timer/aspeed_timer.h23
-rw-r--r--include/hw/timer/avr_timer16.h93
-rw-r--r--include/hw/timer/bcm2835_systmr.h42
-rw-r--r--include/hw/timer/cadence_ttc.h54
-rw-r--r--include/hw/timer/cmsdk-apb-dualtimer.h10
-rw-r--r--include/hw/timer/cmsdk-apb-timer.h38
-rw-r--r--include/hw/timer/digic-timer.h7
-rw-r--r--include/hw/timer/grlib_gptimer.h32
-rw-r--r--include/hw/timer/hpet.h2
-rw-r--r--include/hw/timer/i8254.h23
-rw-r--r--include/hw/timer/i8254_internal.h12
-rw-r--r--include/hw/timer/ibex_timer.h55
-rw-r--r--include/hw/timer/imx_epit.h15
-rw-r--r--include/hw/timer/imx_gpt.h10
-rw-r--r--include/hw/timer/m48t59.h32
-rw-r--r--include/hw/timer/mc146818rtc.h14
-rw-r--r--include/hw/timer/mss-timer.h9
-rw-r--r--include/hw/timer/npcm7xx_timer.h113
-rw-r--r--include/hw/timer/nrf51_timer.h8
-rw-r--r--include/hw/timer/renesas_cmt.h43
-rw-r--r--include/hw/timer/renesas_tmr.h58
-rw-r--r--include/hw/timer/sifive_pwm.h62
-rw-r--r--include/hw/timer/sse-counter.h105
-rw-r--r--include/hw/timer/sse-timer.h54
-rw-r--r--include/hw/timer/stellaris-gptm.h51
-rw-r--r--include/hw/timer/stm32f2xx_timer.h11
-rw-r--r--include/hw/timer/sun4v-rtc.h1
-rw-r--r--include/hw/timer/tmu012.h23
-rw-r--r--include/hw/tricore/tc27x_soc.h129
-rw-r--r--include/hw/tricore/triboard.h48
-rw-r--r--include/hw/tricore/tricore.h1
-rw-r--r--include/hw/tricore/tricore_testdevice.h (renamed from include/exec/tb-context.h)31
-rw-r--r--include/hw/unicore32/puv3.h40
-rw-r--r--include/hw/usb.h196
-rw-r--r--include/hw/usb/chipidea.h7
-rw-r--r--include/hw/usb/dwc2-regs.h899
-rw-r--r--include/hw/usb/hcd-dwc3.h56
-rw-r--r--include/hw/usb/hcd-musb.h49
-rw-r--r--include/hw/usb/hid.h17
-rw-r--r--include/hw/usb/imx-usb-phy.h54
-rw-r--r--include/hw/usb/msd.h55
-rw-r--r--include/hw/usb/xhci.h21
-rw-r--r--include/hw/usb/xlnx-usb-subsystem.h47
-rw-r--r--include/hw/usb/xlnx-versal-usb2-ctrl-regs.h48
-rw-r--r--include/hw/vfio/vfio-amd-xgbe.h11
-rw-r--r--include/hw/vfio/vfio-calxeda-xgmac.h21
-rw-r--r--include/hw/vfio/vfio-common.h144
-rw-r--r--include/hw/vfio/vfio-container-base.h141
-rw-r--r--include/hw/vfio/vfio-platform.h20
-rw-r--r--include/hw/vfio/vfio.h7
-rw-r--r--include/hw/virtio/vdpa-dev.h43
-rw-r--r--include/hw/virtio/vhost-backend.h104
-rw-r--r--include/hw/virtio/vhost-scsi-common.h15
-rw-r--r--include/hw/virtio/vhost-scsi.h10
-rw-r--r--include/hw/virtio/vhost-user-base.h49
-rw-r--r--include/hw/virtio/vhost-user-blk.h29
-rw-r--r--include/hw/virtio/vhost-user-fs.h47
-rw-r--r--include/hw/virtio/vhost-user-gpio.h24
-rw-r--r--include/hw/virtio/vhost-user-i2c.h25
-rw-r--r--include/hw/virtio/vhost-user-rng.h24
-rw-r--r--include/hw/virtio/vhost-user-scmi.h31
-rw-r--r--include/hw/virtio/vhost-user-scsi.h18
-rw-r--r--include/hw/virtio/vhost-user-snd.h24
-rw-r--r--include/hw/virtio/vhost-user-vsock.h36
-rw-r--r--include/hw/virtio/vhost-user.h92
-rw-r--r--include/hw/virtio/vhost-vdpa.h95
-rw-r--r--include/hw/virtio/vhost-vsock-common.h53
-rw-r--r--include/hw/virtio/vhost-vsock.h23
-rw-r--r--include/hw/virtio/vhost.h370
-rw-r--r--include/hw/virtio/virtio-access.h9
-rw-r--r--include/hw/virtio/virtio-acpi.h15
-rw-r--r--include/hw/virtio/virtio-balloon.h39
-rw-r--r--include/hw/virtio/virtio-blk-common.h20
-rw-r--r--include/hw/virtio/virtio-blk.h47
-rw-r--r--include/hw/virtio/virtio-bus.h26
-rw-r--r--include/hw/virtio/virtio-crypto.h19
-rw-r--r--include/hw/virtio/virtio-dmabuf.h100
-rw-r--r--include/hw/virtio/virtio-gpu-bswap.h95
-rw-r--r--include/hw/virtio/virtio-gpu-pci.h39
-rw-r--r--include/hw/virtio/virtio-gpu-pixman.h45
-rw-r--r--include/hw/virtio/virtio-gpu.h241
-rw-r--r--include/hw/virtio/virtio-input.h46
-rw-r--r--include/hw/virtio/virtio-iommu.h74
-rw-r--r--include/hw/virtio/virtio-md-pci.h44
-rw-r--r--include/hw/virtio/virtio-mem.h134
-rw-r--r--include/hw/virtio/virtio-mmio.h75
-rw-r--r--include/hw/virtio/virtio-net.h145
-rw-r--r--include/hw/virtio/virtio-pci.h272
-rw-r--r--include/hw/virtio/virtio-pmem.h46
-rw-r--r--include/hw/virtio/virtio-rng.h11
-rw-r--r--include/hw/virtio/virtio-scsi.h96
-rw-r--r--include/hw/virtio/virtio-serial.h23
-rw-r--r--include/hw/virtio/virtio.h234
-rw-r--r--include/hw/vmstate-if.h39
-rw-r--r--include/hw/watchdog/allwinner-wdt.h123
-rw-r--r--include/hw/watchdog/cmsdk-apb-watchdog.h21
-rw-r--r--include/hw/watchdog/sbsa_gwdt.h79
-rw-r--r--include/hw/watchdog/wdt_aspeed.h37
-rw-r--r--include/hw/watchdog/wdt_diag288.h21
-rw-r--r--include/hw/watchdog/wdt_imx2.h91
-rw-r--r--include/hw/xen/arch_hvm.h5
-rw-r--r--include/hw/xen/interface/arch-arm.h509
-rw-r--r--include/hw/xen/interface/arch-x86/cpuid.h113
-rw-r--r--include/hw/xen/interface/arch-x86/xen-x86_32.h177
-rw-r--r--include/hw/xen/interface/arch-x86/xen-x86_64.h224
-rw-r--r--include/hw/xen/interface/arch-x86/xen.h378
-rw-r--r--include/hw/xen/interface/event_channel.h371
-rw-r--r--include/hw/xen/interface/features.h126
-rw-r--r--include/hw/xen/interface/grant_table.h669
-rw-r--r--include/hw/xen/interface/hvm/hvm_op.h378
-rw-r--r--include/hw/xen/interface/hvm/params.h301
-rw-r--r--include/hw/xen/interface/io/blkif.h713
-rw-r--r--include/hw/xen/interface/io/console.h39
-rw-r--r--include/hw/xen/interface/io/fbif.h159
-rw-r--r--include/hw/xen/interface/io/kbdif.h559
-rw-r--r--include/hw/xen/interface/io/netif.h1091
-rw-r--r--include/hw/xen/interface/io/protocols.h25
-rw-r--r--include/hw/xen/interface/io/ring.h (renamed from include/hw/xen/io/ring.h)126
-rw-r--r--include/hw/xen/interface/io/usbif.h408
-rw-r--r--include/hw/xen/interface/io/xenbus.h63
-rw-r--r--include/hw/xen/interface/io/xs_wire.h147
-rw-r--r--include/hw/xen/interface/memory.h746
-rw-r--r--include/hw/xen/interface/physdev.h366
-rw-r--r--include/hw/xen/interface/sched.h185
-rw-r--r--include/hw/xen/interface/trace.h324
-rw-r--r--include/hw/xen/interface/vcpu.h231
-rw-r--r--include/hw/xen/interface/version.h96
-rw-r--r--include/hw/xen/interface/xen-compat.h29
-rw-r--r--include/hw/xen/interface/xen.h1032
-rw-r--r--include/hw/xen/start_info.h146
-rw-r--r--include/hw/xen/xen-backend.h2
-rw-r--r--include/hw/xen/xen-block.h35
-rw-r--r--include/hw/xen/xen-bus-helper.h36
-rw-r--r--include/hw/xen/xen-bus.h81
-rw-r--r--include/hw/xen/xen-hvm-common.h98
-rw-r--r--include/hw/xen/xen-legacy-backend.h42
-rw-r--r--include/hw/xen/xen-x86.h15
-rw-r--r--include/hw/xen/xen.h48
-rw-r--r--include/hw/xen/xen_backend_ops.h408
-rw-r--r--include/hw/xen/xen_igd.h33
-rw-r--r--include/hw/xen/xen_native.h (renamed from include/hw/xen/xen_common.h)222
-rw-r--r--include/hw/xen/xen_pvdev.h8
-rw-r--r--include/hw/xtensa/mx_pic.h43
-rw-r--r--include/hw/xtensa/xtensa-isa.h6
-rw-r--r--include/io/channel-buffer.h7
-rw-r--r--include/io/channel-command.h37
-rw-r--r--include/io/channel-file.h25
-rw-r--r--include/io/channel-null.h55
-rw-r--r--include/io/channel-socket.h13
-rw-r--r--include/io/channel-tls.h8
-rw-r--r--include/io/channel-util.h25
-rw-r--r--include/io/channel-watch.h2
-rw-r--r--include/io/channel-websock.h7
-rw-r--r--include/io/channel.h359
-rw-r--r--include/io/dns-resolver.h16
-rw-r--r--include/io/net-listener.h16
-rw-r--r--include/io/task.h40
-rw-r--r--include/libdecnumber/dconfig.h2
-rw-r--r--include/libdecnumber/decNumber.h4
-rw-r--r--include/libdecnumber/decNumberLocal.h3
-rw-r--r--include/migration/blocker.h74
-rw-r--r--include/migration/client-options.h25
-rw-r--r--include/migration/colo.h26
-rw-r--r--include/migration/cpu.h5
-rw-r--r--include/migration/failover.h1
-rw-r--r--include/migration/global_state.h3
-rw-r--r--include/migration/misc.h98
-rw-r--r--include/migration/qemu-file-types.h26
-rw-r--r--include/migration/register.h290
-rw-r--r--include/migration/snapshot.h54
-rw-r--r--include/migration/vmstate.h270
-rw-r--r--include/monitor/hmp-target.h19
-rw-r--r--include/monitor/hmp.h184
-rw-r--r--include/monitor/monitor.h60
-rw-r--r--include/monitor/qdev.h30
-rw-r--r--include/monitor/qmp-helpers.h29
-rw-r--r--include/net/announce.h44
-rw-r--r--include/net/can_emu.h22
-rw-r--r--include/net/can_host.h20
-rw-r--r--include/net/checksum.h7
-rw-r--r--include/net/eth.h64
-rw-r--r--include/net/filter.h17
-rw-r--r--include/net/net.h153
-rw-r--r--include/net/queue.h9
-rw-r--r--include/net/tap.h1
-rw-r--r--include/net/vhost-user.h1
-rw-r--r--include/net/vhost-vdpa.h21
-rw-r--r--include/net/vhost_net.h23
-rw-r--r--include/qapi/clone-visitor.h8
-rw-r--r--include/qapi/compat-policy.h45
-rw-r--r--include/qapi/error.h295
-rw-r--r--include/qapi/forward-visitor.h27
-rw-r--r--include/qapi/qmp-event.h6
-rw-r--r--include/qapi/qmp/dispatch.h25
-rw-r--r--include/qapi/qmp/json-writer.h35
-rw-r--r--include/qapi/qmp/qbool.h6
-rw-r--r--include/qapi/qmp/qdict.h9
-rw-r--r--include/qapi/qmp/qerror.h56
-rw-r--r--include/qapi/qmp/qjson.h12
-rw-r--r--include/qapi/qmp/qlist.h16
-rw-r--r--include/qapi/qmp/qnull.h4
-rw-r--r--include/qapi/qmp/qnum.h7
-rw-r--r--include/qapi/qmp/qobject.h19
-rw-r--r--include/qapi/qmp/qstring.h17
-rw-r--r--include/qapi/string-output-visitor.h6
-rw-r--r--include/qapi/type-helpers.h22
-rw-r--r--include/qapi/util.h49
-rw-r--r--include/qapi/visitor-impl.h46
-rw-r--r--include/qapi/visitor.h334
-rw-r--r--include/qemu-common.h163
-rw-r--r--include/qemu-io.h1
-rw-r--r--include/qemu-main.h11
-rw-r--r--include/qemu/accel.h (renamed from include/sysemu/accel.h)62
-rw-r--r--include/qemu/async-teardown.h20
-rw-r--r--include/qemu/atomic.h436
-rw-r--r--include/qemu/atomic128.h137
-rw-r--r--include/qemu/base64.h3
-rw-r--r--include/qemu/bitmap.h79
-rw-r--r--include/qemu/bitops.h166
-rw-r--r--include/qemu/bswap.h262
-rw-r--r--include/qemu/buffer.h5
-rw-r--r--include/qemu/cacheflush.h35
-rw-r--r--include/qemu/cacheinfo.h21
-rw-r--r--include/qemu/chardev_open.h16
-rw-r--r--include/qemu/clang-tsa.h114
-rw-r--r--include/qemu/co-shared-resource.h69
-rw-r--r--include/qemu/compiler.h206
-rw-r--r--include/qemu/config-file.h17
-rw-r--r--include/qemu/coroutine-core.h154
-rw-r--r--include/qemu/coroutine-tls.h165
-rw-r--r--include/qemu/coroutine.h269
-rw-r--r--include/qemu/coroutine_int.h5
-rw-r--r--include/qemu/cpu-float.h64
-rw-r--r--include/qemu/cpuid.h45
-rw-r--r--include/qemu/crc-ccitt.h33
-rw-r--r--include/qemu/crc32c.h2
-rw-r--r--include/qemu/ctype.h27
-rw-r--r--include/qemu/cutils.h96
-rw-r--r--include/qemu/datadir.h28
-rw-r--r--include/qemu/dbus.h42
-rw-r--r--include/qemu/defer-call.h16
-rw-r--r--include/qemu/drm.h4
-rw-r--r--include/qemu/envlist.h8
-rw-r--r--include/qemu/error-report.h30
-rw-r--r--include/qemu/event_notifier.h3
-rw-r--r--include/qemu/fifo8.h56
-rw-r--r--include/qemu/filemonitor.h127
-rw-r--r--include/qemu/fprintf-fn.h14
-rw-r--r--include/qemu/guest-random.h68
-rw-r--r--include/qemu/hbitmap.h123
-rw-r--r--include/qemu/help-texts.h13
-rw-r--r--include/qemu/help_option.h11
-rw-r--r--include/qemu/host-utils.h473
-rw-r--r--include/qemu/hw-version.h27
-rw-r--r--include/qemu/id.h2
-rw-r--r--include/qemu/int128.h207
-rw-r--r--include/qemu/interval-tree.h99
-rw-r--r--include/qemu/iov.h95
-rw-r--r--include/qemu/iova-tree.h52
-rw-r--r--include/qemu/jhash.h6
-rw-r--r--include/qemu/job.h362
-rw-r--r--include/qemu/keyval.h15
-rw-r--r--include/qemu/lockable.h155
-rw-r--r--include/qemu/log-for-trace.h2
-rw-r--r--include/qemu/log.h65
-rw-r--r--include/qemu/madvise.h95
-rw-r--r--include/qemu/main-loop.h178
-rw-r--r--include/qemu/memalign.h61
-rw-r--r--include/qemu/memfd.h4
-rw-r--r--include/qemu/mmap-alloc.h60
-rw-r--r--include/qemu/module.h134
-rw-r--r--include/qemu/mprotect.h14
-rw-r--r--include/qemu/notify.h10
-rw-r--r--include/qemu/nvdimm-utils.h6
-rw-r--r--include/qemu/option.h35
-rw-r--r--include/qemu/osdep.h608
-rw-r--r--include/qemu/plugin-event.h26
-rw-r--r--include/qemu/plugin-memory.h36
-rw-r--r--include/qemu/plugin.h333
-rw-r--r--include/qemu/pmem.h2
-rw-r--r--include/qemu/processor.h2
-rw-r--r--include/qemu/progress_meter.h62
-rw-r--r--include/qemu/qdist.h1
-rw-r--r--include/qemu/qemu-plugin.h840
-rw-r--r--include/qemu/qemu-print.h23
-rw-r--r--include/qemu/qemu-progress.h8
-rw-r--r--include/qemu/qsp.h6
-rw-r--r--include/qemu/qtree.h200
-rw-r--r--include/qemu/queue.h102
-rw-r--r--include/qemu/range.h31
-rw-r--r--include/qemu/ratelimit.h28
-rw-r--r--include/qemu/rcu.h79
-rw-r--r--include/qemu/rcu_queue.h129
-rw-r--r--include/qemu/readline.h4
-rw-r--r--include/qemu/reserved-region.h32
-rw-r--r--include/qemu/selfmap.h44
-rw-r--r--include/qemu/seqlock.h12
-rw-r--r--include/qemu/sockets.h49
-rw-r--r--include/qemu/stats64.h38
-rw-r--r--include/qemu/sys_membarrier.h6
-rw-r--r--include/qemu/systemd.h2
-rw-r--r--include/qemu/thread-context.h57
-rw-r--r--include/qemu/thread-posix.h23
-rw-r--r--include/qemu/thread-win32.h8
-rw-r--r--include/qemu/thread.h131
-rw-r--r--include/qemu/throttle.h17
-rw-r--r--include/qemu/timer.h136
-rw-r--r--include/qemu/transactions.h66
-rw-r--r--include/qemu/tsan.h71
-rw-r--r--include/qemu/typedefs.h82
-rw-r--r--include/qemu/units.h73
-rw-r--r--include/qemu/uri.h52
-rw-r--r--include/qemu/userfaultfd.h46
-rw-r--r--include/qemu/uuid.h49
-rw-r--r--include/qemu/vfio-helpers.h4
-rw-r--r--include/qemu/vhost-user-server.h73
-rw-r--r--include/qemu/win_dump_defs.h115
-rw-r--r--include/qemu/xattr.h8
-rw-r--r--include/qemu/xxhash.h121
-rw-r--r--include/qemu/yank.h87
-rw-r--r--include/qom/object.h1307
-rw-r--r--include/qom/object_interfaces.h103
-rw-r--r--include/qom/qom-qobject.h11
-rw-r--r--include/scsi/constants.h8
-rw-r--r--include/scsi/pr-manager.h17
-rw-r--r--include/scsi/utils.h34
-rw-r--r--include/semihosting/common-semi.h39
-rw-r--r--include/semihosting/console.h59
-rw-r--r--include/semihosting/guestfd.h91
-rw-r--r--include/semihosting/semihost.h (renamed from include/exec/semihost.h)25
-rw-r--r--include/semihosting/syscalls.h75
-rw-r--r--include/semihosting/uaccess.h63
-rw-r--r--include/standard-headers/asm-m68k/bootinfo-mac.h120
-rw-r--r--include/standard-headers/asm-m68k/bootinfo-virt.h21
-rw-r--r--include/standard-headers/asm-m68k/bootinfo.h172
-rw-r--r--include/standard-headers/asm-x86/bootparam.h30
-rw-r--r--include/standard-headers/asm-x86/kvm_para.h36
-rw-r--r--include/standard-headers/asm-x86/setup_data.h83
-rw-r--r--include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h667
-rw-r--r--include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h114
-rw-r--r--include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h383
-rw-r--r--include/standard-headers/drm/drm_fourcc.h1023
-rw-r--r--include/standard-headers/linux/const.h36
-rw-r--r--include/standard-headers/linux/ethtool.h571
-rw-r--r--include/standard-headers/linux/fuse.h1185
-rw-r--r--include/standard-headers/linux/input-event-codes.h146
-rw-r--r--include/standard-headers/linux/input.h21
-rw-r--r--include/standard-headers/linux/kernel.h9
-rw-r--r--include/standard-headers/linux/pci_regs.h384
-rw-r--r--include/standard-headers/linux/pvpanic.h9
-rw-r--r--include/standard-headers/linux/udmabuf.h32
-rw-r--r--include/standard-headers/linux/vhost_types.h196
-rw-r--r--include/standard-headers/linux/virtio_9p.h6
-rw-r--r--include/standard-headers/linux/virtio_balloon.h16
-rw-r--r--include/standard-headers/linux/virtio_blk.h194
-rw-r--r--include/standard-headers/linux/virtio_bt.h39
-rw-r--r--include/standard-headers/linux/virtio_config.h47
-rw-r--r--include/standard-headers/linux/virtio_console.h8
-rw-r--r--include/standard-headers/linux/virtio_crypto.h82
-rw-r--r--include/standard-headers/linux/virtio_fs.h22
-rw-r--r--include/standard-headers/linux/virtio_gpio.h72
-rw-r--r--include/standard-headers/linux/virtio_gpu.h149
-rw-r--r--include/standard-headers/linux/virtio_i2c.h47
-rw-r--r--include/standard-headers/linux/virtio_ids.h65
-rw-r--r--include/standard-headers/linux/virtio_iommu.h171
-rw-r--r--include/standard-headers/linux/virtio_mem.h214
-rw-r--r--include/standard-headers/linux/virtio_mmio.h11
-rw-r--r--include/standard-headers/linux/virtio_net.h161
-rw-r--r--include/standard-headers/linux/virtio_pci.h92
-rw-r--r--include/standard-headers/linux/virtio_pcidev.h65
-rw-r--r--include/standard-headers/linux/virtio_pmem.h41
-rw-r--r--include/standard-headers/linux/virtio_ring.h108
-rw-r--r--include/standard-headers/linux/virtio_scmi.h24
-rw-r--r--include/standard-headers/linux/virtio_scsi.h20
-rw-r--r--include/standard-headers/linux/virtio_snd.h488
-rw-r--r--include/standard-headers/linux/virtio_vsock.h10
-rw-r--r--include/standard-headers/rdma/vmw_pvrdma-abi.h297
-rw-r--r--include/sysemu/accel-blocker.h55
-rw-r--r--include/sysemu/accel-ops.h58
-rw-r--r--include/sysemu/arch_init.h23
-rw-r--r--include/sysemu/balloon.h5
-rw-r--r--include/sysemu/block-backend-common.h103
-rw-r--r--include/sysemu/block-backend-global-state.h133
-rw-r--r--include/sysemu/block-backend-io.h230
-rw-r--r--include/sysemu/block-backend.h225
-rw-r--r--include/sysemu/block-ram-registrar.h37
-rw-r--r--include/sysemu/blockdev.h20
-rw-r--r--include/sysemu/bt.h20
-rw-r--r--include/sysemu/cpu-throttle.h68
-rw-r--r--include/sysemu/cpu-timers-internal.h71
-rw-r--r--include/sysemu/cpu-timers.h103
-rw-r--r--include/sysemu/cpus.h56
-rw-r--r--include/sysemu/cryptodev-vhost-user.h5
-rw-r--r--include/sysemu/cryptodev-vhost.h5
-rw-r--r--include/sysemu/cryptodev.h247
-rw-r--r--include/sysemu/device_tree.h39
-rw-r--r--include/sysemu/dirtylimit.h39
-rw-r--r--include/sysemu/dirtyrate.h30
-rw-r--r--include/sysemu/dma.h233
-rw-r--r--include/sysemu/dump-arch.h4
-rw-r--r--include/sysemu/dump.h44
-rw-r--r--include/sysemu/event-loop-base.h40
-rw-r--r--include/sysemu/hax.h56
-rw-r--r--include/sysemu/hostmem.h46
-rw-r--r--include/sysemu/hvf.h124
-rw-r--r--include/sysemu/hvf_int.h70
-rw-r--r--include/sysemu/hw_accel.h62
-rw-r--r--include/sysemu/iommufd.h36
-rw-r--r--include/sysemu/iothread.h24
-rw-r--r--include/sysemu/kvm.h217
-rw-r--r--include/sysemu/kvm_int.h117
-rw-r--r--include/sysemu/kvm_xen.h44
-rw-r--r--include/sysemu/memory_mapping.h6
-rw-r--r--include/sysemu/numa.h104
-rw-r--r--include/sysemu/nvmm.h29
-rw-r--r--include/sysemu/os-posix.h22
-rw-r--r--include/sysemu/os-win32.h132
-rw-r--r--include/sysemu/qtest.h20
-rw-r--r--include/sysemu/replay.h87
-rw-r--r--include/sysemu/reset.h118
-rw-r--r--include/sysemu/rng-random.h3
-rw-r--r--include/sysemu/rng.h14
-rw-r--r--include/sysemu/rtc.h58
-rw-r--r--include/sysemu/runstate-action.h19
-rw-r--r--include/sysemu/runstate.h110
-rw-r--r--include/sysemu/sev.h21
-rw-r--r--include/sysemu/stats.h45
-rw-r--r--include/sysemu/sysemu.h117
-rw-r--r--include/sysemu/tcg.h20
-rw-r--r--include/sysemu/tpm.h45
-rw-r--r--include/sysemu/tpm_backend.h19
-rw-r--r--include/sysemu/tpm_util.h72
-rw-r--r--include/sysemu/vhost-user-backend.h48
-rw-r--r--include/sysemu/watchdog.h13
-rw-r--r--include/sysemu/whpx.h19
-rw-r--r--include/sysemu/xen-mapcache.h5
-rw-r--r--include/sysemu/xen.h52
-rw-r--r--include/tcg/debug-assert.h17
-rw-r--r--include/tcg/debuginfo.h79
-rw-r--r--include/tcg/helper-info.h64
-rw-r--r--include/tcg/insn-start-words.h17
-rw-r--r--include/tcg/oversized-guest.h23
-rw-r--r--include/tcg/perf.h49
-rw-r--r--include/tcg/startup.h58
-rw-r--r--include/tcg/tcg-cond.h133
-rw-r--r--include/tcg/tcg-gvec-desc.h66
-rw-r--r--include/tcg/tcg-ldst.h63
-rw-r--r--include/tcg/tcg-mo.h48
-rw-r--r--include/tcg/tcg-op-common.h561
-rw-r--r--include/tcg/tcg-op-gvec-common.h432
-rw-r--r--include/tcg/tcg-op-gvec.h49
-rw-r--r--include/tcg/tcg-op.h410
-rw-r--r--include/tcg/tcg-opc.h318
-rw-r--r--include/tcg/tcg-temp-internal.h45
-rw-r--r--include/tcg/tcg.h1075
-rw-r--r--include/trace-tcg.h6
-rw-r--r--include/ui/clipboard.h277
-rw-r--r--include/ui/console.h311
-rw-r--r--include/ui/dbus-display.h17
-rw-r--r--include/ui/dbus-module.h11
-rw-r--r--include/ui/egl-context.h7
-rw-r--r--include/ui/egl-helpers.h39
-rw-r--r--include/ui/gtk.h104
-rw-r--r--include/ui/input.h13
-rw-r--r--include/ui/kbd-state.h113
-rw-r--r--include/ui/pixman-minimal.h239
-rw-r--r--include/ui/qemu-pixman.h45
-rw-r--r--include/ui/qemu-spice-module.h44
-rw-r--r--include/ui/qemu-spice.h51
-rw-r--r--include/ui/rect.h57
-rw-r--r--include/ui/sdl2.h28
-rw-r--r--include/ui/spice-display.h14
-rw-r--r--include/ui/surface.h95
-rw-r--r--include/ui/win32-kbd-hook.h14
-rw-r--r--include/user/safe-syscall.h140
-rw-r--r--include/user/syscall-trace.h43
1242 files changed, 94318 insertions, 23457 deletions
diff --git a/include/authz/base.h b/include/authz/base.h
new file mode 100644
index 0000000000..b53e4e4507
--- /dev/null
+++ b/include/authz/base.h
@@ -0,0 +1,101 @@
+/*
+ * QEMU authorization framework base class
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QAUTHZ_BASE_H
+#define QAUTHZ_BASE_H
+
+#include "qapi/error.h"
+#include "qom/object.h"
+
+
+#define TYPE_QAUTHZ "authz"
+
+OBJECT_DECLARE_TYPE(QAuthZ, QAuthZClass,
+                    QAUTHZ)
+
+
+/**
+ * QAuthZ:
+ *
+ * The QAuthZ class defines an API contract to be used
+ * for providing an authorization driver for services
+ * with user identities.
+ */
+
+struct QAuthZ {
+    Object parent_obj;
+};
+
+
+struct QAuthZClass {
+    ObjectClass parent_class;
+
+    bool (*is_allowed)(QAuthZ *authz,
+                       const char *identity,
+                       Error **errp);
+};
+
+
+/**
+ * qauthz_is_allowed:
+ * @authz: the authorization object
+ * @identity: the user identity to authorize
+ * @errp: pointer to a NULL initialized error object
+ *
+ * Check if a user @identity is authorized. If an error
+ * occurs this method will return false to indicate
+ * denial, as well as setting @errp to contain the details.
+ * Callers are recommended to treat the denial and error
+ * scenarios identically. Specifically the error info in
+ * @errp should never be fed back to the user being
+ * authorized, it is merely for benefit of administrator
+ * debugging.
+ *
+ * Returns: true if @identity is authorized, false if denied or if
+ * an error occurred.
+ */
+bool qauthz_is_allowed(QAuthZ *authz,
+                       const char *identity,
+                       Error **errp);
+
+
+/**
+ * qauthz_is_allowed_by_id:
+ * @authzid: ID of the authorization object
+ * @identity: the user identity to authorize
+ * @errp: pointer to a NULL initialized error object
+ *
+ * Check if a user @identity is authorized. If an error
+ * occurs this method will return false to indicate
+ * denial, as well as setting @errp to contain the details.
+ * Callers are recommended to treat the denial and error
+ * scenarios identically. Specifically the error info in
+ * @errp should never be fed back to the user being
+ * authorized, it is merely for benefit of administrator
+ * debugging.
+ *
+ * Returns: true if @identity is authorized, false if denied or if
+ * an error occurred.
+ */
+bool qauthz_is_allowed_by_id(const char *authzid,
+                             const char *identity,
+                             Error **errp);
+
+#endif /* QAUTHZ_BASE_H */
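The is_allowed contract documented above is easiest to see from the caller's side. The following sketch is illustrative only and is not part of the patch: the example_check_client*() helpers are invented names, and it assumes QEMU's usual qemu/osdep.h plus the error_report_err() helper from qemu/error-report.h.

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "authz/base.h"

/* Check one identity against an authorization object. */
static bool example_check_client(QAuthZ *authz, const char *identity)
{
    Error *err = NULL;

    /* Per the contract above, denial and error are handled identically. */
    if (!qauthz_is_allowed(authz, identity, &err)) {
        if (err) {
            /* Log for the administrator only; never echo this to the user. */
            error_report_err(err);
        }
        return false;
    }
    return true;
}

/* The same check, resolving the authorization object by its QOM id. */
static bool example_check_client_by_id(const char *authzid,
                                       const char *identity)
{
    return qauthz_is_allowed_by_id(authzid, identity, NULL);
}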
diff --git a/include/authz/list.h b/include/authz/list.h
new file mode 100644
index 0000000000..7ef4ad4b4e
--- /dev/null
+++ b/include/authz/list.h
@@ -0,0 +1,94 @@
+/*
+ * QEMU list authorization driver
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QAUTHZ_LIST_H
+#define QAUTHZ_LIST_H
+
+#include "authz/base.h"
+#include "qapi/qapi-types-authz.h"
+#include "qom/object.h"
+
+#define TYPE_QAUTHZ_LIST "authz-list"
+
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZList,
+                           QAUTHZ_LIST)
+
+
+
+/**
+ * QAuthZList:
+ *
+ * This authorization driver provides a list mechanism
+ * for granting access by matching user names against a
+ * list of globs. Each match rule has an associated policy
+ * and a catch-all policy applies if no rule matches.
+ *
+ * To create an instance of this class via QMP:
+ *
+ *   {
+ *     "execute": "object-add",
+ *     "arguments": {
+ *       "qom-type": "authz-list",
+ *       "id": "authz0",
+ *       "props": {
+ *         "rules": [
+ *           { "match": "fred", "policy": "allow", "format": "exact" },
+ *           { "match": "bob", "policy": "allow", "format": "exact" },
+ *           { "match": "danb", "policy": "deny", "format": "exact" },
+ *           { "match": "dan*", "policy": "allow", "format": "glob" }
+ *         ],
+ *         "policy": "deny"
+ *       }
+ *     }
+ *   }
+ *
+ */
+struct QAuthZList {
+    QAuthZ parent_obj;
+
+    QAuthZListPolicy policy;
+    QAuthZListRuleList *rules;
+};
+
+
+
+
+QAuthZList *qauthz_list_new(const char *id,
+ QAuthZListPolicy policy,
+ Error **errp);
+
+ssize_t qauthz_list_append_rule(QAuthZList *auth,
+ const char *match,
+ QAuthZListPolicy policy,
+ QAuthZListFormat format,
+ Error **errp);
+
+ssize_t qauthz_list_insert_rule(QAuthZList *auth,
+ const char *match,
+ QAuthZListPolicy policy,
+ QAuthZListFormat format,
+ size_t index,
+ Error **errp);
+
+ssize_t qauthz_list_delete_rule(QAuthZList *auth,
+ const char *match);
+
+
+#endif /* QAUTHZ_LIST_H */
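
For comparison with the QMP example above, a minimal sketch of building the same
kind of rule set programmatically with the helpers declared in this header; it
assumes the QAPI-generated constant names (QAUTHZ_LIST_POLICY_*,
QAUTHZ_LIST_FORMAT_*) and ignores the per-rule return values for brevity.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "authz/list.h"

    static QAuthZList *build_acl(Error **errp)
    {
        /* Catch-all policy: deny anything that no rule matches. */
        QAuthZList *authz = qauthz_list_new("authz0", QAUTHZ_LIST_POLICY_DENY,
                                            errp);
        if (!authz) {
            return NULL;
        }

        /* Mirroring the QMP example: the exact "danb" deny rule is added
         * before the "dan*" glob allow rule. */
        qauthz_list_append_rule(authz, "fred", QAUTHZ_LIST_POLICY_ALLOW,
                                QAUTHZ_LIST_FORMAT_EXACT, errp);
        qauthz_list_append_rule(authz, "danb", QAUTHZ_LIST_POLICY_DENY,
                                QAUTHZ_LIST_FORMAT_EXACT, errp);
        qauthz_list_append_rule(authz, "dan*", QAUTHZ_LIST_POLICY_ALLOW,
                                QAUTHZ_LIST_FORMAT_GLOB, errp);
        return authz;
    }
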
diff --git a/include/authz/listfile.h b/include/authz/listfile.h
new file mode 100644
index 0000000000..0b7fe72198
--- /dev/null
+++ b/include/authz/listfile.h
@@ -0,0 +1,97 @@
+/*
+ * QEMU list file authorization driver
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QAUTHZ_LISTFILE_H
+#define QAUTHZ_LISTFILE_H
+
+#include "authz/list.h"
+#include "qemu/filemonitor.h"
+#include "qom/object.h"
+
+#define TYPE_QAUTHZ_LIST_FILE "authz-list-file"
+
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZListFile,
+ QAUTHZ_LIST_FILE)
+
+
+
+/**
+ * QAuthZListFile:
+ *
+ * This authorization driver provides a file-based mechanism
+ * for granting access by matching user names against a list
+ * of match rules loaded from a file. Each rule has an
+ * associated policy, and a catch-all policy applies if no
+ * rule matches.
+ *
+ * To create an instance of this class via QMP:
+ *
+ * {
+ * "execute": "object-add",
+ * "arguments": {
+ * "qom-type": "authz-list-file",
+ * "id": "authz0",
+ * "props": {
+ * "filename": "/etc/qemu/myvm-vnc.acl",
+ * "refresh": true
+ * }
+ * }
+ * }
+ *
+ * If 'refresh' is enabled ('true' in QMP, 'on' on the command
+ * line), inotify is used to monitor the file for changes and
+ * automatically reload the rules.
+ *
+ * The myvm-vnc.acl file should contain the parameters for
+ * the QAuthZList object in JSON format:
+ *
+ * {
+ * "rules": [
+ * { "match": "fred", "policy": "allow", "format": "exact" },
+ * { "match": "bob", "policy": "allow", "format": "exact" },
+ * { "match": "danb", "policy": "deny", "format": "exact" },
+ * { "match": "dan*", "policy": "allow", "format": "glob" }
+ * ],
+ * "policy": "deny"
+ * }
+ *
+ * The object can be created on the command line using
+ *
+ * -object authz-list-file,id=authz0,\
+ * filename=/etc/qemu/myvm-vnc.acl,refresh=on
+ *
+ */
+struct QAuthZListFile {
+ QAuthZ parent_obj;
+
+ QAuthZ *list;
+ char *filename;
+ bool refresh;
+ QFileMonitor *file_monitor;
+ int64_t file_watch;
+};
+
+
+
+
+QAuthZListFile *qauthz_list_file_new(const char *id,
+ const char *filename,
+ bool refresh,
+ Error **errp);
+
+#endif /* QAUTHZ_LISTFILE_H */
diff --git a/include/authz/pamacct.h b/include/authz/pamacct.h
new file mode 100644
index 0000000000..592edb2bf0
--- /dev/null
+++ b/include/authz/pamacct.h
@@ -0,0 +1,88 @@
+/*
+ * QEMU PAM authorization driver
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QAUTHZ_PAMACCT_H
+#define QAUTHZ_PAMACCT_H
+
+#include "authz/base.h"
+#include "qom/object.h"
+
+
+#define TYPE_QAUTHZ_PAM "authz-pam"
+
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZPAM,
+ QAUTHZ_PAM)
+
+
+
+/**
+ * QAuthZPAM:
+ *
+ * This authorization driver provides a PAM-based mechanism
+ * for granting access by validating the user name via the
+ * host's PAM framework, delegating the policy decision to
+ * the configured PAM service.
+ *
+ * To create an instance of this class via QMP:
+ *
+ * {
+ * "execute": "object-add",
+ * "arguments": {
+ * "qom-type": "authz-pam",
+ * "id": "authz0",
+ * "parameters": {
+ * "service": "qemu-vnc-tls"
+ * }
+ * }
+ * }
+ *
+ * The driver only uses the PAM "account" verification
+ * subsystem. The above config would require a config
+ * file /etc/pam.d/qemu-vnc-tls. For a simple file
+ * lookup it would contain
+ *
+ * account requisite pam_listfile.so item=user sense=allow \
+ * file=/etc/qemu/vnc.allow
+ *
+ * The external file would then contain a list of usernames.
+ * If an x509 certificate were being used as the username, a
+ * suitable entry would match the distinguished name:
+ *
+ * CN=laptop.berrange.com,O=Berrange Home,L=London,ST=London,C=GB
+ *
+ * On the command line it can be created using
+ *
+ * -object authz-pam,id=authz0,service=qemu-vnc-tls
+ *
+ */
+struct QAuthZPAM {
+ QAuthZ parent_obj;
+
+ char *service;
+};
+
+
+
+
+QAuthZPAM *qauthz_pam_new(const char *id,
+ const char *service,
+ Error **errp);
+
+#endif /* QAUTHZ_PAMACCT_H */
diff --git a/include/authz/simple.h b/include/authz/simple.h
new file mode 100644
index 0000000000..c46a5ac5a1
--- /dev/null
+++ b/include/authz/simple.h
@@ -0,0 +1,72 @@
+/*
+ * QEMU simple authorization driver
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QAUTHZ_SIMPLE_H
+#define QAUTHZ_SIMPLE_H
+
+#include "authz/base.h"
+#include "qom/object.h"
+
+#define TYPE_QAUTHZ_SIMPLE "authz-simple"
+
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZSimple,
+ QAUTHZ_SIMPLE)
+
+
+
+/**
+ * QAuthZSimple:
+ *
+ * This authorization driver provides a simple mechanism
+ * for granting access based on an exact match of the username.
+ *
+ * To create an instance of this class via QMP:
+ *
+ * {
+ * "execute": "object-add",
+ * "arguments": {
+ * "qom-type": "authz-simple",
+ * "id": "authz0",
+ * "props": {
+ * "identity": "fred"
+ * }
+ * }
+ * }
+ *
+ * Or via the command line
+ *
+ * -object authz-simple,id=authz0,identity=fred
+ *
+ */
+struct QAuthZSimple {
+ QAuthZ parent_obj;
+
+ char *identity;
+};
+
+
+
+
+QAuthZSimple *qauthz_simple_new(const char *id,
+ const char *identity,
+ Error **errp);
+
+
+#endif /* QAUTHZ_SIMPLE_H */
diff --git a/include/block/accounting.h b/include/block/accounting.h
index d1f67b10dd..a59e39f49d 100644
--- a/include/block/accounting.h
+++ b/include/block/accounting.h
@@ -27,15 +27,18 @@
#include "qemu/timed-average.h"
#include "qemu/thread.h"
-#include "qapi/qapi-builtin-types.h"
+#include "qapi/qapi-types-common.h"
typedef struct BlockAcctTimedStats BlockAcctTimedStats;
typedef struct BlockAcctStats BlockAcctStats;
enum BlockAcctType {
+ BLOCK_ACCT_NONE = 0,
BLOCK_ACCT_READ,
BLOCK_ACCT_WRITE,
BLOCK_ACCT_FLUSH,
+ BLOCK_ACCT_ZONE_APPEND,
+ BLOCK_ACCT_UNMAP,
BLOCK_MAX_IOTYPE,
};
@@ -98,8 +101,8 @@ typedef struct BlockAcctCookie {
} BlockAcctCookie;
void block_acct_init(BlockAcctStats *stats);
-void block_acct_setup(BlockAcctStats *stats, bool account_invalid,
- bool account_failed);
+void block_acct_setup(BlockAcctStats *stats, enum OnOffAuto account_invalid,
+ enum OnOffAuto account_failed);
void block_acct_cleanup(BlockAcctStats *stats);
void block_acct_add_interval(BlockAcctStats *stats, unsigned interval_length);
BlockAcctTimedStats *block_acct_interval_next(BlockAcctStats *stats,
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index afd0ff7eb8..cf5e8bde1c 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -26,6 +26,7 @@
#define QEMU_AIO_WAIT_H
#include "block/aio.h"
+#include "qemu/main-loop.h"
/**
* AioWait:
@@ -58,7 +59,7 @@ typedef struct {
extern AioWait global_aio_wait;
/**
- * AIO_WAIT_WHILE:
+ * AIO_WAIT_WHILE_INTERNAL:
* @ctx: the aio context, or NULL if multiple aio contexts (for which the
* caller does not hold a lock) are involved in the polling condition.
* @cond: wait while this conditional expression is true
@@ -74,12 +75,14 @@ extern AioWait global_aio_wait;
* wait on conditions between two IOThreads since that could lead to deadlock,
* go via the main loop instead.
*/
-#define AIO_WAIT_WHILE(ctx, cond) ({ \
+#define AIO_WAIT_WHILE_INTERNAL(ctx, cond) ({ \
bool waited_ = false; \
AioWait *wait_ = &global_aio_wait; \
AioContext *ctx_ = (ctx); \
/* Increment wait_->num_waiters before evaluating cond. */ \
- atomic_inc(&wait_->num_waiters); \
+ qatomic_inc(&wait_->num_waiters); \
+ /* Paired with smp_mb in aio_wait_kick(). */ \
+ smp_mb__after_rmw(); \
if (ctx_ && in_aio_context_home_thread(ctx_)) { \
while ((cond)) { \
aio_poll(ctx_, true); \
@@ -89,19 +92,20 @@ extern AioWait global_aio_wait;
assert(qemu_get_current_aio_context() == \
qemu_get_aio_context()); \
while ((cond)) { \
- if (ctx_) { \
- aio_context_release(ctx_); \
- } \
aio_poll(qemu_get_aio_context(), true); \
- if (ctx_) { \
- aio_context_acquire(ctx_); \
- } \
waited_ = true; \
} \
} \
- atomic_dec(&wait_->num_waiters); \
+ qatomic_dec(&wait_->num_waiters); \
waited_; })
+#define AIO_WAIT_WHILE(ctx, cond) \
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond)
+
+/* TODO replace this with AIO_WAIT_WHILE() in a future patch */
+#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond)
+
/**
* aio_wait_kick:
* Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
@@ -119,9 +123,30 @@ void aio_wait_kick(void);
*
* Run a BH in @ctx and wait for it to complete.
*
- * Must be called from the main loop thread with @ctx acquired exactly once.
+ * Must be called from the main loop thread without @ctx acquired.
* Note that main loop event processing may occur.
*/
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
-#endif /* QEMU_AIO_WAIT */
+/**
+ * in_aio_context_home_thread:
+ * @ctx: the aio context
+ *
+ * Return whether we are running in the thread that normally runs @ctx. Note
+ * that acquiring/releasing ctx does not affect the outcome, each AioContext
+ * still only has one home thread that is responsible for running it.
+ */
+static inline bool in_aio_context_home_thread(AioContext *ctx)
+{
+ if (ctx == qemu_get_current_aio_context()) {
+ return true;
+ }
+
+ if (ctx == qemu_get_aio_context()) {
+ return bql_locked();
+ } else {
+ return false;
+ }
+}
+
+#endif /* QEMU_AIO_WAIT_H */
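
To make the intended usage pattern concrete, a minimal sketch (not part of the
header) of a caller on the main loop thread draining hypothetical in-flight work;
it assumes a completion callback elsewhere decrements the counter and calls
aio_wait_kick(), and that qatomic_read() from "qemu/atomic.h" is available.

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "block/aio-wait.h"

    /* Decremented by a (hypothetical) completion callback, which must also
     * call aio_wait_kick() so any waiter re-evaluates the condition. */
    static unsigned in_flight;

    static void wait_for_requests(AioContext *ctx)
    {
        /* Poll the event loop until every in-flight request has completed. */
        AIO_WAIT_WHILE(ctx, qatomic_read(&in_flight) > 0);
    }
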
diff --git a/include/block/aio.h b/include/block/aio.h
index 0ca25dfec6..8378553eb9 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -14,18 +14,23 @@
#ifndef QEMU_AIO_H
#define QEMU_AIO_H
-#include "qemu-common.h"
+#ifdef CONFIG_LINUX_IO_URING
+#include <liburing.h>
+#endif
+#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
+#include "block/graph-lock.h"
+#include "hw/qdev-core.h"
+
typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);
typedef struct AIOCBInfo {
void (*cancel_async)(BlockAIOCB *acb);
- AioContext *(*get_aio_context)(BlockAIOCB *acb);
size_t aiocb_size;
} AIOCBInfo;
@@ -43,13 +48,79 @@ void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
typedef struct AioHandler AioHandler;
+typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);
-struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
+typedef struct LuringState LuringState;
+
+/* Is polling disabled? */
+bool aio_poll_disabled(AioContext *ctx);
+
+/* Callbacks for file descriptor monitoring implementations */
+typedef struct {
+ /*
+ * update:
+ * @ctx: the AioContext
+ * @old_node: the existing handler or NULL if this file descriptor is being
+ * monitored for the first time
+ * @new_node: the new handler or NULL if this file descriptor is being
+ * removed
+ *
+ * Add/remove/modify a monitored file descriptor.
+ *
+ * Called with ctx->list_lock acquired.
+ */
+ void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);
+
+ /*
+ * wait:
+ * @ctx: the AioContext
+ * @ready_list: list for handlers that become ready
+ * @timeout: maximum duration to wait, in nanoseconds
+ *
+ * Wait for file descriptors to become ready and place them on ready_list.
+ *
+ * Called with ctx->list_lock incremented but not locked.
+ *
+ * Returns: number of ready file descriptors.
+ */
+ int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);
+
+ /*
+ * need_wait:
+ * @ctx: the AioContext
+ *
+ * Tell aio_poll() when to stop userspace polling early because ->wait()
+ * has fds ready.
+ *
+ * File descriptor monitoring implementations that cannot poll fd readiness
+ * from userspace should use aio_poll_disabled() here. This ensures that
+ * file descriptors are not starved by handlers that frequently make
+ * progress via userspace polling.
+ *
+ * Returns: true if ->wait() should be called, false otherwise.
+ */
+ bool (*need_wait)(AioContext *ctx);
+} FDMonOps;
+
+/*
+ * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
+ * scheduled BHs are not processed until the next aio_bh_poll() call. All
+ * active aio_bh_poll() calls chain their slices together in a list, so that
+ * nested aio_bh_poll() calls process all scheduled bottom halves.
+ */
+typedef QSLIST_HEAD(, QEMUBH) BHList;
+typedef struct BHListSlice BHListSlice;
+struct BHListSlice {
+ BHList bh_list;
+ QSIMPLEQ_ENTRY(BHListSlice) next;
+};
+
+typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;
struct AioContext {
GSource source;
@@ -57,16 +128,31 @@ struct AioContext {
/* Used by AioContext users to protect from multi-threaded access. */
QemuRecMutex lock;
+ /*
+ * Keep track of readers and writers of the block layer graph.
+ * This is essential to avoid performing additions and removal
+ * of nodes and edges from block graph while some
+ * other thread is traversing it.
+ */
+ BdrvGraphRWlock *bdrv_graph;
+
/* The list of registered AIO handlers. Protected by ctx->list_lock. */
- QLIST_HEAD(, AioHandler) aio_handlers;
+ AioHandlerList aio_handlers;
+
+ /* The list of AIO handlers to be deleted. Protected by ctx->list_lock. */
+ AioHandlerList deleted_aio_handlers;
/* Used to avoid unnecessary event_notifier_set calls in aio_notify;
- * accessed with atomic primitives. If this field is 0, everything
- * (file descriptors, bottom halves, timers) will be re-evaluated
- * before the next blocking poll(), thus the event_notifier_set call
- * can be skipped. If it is non-zero, you may need to wake up a
- * concurrent aio_poll or the glib main event loop, making
- * event_notifier_set necessary.
+ * only written from the AioContext home thread, or under the BQL in
+ * the case of the main AioContext. However, it is read from any
+ * thread so it is still accessed with atomic primitives.
+ *
+ * If this field is 0, everything (file descriptors, bottom halves,
+ * timers) will be re-evaluated before the next blocking poll() or
+ * io_uring wait; therefore, the event_notifier_set call can be
+ * skipped. If it is non-zero, you may need to wake up a concurrent
+ * aio_poll or the glib main event loop, making event_notifier_set
+ * necessary.
*
* Bit 0 is reserved for GSource usage of the AioContext, and is 1
* between a call to aio_ctx_prepare and the next call to aio_ctx_check.
@@ -91,8 +177,11 @@ struct AioContext {
*/
QemuLockCnt list_lock;
- /* Anchor of the list of Bottom Halves belonging to the context */
- struct QEMUBH *first_bh;
+ /* Bottom Halves pending aio_bh_poll() processing */
+ BHList bh_list;
+
+ /* Chained BH list slices for each nested aio_bh_poll() call */
+ QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;
/* Used by aio_notify.
*
@@ -112,25 +201,29 @@ struct AioContext {
QSLIST_HEAD(, Coroutine) scheduled_coroutines;
QEMUBH *co_schedule_bh;
+ int thread_pool_min;
+ int thread_pool_max;
/* Thread pool for performing work and receiving completion callbacks.
* Has its own locking.
*/
struct ThreadPool *thread_pool;
#ifdef CONFIG_LINUX_AIO
- /* State for native Linux AIO. Uses aio_context_acquire/release for
- * locking.
- */
struct LinuxAioState *linux_aio;
#endif
+#ifdef CONFIG_LINUX_IO_URING
+ LuringState *linux_io_uring;
+
+ /* State for file descriptor monitoring using Linux io_uring */
+ struct io_uring fdmon_io_uring;
+ AioHandlerSList submit_list;
+#endif
/* TimerLists for calling timers - one per clock type. Has its own
* locking.
*/
QEMUTimerListGroup tlg;
- int external_disable_cnt;
-
/* Number of AioHandlers without .io_poll() */
int poll_disable_cnt;
@@ -140,13 +233,24 @@ struct AioContext {
int64_t poll_grow; /* polling time growth factor */
int64_t poll_shrink; /* polling time shrink factor */
+ /* AIO engine parameters */
+ int64_t aio_max_batch; /* maximum number of requests in a batch */
+
+ /*
+ * List of handlers participating in userspace polling. Protected by
+ * ctx->list_lock. Iterated and modified mostly by the event loop thread
+ * from aio_poll() with ctx->list_lock incremented. aio_set_fd_handler()
+ * only touches the list to delete nodes if ctx->list_lock's count is zero.
+ */
+ AioHandlerList poll_aio_handlers;
+
/* Are we in polling mode or monitoring file descriptors? */
bool poll_started;
/* epoll(7) state used when built with CONFIG_EPOLL */
int epollfd;
- bool epoll_enabled;
- bool epoll_available;
+
+ const FDMonOps *fdmon_ops;
};
/**
@@ -174,37 +278,57 @@ void aio_context_ref(AioContext *ctx);
*/
void aio_context_unref(AioContext *ctx);
-/* Take ownership of the AioContext. If the AioContext will be shared between
- * threads, and a thread does not want to be interrupted, it will have to
- * take ownership around calls to aio_poll(). Otherwise, aio_poll()
- * automatically takes care of calling aio_context_acquire and
- * aio_context_release.
- *
- * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A
- * thread still has to call those to avoid being interrupted by the guest.
+/**
+ * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
+ * run only once and as soon as possible.
*
- * Bottom halves, timers and callbacks can be created or removed without
- * acquiring the AioContext.
+ * @name: A human-readable identifier for debugging purposes.
*/
-void aio_context_acquire(AioContext *ctx);
-
-/* Relinquish ownership of the AioContext. */
-void aio_context_release(AioContext *ctx);
+void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
+ const char *name);
/**
* aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
* only once and as soon as possible.
+ *
+ * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
+ * name string.
*/
-void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
+ aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
/**
- * aio_bh_new: Allocate a new bottom half structure.
+ * aio_bh_new_full: Allocate a new bottom half structure.
*
* Bottom halves are lightweight callbacks whose invocation is guaranteed
* to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
* is opaque and must be allocated prior to its use.
+ *
+ * @name: A human-readable identifier for debugging purposes.
+ * @reentrancy_guard: A guard set when entering a cb to prevent
+ * device-reentrancy issues
+ */
+QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
+ const char *name, MemReentrancyGuard *reentrancy_guard);
+
+/**
+ * aio_bh_new: Allocate a new bottom half structure
+ *
+ * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
+ * string.
+ */
+#define aio_bh_new(ctx, cb, opaque) \
+ aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)
+
+/**
+ * aio_bh_new_guarded: Allocate a new bottom half structure with a
+ * reentrancy_guard
+ *
+ * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
+ * string.
*/
-QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
+ aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)
/**
* aio_notify: Force processing of pending events.
@@ -326,7 +450,7 @@ void aio_dispatch(AioContext *ctx);
* or more AIO events have completed, to ensure something has moved
* before returning.
*/
-bool aio_poll(AioContext *ctx, bool blocking);
+bool no_coroutine_fn aio_poll(AioContext *ctx, bool blocking);
/* Register a file descriptor and associated callbacks. Behaves very similarly
* to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will
@@ -337,20 +461,12 @@ bool aio_poll(AioContext *ctx, bool blocking);
*/
void aio_set_fd_handler(AioContext *ctx,
int fd,
- bool is_external,
IOHandler *io_read,
IOHandler *io_write,
AioPollFn *io_poll,
+ IOHandler *io_poll_ready,
void *opaque);
-/* Set polling begin/end callbacks for a file descriptor that has already been
- * registered with aio_set_fd_handler. Do nothing if the file descriptor is
- * not registered.
- */
-void aio_set_fd_poll(AioContext *ctx, int fd,
- IOHandler *io_poll_begin,
- IOHandler *io_poll_end);
-
/* Register an event notifier and associated callbacks. Behaves very similarly
* to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
* will be invoked when using aio_poll().
@@ -360,13 +476,18 @@ void aio_set_fd_poll(AioContext *ctx, int fd,
*/
void aio_set_event_notifier(AioContext *ctx,
EventNotifier *notifier,
- bool is_external,
EventNotifierHandler *io_read,
- AioPollFn *io_poll);
+ AioPollFn *io_poll,
+ EventNotifierHandler *io_poll_ready);
-/* Set polling begin/end callbacks for an event notifier that has already been
+/*
+ * Set polling begin/end callbacks for an event notifier that has already been
* registered with aio_set_event_notifier. Do nothing if the event notifier is
* not registered.
+ *
+ * Note that if the io_poll_end() callback (or the entire notifier) is removed
+ * during polling, it will not be called, so an io_poll_begin() is not
+ * necessarily always followed by an io_poll_end().
*/
void aio_set_event_notifier_poll(AioContext *ctx,
EventNotifier *notifier,
@@ -387,6 +508,11 @@ struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);
/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);
+/* Setup the LuringState bound to this AioContext */
+LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);
+
+/* Return the LuringState bound to this AioContext */
+LuringState *aio_get_linux_io_uring(AioContext *ctx);
/**
* aio_timer_new_with_attrs:
* @ctx: the aio context
@@ -484,59 +610,6 @@ static inline void aio_timer_init(AioContext *ctx,
int64_t aio_compute_timeout(AioContext *ctx);
/**
- * aio_disable_external:
- * @ctx: the aio context
- *
- * Disable the further processing of external clients.
- */
-static inline void aio_disable_external(AioContext *ctx)
-{
- atomic_inc(&ctx->external_disable_cnt);
-}
-
-/**
- * aio_enable_external:
- * @ctx: the aio context
- *
- * Enable the processing of external clients.
- */
-static inline void aio_enable_external(AioContext *ctx)
-{
- int old;
-
- old = atomic_fetch_dec(&ctx->external_disable_cnt);
- assert(old > 0);
- if (old == 1) {
- /* Kick event loop so it re-arms file descriptors */
- aio_notify(ctx);
- }
-}
-
-/**
- * aio_external_disabled:
- * @ctx: the aio context
- *
- * Return true if the external clients are disabled.
- */
-static inline bool aio_external_disabled(AioContext *ctx)
-{
- return atomic_read(&ctx->external_disable_cnt);
-}
-
-/**
- * aio_node_check:
- * @ctx: the aio context
- * @is_external: Whether or not the checked node is an external event source.
- *
- * Check if the node's is_external flag is okay to be polled by the ctx at this
- * moment. True means green light.
- */
-static inline bool aio_node_check(AioContext *ctx, bool is_external)
-{
- return !is_external || !atomic_read(&ctx->external_disable_cnt);
-}
-
-/**
* aio_co_schedule:
* @ctx: the aio context
* @co: the coroutine
@@ -548,7 +621,16 @@ static inline bool aio_node_check(AioContext *ctx, bool is_external)
* is the context in which the coroutine is running (i.e. the value of
* qemu_get_current_aio_context() from the coroutine itself).
*/
-void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
+void aio_co_schedule(AioContext *ctx, Coroutine *co);
+
+/**
+ * aio_co_reschedule_self:
+ * @new_ctx: the new context
+ *
+ * Move the currently running coroutine to new_ctx. If the coroutine is already
+ * running in new_ctx, do nothing.
+ */
+void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);
/**
* aio_co_wake:
@@ -562,7 +644,7 @@ void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
* context. The coroutine must not be entered by anyone else while
* aio_co_wake() is active.
*/
-void aio_co_wake(struct Coroutine *co);
+void aio_co_wake(Coroutine *co);
/**
* aio_co_enter:
@@ -571,28 +653,18 @@ void aio_co_wake(struct Coroutine *co);
*
* Enter a coroutine in the specified AioContext.
*/
-void aio_co_enter(AioContext *ctx, struct Coroutine *co);
+void aio_co_enter(AioContext *ctx, Coroutine *co);
/**
* Return the AioContext whose event loop runs in the current thread.
*
* If called from an IOThread this will be the IOThread's AioContext. If
- * called from another thread it will be the main loop AioContext.
+ * called from the main thread or with the "big QEMU lock" taken it
+ * will be the main loop AioContext.
*/
AioContext *qemu_get_current_aio_context(void);
-/**
- * in_aio_context_home_thread:
- * @ctx: the aio context
- *
- * Return whether we are running in the thread that normally runs @ctx. Note
- * that acquiring/releasing ctx does not affect the outcome, each AioContext
- * still only has one home thread that is responsible for running it.
- */
-static inline bool in_aio_context_home_thread(AioContext *ctx)
-{
- return ctx == qemu_get_current_aio_context();
-}
+void qemu_set_current_aio_context(AioContext *ctx);
/**
* aio_context_setup:
@@ -610,6 +682,9 @@ void aio_context_setup(AioContext *ctx);
*/
void aio_context_destroy(AioContext *ctx);
+/* Used internally, do not call outside AioContext code */
+void aio_context_use_g_source(AioContext *ctx);
+
/**
* aio_context_set_poll_params:
* @ctx: the aio context
@@ -623,4 +698,20 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
int64_t grow, int64_t shrink,
Error **errp);
+/**
+ * aio_context_set_aio_params:
+ * @ctx: the aio context
+ * @max_batch: maximum number of requests in a batch; 0 means that the
+ * engine will use its default
+ */
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);
+
+/**
+ * aio_context_set_thread_pool_params:
+ * @ctx: the aio context
+ * @min: min number of threads to have readily available in the thread pool
+ * @max: max number of threads the thread pool can contain
+ */
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
+ int64_t max, Error **errp);
#endif
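
Since the aio_set_fd_handler() prototype above drops the is_external flag and
gains an io_poll_ready callback, a minimal sketch of registering and removing a
plain read handler with the new signature may help; handler and state names are
hypothetical.

    #include "qemu/osdep.h"
    #include "block/aio.h"

    static void my_fd_read(void *opaque)
    {
        /* consume data from the fd associated with @opaque */
    }

    static void watch_fd(AioContext *ctx, int fd, void *state)
    {
        /* Read handler only; no write, poll or poll-ready callbacks. */
        aio_set_fd_handler(ctx, fd, my_fd_read, NULL, NULL, NULL, state);
    }

    static void unwatch_fd(AioContext *ctx, int fd)
    {
        /* Passing all-NULL callbacks unregisters the file descriptor. */
        aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL, NULL);
    }
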
diff --git a/include/block/aio_task.h b/include/block/aio_task.h
new file mode 100644
index 0000000000..18a9c41f4e
--- /dev/null
+++ b/include/block/aio_task.h
@@ -0,0 +1,52 @@
+/*
+ * Aio tasks loops
+ *
+ * Copyright (c) 2019 Virtuozzo International GmbH.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef BLOCK_AIO_TASK_H
+#define BLOCK_AIO_TASK_H
+
+typedef struct AioTaskPool AioTaskPool;
+typedef struct AioTask AioTask;
+typedef int coroutine_fn (*AioTaskFunc)(AioTask *task);
+struct AioTask {
+ AioTaskPool *pool;
+ AioTaskFunc func;
+ int ret;
+};
+
+AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks);
+void aio_task_pool_free(AioTaskPool *);
+
+/* error code of failed task or 0 if all is OK */
+int aio_task_pool_status(AioTaskPool *pool);
+
+bool aio_task_pool_empty(AioTaskPool *pool);
+
+/* User provides filled @task, however task->pool will be set automatically */
+void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task);
+
+void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool);
+void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool);
+void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool);
+
+#endif /* BLOCK_AIO_TASK_H */
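
A minimal sketch of how the pool is meant to be driven from a coroutine: the
caller embeds AioTask in its own task struct, fills it in, and lets the pool
bound concurrency. Names are hypothetical, and allocating tasks with g_new0()
assumes the pool frees each task once it has run (as block/aio_task.c currently
does).

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/aio_task.h"

    typedef struct MyChunkTask {
        AioTask task;           /* the object handed to the pool */
        int64_t offset;
        int64_t bytes;
    } MyChunkTask;

    static int coroutine_fn my_chunk_func(AioTask *task)
    {
        MyChunkTask *t = container_of(task, MyChunkTask, task);
        /* ... process the range [t->offset, t->offset + t->bytes) ... */
        return 0;
    }

    static int coroutine_fn my_process_all(int64_t total, int64_t chunk)
    {
        AioTaskPool *pool = aio_task_pool_new(16);  /* <= 16 tasks in flight */
        int ret;

        for (int64_t off = 0; off < total; off += chunk) {
            MyChunkTask *t = g_new0(MyChunkTask, 1);

            t->task.func = my_chunk_func;
            t->offset = off;
            t->bytes = MIN(chunk, total - off);

            /* Yield until a slot is free, then hand the task to the pool
             * (task->pool is set automatically). */
            aio_task_pool_wait_slot(pool);
            aio_task_pool_start_task(pool, &t->task);
        }

        aio_task_pool_wait_all(pool);
        ret = aio_task_pool_status(pool);   /* first error, or 0 */
        aio_task_pool_free(pool);
        return ret;
    }
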
diff --git a/include/block/block-common.h b/include/block/block-common.h
new file mode 100644
index 0000000000..a846023a09
--- /dev/null
+++ b/include/block/block-common.h
@@ -0,0 +1,565 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_COMMON_H
+#define BLOCK_COMMON_H
+
+#include "qapi/qapi-types-block-core.h"
+#include "qemu/queue.h"
+
+/*
+ * co_wrapper{*}: Function specifiers used by block-coroutine-wrapper.py
+ *
+ * Function specifiers, which do nothing but mark functions to be
+ * generated by scripts/block-coroutine-wrapper.py
+ *
+ * Usage: read docs/devel/block-coroutine-wrapper.rst
+ *
+ * There are 4 kinds of specifiers:
+ * - co_wrapper functions can be called only from non-coroutine context,
+ * because they always generate a new coroutine.
+ * - co_wrapper_mixed functions can be called from both coroutine and
+ * non-coroutine context.
+ * - co_wrapper_bdrv_rdlock functions are co_wrapper functions that also take
+ * and release the graph rdlock when creating a new coroutine.
+ * - co_wrapper_mixed_bdrv_rdlock functions are co_wrapper_mixed functions
+ * that also take and release the graph rdlock when creating a new coroutine.
+ *
+ * These functions should not be called from a coroutine_fn; instead,
+ * call the wrapped function directly.
+ */
+#define co_wrapper no_coroutine_fn
+#define co_wrapper_mixed no_coroutine_fn coroutine_mixed_fn
+#define co_wrapper_bdrv_rdlock no_coroutine_fn
+#define co_wrapper_mixed_bdrv_rdlock no_coroutine_fn coroutine_mixed_fn
+
+/*
+ * no_co_wrapper: Function specifier used by block-coroutine-wrapper.py
+ *
+ * Function specifier which does nothing but mark functions to be generated by
+ * scripts/block-coroutine-wrapper.py.
+ *
+ * A no_co_wrapper function declaration creates a coroutine_fn wrapper around
+ * functions that must not be called in coroutine context. It achieves this by
+ * scheduling a bottom half (BH) that runs the respective non-coroutine
+ * function. The coroutine yields after scheduling the BH and is reentered when
+ * the wrapped function returns.
+ *
+ * A no_co_wrapper_bdrv_rdlock function is a no_co_wrapper function that
+ * automatically takes the graph rdlock when calling the wrapped function. In
+ * the same way, no_co_wrapper_bdrv_wrlock functions automatically take the
+ * graph wrlock.
+ */
+#define no_co_wrapper
+#define no_co_wrapper_bdrv_rdlock
+#define no_co_wrapper_bdrv_wrlock
+
+#include "block/blockjob.h"
+
+/* block.c */
+typedef struct BlockDriver BlockDriver;
+typedef struct BdrvChild BdrvChild;
+typedef struct BdrvChildClass BdrvChildClass;
+
+typedef enum BlockZoneOp {
+ BLK_ZO_OPEN,
+ BLK_ZO_CLOSE,
+ BLK_ZO_FINISH,
+ BLK_ZO_RESET,
+} BlockZoneOp;
+
+typedef enum BlockZoneModel {
+ BLK_Z_NONE = 0x0, /* Regular block device */
+ BLK_Z_HM = 0x1, /* Host-managed zoned block device */
+ BLK_Z_HA = 0x2, /* Host-aware zoned block device */
+} BlockZoneModel;
+
+typedef enum BlockZoneState {
+ BLK_ZS_NOT_WP = 0x0,
+ BLK_ZS_EMPTY = 0x1,
+ BLK_ZS_IOPEN = 0x2,
+ BLK_ZS_EOPEN = 0x3,
+ BLK_ZS_CLOSED = 0x4,
+ BLK_ZS_RDONLY = 0xD,
+ BLK_ZS_FULL = 0xE,
+ BLK_ZS_OFFLINE = 0xF,
+} BlockZoneState;
+
+typedef enum BlockZoneType {
+ BLK_ZT_CONV = 0x1, /* Conventional random writes supported */
+ BLK_ZT_SWR = 0x2, /* Sequential writes required */
+ BLK_ZT_SWP = 0x3, /* Sequential writes preferred */
+} BlockZoneType;
+
+/*
+ * Zone descriptor data structure.
+ * Provides information on a zone with all position and size values in bytes.
+ */
+typedef struct BlockZoneDescriptor {
+ uint64_t start;
+ uint64_t length;
+ uint64_t cap;
+ uint64_t wp;
+ BlockZoneType type;
+ BlockZoneState state;
+} BlockZoneDescriptor;
+
+/*
+ * Track write pointers of a zone in bytes.
+ */
+typedef struct BlockZoneWps {
+ CoMutex colock;
+ uint64_t wp[];
+} BlockZoneWps;
+
+typedef struct BlockDriverInfo {
+ /* in bytes, 0 if irrelevant */
+ int cluster_size;
+ /*
+ * A fraction of cluster_size, if supported (currently QCOW2 only); if
+ * disabled or unsupported, set equal to cluster_size.
+ */
+ int subcluster_size;
+ /* offset at which the VM state can be saved (0 if not possible) */
+ int64_t vm_state_offset;
+ bool is_dirty;
+ /*
+ * True if this block driver only supports compressed writes
+ */
+ bool needs_compressed_writes;
+} BlockDriverInfo;
+
+typedef struct BlockFragInfo {
+ uint64_t allocated_clusters;
+ uint64_t total_clusters;
+ uint64_t fragmented_clusters;
+ uint64_t compressed_clusters;
+} BlockFragInfo;
+
+typedef enum {
+ BDRV_REQ_COPY_ON_READ = 0x1,
+ BDRV_REQ_ZERO_WRITE = 0x2,
+
+ /*
+ * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
+ * that the block driver should unmap (discard) blocks if it is guaranteed
+ * that the result will read back as zeroes. The flag is only passed to the
+ * driver if the block device is opened with BDRV_O_UNMAP.
+ */
+ BDRV_REQ_MAY_UNMAP = 0x4,
+
+ /*
+ * An optimization hint when all QEMUIOVector elements are within
+ * previously registered bdrv_register_buf() memory ranges.
+ *
+ * Code that replaces the user's QEMUIOVector elements with bounce buffers
+ * must take care to clear this flag.
+ */
+ BDRV_REQ_REGISTERED_BUF = 0x8,
+
+ BDRV_REQ_FUA = 0x10,
+ BDRV_REQ_WRITE_COMPRESSED = 0x20,
+
+ /*
+ * Signifies that this write request will not change the visible disk
+ * content.
+ */
+ BDRV_REQ_WRITE_UNCHANGED = 0x40,
+
+ /*
+ * Forces request serialisation. Use only with write requests.
+ */
+ BDRV_REQ_SERIALISING = 0x80,
+
+ /*
+ * Execute the request only if the operation can be offloaded or otherwise
+ * be executed efficiently, but return an error instead of using a slow
+ * fallback.
+ */
+ BDRV_REQ_NO_FALLBACK = 0x100,
+
+ /*
+ * BDRV_REQ_PREFETCH makes sense only in the context of copy-on-read
+ * (i.e., together with the BDRV_REQ_COPY_ON_READ flag or when a COR
+ * filter is involved), in which case it signals that the COR operation
+ * need not read the data into memory (qiov) but only ensure they are
+ * copied to the top layer (i.e., that COR operation is done).
+ */
+ BDRV_REQ_PREFETCH = 0x200,
+
+ /*
+ * If we need to wait for other requests, just fail immediately. Used
+ * only together with BDRV_REQ_SERIALISING. Used only with requests aligned
+ * to request_alignment (corresponding assertions are in block/io.c).
+ */
+ BDRV_REQ_NO_WAIT = 0x400,
+
+ /* Mask of valid flags */
+ BDRV_REQ_MASK = 0x7ff,
+} BdrvRequestFlags;
+
+#define BDRV_O_NO_SHARE 0x0001 /* don't share permissions */
+#define BDRV_O_RDWR 0x0002
+#define BDRV_O_RESIZE 0x0004 /* request permission for resizing the node */
+#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save
+ writes in a snapshot */
+#define BDRV_O_TEMPORARY 0x0010 /* delete the file after use */
+#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */
+#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the
+ thread pool */
+#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */
+#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */
+#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
+#define BDRV_O_INACTIVE 0x0800 /* consistency hint for migration handoff */
+#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */
+#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */
+#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */
+#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given:
+ select an appropriate protocol driver,
+ ignoring the format layer */
+#define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */
+#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening
+ read-write fails */
+#define BDRV_O_IO_URING 0x40000 /* use io_uring instead of the thread pool */
+
+#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
+
+
+/* Option names of options parsed by the block layer */
+
+#define BDRV_OPT_CACHE_WB "cache.writeback"
+#define BDRV_OPT_CACHE_DIRECT "cache.direct"
+#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
+#define BDRV_OPT_READ_ONLY "read-only"
+#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
+#define BDRV_OPT_DISCARD "discard"
+#define BDRV_OPT_FORCE_SHARE "force-share"
+
+
+#define BDRV_SECTOR_BITS 9
+#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
+
+/*
+ * Check the most significant bit of wp. If it is zero, the zone type is
+ * SWR (sequential write required); if it is set, the zone is conventional.
+ */
+#define BDRV_ZT_IS_CONV(wp) (wp & (1ULL << 63))
+
+#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
+ INT_MAX >> BDRV_SECTOR_BITS)
+#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
+
+/*
+ * We want to allow aligning requests and disk length up to any 32-bit
+ * alignment without fear of overflow.
+ * To achieve this, while still using a convenient number as the maximum disk
+ * size, define the maximum "length" (a limit for any offset/bytes request and
+ * for the disk size) as INT64_MAX aligned down to BDRV_MAX_ALIGNMENT.
+ */
+#define BDRV_MAX_ALIGNMENT (1L << 30)
+#define BDRV_MAX_LENGTH (QEMU_ALIGN_DOWN(INT64_MAX, BDRV_MAX_ALIGNMENT))
+
+/*
+ * Allocation status flags for bdrv_block_status() and friends.
+ *
+ * Public flags:
+ * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
+ * BDRV_BLOCK_ZERO: offset reads as zero
+ * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
+ * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
+ * layer rather than any backing, set by block layer
+ * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
+ * layer, set by block layer
+ * BDRV_BLOCK_COMPRESSED: the underlying data is compressed; only valid for
+ * the formats supporting compression: qcow, qcow2
+ *
+ * Internal flags:
+ * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
+ * that the block layer recompute the answer from the returned
+ * BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
+ * BDRV_BLOCK_RECURSE: request that the block layer recursively search for
+ * zeroes in the file child of the current block node within
+ * the returned region. Only valid together with both
+ * BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID. Should not
+ * appear with BDRV_BLOCK_ZERO.
+ *
+ * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
+ * host offset within the returned BDS that is allocated for the
+ * corresponding raw guest data. However, whether that offset
+ * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
+ *
+ * DATA ZERO OFFSET_VALID
+ * t t t sectors read as zero, returned file is zero at offset
+ * t f t sectors read as valid from file at offset
+ * f t t sectors preallocated, read as zero, returned file not
+ * necessarily zero at offset
+ * f f t sectors preallocated but read from backing_hd,
+ * returned file contains garbage at offset
+ * t t f sectors preallocated, read as zero, unknown offset
+ * t f f sectors read from unknown file or offset
+ * f t f not allocated or unknown offset, read as zero
+ * f f f not allocated or unknown offset, read from backing_hd
+ */
+#define BDRV_BLOCK_DATA 0x01
+#define BDRV_BLOCK_ZERO 0x02
+#define BDRV_BLOCK_OFFSET_VALID 0x04
+#define BDRV_BLOCK_RAW 0x08
+#define BDRV_BLOCK_ALLOCATED 0x10
+#define BDRV_BLOCK_EOF 0x20
+#define BDRV_BLOCK_RECURSE 0x40
+#define BDRV_BLOCK_COMPRESSED 0x80
+
+typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;
+
+typedef struct BDRVReopenState {
+ BlockDriverState *bs;
+ int flags;
+ BlockdevDetectZeroesOptions detect_zeroes;
+ bool backing_missing;
+ BlockDriverState *old_backing_bs; /* keep pointer for permissions update */
+ BlockDriverState *old_file_bs; /* keep pointer for permissions update */
+ QDict *options;
+ QDict *explicit_options;
+ void *opaque;
+} BDRVReopenState;
+
+/*
+ * Block operation types
+ */
+typedef enum BlockOpType {
+ BLOCK_OP_TYPE_BACKUP_SOURCE,
+ BLOCK_OP_TYPE_BACKUP_TARGET,
+ BLOCK_OP_TYPE_CHANGE,
+ BLOCK_OP_TYPE_COMMIT_SOURCE,
+ BLOCK_OP_TYPE_COMMIT_TARGET,
+ BLOCK_OP_TYPE_DATAPLANE,
+ BLOCK_OP_TYPE_DRIVE_DEL,
+ BLOCK_OP_TYPE_EJECT,
+ BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
+ BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
+ BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
+ BLOCK_OP_TYPE_MIRROR_SOURCE,
+ BLOCK_OP_TYPE_MIRROR_TARGET,
+ BLOCK_OP_TYPE_RESIZE,
+ BLOCK_OP_TYPE_STREAM,
+ BLOCK_OP_TYPE_REPLACE,
+ BLOCK_OP_TYPE_MAX,
+} BlockOpType;
+
+/* Block node permission constants */
+enum {
+ /**
+ * A user that has the "permission" of consistent reads is guaranteed that
+ * their view of the contents of the block device is complete and
+ * self-consistent, representing the contents of a disk at a specific
+ * point.
+ *
+ * For most block devices (including their backing files) this is true, but
+ * the property cannot be maintained in a few situations like for
+ * intermediate nodes of a commit block job.
+ */
+ BLK_PERM_CONSISTENT_READ = 0x01,
+
+ /** This permission is required to change the visible disk contents. */
+ BLK_PERM_WRITE = 0x02,
+
+ /**
+ * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
+ * required for writes to the block node when the caller promises that
+ * the visible disk content doesn't change.
+ *
+ * As the BLK_PERM_WRITE permission is strictly stronger, either is
+ * sufficient to perform an unchanging write.
+ */
+ BLK_PERM_WRITE_UNCHANGED = 0x04,
+
+ /** This permission is required to change the size of a block node. */
+ BLK_PERM_RESIZE = 0x08,
+
+ /**
+ * There was a now-removed bit BLK_PERM_GRAPH_MOD, with a value of 0x10. QEMU
+ * 6.1 and earlier may still lock the corresponding byte in block/file-posix
+ * locking, so anyone implementing a new permission should be careful not to
+ * interfere with this old, unused bit.
+ */
+
+ BLK_PERM_ALL = 0x0f,
+
+ DEFAULT_PERM_PASSTHROUGH = BLK_PERM_CONSISTENT_READ
+ | BLK_PERM_WRITE
+ | BLK_PERM_WRITE_UNCHANGED
+ | BLK_PERM_RESIZE,
+
+ DEFAULT_PERM_UNCHANGED = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
+};
+
+/*
+ * Flags that parent nodes assign to child nodes to specify what kind of
+ * role(s) they take.
+ *
+ * At least one of DATA, METADATA, FILTERED, or COW must be set for
+ * every child.
+ *
+ *
+ * = Connection with bs->children, bs->file and bs->backing fields =
+ *
+ * 1. Filters
+ *
+ * Filter drivers have drv->is_filter = true.
+ *
+ * A filter node has exactly one FILTERED|PRIMARY child, and may have other
+ * children which must not have these bits (one example is the
+ * copy-before-write filter, which also has its target DATA child).
+ *
+ * Filter nodes never have COW children.
+ *
+ * For most filters, the filtered child is linked in bs->file and bs->backing
+ * is NULL. For some filters (as an exception), it is the other way around;
+ * those drivers have drv->filtered_child_is_backing set to true (see that
+ * field's documentation for which drivers this concerns).
+ *
+ * 2. "raw" driver (block/raw-format.c)
+ *
+ * Formally it's not a filter (drv->is_filter = false)
+ *
+ * bs->backing is always NULL
+ *
+ * Only has one child, linked in bs->file. Its role is either FILTERED|PRIMARY
+ * (like filter) or DATA|PRIMARY depending on options.
+ *
+ * 3. Other drivers
+ *
+ * Don't have any FILTERED children.
+ *
+ * May have at most one COW child. In this case it's linked in bs->backing.
+ * Otherwise bs->backing is NULL. COW child is never PRIMARY.
+ *
+ * May have at most one PRIMARY child. In this case it's linked in bs->file.
+ * Otherwise bs->file is NULL.
+ *
+ * May also have some other children that don't have the PRIMARY or COW bit set.
+ */
+enum BdrvChildRoleBits {
+ /*
+ * This child stores data.
+ * Any node may have an arbitrary number of such children.
+ */
+ BDRV_CHILD_DATA = (1 << 0),
+
+ /*
+ * This child stores metadata.
+ * Any node may have an arbitrary number of metadata-storing
+ * children.
+ */
+ BDRV_CHILD_METADATA = (1 << 1),
+
+ /*
+ * A child that always presents exactly the same visible data as
+ * the parent, e.g. by virtue of the parent forwarding all reads
+ * and writes.
+ * This flag is mutually exclusive with DATA, METADATA, and COW.
+ * Any node may have at most one filtered child at a time.
+ */
+ BDRV_CHILD_FILTERED = (1 << 2),
+
+ /*
+ * Child from which to read all data that isn't allocated in the
+ * parent (i.e., the backing child); such data is copied to the
+ * parent through COW (and optionally COR).
+ * This field is mutually exclusive with DATA, METADATA, and
+ * FILTERED.
+ * Any node may have at most one such backing child at a time.
+ */
+ BDRV_CHILD_COW = (1 << 3),
+
+ /*
+ * The primary child. For most drivers, this is the child whose
+ * filename applies best to the parent node.
+ * Any node may have at most one primary child at a time.
+ */
+ BDRV_CHILD_PRIMARY = (1 << 4),
+
+ /* Useful combination of flags */
+ BDRV_CHILD_IMAGE = BDRV_CHILD_DATA
+ | BDRV_CHILD_METADATA
+ | BDRV_CHILD_PRIMARY,
+};
+
+/* Mask of BdrvChildRoleBits values */
+typedef unsigned int BdrvChildRole;
+
+typedef struct BdrvCheckResult {
+ int corruptions;
+ int leaks;
+ int check_errors;
+ int corruptions_fixed;
+ int leaks_fixed;
+ int64_t image_end_offset;
+ BlockFragInfo bfi;
+} BdrvCheckResult;
+
+typedef enum {
+ BDRV_FIX_LEAKS = 1,
+ BDRV_FIX_ERRORS = 2,
+} BdrvCheckMode;
+
+typedef struct BlockSizes {
+ uint32_t phys;
+ uint32_t log;
+} BlockSizes;
+
+typedef struct HDGeometry {
+ uint32_t heads;
+ uint32_t sectors;
+ uint32_t cylinders;
+} HDGeometry;
+
+/*
+ * Common functions that are neither I/O nor Global State.
+ *
+ * These functions must never call any function from other categories
+ * (I/O, "I/O or GS", Global State) except this one, but can be invoked by
+ * all of them.
+ */
+
+char *bdrv_perm_names(uint64_t perm);
+uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm);
+
+void bdrv_init_with_whitelist(void);
+bool bdrv_uses_whitelist(void);
+int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
+
+int bdrv_parse_aio(const char *mode, int *flags);
+int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
+int bdrv_parse_discard_flags(const char *mode, int *flags);
+
+int path_has_protocol(const char *path);
+int path_is_absolute(const char *path);
+char *path_combine(const char *base_path, const char *filename);
+
+char *bdrv_get_full_backing_filename_from_filename(const char *backed,
+ const char *backing,
+ Error **errp);
+
+#endif /* BLOCK_COMMON_H */
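
As a reading aid for the allocation-status truth table above, a rough decoder of
the public flag bits; it is a simplification (the preallocated-but-backed case is
folded into the last branch) and only a sketch, not an API.

    #include "qemu/osdep.h"
    #include "block/block-common.h"

    /* Summarise a block-status flag mask using only the public bits. */
    static const char *block_status_describe(int64_t ret)
    {
        bool data = ret & BDRV_BLOCK_DATA;
        bool zero = ret & BDRV_BLOCK_ZERO;
        bool offs = ret & BDRV_BLOCK_OFFSET_VALID;

        if (zero) {
            return "reads as zeroes";
        }
        if (data && offs) {
            return "raw data available at the returned host offset";
        }
        if (data) {
            return "data determined by this layer, host offset unknown";
        }
        return "content not determined by this layer (read from backing, if any)";
    }
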
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
new file mode 100644
index 0000000000..0700953ab8
--- /dev/null
+++ b/include/block/block-copy.h
@@ -0,0 +1,101 @@
+/*
+ * block_copy API
+ *
+ * Copyright (C) 2013 Proxmox Server Solutions
+ * Copyright (c) 2019 Virtuozzo International GmbH.
+ *
+ * Authors:
+ * Dietmar Maurer (dietmar@proxmox.com)
+ * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BLOCK_COPY_H
+#define BLOCK_COPY_H
+
+#include "block/block-common.h"
+#include "qemu/progress_meter.h"
+
+/* All APIs are thread-safe */
+
+typedef void (*BlockCopyAsyncCallbackFunc)(void *opaque);
+typedef struct BlockCopyState BlockCopyState;
+typedef struct BlockCopyCallState BlockCopyCallState;
+
+BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
+ const BdrvDirtyBitmap *bitmap,
+ Error **errp);
+
+/* This function should be called prior to any actual copy request */
+void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
+ bool compress);
+void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
+
+void block_copy_state_free(BlockCopyState *s);
+
+void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes);
+
+int64_t coroutine_fn GRAPH_RDLOCK
+block_copy_reset_unallocated(BlockCopyState *s, int64_t offset, int64_t *count);
+
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
+ bool ignore_ratelimit, uint64_t timeout_ns,
+ BlockCopyAsyncCallbackFunc cb,
+ void *cb_opaque);
+
+/*
+ * Run block-copy in a coroutine, create a corresponding BlockCopyCallState
+ * object and return a pointer to it. Never returns NULL.
+ *
+ * The caller is responsible for calling block_copy_call_free() to free the
+ * BlockCopyCallState object.
+ *
+ * @max_workers is the maximum number of parallel coroutines used to execute
+ * sub-requests; it must be > 0.
+ *
+ * @max_chunk is the maximum length of one I/O operation; zero means unlimited.
+ */
+BlockCopyCallState *block_copy_async(BlockCopyState *s,
+ int64_t offset, int64_t bytes,
+ int max_workers, int64_t max_chunk,
+ BlockCopyAsyncCallbackFunc cb,
+ void *cb_opaque);
+
+/*
+ * Free a finished BlockCopyCallState. Trying to free a running
+ * block-copy call will crash.
+ */
+void block_copy_call_free(BlockCopyCallState *call_state);
+
+/*
+ * Note that the block-copy call is marked finished prior to calling
+ * the callback.
+ */
+bool block_copy_call_finished(BlockCopyCallState *call_state);
+bool block_copy_call_succeeded(BlockCopyCallState *call_state);
+bool block_copy_call_failed(BlockCopyCallState *call_state);
+bool block_copy_call_cancelled(BlockCopyCallState *call_state);
+int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read);
+
+void block_copy_set_speed(BlockCopyState *s, uint64_t speed);
+void block_copy_kick(BlockCopyCallState *call_state);
+
+/*
+ * Cancel a running block-copy call.
+ *
+ * Cancelling leaves the block-copy state valid: dirty bits are correct, so you
+ * may use cancel + <run block_copy with same parameters> to emulate
+ * pause/resume.
+ *
+ * Note also that cancellation is asynchronous: it only marks the block-copy
+ * call to be cancelled. So the call may be cancelled
+ * (block_copy_call_cancelled() reports true) but not yet finished
+ * (block_copy_call_finished() reports false).
+ */
+void block_copy_call_cancel(BlockCopyCallState *call_state);
+
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
+int64_t block_copy_cluster_size(BlockCopyState *s);
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);
+
+#endif /* BLOCK_COPY_H */
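
A minimal, non-coroutine sketch of driving the asynchronous API above from the
state's home thread; the completion callback, the busy polling via aio_poll()
and all names are hypothetical simplifications (real callers typically run
inside a block job coroutine).

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/block-copy.h"

    static void my_copy_done(void *opaque)
    {
        bool *done = opaque;
        *done = true;   /* the call is already marked finished at this point */
    }

    static int my_run_copy(BlockCopyState *s, int64_t offset, int64_t bytes)
    {
        bool done = false;
        bool error_is_read;
        BlockCopyCallState *call;
        int ret;

        call = block_copy_async(s, offset, bytes,
                                4 /* max_workers */, 0 /* unlimited chunk */,
                                my_copy_done, &done);

        while (!done) {
            aio_poll(qemu_get_current_aio_context(), true);
        }

        ret = block_copy_call_status(call, &error_is_read);
        block_copy_call_free(call);
        return ret;
    }
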
diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h
new file mode 100644
index 0000000000..bd7cecd1cf
--- /dev/null
+++ b/include/block/block-global-state.h
@@ -0,0 +1,305 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_GLOBAL_STATE_H
+#define BLOCK_GLOBAL_STATE_H
+
+#include "block/block-common.h"
+#include "qemu/coroutine.h"
+#include "qemu/transactions.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * If a function modifies the graph, it also uses the graph lock to be sure it
+ * has unique access. The graph lock is needed together with BQL because of the
+ * thread-safe I/O API that concurrently runs and accesses the graph without
+ * the BQL.
+ *
+ * It is important to note that not all of these functions are
+ * necessarily limited to running under the BQL, but they would
+ * require additional auditing and many small thread-safety changes
+ * to move them into the I/O API. Often it's not worth doing that
+ * work since the APIs are only used with the BQL held at the
+ * moment, so they have been placed in the GS API (for now).
+ *
+ * These functions can call any function from this and other categories
+ * (I/O, "I/O or GS", Common), but must be invoked only by other GS APIs.
+ *
+ * All functions in this header must use the macro
+ * GLOBAL_STATE_CODE();
+ * to catch when they are accidentally called without the BQL.
+ */
+
+void bdrv_init(void);
+BlockDriver *bdrv_find_protocol(const char *filename,
+ bool allow_protocol_prefix,
+ Error **errp);
+BlockDriver *bdrv_find_format(const char *format_name);
+
+int coroutine_fn GRAPH_UNLOCKED
+bdrv_co_create(BlockDriver *drv, const char *filename, QemuOpts *opts,
+ Error **errp);
+
+int co_wrapper bdrv_create(BlockDriver *drv, const char *filename,
+ QemuOpts *opts, Error **errp);
+
+int coroutine_fn GRAPH_UNLOCKED
+bdrv_co_create_file(const char *filename, QemuOpts *opts, Error **errp);
+
+BlockDriverState *bdrv_new(void);
+int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
+ Error **errp);
+
+int GRAPH_WRLOCK
+bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, Error **errp);
+
+int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
+ Error **errp);
+BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options,
+ int flags, Error **errp);
+int bdrv_drop_filter(BlockDriverState *bs, Error **errp);
+
+BdrvChild * no_coroutine_fn
+bdrv_open_child(const char *filename, QDict *options, const char *bdref_key,
+ BlockDriverState *parent, const BdrvChildClass *child_class,
+ BdrvChildRole child_role, bool allow_none, Error **errp);
+
+BdrvChild * coroutine_fn no_co_wrapper
+bdrv_co_open_child(const char *filename, QDict *options, const char *bdref_key,
+ BlockDriverState *parent, const BdrvChildClass *child_class,
+ BdrvChildRole child_role, bool allow_none, Error **errp);
+
+int bdrv_open_file_child(const char *filename,
+ QDict *options, const char *bdref_key,
+ BlockDriverState *parent, Error **errp);
+
+BlockDriverState * no_coroutine_fn
+bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
+
+BlockDriverState * coroutine_fn no_co_wrapper
+bdrv_co_open_blockdev_ref(BlockdevRef *ref, Error **errp);
+
+int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
+ Error **errp);
+int GRAPH_WRLOCK
+bdrv_set_backing_hd_drained(BlockDriverState *bs, BlockDriverState *backing_hd,
+ Error **errp);
+
+int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
+ const char *bdref_key, Error **errp);
+
+BlockDriverState * no_coroutine_fn
+bdrv_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+BlockDriverState * coroutine_fn no_co_wrapper
+bdrv_co_open(const char *filename, const char *reference,
+ QDict *options, int flags, Error **errp);
+
+BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv,
+ const char *node_name,
+ QDict *options, int flags,
+ Error **errp);
+BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
+ int flags, Error **errp);
+BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
+ BlockDriverState *bs, QDict *options,
+ bool keep_old_opts);
+void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue);
+int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
+int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
+ Error **errp);
+int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
+ Error **errp);
+BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
+ const char *backing_file);
+void GRAPH_RDLOCK bdrv_refresh_filename(BlockDriverState *bs);
+
+void GRAPH_RDLOCK
+bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp);
+
+int bdrv_commit(BlockDriverState *bs);
+int GRAPH_RDLOCK bdrv_make_empty(BdrvChild *c, Error **errp);
+
+void bdrv_register(BlockDriver *bdrv);
+int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
+ const char *backing_file_str,
+ bool backing_mask_protocol);
+
+BlockDriverState * GRAPH_RDLOCK
+bdrv_find_overlay(BlockDriverState *active, BlockDriverState *bs);
+
+BlockDriverState * GRAPH_RDLOCK bdrv_find_base(BlockDriverState *bs);
+
+int GRAPH_RDLOCK
+bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
+ Error **errp);
+void GRAPH_RDLOCK
+bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);
+
+/*
+ * The units of offset and total_work_size may be chosen arbitrarily by the
+ * block driver; total_work_size may change during the course of the amendment
+ * operation
+ */
+typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
+ int64_t total_work_size, void *opaque);
+int GRAPH_RDLOCK
+bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
+ BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
+ bool force, Error **errp);
+
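For instance, a status callback could be sketched like this (the name and the use of qemu_printf() are illustrative); since total_work_size may change, the percentage is recomputed on every call:

static void example_amend_status(BlockDriverState *bs, int64_t offset,
                                 int64_t total_work_size, void *opaque)
{
    if (total_work_size > 0) {
        /* Units are driver-defined, so only the ratio is meaningful. */
        qemu_printf("amend %s: %.1f%%\n", bdrv_get_node_name(bs),
                    100.0 * offset / total_work_size);
    }
}
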
+/* check if a named node can be replaced when doing drive-mirror */
+BlockDriverState * GRAPH_RDLOCK
+check_to_replace_node(BlockDriverState *parent_bs, const char *node_name,
+ Error **errp);
+
+int no_coroutine_fn GRAPH_RDLOCK
+bdrv_activate(BlockDriverState *bs, Error **errp);
+
+int coroutine_fn no_co_wrapper_bdrv_rdlock
+bdrv_co_activate(BlockDriverState *bs, Error **errp);
+
+void bdrv_activate_all(Error **errp);
+int bdrv_inactivate_all(void);
+
+int bdrv_flush_all(void);
+void bdrv_close_all(void);
+void bdrv_drain_all_begin(void);
+void bdrv_drain_all_begin_nopoll(void);
+void bdrv_drain_all_end(void);
+void bdrv_drain_all(void);
+
+void bdrv_aio_cancel(BlockAIOCB *acb);
+
+int bdrv_has_zero_init_1(BlockDriverState *bs);
+int coroutine_mixed_fn GRAPH_RDLOCK bdrv_has_zero_init(BlockDriverState *bs);
+BlockDriverState *bdrv_find_node(const char *node_name);
+BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp);
+XDbgBlockGraph * GRAPH_RDLOCK bdrv_get_xdbg_block_graph(Error **errp);
+BlockDriverState *bdrv_lookup_bs(const char *device,
+ const char *node_name,
+ Error **errp);
+bool GRAPH_RDLOCK
+bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
+
+BlockDriverState *bdrv_next_node(BlockDriverState *bs);
+BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);
+
+typedef struct BdrvNextIterator {
+ enum {
+ BDRV_NEXT_BACKEND_ROOTS,
+ BDRV_NEXT_MONITOR_OWNED,
+ } phase;
+ BlockBackend *blk;
+ BlockDriverState *bs;
+} BdrvNextIterator;
+
+BlockDriverState * GRAPH_RDLOCK bdrv_first(BdrvNextIterator *it);
+BlockDriverState * GRAPH_RDLOCK bdrv_next(BdrvNextIterator *it);
+void bdrv_next_cleanup(BdrvNextIterator *it);
+
+BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
+void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
+ void *opaque, bool read_only);
+
+char * GRAPH_RDLOCK
+bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
+
+char * GRAPH_RDLOCK bdrv_dirname(BlockDriverState *bs, Error **errp);
+
+void bdrv_img_create(const char *filename, const char *fmt,
+ const char *base_filename, const char *base_fmt,
+ char *options, uint64_t img_size, int flags,
+ bool quiet, Error **errp);
+
+void bdrv_ref(BlockDriverState *bs);
+void no_coroutine_fn bdrv_unref(BlockDriverState *bs);
+void coroutine_fn no_co_wrapper bdrv_co_unref(BlockDriverState *bs);
+void GRAPH_WRLOCK bdrv_schedule_unref(BlockDriverState *bs);
+
+void GRAPH_WRLOCK
+bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
+
+void coroutine_fn no_co_wrapper_bdrv_wrlock
+bdrv_co_unref_child(BlockDriverState *parent, BdrvChild *child);
+
+BdrvChild * GRAPH_WRLOCK
+bdrv_attach_child(BlockDriverState *parent_bs,
+ BlockDriverState *child_bs,
+ const char *child_name,
+ const BdrvChildClass *child_class,
+ BdrvChildRole child_role,
+ Error **errp);
+
+bool GRAPH_RDLOCK
+bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
+
+void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
+void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
+void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
+void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
+bool bdrv_op_blocker_is_empty(BlockDriverState *bs);
+
+int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
+ const char *tag);
+int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
+int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
+bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
+
+bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
+ GHashTable *visited, Transaction *tran,
+ Error **errp);
+int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp);
+
+int GRAPH_RDLOCK bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
+int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
+
+void GRAPH_WRLOCK
+bdrv_add_child(BlockDriverState *parent, BlockDriverState *child, Error **errp);
+
+void GRAPH_WRLOCK
+bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
+
+/**
+ *
+ * bdrv_register_buf/bdrv_unregister_buf:
+ *
+ * Register/unregister a buffer for I/O. For example, VFIO drivers want to
+ * know the memory areas that will later be used for I/O, so that they can
+ * prepare IOMMU mappings etc. in advance and get better performance.
+ *
+ * Buffers must not overlap and they must be unregistered with the same <host,
+ * size> values that they were registered with.
+ *
+ * Returns: true on success, false on failure
+ */
+bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
+ Error **errp);
+void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size);
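A sketch of the intended pairing (bs, len and local_err are assumed to exist in the caller); note that the unregister call reuses the exact <host, size> pair:

void *buf = qemu_blockalign(bs, len);

if (bdrv_register_buf(bs, buf, len, &local_err)) {
    /* ... issue I/O through buf ... */
    bdrv_unregister_buf(bs, buf, len);    /* same <host, size> as above */
} else {
    error_report_err(local_err);          /* fall back to unregistered I/O */
}
qemu_vfree(buf);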
+
+void bdrv_cancel_in_flight(BlockDriverState *bs);
+
+#endif /* BLOCK_GLOBAL_STATE_H */
diff --git a/include/block/block-hmp-cmds.h b/include/block/block-hmp-cmds.h
new file mode 100644
index 0000000000..71113cd7ef
--- /dev/null
+++ b/include/block/block-hmp-cmds.h
@@ -0,0 +1,56 @@
+/*
+ * HMP commands related to the block layer
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2020 Red Hat, Inc.
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef BLOCK_BLOCK_HMP_CMDS_H
+#define BLOCK_BLOCK_HMP_CMDS_H
+
+#include "qemu/coroutine.h"
+
+void hmp_drive_add(Monitor *mon, const QDict *qdict);
+
+void hmp_commit(Monitor *mon, const QDict *qdict);
+void hmp_drive_del(Monitor *mon, const QDict *qdict);
+
+void hmp_drive_mirror(Monitor *mon, const QDict *qdict);
+void hmp_drive_backup(Monitor *mon, const QDict *qdict);
+
+void hmp_block_job_set_speed(Monitor *mon, const QDict *qdict);
+void hmp_block_job_cancel(Monitor *mon, const QDict *qdict);
+void hmp_block_job_pause(Monitor *mon, const QDict *qdict);
+void hmp_block_job_resume(Monitor *mon, const QDict *qdict);
+void hmp_block_job_complete(Monitor *mon, const QDict *qdict);
+
+void hmp_snapshot_blkdev(Monitor *mon, const QDict *qdict);
+void hmp_snapshot_blkdev_internal(Monitor *mon, const QDict *qdict);
+void hmp_snapshot_delete_blkdev_internal(Monitor *mon, const QDict *qdict);
+
+void hmp_nbd_server_start(Monitor *mon, const QDict *qdict);
+void hmp_nbd_server_add(Monitor *mon, const QDict *qdict);
+void hmp_nbd_server_remove(Monitor *mon, const QDict *qdict);
+void hmp_nbd_server_stop(Monitor *mon, const QDict *qdict);
+
+void coroutine_fn hmp_block_resize(Monitor *mon, const QDict *qdict);
+void hmp_block_stream(Monitor *mon, const QDict *qdict);
+void hmp_block_passwd(Monitor *mon, const QDict *qdict);
+void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict);
+void hmp_eject(Monitor *mon, const QDict *qdict);
+
+void hmp_qemu_io(Monitor *mon, const QDict *qdict);
+
+void hmp_info_block(Monitor *mon, const QDict *qdict);
+void hmp_info_blockstats(Monitor *mon, const QDict *qdict);
+void hmp_info_block_jobs(Monitor *mon, const QDict *qdict);
+void hmp_info_snapshots(Monitor *mon, const QDict *qdict);
+
+#endif
diff --git a/include/block/block-io.h b/include/block/block-io.h
new file mode 100644
index 0000000000..b49e0537dd
--- /dev/null
+++ b/include/block/block-io.h
@@ -0,0 +1,455 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_IO_H
+#define BLOCK_IO_H
+
+#include "block/aio-wait.h"
+#include "block/block-common.h"
+#include "qemu/coroutine.h"
+#include "qemu/iov.h"
+
+/*
+ * I/O API functions. These functions are thread-safe, and therefore
+ * can run in any thread.
+ *
+ * These functions can only call functions from I/O and Common categories,
+ * but can be invoked by GS, "I/O or GS" and I/O APIs.
+ *
+ * All functions in this category must use the macro
+ * IO_CODE();
+ * to catch when they are accidentally called by the wrong API.
+ */
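A minimal sketch of an I/O-category helper (the name is hypothetical); being pure I/O, it may run in any iothread:

static bool example_io_helper(BlockDriverState *bs)
{
    IO_CODE();    /* marks this function as part of the I/O API */

    return bdrv_is_writable(bs) && !bdrv_is_sg(bs);
}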
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, int64_t bytes,
+ BdrvRequestFlags flags);
+
+int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pread(BdrvChild *child, int64_t offset, int64_t bytes, void *buf,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pwrite(BdrvChild *child, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+/*
+ * Efficiently zero a region of the disk image. Note that this is a regular
+ * I/O request like read or write and should have a reasonable size. This
+ * function is not suitable for zeroing the entire image in a single request
+ * because it may allocate memory for the entire region.
+ */
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, int64_t bytes,
+ BdrvRequestFlags flags);
+
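A sketch of the chunked-loop pattern this implies; the 1 MiB step is arbitrary, and MIN()/MiB (from osdep.h and qemu/units.h) as well as the caller's graph-lock handling are assumed:

static int coroutine_fn zero_range_in_chunks(BdrvChild *child, int64_t offset,
                                             int64_t bytes)
{
    const int64_t step = 1 * MiB;    /* keep each request reasonably sized */

    while (bytes > 0) {
        int64_t n = MIN(bytes, step);
        int ret = bdrv_co_pwrite_zeroes(child, offset, n, BDRV_REQ_MAY_UNMAP);
        if (ret < 0) {
            return ret;
        }
        offset += n;
        bytes -= n;
    }
    return 0;
}
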
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
+
+int64_t coroutine_fn GRAPH_RDLOCK bdrv_co_nb_sectors(BlockDriverState *bs);
+int64_t coroutine_mixed_fn bdrv_nb_sectors(BlockDriverState *bs);
+
+int64_t coroutine_fn GRAPH_RDLOCK bdrv_co_getlength(BlockDriverState *bs);
+int64_t co_wrapper_mixed_bdrv_rdlock bdrv_getlength(BlockDriverState *bs);
+
+int64_t coroutine_fn GRAPH_RDLOCK
+bdrv_co_get_allocated_file_size(BlockDriverState *bs);
+
+int64_t co_wrapper_bdrv_rdlock
+bdrv_get_allocated_file_size(BlockDriverState *bs);
+
+BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
+ BlockDriverState *in_bs, Error **errp);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_delete_file_noerr(BlockDriverState *bs);
+
+
+/* async block I/O */
+void bdrv_aio_cancel_async(BlockAIOCB *acb);
+
+/* sg packet commands */
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
+
+/* Ensure contents are flushed to disk. */
+int coroutine_fn GRAPH_RDLOCK bdrv_co_flush(BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
+ int64_t bytes);
+
+/* Report zone information for a zoned block device. */
+int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_report(BlockDriverState *bs,
+ int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_mgmt(BlockDriverState *bs,
+ BlockZoneOp op,
+ int64_t offset, int64_t len);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_append(BlockDriverState *bs,
+ int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_block_status_above(BlockDriverState *bs, BlockDriverState *base,
+ int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
+ int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ int64_t *pnum);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
+ int64_t bytes, int64_t *pnum);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
+ bool include_base, int64_t offset, int64_t bytes,
+ int64_t *pnum);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_is_allocated_above(BlockDriverState *bs, BlockDriverState *base,
+ bool include_base, int64_t offset,
+ int64_t bytes, int64_t *pnum);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+int GRAPH_RDLOCK
+bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
+ Error **errp);
+
+bool bdrv_is_read_only(BlockDriverState *bs);
+bool bdrv_is_writable(BlockDriverState *bs);
+bool bdrv_is_sg(BlockDriverState *bs);
+int bdrv_get_flags(BlockDriverState *bs);
+
+bool coroutine_fn GRAPH_RDLOCK bdrv_co_is_inserted(BlockDriverState *bs);
+bool co_wrapper_bdrv_rdlock bdrv_is_inserted(BlockDriverState *bs);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_lock_medium(BlockDriverState *bs, bool locked);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_eject(BlockDriverState *bs, bool eject_flag);
+
+const char *bdrv_get_format_name(BlockDriverState *bs);
+
+bool GRAPH_RDLOCK bdrv_supports_compressed_writes(BlockDriverState *bs);
+const char *bdrv_get_node_name(const BlockDriverState *bs);
+
+const char * GRAPH_RDLOCK
+bdrv_get_device_name(const BlockDriverState *bs);
+
+const char * GRAPH_RDLOCK
+bdrv_get_device_or_node_name(const BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
+
+ImageInfoSpecific * GRAPH_RDLOCK
+bdrv_get_specific_info(BlockDriverState *bs, Error **errp);
+
+BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
+void bdrv_round_to_subclusters(BlockDriverState *bs,
+ int64_t offset, int64_t bytes,
+ int64_t *cluster_offset,
+ int64_t *cluster_bytes);
+
+void bdrv_get_backing_filename(BlockDriverState *bs,
+ char *filename, int filename_size);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
+ const char *backing_fmt, bool warn);
+
+int co_wrapper_bdrv_rdlock
+bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
+ const char *backing_fmt, bool warn);
+
+int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
+ int64_t pos, int size);
+
+int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
+ int64_t pos, int size);
+
+/*
+ * Returns the alignment in bytes that is needed so that no bounce buffer
+ * is required anywhere in the stack
+ */
+size_t bdrv_min_mem_align(BlockDriverState *bs);
+/* Returns optimal alignment in bytes for bounce buffer */
+size_t bdrv_opt_mem_align(BlockDriverState *bs);
+void *qemu_blockalign(BlockDriverState *bs, size_t size);
+void *qemu_blockalign0(BlockDriverState *bs, size_t size);
+void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
+void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
+
+void bdrv_enable_copy_on_read(BlockDriverState *bs);
+void bdrv_disable_copy_on_read(BlockDriverState *bs);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event);
+
+void co_wrapper_mixed_bdrv_rdlock
+bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
+
+#define BLKDBG_CO_EVENT(child, evt) \
+ do { \
+ if (child) { \
+ bdrv_co_debug_event(child->bs, evt); \
+ } \
+ } while (0)
+
+#define BLKDBG_EVENT(child, evt) \
+ do { \
+ if (child) { \
+ bdrv_debug_event(child->bs, evt); \
+ } \
+ } while (0)
+
+/**
+ * bdrv_get_aio_context:
+ *
+ * Returns: the currently bound #AioContext
+ */
+AioContext *bdrv_get_aio_context(BlockDriverState *bs);
+
+AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c);
+
+/**
+ * Move the current coroutine to the AioContext of @bs and return the old
+ * AioContext of the coroutine. Increase bs->in_flight so that draining @bs
+ * will wait for the operation to proceed until the corresponding
+ * bdrv_co_leave().
+ *
+ * Consequently, you can't call drain inside a bdrv_co_enter/leave() section as
+ * this will deadlock.
+ */
+AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs);
+
+/**
+ * Ends a section started by bdrv_co_enter(). Move the current coroutine back
+ * to old_ctx and decrease bs->in_flight again.
+ */
+void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx);
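In coroutine context the pair brackets the work, roughly like this (the body is a placeholder):

static void coroutine_fn run_in_bs_context(BlockDriverState *bs)
{
    AioContext *old_ctx = bdrv_co_enter(bs);

    /* ... operate on bs; draining bs in this section would deadlock ... */

    bdrv_co_leave(bs, old_ctx);
}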
+
+AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c);
+
+bool coroutine_fn GRAPH_RDLOCK
+bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+ uint32_t granularity, Error **errp);
+bool co_wrapper_bdrv_rdlock
+bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+ uint32_t granularity, Error **errp);
+
+/**
+ *
+ * bdrv_co_copy_range:
+ *
+ * Do offloaded copy between two children. If the operation is not implemented
+ * by the driver, or if the backend storage doesn't support it, a negative
+ * error code will be returned.
+ *
+ * Note: the block layer doesn't emulate or fall back to a bounce buffer
+ * approach because usually the caller shouldn't attempt offloaded copy any
+ * more (e.g. by calling copy_file_range(2)) after the first error; instead,
+ * it should fall back to a read+write path at the caller level.
+ *
+ * @src: Source child to copy data from
+ * @src_offset: offset in @src image to read data
+ * @dst: Destination child to copy data to
+ * @dst_offset: offset in @dst image to write data
+ * @bytes: number of bytes to copy
+ * @flags: request flags. Supported flags:
+ * BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
+ * write on @dst as if bdrv_co_pwrite_zeroes is
+ * called. Used to simplify caller code, or
+ * during BlockDriver.bdrv_co_copy_range_from()
+ * recursion.
+ * BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
+ * requests currently in flight.
+ *
+ * Returns: 0 if succeeded; negative error code if failed.
+ **/
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+
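A sketch of the caller-level fallback described in the note (the helper name is hypothetical; bdrv_co_pread()/bdrv_co_pwrite() are the byte-based helpers declared elsewhere in the block layer, and @bytes is assumed small enough for one bounce buffer):

static int coroutine_fn copy_range_with_fallback(BdrvChild *src, int64_t src_off,
                                                 BdrvChild *dst, int64_t dst_off,
                                                 int64_t bytes)
{
    void *buf;
    int ret = bdrv_co_copy_range(src, src_off, dst, dst_off, bytes, 0, 0);
    if (ret >= 0) {
        return ret;
    }

    /* Offload failed (e.g. -ENOTSUP): do not retry it, read+write instead. */
    buf = qemu_blockalign(src->bs, bytes);
    ret = bdrv_co_pread(src, src_off, bytes, buf, 0);
    if (ret >= 0) {
        ret = bdrv_co_pwrite(dst, dst_off, bytes, buf, 0);
    }
    qemu_vfree(buf);
    return ret;
}
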
+/*
+ * "I/O or GS" API functions. These functions can run without
+ * the BQL, but only in one specific iothread/main loop.
+ *
+ * More specifically, these functions use BDRV_POLL_WHILE(bs), which requires
+ * the caller to be either in the main thread or directly in the home thread
+ * that runs the bs AioContext. Calling them from another thread in another
+ * AioContext would cause deadlocks.
+ *
+ * Therefore, these functions are not proper I/O functions, because they
+ * can't run in *any* iothread, but only in a specific one.
+ *
+ * These functions can call any function from I/O, Common and this
+ * categories, but must be invoked only by other "I/O or GS" and GS APIs.
+ *
+ * All functions in this category must use the macro
+ * IO_OR_GS_CODE();
+ * to catch when they are accidentally called by the wrong API.
+ */
+
+#define BDRV_POLL_WHILE(bs, cond) ({ \
+ BlockDriverState *bs_ = (bs); \
+ IO_OR_GS_CODE(); \
+ AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
+ cond); })
+
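For illustration, a hand-written wrapper of this kind could look as follows (all names are hypothetical, and the graph-lock handling that generated co_wrapper functions add is omitted); it spawns a coroutine and then uses BDRV_POLL_WHILE() to wait in the proper thread:

typedef struct {
    BlockDriverState *bs;
    int ret;
    bool done;
} ExampleFlushData;

static void coroutine_fn example_flush_entry(void *opaque)
{
    ExampleFlushData *data = opaque;

    data->ret = bdrv_co_flush(data->bs);
    data->done = true;
}

static int example_flush(BlockDriverState *bs)
{
    ExampleFlushData data = { .bs = bs, .done = false };
    Coroutine *co;

    IO_OR_GS_CODE();
    co = qemu_coroutine_create(example_flush_entry, &data);
    qemu_coroutine_enter(co);
    BDRV_POLL_WHILE(bs, !data.done);
    return data.ret;
}
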
+void bdrv_drain(BlockDriverState *bs);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
+
+/* Invalidate any cached metadata used by image formats */
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
+
+int co_wrapper_mixed_bdrv_rdlock bdrv_flush(BlockDriverState *bs);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+/**
+ * bdrv_parent_drained_begin_single:
+ *
+ * Begin a quiesced section for the parent of @c.
+ */
+void GRAPH_RDLOCK bdrv_parent_drained_begin_single(BdrvChild *c);
+
+/**
+ * bdrv_parent_drained_poll_single:
+ *
+ * Returns true if there is any pending activity to cease before @c can be
+ * considered quiesced, false otherwise.
+ */
+bool GRAPH_RDLOCK bdrv_parent_drained_poll_single(BdrvChild *c);
+
+/**
+ * bdrv_parent_drained_end_single:
+ *
+ * End a quiesced section for the parent of @c.
+ */
+void GRAPH_RDLOCK bdrv_parent_drained_end_single(BdrvChild *c);
+
+/**
+ * bdrv_drain_poll:
+ *
+ * Poll for pending requests in @bs and its parents (except for @ignore_parent).
+ *
+ * If @ignore_bds_parents is true, parents that are BlockDriverStates must
+ * ignore the drain request because they will be drained separately (used for
+ * drain_all).
+ *
+ * This is part of bdrv_drained_begin.
+ */
+bool GRAPH_RDLOCK
+bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
+ bool ignore_bds_parents);
+
+/**
+ * bdrv_drained_begin:
+ *
+ * Begin a quiesced section for exclusive access to the BDS, by disabling
+ * external request sources including NBD server, block jobs, and device model.
+ *
+ * This function can only be invoked by the main loop or a coroutine
+ * (regardless of the AioContext where it is running).
+ * If the coroutine is running in an Iothread AioContext, this function will
+ * just schedule a BH to run in the main loop.
+ * However, it cannot be directly called by an Iothread.
+ *
+ * This function can be recursive.
+ */
+void bdrv_drained_begin(BlockDriverState *bs);
+
+/**
+ * bdrv_do_drained_begin_quiesce:
+ *
+ * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
+ * running requests to complete.
+ */
+void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent);
+
+/**
+ * bdrv_drained_end:
+ *
+ * End a quiescent section started by bdrv_drained_begin().
+ *
+ * This function can only be invoked by the main loop or a coroutine
+ * (regardless of the AioContext where it is running).
+ * If the coroutine is running in an Iothread AioContext, this function will
+ * just schedule a BH to run in the main loop.
+ * However, it cannot be directly called by an Iothread.
+ */
+void bdrv_drained_end(BlockDriverState *bs);
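The begin/end pair brackets the exclusive section, roughly like this:

bdrv_drained_begin(bs);

/* No new external requests can arrive and previously submitted ones have
 * completed, so bs can be reconfigured safely here. */

bdrv_drained_end(bs);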
+
+#endif /* BLOCK_IO_H */
diff --git a/include/block/block.h b/include/block/block.h
index f70a843b72..e2c647de27 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -1,717 +1,32 @@
-#ifndef BLOCK_H
-#define BLOCK_H
-
-#include "block/aio.h"
-#include "qapi/qapi-types-block-core.h"
-#include "block/aio-wait.h"
-#include "qemu/iov.h"
-#include "qemu/coroutine.h"
-#include "block/accounting.h"
-#include "block/dirty-bitmap.h"
-#include "block/blockjob.h"
-#include "qemu/hbitmap.h"
-
-/* block.c */
-typedef struct BlockDriver BlockDriver;
-typedef struct BdrvChild BdrvChild;
-typedef struct BdrvChildRole BdrvChildRole;
-
-typedef struct BlockDriverInfo {
- /* in bytes, 0 if irrelevant */
- int cluster_size;
- /* offset at which the VM state can be saved (0 if not possible) */
- int64_t vm_state_offset;
- bool is_dirty;
- /*
- * True if unallocated blocks read back as zeroes. This is equivalent
- * to the LBPRZ flag in the SCSI logical block provisioning page.
- */
- bool unallocated_blocks_are_zero;
- /*
- * True if this block driver only supports compressed writes
- */
- bool needs_compressed_writes;
-} BlockDriverInfo;
-
-typedef struct BlockFragInfo {
- uint64_t allocated_clusters;
- uint64_t total_clusters;
- uint64_t fragmented_clusters;
- uint64_t compressed_clusters;
-} BlockFragInfo;
-
-typedef enum {
- BDRV_REQ_COPY_ON_READ = 0x1,
- BDRV_REQ_ZERO_WRITE = 0x2,
-
- /*
- * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
- * that the block driver should unmap (discard) blocks if it is guaranteed
- * that the result will read back as zeroes. The flag is only passed to the
- * driver if the block device is opened with BDRV_O_UNMAP.
- */
- BDRV_REQ_MAY_UNMAP = 0x4,
-
- /*
- * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means that
- * we don't want wait_serialising_requests() during the read operation.
- *
- * This flag is used for backup copy-on-write operations, when we need to
- * read old data before write (write notifier triggered). It is okay since
- * we already waited for other serializing requests in the initiating write
- * (see bdrv_aligned_pwritev), and it is necessary if the initiating write
- * is already serializing (without the flag, the read would deadlock
- * waiting for the serialising write to complete).
- */
- BDRV_REQ_NO_SERIALISING = 0x8,
- BDRV_REQ_FUA = 0x10,
- BDRV_REQ_WRITE_COMPRESSED = 0x20,
-
- /* Signifies that this write request will not change the visible disk
- * content. */
- BDRV_REQ_WRITE_UNCHANGED = 0x40,
-
- /*
- * BDRV_REQ_SERIALISING forces request serialisation for writes.
- * It is used to ensure that writes to the backing file of a backup process
- * target cannot race with a read of the backup target that defers to the
- * backing file.
- *
- * Note, that BDRV_REQ_SERIALISING is _not_ opposite in meaning to
- * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
- * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
- */
- BDRV_REQ_SERIALISING = 0x80,
-
- /* Mask of valid flags */
- BDRV_REQ_MASK = 0xff,
-} BdrvRequestFlags;
-
-typedef struct BlockSizes {
- uint32_t phys;
- uint32_t log;
-} BlockSizes;
-
-typedef struct HDGeometry {
- uint32_t heads;
- uint32_t sectors;
- uint32_t cylinders;
-} HDGeometry;
-
-#define BDRV_O_RDWR 0x0002
-#define BDRV_O_RESIZE 0x0004 /* request permission for resizing the node */
-#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */
-#define BDRV_O_TEMPORARY 0x0010 /* delete the file after use */
-#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */
-#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the thread pool */
-#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */
-#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */
-#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
-#define BDRV_O_INACTIVE 0x0800 /* consistency hint for migration handoff */
-#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */
-#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */
-#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */
-#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given:
- select an appropriate protocol driver,
- ignoring the format layer */
-#define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */
-#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening read-write fails */
-
-#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
-
-
-/* Option names of options parsed by the block layer */
-
-#define BDRV_OPT_CACHE_WB "cache.writeback"
-#define BDRV_OPT_CACHE_DIRECT "cache.direct"
-#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
-#define BDRV_OPT_READ_ONLY "read-only"
-#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
-#define BDRV_OPT_DISCARD "discard"
-#define BDRV_OPT_FORCE_SHARE "force-share"
-
-
-#define BDRV_SECTOR_BITS 9
-#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
-#define BDRV_SECTOR_MASK ~(BDRV_SECTOR_SIZE - 1)
-
-#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
- INT_MAX >> BDRV_SECTOR_BITS)
-#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
-
-/*
- * Allocation status flags for bdrv_block_status() and friends.
- *
- * Public flags:
- * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
- * BDRV_BLOCK_ZERO: offset reads as zero
- * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
- * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
- * layer rather than any backing, set by block layer
- * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
- * layer, set by block layer
- *
- * Internal flag:
- * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
- * that the block layer recompute the answer from the returned
- * BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
- *
- * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
- * host offset within the returned BDS that is allocated for the
- * corresponding raw guest data. However, whether that offset
- * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
- *
- * DATA ZERO OFFSET_VALID
- * t t t sectors read as zero, returned file is zero at offset
- * t f t sectors read as valid from file at offset
- * f t t sectors preallocated, read as zero, returned file not
- * necessarily zero at offset
- * f f t sectors preallocated but read from backing_hd,
- * returned file contains garbage at offset
- * t t f sectors preallocated, read as zero, unknown offset
- * t f f sectors read from unknown file or offset
- * f t f not allocated or unknown offset, read as zero
- * f f f not allocated or unknown offset, read from backing_hd
- */
-#define BDRV_BLOCK_DATA 0x01
-#define BDRV_BLOCK_ZERO 0x02
-#define BDRV_BLOCK_OFFSET_VALID 0x04
-#define BDRV_BLOCK_RAW 0x08
-#define BDRV_BLOCK_ALLOCATED 0x10
-#define BDRV_BLOCK_EOF 0x20
-#define BDRV_BLOCK_OFFSET_MASK BDRV_SECTOR_MASK
-
-typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;
-
-typedef struct BDRVReopenState {
- BlockDriverState *bs;
- int flags;
- BlockdevDetectZeroesOptions detect_zeroes;
- uint64_t perm, shared_perm;
- QDict *options;
- QDict *explicit_options;
- void *opaque;
-} BDRVReopenState;
-
-/*
- * Block operation types
- */
-typedef enum BlockOpType {
- BLOCK_OP_TYPE_BACKUP_SOURCE,
- BLOCK_OP_TYPE_BACKUP_TARGET,
- BLOCK_OP_TYPE_CHANGE,
- BLOCK_OP_TYPE_COMMIT_SOURCE,
- BLOCK_OP_TYPE_COMMIT_TARGET,
- BLOCK_OP_TYPE_DATAPLANE,
- BLOCK_OP_TYPE_DRIVE_DEL,
- BLOCK_OP_TYPE_EJECT,
- BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
- BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
- BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
- BLOCK_OP_TYPE_MIRROR_SOURCE,
- BLOCK_OP_TYPE_MIRROR_TARGET,
- BLOCK_OP_TYPE_RESIZE,
- BLOCK_OP_TYPE_STREAM,
- BLOCK_OP_TYPE_REPLACE,
- BLOCK_OP_TYPE_MAX,
-} BlockOpType;
-
-/* Block node permission constants */
-enum {
- /**
- * A user that has the "permission" of consistent reads is guaranteed that
- * their view of the contents of the block device is complete and
- * self-consistent, representing the contents of a disk at a specific
- * point.
- *
- * For most block devices (including their backing files) this is true, but
- * the property cannot be maintained in a few situations like for
- * intermediate nodes of a commit block job.
- */
- BLK_PERM_CONSISTENT_READ = 0x01,
-
- /** This permission is required to change the visible disk contents. */
- BLK_PERM_WRITE = 0x02,
-
- /**
- * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
- * required for writes to the block node when the caller promises that
- * the visible disk content doesn't change.
- *
- * As the BLK_PERM_WRITE permission is strictly stronger, either is
- * sufficient to perform an unchanging write.
- */
- BLK_PERM_WRITE_UNCHANGED = 0x04,
-
- /** This permission is required to change the size of a block node. */
- BLK_PERM_RESIZE = 0x08,
-
- /**
- * This permission is required to change the node that this BdrvChild
- * points to.
- */
- BLK_PERM_GRAPH_MOD = 0x10,
-
- BLK_PERM_ALL = 0x1f,
-
- DEFAULT_PERM_PASSTHROUGH = BLK_PERM_CONSISTENT_READ
- | BLK_PERM_WRITE
- | BLK_PERM_WRITE_UNCHANGED
- | BLK_PERM_RESIZE,
-
- DEFAULT_PERM_UNCHANGED = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
-};
-
-char *bdrv_perm_names(uint64_t perm);
-
-/* disk I/O throttling */
-void bdrv_init(void);
-void bdrv_init_with_whitelist(void);
-bool bdrv_uses_whitelist(void);
-int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
-BlockDriver *bdrv_find_protocol(const char *filename,
- bool allow_protocol_prefix,
- Error **errp);
-BlockDriver *bdrv_find_format(const char *format_name);
-int bdrv_create(BlockDriver *drv, const char* filename,
- QemuOpts *opts, Error **errp);
-int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
-BlockDriverState *bdrv_new(void);
-void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
- Error **errp);
-void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
- Error **errp);
-
-int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
-int bdrv_parse_discard_flags(const char *mode, int *flags);
-BdrvChild *bdrv_open_child(const char *filename,
- QDict *options, const char *bdref_key,
- BlockDriverState* parent,
- const BdrvChildRole *child_role,
- bool allow_none, Error **errp);
-BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
-void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
- Error **errp);
-int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
- const char *bdref_key, Error **errp);
-BlockDriverState *bdrv_open(const char *filename, const char *reference,
- QDict *options, int flags, Error **errp);
-BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
- int flags, Error **errp);
-BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
- BlockDriverState *bs, QDict *options);
-int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp);
-int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
- Error **errp);
-int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
- BlockReopenQueue *queue, Error **errp);
-void bdrv_reopen_commit(BDRVReopenState *reopen_state);
-void bdrv_reopen_abort(BDRVReopenState *reopen_state);
-int bdrv_read(BdrvChild *child, int64_t sector_num,
- uint8_t *buf, int nb_sectors);
-int bdrv_write(BdrvChild *child, int64_t sector_num,
- const uint8_t *buf, int nb_sectors);
-int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
-int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
-int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
-int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
-int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
-int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
- const void *buf, int count);
/*
- * Efficiently zero a region of the disk image. Note that this is a regular
- * I/O request like read or write and should have a reasonable size. This
- * function is not suitable for zeroing the entire image in a single request
- * because it may allocate memory for the entire region.
- */
-int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
- const char *backing_file);
-void bdrv_refresh_filename(BlockDriverState *bs);
-
-int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
- PreallocMode prealloc, Error **errp);
-int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
- Error **errp);
-
-int64_t bdrv_nb_sectors(BlockDriverState *bs);
-int64_t bdrv_getlength(BlockDriverState *bs);
-int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
-BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
- BlockDriverState *in_bs, Error **errp);
-void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
-void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
-int bdrv_commit(BlockDriverState *bs);
-int bdrv_change_backing_file(BlockDriverState *bs,
- const char *backing_file, const char *backing_fmt);
-void bdrv_register(BlockDriver *bdrv);
-int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
- const char *backing_file_str);
-BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
- BlockDriverState *bs);
-BlockDriverState *bdrv_find_base(BlockDriverState *bs);
-
-
-typedef struct BdrvCheckResult {
- int corruptions;
- int leaks;
- int check_errors;
- int corruptions_fixed;
- int leaks_fixed;
- int64_t image_end_offset;
- BlockFragInfo bfi;
-} BdrvCheckResult;
-
-typedef enum {
- BDRV_FIX_LEAKS = 1,
- BDRV_FIX_ERRORS = 2,
-} BdrvCheckMode;
-
-int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
-
-/* The units of offset and total_work_size may be chosen arbitrarily by the
- * block driver; total_work_size may change during the course of the amendment
- * operation */
-typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
- int64_t total_work_size, void *opaque);
-int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
- BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
- Error **errp);
-
-/* external snapshots */
-bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
- BlockDriverState *candidate);
-bool bdrv_is_first_non_filter(BlockDriverState *candidate);
-
-/* check if a named node can be replaced when doing drive-mirror */
-BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
- const char *node_name, Error **errp);
-
-/* async block I/O */
-void bdrv_aio_cancel(BlockAIOCB *acb);
-void bdrv_aio_cancel_async(BlockAIOCB *acb);
-
-/* sg packet commands */
-int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
-
-/* Invalidate any cached metadata used by image formats */
-void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
-void bdrv_invalidate_cache_all(Error **errp);
-int bdrv_inactivate_all(void);
-
-/* Ensure contents are flushed to disk. */
-int bdrv_flush(BlockDriverState *bs);
-int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
-int bdrv_flush_all(void);
-void bdrv_close_all(void);
-void bdrv_drain(BlockDriverState *bs);
-void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
-void bdrv_drain_all_begin(void);
-void bdrv_drain_all_end(void);
-void bdrv_drain_all(void);
-
-#define BDRV_POLL_WHILE(bs, cond) ({ \
- BlockDriverState *bs_ = (bs); \
- AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
- cond); })
-
-int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes);
-int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes);
-int bdrv_has_zero_init_1(BlockDriverState *bs);
-int bdrv_has_zero_init(BlockDriverState *bs);
-bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
-bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
-int bdrv_block_status(BlockDriverState *bs, int64_t offset,
- int64_t bytes, int64_t *pnum, int64_t *map,
- BlockDriverState **file);
-int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
- int64_t offset, int64_t bytes, int64_t *pnum,
- int64_t *map, BlockDriverState **file);
-int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
- int64_t *pnum);
-int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
- int64_t offset, int64_t bytes, int64_t *pnum);
-
-bool bdrv_is_read_only(BlockDriverState *bs);
-int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
- bool ignore_allow_rdw, Error **errp);
-int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
- Error **errp);
-bool bdrv_is_writable(BlockDriverState *bs);
-bool bdrv_is_sg(BlockDriverState *bs);
-bool bdrv_is_inserted(BlockDriverState *bs);
-void bdrv_lock_medium(BlockDriverState *bs, bool locked);
-void bdrv_eject(BlockDriverState *bs, bool eject_flag);
-const char *bdrv_get_format_name(BlockDriverState *bs);
-BlockDriverState *bdrv_find_node(const char *node_name);
-BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
-BlockDriverState *bdrv_lookup_bs(const char *device,
- const char *node_name,
- Error **errp);
-bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
-BlockDriverState *bdrv_next_node(BlockDriverState *bs);
-BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);
-
-typedef struct BdrvNextIterator {
- enum {
- BDRV_NEXT_BACKEND_ROOTS,
- BDRV_NEXT_MONITOR_OWNED,
- } phase;
- BlockBackend *blk;
- BlockDriverState *bs;
-} BdrvNextIterator;
-
-BlockDriverState *bdrv_first(BdrvNextIterator *it);
-BlockDriverState *bdrv_next(BdrvNextIterator *it);
-void bdrv_next_cleanup(BdrvNextIterator *it);
-
-BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
-bool bdrv_is_encrypted(BlockDriverState *bs);
-void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
- void *opaque);
-const char *bdrv_get_node_name(const BlockDriverState *bs);
-const char *bdrv_get_device_name(const BlockDriverState *bs);
-const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
-int bdrv_get_flags(BlockDriverState *bs);
-int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
-ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
-void bdrv_round_to_clusters(BlockDriverState *bs,
- int64_t offset, int64_t bytes,
- int64_t *cluster_offset,
- int64_t *cluster_bytes);
-
-const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
-void bdrv_get_backing_filename(BlockDriverState *bs,
- char *filename, int filename_size);
-void bdrv_get_full_backing_filename(BlockDriverState *bs,
- char *dest, size_t sz, Error **errp);
-void bdrv_get_full_backing_filename_from_filename(const char *backed,
- const char *backing,
- char *dest, size_t sz,
- Error **errp);
-
-int path_has_protocol(const char *path);
-int path_is_absolute(const char *path);
-void path_combine(char *dest, int dest_size,
- const char *base_path,
- const char *filename);
-
-int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
-int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
-int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
- int64_t pos, int size);
-
-int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
- int64_t pos, int size);
-
-void bdrv_img_create(const char *filename, const char *fmt,
- const char *base_filename, const char *base_fmt,
- char *options, uint64_t img_size, int flags,
- bool quiet, Error **errp);
-
-/* Returns the alignment in bytes that is required so that no bounce buffer
- * is required throughout the stack */
-size_t bdrv_min_mem_align(BlockDriverState *bs);
-/* Returns optimal alignment in bytes for bounce buffer */
-size_t bdrv_opt_mem_align(BlockDriverState *bs);
-void *qemu_blockalign(BlockDriverState *bs, size_t size);
-void *qemu_blockalign0(BlockDriverState *bs, size_t size);
-void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
-void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
-bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
-
-void bdrv_enable_copy_on_read(BlockDriverState *bs);
-void bdrv_disable_copy_on_read(BlockDriverState *bs);
-
-void bdrv_ref(BlockDriverState *bs);
-void bdrv_unref(BlockDriverState *bs);
-void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
-BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
- BlockDriverState *child_bs,
- const char *child_name,
- const BdrvChildRole *child_role,
- Error **errp);
-
-bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
-void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
-void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
-void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
-void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
-bool bdrv_op_blocker_is_empty(BlockDriverState *bs);
-
-#define BLKDBG_EVENT(child, evt) \
- do { \
- if (child) { \
- bdrv_debug_event(child->bs, evt); \
- } \
- } while (0)
-
-void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
-
-int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
- const char *tag);
-int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
-int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
-bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
-
-/**
- * bdrv_get_aio_context:
- *
- * Returns: the currently bound #AioContext
- */
-AioContext *bdrv_get_aio_context(BlockDriverState *bs);
-
-/**
- * Transfer control to @co in the aio context of @bs
- */
-void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);
-
-/**
- * bdrv_set_aio_context:
- *
- * Changes the #AioContext used for fd handlers, timers, and BHs by this
- * BlockDriverState and all its children.
- *
- * This function must be called with iothread lock held.
- */
-void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context);
-int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
-int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
-
-void bdrv_io_plug(BlockDriverState *bs);
-void bdrv_io_unplug(BlockDriverState *bs);
-
-/**
- * bdrv_parent_drained_begin:
- *
- * Begin a quiesced section of all users of @bs. This is part of
- * bdrv_drained_begin.
- */
-void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
- bool ignore_bds_parents);
-
-/**
- * bdrv_parent_drained_begin_single:
- *
- * Begin a quiesced section for the parent of @c. If @poll is true, wait for
- * any pending activity to cease.
- */
-void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);
-
-/**
- * bdrv_parent_drained_end:
+ * QEMU System Emulator block driver
*
- * End a quiesced section of all users of @bs. This is part of
- * bdrv_drained_end.
- */
-void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
- bool ignore_bds_parents);
-
-/**
- * bdrv_drain_poll:
+ * Copyright (c) 2003 Fabrice Bellard
*
- * Poll for pending requests in @bs, its parents (except for @ignore_parent),
- * and if @recursive is true its children as well (used for subtree drain).
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
*
- * If @ignore_bds_parents is true, parents that are BlockDriverStates must
- * ignore the drain request because they will be drained separately (used for
- * drain_all).
- *
- * This is part of bdrv_drained_begin.
- */
-bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
- BdrvChild *ignore_parent, bool ignore_bds_parents);
-
-/**
- * bdrv_drained_begin:
- *
- * Begin a quiesced section for exclusive access to the BDS, by disabling
- * external request sources including NBD server and device model. Note that
- * this doesn't block timers or coroutines from submitting more requests, which
- * means block_job_pause is still necessary.
- *
- * This function can be recursive.
- */
-void bdrv_drained_begin(BlockDriverState *bs);
-
-/**
- * bdrv_do_drained_begin_quiesce:
- *
- * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
- * running requests to complete.
- */
-void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
- BdrvChild *parent, bool ignore_bds_parents);
-
-/**
- * Like bdrv_drained_begin, but recursively begins a quiesced section for
- * exclusive access to all child nodes as well.
- */
-void bdrv_subtree_drained_begin(BlockDriverState *bs);
-
-/**
- * bdrv_drained_end:
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * End a quiescent section started by bdrv_drained_begin().
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
*/
-void bdrv_drained_end(BlockDriverState *bs);
+#ifndef BLOCK_H
+#define BLOCK_H
-/**
- * End a quiescent section started by bdrv_subtree_drained_begin().
- */
-void bdrv_subtree_drained_end(BlockDriverState *bs);
+#include "block/block-global-state.h"
+#include "block/block-io.h"
-void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
- Error **errp);
-void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
+/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */
-bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
- uint32_t granularity, Error **errp);
-/**
- *
- * bdrv_register_buf/bdrv_unregister_buf:
- *
- * Register/unregister a buffer for I/O. For example, VFIO drivers are
- * interested to know the memory areas that would later be used for I/O, so
- * that they can prepare IOMMU mapping etc., to get better performance.
- */
-void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size);
-void bdrv_unregister_buf(BlockDriverState *bs, void *host);
-
-/**
- *
- * bdrv_co_copy_range:
- *
- * Do offloaded copy between two children. If the operation is not implemented
- * by the driver, or if the backend storage doesn't support it, a negative
- * error code will be returned.
- *
- * Note: block layer doesn't emulate or fallback to a bounce buffer approach
- * because usually the caller shouldn't attempt offloaded copy any more (e.g.
- * calling copy_file_range(2)) after the first error, thus it should fall back
- * to a read+write path in the caller level.
- *
- * @src: Source child to copy data from
- * @src_offset: offset in @src image to read data
- * @dst: Destination child to copy data to
- * @dst_offset: offset in @dst image to write data
- * @bytes: number of bytes to copy
- * @flags: request flags. Supported flags:
- * BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
- * write on @dst as if bdrv_co_pwrite_zeroes is
- * called. Used to simplify caller code, or
- * during BlockDriver.bdrv_co_copy_range_from()
- * recursion.
- * BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
- * requests currently in flight.
- *
- * Returns: 0 if succeeded; negative error code if failed.
- **/
-int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
- BdrvChild *dst, uint64_t dst_offset,
- uint64_t bytes, BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-#endif
+#endif /* BLOCK_H */
diff --git a/include/block/block_backup.h b/include/block/block_backup.h
index 157596c296..4d4d5ba153 100644
--- a/include/block/block_backup.h
+++ b/include/block/block_backup.h
@@ -18,7 +18,7 @@
#ifndef BLOCK_BACKUP_H
#define BLOCK_BACKUP_H
-#include "block/block_int.h"
+#include "block/blockjob.h"
void backup_do_checkpoint(BlockJob *job, Error **errp);
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
new file mode 100644
index 0000000000..761276127e
--- /dev/null
+++ b/include/block/block_int-common.h
@@ -0,0 +1,1321 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_INT_COMMON_H
+#define BLOCK_INT_COMMON_H
+
+#include "block/aio.h"
+#include "block/block-common.h"
+#include "block/block-global-state.h"
+#include "block/snapshot.h"
+#include "qemu/iov.h"
+#include "qemu/rcu.h"
+#include "qemu/stats64.h"
+
+#define BLOCK_FLAG_LAZY_REFCOUNTS 8
+
+#define BLOCK_OPT_SIZE "size"
+#define BLOCK_OPT_ENCRYPT "encryption"
+#define BLOCK_OPT_ENCRYPT_FORMAT "encrypt.format"
+#define BLOCK_OPT_COMPAT6 "compat6"
+#define BLOCK_OPT_HWVERSION "hwversion"
+#define BLOCK_OPT_BACKING_FILE "backing_file"
+#define BLOCK_OPT_BACKING_FMT "backing_fmt"
+#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
+#define BLOCK_OPT_TABLE_SIZE "table_size"
+#define BLOCK_OPT_PREALLOC "preallocation"
+#define BLOCK_OPT_SUBFMT "subformat"
+#define BLOCK_OPT_COMPAT_LEVEL "compat"
+#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
+#define BLOCK_OPT_ADAPTER_TYPE "adapter_type"
+#define BLOCK_OPT_REDUNDANCY "redundancy"
+#define BLOCK_OPT_NOCOW "nocow"
+#define BLOCK_OPT_EXTENT_SIZE_HINT "extent_size_hint"
+#define BLOCK_OPT_OBJECT_SIZE "object_size"
+#define BLOCK_OPT_REFCOUNT_BITS "refcount_bits"
+#define BLOCK_OPT_DATA_FILE "data_file"
+#define BLOCK_OPT_DATA_FILE_RAW "data_file_raw"
+#define BLOCK_OPT_COMPRESSION_TYPE "compression_type"
+#define BLOCK_OPT_EXTL2 "extended_l2"
+
+#define BLOCK_PROBE_BUF_SIZE 512
+
+enum BdrvTrackedRequestType {
+ BDRV_TRACKED_READ,
+ BDRV_TRACKED_WRITE,
+ BDRV_TRACKED_DISCARD,
+ BDRV_TRACKED_TRUNCATE,
+};
+
+/*
+ * It is not ideal that the BdrvTrackedRequest structure is public,
+ * because block/io.c is very careful about incoming offset/bytes being
+ * correct. Be sure to assert that bdrv_check_request() succeeds after
+ * any modification of a BdrvTrackedRequest object outside of block/io.c
+ * (see the sketch after the struct definition below).
+ */
+typedef struct BdrvTrackedRequest {
+ BlockDriverState *bs;
+ int64_t offset;
+ int64_t bytes;
+ enum BdrvTrackedRequestType type;
+
+ bool serialising;
+ int64_t overlap_offset;
+ int64_t overlap_bytes;
+
+ QLIST_ENTRY(BdrvTrackedRequest) list;
+ Coroutine *co; /* owner, used for deadlock detection */
+ CoQueue wait_queue; /* coroutines blocked on this request */
+
+ struct BdrvTrackedRequest *waiting_for;
+} BdrvTrackedRequest;
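+
+/*
+ * A minimal illustrative sketch (hypothetical variables new_offset/new_bytes,
+ * assuming bdrv_check_request() returns 0 on success): code outside
+ * block/io.c that adjusts a tracked request should re-validate it, e.g.
+ *
+ *     req->offset = new_offset;
+ *     req->bytes = new_bytes;
+ *     assert(bdrv_check_request(req->offset, req->bytes, NULL) == 0);
+ */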
+
+
+struct BlockDriver {
+ /*
+ * These fields are initialized when this object is created,
+ * and are never changed afterwards.
+ */
+
+ const char *format_name;
+ int instance_size;
+
+ /*
+ * Set to true if the BlockDriver is a block filter. Block filters pass
+ * certain callbacks that refer to data (see block.c) to their bs->file
+ * or bs->backing (whichever one exists) if the driver doesn't implement
+ * them. Drivers that do not wish to forward must implement them and return
+ * -ENOTSUP.
+ * Note that filters are not allowed to modify data.
+ *
+ * Filters generally cannot have more than a single filtered child,
+ * because the data they present must at all times be the same as
+ * that on their filtered child. That would be impossible to
+ * achieve for multiple filtered children.
+ * (And this filtered child must then be bs->file or bs->backing.)
+ */
+ bool is_filter;
+    /*
+     * Only meaningful for filter drivers; for all others it must be false.
+     * If true, the filtered child is bs->backing; otherwise it is bs->file.
+     * Two internal filters use bs->backing as the filtered child and have
+     * this field set to true: mirror_top and commit_top. There are also two
+     * such test filters in tests/unit/test-bdrv-graph-mod.c.
+     *
+     * Never create any more such filters!
+     *
+     * TODO: work out how to deprecate this behavior and make all filters
+     * use bs->file as the filtered child.
+     */
+ bool filtered_child_is_backing;
+
+ /*
+ * Set to true if the BlockDriver is a format driver. Format nodes
+ * generally do not expect their children to be other format nodes
+ * (except for backing files), and so format probing is disabled
+ * on those children.
+ */
+ bool is_format;
+
+ /*
+ * Set to true if the BlockDriver supports zoned children.
+ */
+ bool supports_zoned_children;
+
+    /*
+     * Drivers that implement neither bdrv_parse_filename nor bdrv_open
+     * should have this field set to true, except those that are defined
+     * only by their child's bs.
+     * An example of the latter type is the quorum block driver.
+     */
+ bool bdrv_needs_filename;
+
+ /*
+ * Set if a driver can support backing files. This also implies the
+ * following semantics:
+ *
+     * - A return status of 0 from .bdrv_co_block_status means that the
+     *   corresponding blocks are not allocated in this layer of the
+     *   backing chain
+     * - For such (unallocated) blocks, a read will:
+     *   - fill the buffer with zeros if there is no backing file
+     *   - read from the backing file otherwise, where the block layer
+     *     takes care of reading zeros beyond EOF if the backing file is short
+ */
+ bool supports_backing;
+
+ /*
+ * Drivers setting this field must be able to work with just a plain
+ * filename with '<protocol_name>:' as a prefix, and no other options.
+ * Options may be extracted from the filename by implementing
+ * bdrv_parse_filename.
+ */
+ const char *protocol_name;
+
+ /* List of options for creating images, terminated by name == NULL */
+ QemuOptsList *create_opts;
+
+ /* List of options for image amend */
+ QemuOptsList *amend_opts;
+
+ /*
+     * If this driver supports reopening images, this contains a
+     * NULL-terminated list of the runtime options that can be
+     * modified. If an option in this list is unspecified during
+     * reopen then it _must_ be reset to its default value, or the
+     * reopen must fail with an error.
+ */
+ const char *const *mutable_opts;
+
+ /*
+ * Pointer to a NULL-terminated array of names of strong options
+ * that can be specified for bdrv_open(). A strong option is one
+ * that changes the data of a BDS.
+ * If this pointer is NULL, the array is considered empty.
+ * "filename" and "driver" are always considered strong.
+ */
+ const char *const *strong_runtime_opts;
+
+
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+ /*
+ * This function is invoked under BQL before .bdrv_co_amend()
+ * (which in contrast does not necessarily run under the BQL)
+ * to allow driver-specific initialization code that requires
+ * the BQL, like setting up specific permission flags.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_amend_pre_run)(
+ BlockDriverState *bs, Error **errp);
+ /*
+ * This function is invoked under BQL after .bdrv_co_amend()
+ * to allow cleaning up what was done in .bdrv_amend_pre_run().
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_amend_clean)(BlockDriverState *bs);
+
+ /*
+ * Return true if @to_replace can be replaced by a BDS with the
+ * same data as @bs without it affecting @bs's behavior (that is,
+ * without it being visible to @bs's parents).
+ */
+ bool GRAPH_RDLOCK_PTR (*bdrv_recurse_can_replace)(
+ BlockDriverState *bs, BlockDriverState *to_replace);
+
+ int (*bdrv_probe_device)(const char *filename);
+
+ /*
+ * Any driver implementing this callback is expected to be able to handle
+ * NULL file names in its .bdrv_open() implementation.
+ */
+ void (*bdrv_parse_filename)(const char *filename, QDict *options,
+ Error **errp);
+
+ /* For handling image reopen for split or non-split files. */
+ int GRAPH_UNLOCKED_PTR (*bdrv_reopen_prepare)(
+ BDRVReopenState *reopen_state, BlockReopenQueue *queue, Error **errp);
+ void GRAPH_UNLOCKED_PTR (*bdrv_reopen_commit)(
+ BDRVReopenState *reopen_state);
+ void GRAPH_UNLOCKED_PTR (*bdrv_reopen_commit_post)(
+ BDRVReopenState *reopen_state);
+ void GRAPH_UNLOCKED_PTR (*bdrv_reopen_abort)(
+ BDRVReopenState *reopen_state);
+ void (*bdrv_join_options)(QDict *options, QDict *old_options);
+
+ int GRAPH_UNLOCKED_PTR (*bdrv_open)(
+ BlockDriverState *bs, QDict *options, int flags, Error **errp);
+
+ /* Protocol drivers should implement this instead of bdrv_open */
+ int GRAPH_UNLOCKED_PTR (*bdrv_file_open)(
+ BlockDriverState *bs, QDict *options, int flags, Error **errp);
+ void (*bdrv_close)(BlockDriverState *bs);
+
+ int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create)(
+ BlockdevCreateOptions *opts, Error **errp);
+
+ int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create_opts)(
+ BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_amend_options)(
+ BlockDriverState *bs, QemuOpts *opts,
+ BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
+ bool force, Error **errp);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_make_empty)(BlockDriverState *bs);
+
+ /*
+ * Refreshes the bs->exact_filename field. If that is impossible,
+ * bs->exact_filename has to be left empty.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_refresh_filename)(BlockDriverState *bs);
+
+ /*
+ * Gathers the open options for all children into @target.
+ * A simple format driver (without backing file support) might
+ * implement this function like this:
+ *
+ * QINCREF(bs->file->bs->full_open_options);
+ * qdict_put(target, "file", bs->file->bs->full_open_options);
+ *
+ * If not specified, the generic implementation will simply put
+ * all children's options under their respective name.
+ *
+ * @backing_overridden is true when bs->backing seems not to be
+ * the child that would result from opening bs->backing_file.
+ * Therefore, if it is true, the backing child's options should be
+ * gathered; otherwise, there is no need since the backing child
+ * is the one implied by the image header.
+ *
+ * Note that ideally this function would not be needed. Every
+ * block driver which implements it is probably doing something
+ * shady regarding its runtime option structure.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_gather_child_options)(
+ BlockDriverState *bs, QDict *target, bool backing_overridden);
+
+ /*
+ * Returns an allocated string which is the directory name of this BDS: It
+ * will be used to make relative filenames absolute by prepending this
+ * function's return value to them.
+ */
+ char * GRAPH_RDLOCK_PTR (*bdrv_dirname)(BlockDriverState *bs, Error **errp);
+
+ /*
+ * This informs the driver that we are no longer interested in the result
+ * of in-flight requests, so don't waste the time if possible.
+ *
+ * One example usage is to avoid waiting for an nbd target node reconnect
+ * timeout during job-cancel with force=true.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_cancel_in_flight)(BlockDriverState *bs);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_inactivate)(BlockDriverState *bs);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_create)(
+ BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
+
+ int GRAPH_UNLOCKED_PTR (*bdrv_snapshot_goto)(
+ BlockDriverState *bs, const char *snapshot_id);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_delete)(
+ BlockDriverState *bs, const char *snapshot_id, const char *name,
+ Error **errp);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_list)(
+ BlockDriverState *bs, QEMUSnapshotInfo **psn_info);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_load_tmp)(
+ BlockDriverState *bs, const char *snapshot_id, const char *name,
+ Error **errp);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_change_backing_file)(
+ BlockDriverState *bs, const char *backing_file,
+ const char *backing_fmt);
+
+ /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */
+ int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
+ const char *tag);
+ int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
+ const char *tag);
+ int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
+ bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
+
+ void GRAPH_RDLOCK_PTR (*bdrv_refresh_limits)(
+ BlockDriverState *bs, Error **errp);
+
+ /*
+ * Returns 1 if newly created images are guaranteed to contain only
+ * zeros, 0 otherwise.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_has_zero_init)(BlockDriverState *bs);
+
+ /*
+ * Remove fd handlers, timers, and other event loop callbacks so the event
+ * loop is no longer in use. Called with no in-flight requests and in
+ * depth-first traversal order with parents before child nodes.
+ */
+ void (*bdrv_detach_aio_context)(BlockDriverState *bs);
+
+ /*
+ * Add fd handlers, timers, and other event loop callbacks so I/O requests
+ * can be processed again. Called with no in-flight requests and in
+ * depth-first traversal order with child nodes before parent nodes.
+ */
+ void (*bdrv_attach_aio_context)(BlockDriverState *bs,
+ AioContext *new_context);
+
+ /**
+ * bdrv_drain_begin is called if implemented in the beginning of a
+ * drain operation to drain and stop any internal sources of requests in
+ * the driver.
+ * bdrv_drain_end is called if implemented at the end of the drain.
+ *
+ * They should be used by the driver to e.g. manage scheduled I/O
+ * requests, or toggle an internal state. After the end of the drain new
+ * requests will continue normally.
+ *
+ * Implementations of both functions must not call aio_poll().
+ */
+ void (*bdrv_drain_begin)(BlockDriverState *bs);
+ void (*bdrv_drain_end)(BlockDriverState *bs);
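+
+    /*
+     * A minimal illustrative sketch (hypothetical driver "mydrv" with
+     * per-instance state BDRVMyState): a driver with an internal source of
+     * requests might implement these callbacks roughly as
+     *
+     *     static void mydrv_drain_begin(BlockDriverState *bs)
+     *     {
+     *         BDRVMyState *s = bs->opaque;
+     *         s->quiesced = true;   // stop generating new internal requests
+     *     }
+     *
+     *     static void mydrv_drain_end(BlockDriverState *bs)
+     *     {
+     *         BDRVMyState *s = bs->opaque;
+     *         s->quiesced = false;  // resume issuing internal requests
+     *     }
+     */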
+
+ /**
+ * Try to get @bs's logical and physical block size.
+ * On success, store them in @bsz and return zero.
+ * On failure, return negative errno.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_probe_blocksizes)(
+ BlockDriverState *bs, BlockSizes *bsz);
+ /**
+ * Try to get @bs's geometry (cyls, heads, sectors)
+ * On success, store them in @geo and return 0.
+ * On failure return -errno.
+ * Only drivers that want to override guest geometry implement this
+ * callback; see hd_geometry_guess().
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_probe_geometry)(
+ BlockDriverState *bs, HDGeometry *geo);
+
+ void GRAPH_WRLOCK_PTR (*bdrv_add_child)(
+ BlockDriverState *parent, BlockDriverState *child, Error **errp);
+
+ void GRAPH_WRLOCK_PTR (*bdrv_del_child)(
+ BlockDriverState *parent, BdrvChild *child, Error **errp);
+
+ /**
+ * Informs the block driver that a permission change is intended. The
+ * driver checks whether the change is permissible and may take other
+ * preparations for the change (e.g. get file system locks). This operation
+     * is always followed by a call to either .bdrv_set_perm or
+ * .bdrv_abort_perm_update.
+ *
+ * Checks whether the requested set of cumulative permissions in @perm
+ * can be granted for accessing @bs and whether no other users are using
+ * permissions other than those given in @shared (both arguments take
+ * BLK_PERM_* bitmasks).
+ *
+ * If both conditions are met, 0 is returned. Otherwise, -errno is returned
+ * and errp is set to an error describing the conflict.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
+ uint64_t shared, Error **errp);
+
+ /**
+     * Called to inform the driver that the cumulative set of used
+ * permissions for @bs has changed to @perm, and the set of shareable
+ * permission to @shared. The driver can use this to propagate changes to
+ * its children (i.e. request permissions only if a parent actually needs
+ * them).
+ *
+ * This function is only invoked after bdrv_check_perm(), so block drivers
+ * may rely on preparations made in their .bdrv_check_perm implementation.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_set_perm)(
+ BlockDriverState *bs, uint64_t perm, uint64_t shared);
+
+ /*
+     * Called to inform the driver that, after a previous bdrv_check_perm()
+     * call, the permission update will not be performed and any preparations
+     * made for it (e.g. file locks that were taken) need to be undone.
+ *
+ * This function can be called even for nodes that never saw a
+ * bdrv_check_perm() call. It is a no-op then.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_abort_perm_update)(BlockDriverState *bs);
+
+ /**
+ * Returns in @nperm and @nshared the permissions that the driver for @bs
+ * needs on its child @c, based on the cumulative permissions requested by
+ * the parents in @parent_perm and @parent_shared.
+ *
+ * If @c is NULL, return the permissions for attaching a new child for the
+ * given @child_class and @role.
+ *
+ * If @reopen_queue is non-NULL, don't return the currently needed
+ * permissions, but those that will be needed after applying the
+ * @reopen_queue.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_child_perm)(
+ BlockDriverState *bs, BdrvChild *c, BdrvChildRole role,
+ BlockReopenQueue *reopen_queue,
+ uint64_t parent_perm, uint64_t parent_shared,
+ uint64_t *nperm, uint64_t *nshared);
+
+ /**
+     * Register/unregister a buffer for I/O. For example, a driver may want
+     * to know the memory areas that will later be used in iovs, so that it
+     * can set up IOMMU mappings with VFIO etc. in order to get better
+ * performance. In the case of VFIO drivers, this callback is used to do
+ * DMA mapping for hot buffers.
+ *
+ * Returns: true on success, false on failure
+ */
+ bool GRAPH_RDLOCK_PTR (*bdrv_register_buf)(
+ BlockDriverState *bs, void *host, size_t size, Error **errp);
+ void GRAPH_RDLOCK_PTR (*bdrv_unregister_buf)(
+ BlockDriverState *bs, void *host, size_t size);
+
+ /*
+ * This field is modified only under the BQL, and is part of
+ * the global state.
+ */
+ QLIST_ENTRY(BlockDriver) list;
+
+ /*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_amend)(
+ BlockDriverState *bs, BlockdevAmendOptions *opts, bool force,
+ Error **errp);
+
+ /* aio */
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_preadv)(BlockDriverState *bs,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
+
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_pwritev)(BlockDriverState *bs,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
+
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_flush)(
+ BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque);
+
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_pdiscard)(
+ BlockDriverState *bs, int64_t offset, int bytes,
+ BlockCompletionFunc *cb, void *opaque);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_readv)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+
+ /**
+ * @offset: position in bytes to read at
+ * @bytes: number of bytes to read
+ * @qiov: the buffers to fill with read data
+ * @flags: currently unused, always 0
+ *
+ * @offset and @bytes will be a multiple of 'request_alignment',
+ * but the length of individual @qiov elements does not have to
+ * be a multiple.
+ *
+ * @bytes will always equal the total size of @qiov, and will be
+ * no larger than 'max_transfer'.
+ *
+ * The buffer in @qiov may point directly to guest memory.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_preadv)(BlockDriverState *bs,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_preadv_part)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_writev)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ int flags);
+ /**
+ * @offset: position in bytes to write at
+ * @bytes: number of bytes to write
+ * @qiov: the buffers containing data to write
+ * @flags: zero or more bits allowed by 'supported_write_flags'
+ *
+ * @offset and @bytes will be a multiple of 'request_alignment',
+ * but the length of individual @qiov elements does not have to
+ * be a multiple.
+ *
+ * @bytes will always equal the total size of @qiov, and will be
+ * no larger than 'max_transfer'.
+ *
+ * The buffer in @qiov may point directly to guest memory.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev_part)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset, BdrvRequestFlags flags);
+
+ /*
+ * Efficiently zero a region of the disk image. Typically an image format
+ * would use a compact metadata representation to implement this. This
+     * function pointer may be NULL or return -ENOTSUP and .bdrv_co_writev()
+ * will be called instead.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwrite_zeroes)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ BdrvRequestFlags flags);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pdiscard)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+ /*
+ * Map [offset, offset + nbytes) range onto a child of @bs to copy from,
+ * and invoke bdrv_co_copy_range_from(child, ...), or invoke
+ * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
+ *
+ * See the comment of bdrv_co_copy_range for the parameter and return value
+ * semantics.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_copy_range_from)(
+ BlockDriverState *bs, BdrvChild *src, int64_t offset,
+ BdrvChild *dst, int64_t dst_offset, int64_t bytes,
+ BdrvRequestFlags read_flags, BdrvRequestFlags write_flags);
+
+ /*
+ * Map [offset, offset + nbytes) range onto a child of bs to copy data to,
+ * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
+ * operation if @bs is the leaf and @src has the same BlockDriver. Return
+ * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
+ *
+ * See the comment of bdrv_co_copy_range for the parameter and return value
+ * semantics.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_copy_range_to)(
+ BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset, int64_t bytes,
+ BdrvRequestFlags read_flags, BdrvRequestFlags write_flags);
+
+ /*
+ * Building block for bdrv_block_status[_above] and
+ * bdrv_is_allocated[_above]. The driver should answer only
+ * according to the current layer, and should only need to set
+ * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
+ * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
+ * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
+ * block.h for the overall meaning of the bits. As a hint, the
+ * flag want_zero is true if the caller cares more about precise
+ * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
+ * overall allocation (favor larger *pnum, perhaps by reporting
+ * _DATA instead of _ZERO). The block layer guarantees input
+ * clamped to bdrv_getlength() and aligned to request_alignment,
+ * as well as non-NULL pnum, map, and file; in turn, the driver
+ * must return an error or set pnum to an aligned non-zero value.
+ *
+ * Note that @bytes is just a hint on how big of a region the
+ * caller wants to inspect. It is not a limit on *pnum.
+ * Implementations are free to return larger values of *pnum if
+ * doing so does not incur a performance penalty.
+ *
+ * block/io.c's bdrv_co_block_status() will utilize an unclamped
+ * *pnum value for the block-status cache on protocol nodes, prior
+ * to clamping *pnum for return to its caller.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_block_status)(
+ BlockDriverState *bs,
+ bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file);
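+
+    /*
+     * A minimal illustrative sketch (hypothetical protocol driver "mydrv"
+     * whose image is fully allocated and mapped 1:1 to its file):
+     *
+     *     static int coroutine_fn GRAPH_RDLOCK
+     *     mydrv_co_block_status(BlockDriverState *bs, bool want_zero,
+     *                           int64_t offset, int64_t bytes, int64_t *pnum,
+     *                           int64_t *map, BlockDriverState **file)
+     *     {
+     *         *pnum = bytes;   // the whole queried region is data
+     *         *map = offset;   // identity mapping within this node
+     *         *file = bs;
+     *         return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
+     *     }
+     */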
+
+ /*
+ * Snapshot-access API.
+ *
+     * A block driver may provide a snapshot-access API: special functions to
+     * access some internal "snapshot". These functions are similar to the
+     * normal read/block_status/discard handlers, but don't have any specific
+     * handling in the generic block layer: no serializing, no alignment, no
+     * tracked requests. A block driver that implements these APIs is
+     * therefore fully responsible for synchronization between the
+     * snapshot-access API and normal I/O requests.
+ *
+ * TODO: To be able to support qcow2's internal snapshots, this API will
+ * need to be extended to:
+ * - be able to select a specific snapshot
+ * - receive the snapshot's actual length (which may differ from bs's
+ * length)
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_preadv_snapshot)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_snapshot_block_status)(
+ BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pdiscard_snapshot)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+ /*
+ * Invalidate any cached meta-data.
+ */
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_invalidate_cache)(
+ BlockDriverState *bs, Error **errp);
+
+ /*
+ * Flushes all data for all layers by calling bdrv_co_flush for underlying
+ * layers, if needed. This function is needed for deterministic
+ * synchronization of the flush finishing callback.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_flush)(BlockDriverState *bs);
+
+ /* Delete a created file. */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_delete_file)(
+ BlockDriverState *bs, Error **errp);
+
+ /*
+ * Flushes all data that was already written to the OS all the way down to
+ * the disk (for example file-posix.c calls fsync()).
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_flush_to_disk)(
+ BlockDriverState *bs);
+
+ /*
+ * Flushes all internal caches to the OS. The data may still sit in a
+ * writeback cache of the host OS, but it will survive a crash of the qemu
+ * process.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_flush_to_os)(
+ BlockDriverState *bs);
+
+ /*
+ * Truncate @bs to @offset bytes using the given @prealloc mode
+ * when growing. Modes other than PREALLOC_MODE_OFF should be
+ * rejected when shrinking @bs.
+ *
+ * If @exact is true, @bs must be resized to exactly @offset.
+ * Otherwise, it is sufficient for @bs (if it is a host block
+ * device and thus there is no way to resize it) to be at least
+ * @offset bytes in length.
+ *
+ * If @exact is true and this function fails but would succeed
+ * with @exact = false, it should return -ENOTSUP.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_truncate)(
+ BlockDriverState *bs, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
+
+ int64_t coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_getlength)(
+ BlockDriverState *bs);
+
+ int64_t coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_get_allocated_file_size)(
+ BlockDriverState *bs);
+
+ BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,
+ Error **errp);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev_compressed)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev_compressed_part)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_get_info)(
+ BlockDriverState *bs, BlockDriverInfo *bdi);
+
+ ImageInfoSpecific * GRAPH_RDLOCK_PTR (*bdrv_get_specific_info)(
+ BlockDriverState *bs, Error **errp);
+ BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_save_vmstate)(
+ BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_load_vmstate)(
+ BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+ int coroutine_fn (*bdrv_co_zone_report)(BlockDriverState *bs,
+ int64_t offset, unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+ int coroutine_fn (*bdrv_co_zone_mgmt)(BlockDriverState *bs, BlockZoneOp op,
+ int64_t offset, int64_t len);
+ int coroutine_fn (*bdrv_co_zone_append)(BlockDriverState *bs,
+ int64_t *offset, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+ /* removable device specific */
+ bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_is_inserted)(
+ BlockDriverState *bs);
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_eject)(
+ BlockDriverState *bs, bool eject_flag);
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_lock_medium)(
+ BlockDriverState *bs, bool locked);
+
+ /* to control generic scsi devices */
+ BlockAIOCB *coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_aio_ioctl)(
+ BlockDriverState *bs, unsigned long int req, void *buf,
+ BlockCompletionFunc *cb, void *opaque);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_ioctl)(
+ BlockDriverState *bs, unsigned long int req, void *buf);
+
+ /*
+ * Returns 0 for completed check, -errno for internal errors.
+ * The check results are stored in result.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_check)(
+ BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix);
+
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_debug_event)(
+ BlockDriverState *bs, BlkdebugEvent event);
+
+ bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs);
+
+ bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_can_store_new_dirty_bitmap)(
+ BlockDriverState *bs, const char *name, uint32_t granularity,
+ Error **errp);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_remove_persistent_dirty_bitmap)(
+ BlockDriverState *bs, const char *name, Error **errp);
+};
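+
+/*
+ * A minimal illustrative sketch (hypothetical driver "mydrv", state type
+ * BDRVMyState and callbacks named mydrv_*): a driver typically defines a
+ * static BlockDriver filling in a subset of the fields above and registers
+ * it at startup with bdrv_register(), e.g.
+ *
+ *     static BlockDriver bdrv_mydrv = {
+ *         .format_name       = "mydrv",
+ *         .instance_size     = sizeof(BDRVMyState),
+ *         .bdrv_open         = mydrv_open,
+ *         .bdrv_close        = mydrv_close,
+ *         .bdrv_co_preadv    = mydrv_co_preadv,
+ *         .bdrv_co_getlength = mydrv_co_getlength,
+ *     };
+ */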
+
+static inline bool TSA_NO_TSA block_driver_can_compress(BlockDriver *drv)
+{
+ return drv->bdrv_co_pwritev_compressed ||
+ drv->bdrv_co_pwritev_compressed_part;
+}
+
+typedef struct BlockLimits {
+ /*
+ * Alignment requirement, in bytes, for offset/length of I/O
+ * requests. Must be a power of 2 less than INT_MAX; defaults to
+ * 1 for drivers with modern byte interfaces, and to 512
+ * otherwise.
+ */
+ uint32_t request_alignment;
+
+ /*
+     * Maximum number of bytes that can be discarded at once. Must be a
+     * multiple of pdiscard_alignment, but need not be a power of 2. May be 0
+     * if there is no inherent 64-bit limit.
+ */
+ int64_t max_pdiscard;
+
+ /*
+ * Optimal alignment for discard requests in bytes. A power of 2
+ * is best but not mandatory. Must be a multiple of
+ * bl.request_alignment, and must be less than max_pdiscard if
+ * that is set. May be 0 if bl.request_alignment is good enough
+ */
+ uint32_t pdiscard_alignment;
+
+ /*
+     * Maximum number of bytes that can be zeroed at once. Must be a multiple
+     * of pwrite_zeroes_alignment. 0 means no limit.
+ */
+ int64_t max_pwrite_zeroes;
+
+ /*
+ * Optimal alignment for write zeroes requests in bytes. A power
+ * of 2 is best but not mandatory. Must be a multiple of
+ * bl.request_alignment, and must be less than max_pwrite_zeroes
+ * if that is set. May be 0 if bl.request_alignment is good
+ * enough
+ */
+ uint32_t pwrite_zeroes_alignment;
+
+ /*
+ * Optimal transfer length in bytes. A power of 2 is best but not
+ * mandatory. Must be a multiple of bl.request_alignment, or 0 if
+ * no preferred size
+ */
+ uint32_t opt_transfer;
+
+ /*
+ * Maximal transfer length in bytes. Need not be power of 2, but
+ * must be multiple of opt_transfer and bl.request_alignment, or 0
+ * for no 32-bit limit. For now, anything larger than INT_MAX is
+ * clamped down.
+ */
+ uint32_t max_transfer;
+
+ /*
+ * Maximal hardware transfer length in bytes. Applies whenever
+ * transfers to the device bypass the kernel I/O scheduler, for
+ * example with SG_IO. If larger than max_transfer or if zero,
+ * blk_get_max_hw_transfer will fall back to max_transfer.
+ */
+ uint64_t max_hw_transfer;
+
+ /*
+ * Maximal number of scatter/gather elements allowed by the hardware.
+ * Applies whenever transfers to the device bypass the kernel I/O
+ * scheduler, for example with SG_IO. If larger than max_iov
+ * or if zero, blk_get_max_hw_iov will fall back to max_iov.
+ */
+ int max_hw_iov;
+
+
+    /* memory alignment, in bytes, so that no bounce buffer is needed */
+ size_t min_mem_alignment;
+
+ /* memory alignment, in bytes, for bounce buffer */
+ size_t opt_mem_alignment;
+
+ /* maximum number of iovec elements */
+ int max_iov;
+
+ /*
+ * true if the length of the underlying file can change, and QEMU
+ * is expected to adjust automatically. Mostly for CD-ROM drives,
+ * whose length is zero when the tray is empty (they don't need
+ * an explicit monitor command to load the disk inside the guest).
+ */
+ bool has_variable_length;
+
+ /* device zone model */
+ BlockZoneModel zoned;
+
+ /* zone size expressed in bytes */
+ uint32_t zone_size;
+
+ /* total number of zones */
+ uint32_t nr_zones;
+
+ /* maximum sectors of a zone append write operation */
+ uint32_t max_append_sectors;
+
+ /* maximum number of open zones */
+ uint32_t max_open_zones;
+
+ /* maximum number of active zones */
+ uint32_t max_active_zones;
+
+ uint32_t write_granularity;
+} BlockLimits;
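+
+/*
+ * A minimal illustrative sketch (hypothetical driver "mydrv", example
+ * values): these limits are usually filled in by the driver's
+ * .bdrv_refresh_limits callback, e.g.
+ *
+ *     static void mydrv_refresh_limits(BlockDriverState *bs, Error **errp)
+ *     {
+ *         bs->bl.request_alignment = 4096;        // device logical block size
+ *         bs->bl.max_transfer = 8 * 1024 * 1024;  // device transfer limit
+ *         bs->bl.pdiscard_alignment = 4096;
+ *     }
+ */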
+
+typedef struct BdrvOpBlocker BdrvOpBlocker;
+
+typedef struct BdrvAioNotifier {
+ void (*attached_aio_context)(AioContext *new_context, void *opaque);
+ void (*detach_aio_context)(void *opaque);
+
+ void *opaque;
+ bool deleted;
+
+ QLIST_ENTRY(BdrvAioNotifier) list;
+} BdrvAioNotifier;
+
+struct BdrvChildClass {
+ /*
+ * If true, bdrv_replace_node() doesn't change the node this BdrvChild
+ * points to.
+ */
+ bool stay_at_node;
+
+ /*
+ * If true, the parent is a BlockDriverState and bdrv_next_all_states()
+ * will return it. This information is used for drain_all, where every node
+ * will be drained separately, so the drain only needs to be propagated to
+ * non-BDS parents.
+ */
+ bool parent_is_bds;
+
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+ void (*inherit_options)(BdrvChildRole role, bool parent_is_format,
+ int *child_flags, QDict *child_options,
+ int parent_flags, QDict *parent_options);
+ void (*change_media)(BdrvChild *child, bool load);
+
+ /*
+ * Returns a malloced string that describes the parent of the child for a
+ * human reader. This could be a node-name, BlockBackend name, qdev ID or
+ * QOM path of the device owning the BlockBackend, job type and ID etc. The
+ * caller is responsible for freeing the memory.
+ */
+ char *(*get_parent_desc)(BdrvChild *child);
+
+ /*
+ * Notifies the parent that the child has been activated/inactivated (e.g.
+ * when migration is completing) and it can start/stop requesting
+ * permissions and doing I/O on it.
+ */
+ void GRAPH_RDLOCK_PTR (*activate)(BdrvChild *child, Error **errp);
+ int GRAPH_RDLOCK_PTR (*inactivate)(BdrvChild *child);
+
+ void GRAPH_WRLOCK_PTR (*attach)(BdrvChild *child);
+ void GRAPH_WRLOCK_PTR (*detach)(BdrvChild *child);
+
+ /*
+ * If this pair of functions is implemented, the parent doesn't issue new
+ * requests after returning from .drained_begin() until .drained_end() is
+ * called.
+ *
+ * These functions must not change the graph (and therefore also must not
+ * call aio_poll(), which could change the graph indirectly).
+ *
+ * Note that this can be nested. If drained_begin() was called twice, new
+ * I/O is allowed only after drained_end() was called twice, too.
+ */
+ void GRAPH_RDLOCK_PTR (*drained_begin)(BdrvChild *child);
+ void GRAPH_RDLOCK_PTR (*drained_end)(BdrvChild *child);
+
+ /*
+ * Returns whether the parent has pending requests for the child. This
+ * callback is polled after .drained_begin() has been called until all
+ * activity on the child has stopped.
+ */
+ bool GRAPH_RDLOCK_PTR (*drained_poll)(BdrvChild *child);
+
+ /*
+ * Notifies the parent that the filename of its child has changed (e.g.
+ * because the direct child was removed from the backing chain), so that it
+ * can update its reference.
+ */
+ int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
+ const char *filename,
+ bool backing_mask_protocol,
+ Error **errp);
+
+ bool (*change_aio_ctx)(BdrvChild *child, AioContext *ctx,
+ GHashTable *visited, Transaction *tran,
+ Error **errp);
+
+ /*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ void (*resize)(BdrvChild *child);
+
+ /*
+ * Returns a name that is supposedly more useful for human users than the
+ * node name for identifying the node in question (in particular, a BB
+ * name), or NULL if the parent can't provide a better name.
+ */
+ const char *(*get_name)(BdrvChild *child);
+
+ AioContext *(*get_parent_aio_context)(BdrvChild *child);
+};
+
+extern const BdrvChildClass child_of_bds;
+
+struct BdrvChild {
+ BlockDriverState *bs;
+ char *name;
+ const BdrvChildClass *klass;
+ BdrvChildRole role;
+ void *opaque;
+
+ /**
+ * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
+ */
+ uint64_t perm;
+
+ /**
+ * Permissions that can still be granted to other users of @bs while this
+ * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
+ */
+ uint64_t shared_perm;
+
+ /*
+ * This link is frozen: the child can neither be replaced nor
+ * detached from the parent.
+ */
+ bool frozen;
+
+ /*
+ * True if the parent of this child has been drained by this BdrvChild
+ * (through klass->drained_*).
+ *
+ * It is generally true if bs->quiesce_counter > 0. It may differ while the
+ * child is entering or leaving a drained section.
+ */
+ bool quiesced_parent;
+
+ QLIST_ENTRY(BdrvChild GRAPH_RDLOCK_PTR) next;
+ QLIST_ENTRY(BdrvChild GRAPH_RDLOCK_PTR) next_parent;
+};
+
+/*
+ * Allows bdrv_co_block_status() to cache one data region for a
+ * protocol node.
+ *
+ * @valid: Whether the cache is valid (should be accessed with atomic
+ * functions so this can be reset by RCU readers)
+ * @data_start: Offset where we know (or strongly assume) there is data
+ * @data_end: Offset where the data region ends (which is not necessarily
+ * the start of a zeroed region)
+ */
+typedef struct BdrvBlockStatusCache {
+ struct rcu_head rcu;
+
+ bool valid;
+ int64_t data_start;
+ int64_t data_end;
+} BdrvBlockStatusCache;
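+
+/*
+ * A minimal illustrative sketch (reader side only, using the RCU and atomic
+ * helpers already pulled in via qemu/rcu.h): readers access the cache under
+ * an RCU read guard, roughly as
+ *
+ *     RCU_READ_LOCK_GUARD();
+ *     BdrvBlockStatusCache *bsc = qatomic_rcu_read(&bs->block_status_cache);
+ *     if (qatomic_read(&bsc->valid) &&
+ *         offset >= bsc->data_start && offset < bsc->data_end) {
+ *         // the queried offset is known to be data
+ *     }
+ */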
+
+struct BlockDriverState {
+ /*
+ * Protected by big QEMU lock or read-only after opening. No special
+ * locking needed during I/O...
+ */
+ int open_flags; /* flags used to open the file, re-used for re-open */
+ bool encrypted; /* if true, the media is encrypted */
+ bool sg; /* if true, the device is a /dev/sg* */
+ bool probed; /* if true, format was probed rather than specified */
+ bool force_share; /* if true, always allow all shared permissions */
+ bool implicit; /* if true, this filter node was automatically inserted */
+
+ BlockDriver *drv; /* NULL means no media */
+ void *opaque;
+
+ AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
+ /*
+ * long-running tasks intended to always use the same AioContext as this
+ * BDS may register themselves in this list to be notified of changes
+ * regarding this BDS's context
+ */
+ QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
+ bool walking_aio_notifiers; /* to make removal during iteration safe */
+
+ char filename[PATH_MAX];
+ /*
+ * If not empty, this image is a diff in relation to backing_file.
+ * Note that this is the name given in the image header and
+ * therefore may or may not be equal to .backing->bs->filename.
+ * If this field contains a relative path, it is to be resolved
+     * relative to the overlay's location.
+ */
+ char backing_file[PATH_MAX];
+ /*
+ * The backing filename indicated by the image header. Contrary
+ * to backing_file, if we ever open this file, auto_backing_file
+ * is replaced by the resulting BDS's filename (i.e. after a
+ * bdrv_refresh_filename() run).
+ */
+ char auto_backing_file[PATH_MAX];
+ char backing_format[16]; /* if non-zero and backing_file exists */
+
+ QDict *full_open_options;
+ char exact_filename[PATH_MAX];
+
+ /* I/O Limits */
+ BlockLimits bl;
+
+ /*
+ * Flags honored during pread
+ */
+ BdrvRequestFlags supported_read_flags;
+ /*
+ * Flags honored during pwrite (so far: BDRV_REQ_FUA,
+ * BDRV_REQ_WRITE_UNCHANGED).
+ * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
+ * writes will be issued as normal writes without the flag set.
+ * This is important to note for drivers that do not explicitly
+ * request a WRITE permission for their children and instead take
+ * the same permissions as their parent did (this is commonly what
+ * block filters do). Such drivers have to be aware that the
+ * parent may have taken a WRITE_UNCHANGED permission only and is
+ * issuing such requests. Drivers either must make sure that
+ * these requests do not result in plain WRITE accesses (usually
+ * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
+ * every incoming write request as-is, including potentially that
+ * flag), or they have to explicitly take the WRITE permission for
+ * their children.
+ */
+ BdrvRequestFlags supported_write_flags;
+ /*
+ * Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
+ * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED)
+ */
+ BdrvRequestFlags supported_zero_flags;
+ /*
+ * Flags honoured during truncate (so far: BDRV_REQ_ZERO_WRITE).
+ *
+ * If BDRV_REQ_ZERO_WRITE is given, the truncate operation must make sure
+ * that any added space reads as all zeros. If this can't be guaranteed,
+ * the operation must fail.
+ */
+ BdrvRequestFlags supported_truncate_flags;
+
+ /* the following member gives a name to every node on the bs graph. */
+ char node_name[32];
+ /* element of the list of named nodes building the graph */
+ QTAILQ_ENTRY(BlockDriverState) node_list;
+ /* element of the list of all BlockDriverStates (all_bdrv_states) */
+ QTAILQ_ENTRY(BlockDriverState) bs_list;
+ /* element of the list of monitor-owned BDS */
+ QTAILQ_ENTRY(BlockDriverState) monitor_list;
+ int refcnt;
+
+ /* operation blockers. Protected by BQL. */
+ QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
+
+ /*
+ * The node that this node inherited default options from (and a reopen on
+ * which can affect this node by changing these defaults). This is always a
+ * parent node of this node.
+ */
+ BlockDriverState *inherits_from;
+
+ /*
+ * @backing and @file are some of @children or NULL. All these three fields
+ * (@file, @backing and @children) are modified only in
+ * bdrv_child_cb_attach() and bdrv_child_cb_detach().
+ *
+ * See also comment in include/block/block.h, to learn how backing and file
+ * are connected with BdrvChildRole.
+ */
+ QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) children;
+ BdrvChild * GRAPH_RDLOCK_PTR backing;
+ BdrvChild * GRAPH_RDLOCK_PTR file;
+
+ QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) parents;
+
+ QDict *options;
+ QDict *explicit_options;
+ BlockdevDetectZeroesOptions detect_zeroes;
+
+ /* The error object in use for blocking operations on backing_hd */
+ Error *backing_blocker;
+
+ /*
+ * If we are reading a disk image, give its size in sectors.
+ * Generally read-only; it is written to by load_snapshot and
+     * save_snapshot, but the block layer is quiescent during those.
+ */
+ int64_t total_sectors;
+
+ /* threshold limit for writes, in bytes. "High water mark". */
+ uint64_t write_threshold_offset;
+
+ /*
+ * Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
+ * Reading from the list can be done with either the BQL or the
+ * dirty_bitmap_mutex. Modifying a bitmap only requires
+ * dirty_bitmap_mutex.
+ */
+ QemuMutex dirty_bitmap_mutex;
+ QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
+
+ /* Offset after the highest byte written to */
+ Stat64 wr_highest_offset;
+
+ /*
+     * If non-zero, copy read backing sectors into the image. Can be >1 if
+     * more than one client has requested copy-on-read. Accessed with atomic
+ * ops.
+ */
+ int copy_on_read;
+
+ /*
+ * number of in-flight requests; overall and serialising.
+ * Accessed with atomic ops.
+ */
+ unsigned int in_flight;
+ unsigned int serialising_in_flight;
+
+    /* do we need to tell the guest if we have a volatile write cache? */
+ int enable_write_cache;
+
+ /* Accessed with atomic ops. */
+ int quiesce_counter;
+
+ unsigned int write_gen; /* Current data generation */
+
+ /* Protected by reqs_lock. */
+ QemuMutex reqs_lock;
+ QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
+ CoQueue flush_queue; /* Serializing flush queue */
+ bool active_flush_req; /* Flush request in flight? */
+
+ /* Only read/written by whoever has set active_flush_req to true. */
+ unsigned int flushed_gen; /* Flushed write generation */
+
+ /* BdrvChild links to this node may never be frozen */
+ bool never_freeze;
+
+ /* Lock for block-status cache RCU writers */
+ CoMutex bsc_modify_lock;
+ /* Always non-NULL, but must only be dereferenced under an RCU read guard */
+ BdrvBlockStatusCache *block_status_cache;
+
+    /* array of the write pointer locations for each zone in the zoned device */
+ BlockZoneWps *wps;
+};
+
+struct BlockBackendRootState {
+ int open_flags;
+ BlockdevDetectZeroesOptions detect_zeroes;
+};
+
+typedef enum BlockMirrorBackingMode {
+ /*
+ * Reuse the existing backing chain from the source for the target.
+ * - sync=full: Set backing BDS to NULL.
+ * - sync=top: Use source's backing BDS.
+ * - sync=none: Use source as the backing BDS.
+ */
+ MIRROR_SOURCE_BACKING_CHAIN,
+
+ /* Open the target's backing chain completely anew */
+ MIRROR_OPEN_BACKING_CHAIN,
+
+ /* Do not change the target's backing BDS after job completion */
+ MIRROR_LEAVE_BACKING_CHAIN,
+} BlockMirrorBackingMode;
+
+
+/*
+ * Essential block drivers which must always be statically linked into qemu, and
+ * which therefore can be accessed without using bdrv_find_format()
+ */
+extern BlockDriver bdrv_file;
+extern BlockDriver bdrv_raw;
+extern BlockDriver bdrv_qcow2;
+
+extern unsigned int bdrv_drain_all_count;
+extern QemuOptsList bdrv_create_opts_simple;
+
+/*
+ * Common functions that are neither I/O nor Global State.
+ *
+ * See include/block/block-common.h for more information about
+ * the Common API.
+ */
+
+static inline BlockDriverState *child_bs(BdrvChild *child)
+{
+ return child ? child->bs : NULL;
+}
+
+int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp);
+char *create_tmp_file(Error **errp);
+void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
+ QDict *options);
+
+
+int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ Error **errp);
+
+#ifdef _WIN32
+int is_windows_drive(const char *filename);
+#endif
+
+#endif /* BLOCK_INT_COMMON_H */
diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h
new file mode 100644
index 0000000000..d2201e27f4
--- /dev/null
+++ b/include/block/block_int-global-state.h
@@ -0,0 +1,326 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef BLOCK_INT_GLOBAL_STATE_H
+#define BLOCK_INT_GLOBAL_STATE_H
+
+#include "block/blockjob.h"
+#include "block/block_int-common.h"
+#include "qemu/hbitmap.h"
+#include "qemu/main-loop.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+/**
+ * stream_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Block device to operate on.
+ * @base: Block device that will become the new base, or %NULL to
+ * flatten the whole backing file chain onto @bs.
+ * @backing_file_str: The file name that will be written to @bs as
+ * the new backing file if the job completes. Ignored if @base is %NULL.
+ * @backing_mask_protocol: Replace potential protocol name with 'raw' in
+ * 'backing file format' header
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the stream job inserts into the graph above
+ * @bs. NULL means that a node name should be autogenerated.
+ * @errp: Error object.
+ *
+ * Start a streaming operation on @bs. Clusters that are unallocated
+ * in @bs, but allocated in any image between @base and @bs (both
+ * exclusive) will be written to @bs. At the end of a successful
+ * streaming job, the backing file of @bs will be changed to
+ * @backing_file_str in the written image and to @base in the live
+ * BlockDriverState.
+ */
+void stream_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, const char *backing_file_str,
+ bool backing_mask_protocol,
+ BlockDriverState *bottom,
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error,
+ const char *filter_node_name,
+ Error **errp);
+
+/**
+ * commit_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Active block device.
+ * @top: Top block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @backing_file_str: String to use as the backing file in @top's overlay
+ * @backing_mask_protocol: Replace potential protocol name with 'raw' in
+ * 'backing file format' header
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the commit job inserts into the graph above @top. NULL means
+ * that a node name should be autogenerated.
+ * @errp: Error object.
+ *
+ */
+void commit_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, BlockDriverState *top,
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error, const char *backing_file_str,
+ bool backing_mask_protocol,
+ const char *filter_node_name, Error **errp);
+/**
+ * commit_active_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Active block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the commit job inserts into the graph above @bs. NULL means that
+ * a node name should be autogenerated.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @auto_complete: Auto complete the job.
+ * @errp: Error object.
+ *
+ */
+BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, int creation_flags,
+ int64_t speed, BlockdevOnError on_error,
+ const char *filter_node_name,
+ BlockCompletionFunc *cb, void *opaque,
+ bool auto_complete, Error **errp);
+/*
+ * mirror_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Block device to operate on.
+ * @target: Block device to write to.
+ * @replaces: Block graph node name to replace once the mirror is done. Can
+ * only be used when full mirroring is selected.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @granularity: The chosen granularity for the dirty bitmap.
+ * @buf_size: The amount of data that can be in flight at one time.
+ * @mode: Whether to collapse all images in the chain to the target.
+ * @backing_mode: How to establish the target's backing chain after completion.
+ * @zero_target: Whether the target should be explicitly zero-initialized
+ * @on_source_error: The action to take upon error reading from the source.
+ * @on_target_error: The action to take upon error writing to the target.
+ * @unmap: Whether to unmap target where source sectors only contain zeroes.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the mirror job inserts into the graph above @bs. NULL means that
+ * a node name should be autogenerated.
+ * @copy_mode: When to trigger writes to the target.
+ * @errp: Error object.
+ *
+ * Start a mirroring operation on @bs. Clusters that are allocated
+ * in @bs will be written to @target until the job is cancelled or
+ * manually completed. At the end of a successful mirroring job,
+ * @bs will be switched to read from @target.
+ */
+void mirror_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *target, const char *replaces,
+ int creation_flags, int64_t speed,
+ uint32_t granularity, int64_t buf_size,
+ MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
+ bool zero_target,
+ BlockdevOnError on_source_error,
+ BlockdevOnError on_target_error,
+ bool unmap, const char *filter_node_name,
+ MirrorCopyMode copy_mode, Error **errp);
+
+/*
+ * backup_job_create:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Block device to operate on.
+ * @target: Block device to write to.
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @sync_mode: What parts of the disk image should be copied to the destination.
+ * @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental'
+ * @bitmap_mode: The bitmap synchronization policy to use.
+ * @perf: Performance options. All actual fields are assumed to be present;
+ *        all ".has_*" fields are ignored.
+ * @on_source_error: The action to take upon error reading from the source.
+ * @on_target_error: The action to take upon error writing to the target.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @txn: Transaction that this job is part of (may be NULL).
+ *
+ * Create a backup operation on @bs. Clusters in @bs are written to @target
+ * until the job is cancelled or manually completed.
+ */
+BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *target, int64_t speed,
+ MirrorSyncMode sync_mode,
+ BdrvDirtyBitmap *sync_bitmap,
+ BitmapSyncMode bitmap_mode,
+ bool compress,
+ const char *filter_node_name,
+ BackupPerf *perf,
+ BlockdevOnError on_source_error,
+ BlockdevOnError on_target_error,
+ int creation_flags,
+ BlockCompletionFunc *cb, void *opaque,
+ JobTxn *txn, Error **errp);
+
+BdrvChild * GRAPH_WRLOCK
+bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name,
+ const BdrvChildClass *child_class,
+ BdrvChildRole child_role,
+ uint64_t perm, uint64_t shared_perm,
+ void *opaque, Error **errp);
+
+void GRAPH_WRLOCK bdrv_root_unref_child(BdrvChild *child);
+
+void GRAPH_RDLOCK bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm,
+ uint64_t *shared_perm);
+
+/**
+ * Sets a BdrvChild's permissions. Avoid if the parent is a BDS; use
+ * bdrv_child_refresh_perms() instead and make the parent's
+ * .bdrv_child_perm() implementation return the correct values.
+ */
+int GRAPH_RDLOCK
+bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
+ Error **errp);
+
+/**
+ * Calls bs->drv->bdrv_child_perm() and updates the child's permission
+ * masks with the result.
+ * Drivers should invoke this function whenever an event occurs that
+ * makes their .bdrv_child_perm() implementation return different
+ * values than before, but which will not result in the block layer
+ * automatically refreshing the permissions.
+ */
+int GRAPH_RDLOCK
+bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp);
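
A hypothetical driver snippet (not part of this patch) of the pattern described above: after an event that changes what the driver's .bdrv_child_perm() would report for bs->file, re-query the child's permission masks. BDRVExampleState and its read_only field are illustrative assumptions; the graph rdlock is assumed to be held by the caller.

typedef struct BDRVExampleState {
    bool read_only;
} BDRVExampleState;

static int GRAPH_RDLOCK example_toggle_ro(BlockDriverState *bs, Error **errp)
{
    BDRVExampleState *s = bs->opaque;

    s->read_only = !s->read_only;   /* event that changes the needed perms */
    return bdrv_child_refresh_perms(bs, bs->file, errp);
}
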
+
+bool GRAPH_RDLOCK bdrv_recurse_can_replace(BlockDriverState *bs,
+ BlockDriverState *to_replace);
+
+/*
+ * Default implementation for BlockDriver.bdrv_child_perm() that can
+ * be used by block filters and image formats, as long as they use the
+ * child_of_bds child class and set an appropriate BdrvChildRole.
+ */
+void bdrv_default_perms(BlockDriverState *bs, BdrvChild *c,
+ BdrvChildRole role, BlockReopenQueue *reopen_queue,
+ uint64_t perm, uint64_t shared,
+ uint64_t *nperm, uint64_t *nshared);
+
+void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
+bool blk_dev_has_removable_media(BlockBackend *blk);
+void blk_dev_eject_request(BlockBackend *blk, bool force);
+bool blk_dev_is_medium_locked(BlockBackend *blk);
+
+void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);
+
+void bdrv_set_monitor_owned(BlockDriverState *bs);
+
+void blockdev_close_all_bdrv_states(void);
+
+BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp);
+
+/**
+ * Simple implementation of bdrv_co_create_opts for protocol drivers
+ * which only support creation by opening an existing file
+ * (usually a raw storage device).
+ */
+int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
+ const char *filename,
+ QemuOpts *opts,
+ Error **errp);
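
For illustration only (an assumption, not taken from this patch): a protocol driver that can only "create" an image by opening pre-existing storage could point its callback at the helper above. The driver name and fields shown are placeholders; .bdrv_co_create_opts is assumed to share the helper's signature.

static BlockDriver bdrv_example_proto = {
    .format_name         = "example-proto",
    .protocol_name       = "example",
    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
};
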
+
+BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
+ const char *name,
+ BlockDriverState **pbs,
+ Error **errp);
+BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
+ BlockDirtyBitmapOrStrList *bms,
+ HBitmap **backup, Error **errp);
+BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
+ bool release,
+ BlockDriverState **bitmap_bs,
+ Error **errp);
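
Illustrative only (node and bitmap names are placeholders, not from this patch): resolving a named dirty bitmap before operating on it.

static BdrvDirtyBitmap *example_find_bitmap(Error **errp)
{
    BlockDriverState *bs;

    return block_dirty_bitmap_lookup("node0", "bitmap0", &bs, errp);
}
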
+
+
+BlockDriverState * GRAPH_RDLOCK
+bdrv_skip_implicit_filters(BlockDriverState *bs);
+
+/**
+ * bdrv_add_aio_context_notifier:
+ *
+ * If a long-running job intends to always run in the same AioContext as a
+ * certain BDS, it may use this function to be notified of changes to the
+ * association between the BDS and its AioContext.
+ *
+ * attached_aio_context() is called after the target BDS has been attached to a
+ * new AioContext; detach_aio_context() is called before the target BDS is
+ * detached from its old AioContext.
+ */
+void bdrv_add_aio_context_notifier(BlockDriverState *bs,
+ void (*attached_aio_context)(AioContext *new_context, void *opaque),
+ void (*detach_aio_context)(void *opaque), void *opaque);
+
+/**
+ * bdrv_remove_aio_context_notifier:
+ *
+ * Unsubscribe from change notifications regarding the BDS's AioContext. The
+ * parameters given here must be the same as those given to
+ * bdrv_add_aio_context_notifier().
+ */
+void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
+ void (*aio_context_attached)(AioContext *,
+ void *),
+ void (*aio_context_detached)(void *),
+ void *opaque);
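
An illustrative pairing of the two calls above (not part of this patch): removal must pass exactly the same callbacks and opaque pointer that were registered. All names below are placeholders.

static void example_attached(AioContext *new_context, void *opaque)
{
    /* re-register timers/fd handlers in @new_context */
}

static void example_detach(void *opaque)
{
    /* stop using the old AioContext */
}

static void example_track_aio_context(BlockDriverState *bs, void *opaque,
                                      bool track)
{
    if (track) {
        bdrv_add_aio_context_notifier(bs, example_attached, example_detach,
                                      opaque);
    } else {
        bdrv_remove_aio_context_notifier(bs, example_attached, example_detach,
                                         opaque);
    }
}
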
+
+/**
+ * End all quiescent sections started by bdrv_drain_all_begin(). This is
+ * needed when a BDS is deleted before bdrv_drain_all_end() is called.
+ *
+ * NOTE: this is an internal helper for bdrv_close() *only*. No one else
+ * should call it.
+ */
+void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
+
+#endif /* BLOCK_INT_GLOBAL_STATE_H */
diff --git a/include/block/block_int-io.h b/include/block/block_int-io.h
new file mode 100644
index 0000000000..4a7cf2b4fd
--- /dev/null
+++ b/include/block/block_int-io.h
@@ -0,0 +1,194 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_INT_IO_H
+#define BLOCK_INT_IO_H
+
+#include "block/block_int-common.h"
+#include "qemu/hbitmap.h"
+#include "qemu/main-loop.h"
+
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv_snapshot(BdrvChild *child,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_snapshot_block_status(
+ BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard_snapshot(BlockDriverState *bs,
+ int64_t offset, int64_t bytes);
+
+
+int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv(BdrvChild *child,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv_part(BdrvChild *child,
+ int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pwritev(BdrvChild *child,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pwritev_part(BdrvChild *child,
+ int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
+
+static inline int coroutine_fn GRAPH_RDLOCK bdrv_co_pread(BdrvChild *child,
+ int64_t offset, int64_t bytes, void *buf, BdrvRequestFlags flags)
+{
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
+ assert_bdrv_graph_readable();
+
+ return bdrv_co_preadv(child, offset, bytes, &qiov, flags);
+}
+
+static inline int coroutine_fn GRAPH_RDLOCK bdrv_co_pwrite(BdrvChild *child,
+ int64_t offset, int64_t bytes, const void *buf, BdrvRequestFlags flags)
+{
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
+ assert_bdrv_graph_readable();
+
+ return bdrv_co_pwritev(child, offset, bytes, &qiov, flags);
+}
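
Illustrative only (not part of this patch): a driver coroutine reading a fixed-size header from its child via the byte-buffer convenience wrapper above. The helper name is a placeholder; the graph rdlock is assumed to be held, as the GRAPH_RDLOCK annotation requires.

static int coroutine_fn GRAPH_RDLOCK
example_read_header(BdrvChild *child, void *header, int64_t len)
{
    return bdrv_co_pread(child, 0, len, header, 0);
}
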
+
+void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+ uint64_t align);
+BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
+
+BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
+ const char *filename);
+
+/**
+ * bdrv_wakeup:
+ * @bs: The BlockDriverState for which an I/O operation has been completed.
+ *
+ * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
+ * synchronous I/O on a BlockDriverState that is attached to another
+ * I/O thread, the main thread lets the I/O thread's event loop run,
+ * waiting for the I/O operation to complete. A bdrv_wakeup will wake
+ * up the main thread if necessary.
+ *
+ * Manual calls to bdrv_wakeup are rarely necessary, because
+ * bdrv_dec_in_flight already calls it.
+ */
+void bdrv_wakeup(BlockDriverState *bs);
+
+const char * GRAPH_RDLOCK bdrv_get_parent_name(const BlockDriverState *bs);
+bool blk_dev_has_tray(BlockBackend *blk);
+bool blk_dev_is_tray_open(BlockBackend *blk);
+
+void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
+void bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
+ const BdrvDirtyBitmap *src,
+ HBitmap **backup, bool lock);
+
+void bdrv_inc_in_flight(BlockDriverState *bs);
+void bdrv_dec_in_flight(BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_refresh_total_sectors(BlockDriverState *bs, int64_t hint);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_refresh_total_sectors(BlockDriverState *bs, int64_t hint);
+
+BdrvChild * GRAPH_RDLOCK bdrv_cow_child(BlockDriverState *bs);
+BdrvChild * GRAPH_RDLOCK bdrv_filter_child(BlockDriverState *bs);
+BdrvChild * GRAPH_RDLOCK bdrv_filter_or_cow_child(BlockDriverState *bs);
+BdrvChild * GRAPH_RDLOCK bdrv_primary_child(BlockDriverState *bs);
+BlockDriverState * GRAPH_RDLOCK bdrv_skip_filters(BlockDriverState *bs);
+BlockDriverState * GRAPH_RDLOCK bdrv_backing_chain_next(BlockDriverState *bs);
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_cow_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_cow_child(bs));
+}
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_filter_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_filter_child(bs));
+}
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_filter_or_cow_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_filter_or_cow_child(bs));
+}
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_primary_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_primary_child(bs));
+}
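
An illustrative helper (an assumption, not part of this patch): counting the non-filter nodes in the backing chain starting at @bs using the accessors declared above. The graph rdlock is assumed to be held.

static int GRAPH_RDLOCK example_backing_chain_depth(BlockDriverState *bs)
{
    int depth = 0;

    for (bs = bdrv_skip_filters(bs); bs; bs = bdrv_backing_chain_next(bs)) {
        depth++;
    }
    return depth;
}
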
+
+/**
+ * Check whether the given offset is in the cached block-status data
+ * region.
+ *
+ * If it is, and @pnum is not NULL, *pnum is set to
+ * `bsc.data_end - offset`, i.e. how many bytes, starting from
+ * @offset, are data (according to the cache).
+ * Otherwise, *pnum is not touched.
+ */
+bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum);
+
+/**
+ * If [offset, offset + bytes) overlaps with the currently cached
+ * block-status region, invalidate the cache.
+ *
+ * (To be used by I/O paths that cause data regions to be zero or
+ * holes.)
+ */
+void bdrv_bsc_invalidate_range(BlockDriverState *bs,
+ int64_t offset, int64_t bytes);
+
+/**
+ * Mark the range [offset, offset + bytes) as a data region.
+ */
+void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes);
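
A sketch (an assumption, not from this patch) of how a block-status query path might consult the cache declared above before doing a more expensive lookup.

static int64_t example_cached_data_bytes(BlockDriverState *bs, int64_t offset)
{
    int64_t pnum;

    if (bdrv_bsc_is_data(bs, offset, &pnum)) {
        return pnum;    /* bytes of cached data starting at @offset */
    }
    return 0;           /* cache miss: caller falls back to a real query */
}
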
+
+#endif /* BLOCK_INT_IO_H */
diff --git a/include/block/block_int.h b/include/block/block_int.h
index f605622216..567a178e13 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -24,1155 +24,10 @@
#ifndef BLOCK_INT_H
#define BLOCK_INT_H
-#include "block/accounting.h"
-#include "block/block.h"
-#include "block/aio-wait.h"
-#include "qemu/queue.h"
-#include "qemu/coroutine.h"
-#include "qemu/stats64.h"
-#include "qemu/timer.h"
-#include "qemu/hbitmap.h"
-#include "block/snapshot.h"
-#include "qemu/main-loop.h"
-#include "qemu/throttle.h"
+#include "block/block_int-global-state.h"
+#include "block/block_int-io.h"
+#include "block/graph-lock.h"
-#define BLOCK_FLAG_LAZY_REFCOUNTS 8
-
-#define BLOCK_OPT_SIZE "size"
-#define BLOCK_OPT_ENCRYPT "encryption"
-#define BLOCK_OPT_ENCRYPT_FORMAT "encrypt.format"
-#define BLOCK_OPT_COMPAT6 "compat6"
-#define BLOCK_OPT_HWVERSION "hwversion"
-#define BLOCK_OPT_BACKING_FILE "backing_file"
-#define BLOCK_OPT_BACKING_FMT "backing_fmt"
-#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
-#define BLOCK_OPT_TABLE_SIZE "table_size"
-#define BLOCK_OPT_PREALLOC "preallocation"
-#define BLOCK_OPT_SUBFMT "subformat"
-#define BLOCK_OPT_COMPAT_LEVEL "compat"
-#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
-#define BLOCK_OPT_ADAPTER_TYPE "adapter_type"
-#define BLOCK_OPT_REDUNDANCY "redundancy"
-#define BLOCK_OPT_NOCOW "nocow"
-#define BLOCK_OPT_OBJECT_SIZE "object_size"
-#define BLOCK_OPT_REFCOUNT_BITS "refcount_bits"
-
-#define BLOCK_PROBE_BUF_SIZE 512
-
-enum BdrvTrackedRequestType {
- BDRV_TRACKED_READ,
- BDRV_TRACKED_WRITE,
- BDRV_TRACKED_DISCARD,
- BDRV_TRACKED_TRUNCATE,
-};
-
-typedef struct BdrvTrackedRequest {
- BlockDriverState *bs;
- int64_t offset;
- uint64_t bytes;
- enum BdrvTrackedRequestType type;
-
- bool serialising;
- int64_t overlap_offset;
- uint64_t overlap_bytes;
-
- QLIST_ENTRY(BdrvTrackedRequest) list;
- Coroutine *co; /* owner, used for deadlock detection */
- CoQueue wait_queue; /* coroutines blocked on this request */
-
- struct BdrvTrackedRequest *waiting_for;
-} BdrvTrackedRequest;
-
-struct BlockDriver {
- const char *format_name;
- int instance_size;
-
- /* set to true if the BlockDriver is a block filter. Block filters pass
- * certain callbacks that refer to data (see block.c) to their bs->file if
- * the driver doesn't implement them. Drivers that do not wish to forward
- * must implement them and return -ENOTSUP.
- */
- bool is_filter;
- /* for snapshots block filter like Quorum can implement the
- * following recursive callback.
- * It's purpose is to recurse on the filter children while calling
- * bdrv_recurse_is_first_non_filter on them.
- * For a sample implementation look in the future Quorum block filter.
- */
- bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
- BlockDriverState *candidate);
-
- int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
- int (*bdrv_probe_device)(const char *filename);
-
- /* Any driver implementing this callback is expected to be able to handle
- * NULL file names in its .bdrv_open() implementation */
- void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
- /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
- * this field set to true, except ones that are defined only by their
- * child's bs.
- * An example of the last type will be the quorum block driver.
- */
- bool bdrv_needs_filename;
-
- /* Set if a driver can support backing files */
- bool supports_backing;
-
- /* For handling image reopen for split or non-split files */
- int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
- BlockReopenQueue *queue, Error **errp);
- void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
- void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
- void (*bdrv_join_options)(QDict *options, QDict *old_options);
-
- int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
- Error **errp);
-
- /* Protocol drivers should implement this instead of bdrv_open */
- int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
- Error **errp);
- void (*bdrv_close)(BlockDriverState *bs);
- int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts,
- Error **errp);
- int coroutine_fn (*bdrv_co_create_opts)(const char *filename,
- QemuOpts *opts,
- Error **errp);
- int (*bdrv_make_empty)(BlockDriverState *bs);
-
- void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);
-
- /* aio */
- BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
- BlockCompletionFunc *cb, void *opaque);
- BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
- BlockCompletionFunc *cb, void *opaque);
- BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
- BlockCompletionFunc *cb, void *opaque);
- BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
- int64_t offset, int bytes,
- BlockCompletionFunc *cb, void *opaque);
-
- int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
-
- /**
- * @offset: position in bytes to read at
- * @bytes: number of bytes to read
- * @qiov: the buffers to fill with read data
- * @flags: currently unused, always 0
- *
- * @offset and @bytes will be a multiple of 'request_alignment',
- * but the length of individual @qiov elements does not have to
- * be a multiple.
- *
- * @bytes will always equal the total size of @qiov, and will be
- * no larger than 'max_transfer'.
- *
- * The buffer in @qiov may point directly to guest memory.
- */
- int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
- int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
- /**
- * @offset: position in bytes to write at
- * @bytes: number of bytes to write
- * @qiov: the buffers containing data to write
- * @flags: zero or more bits allowed by 'supported_write_flags'
- *
- * @offset and @bytes will be a multiple of 'request_alignment',
- * but the length of individual @qiov elements does not have to
- * be a multiple.
- *
- * @bytes will always equal the total size of @qiov, and will be
- * no larger than 'max_transfer'.
- *
- * The buffer in @qiov may point directly to guest memory.
- */
- int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
-
- /*
- * Efficiently zero a region of the disk image. Typically an image format
- * would use a compact metadata representation to implement this. This
- * function pointer may be NULL or return -ENOSUP and .bdrv_co_writev()
- * will be called instead.
- */
- int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
- int64_t offset, int bytes, BdrvRequestFlags flags);
- int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
- int64_t offset, int bytes);
-
- /* Map [offset, offset + nbytes) range onto a child of @bs to copy from,
- * and invoke bdrv_co_copy_range_from(child, ...), or invoke
- * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
- *
- * See the comment of bdrv_co_copy_range for the parameter and return value
- * semantics.
- */
- int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs,
- BdrvChild *src,
- uint64_t offset,
- BdrvChild *dst,
- uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
- /* Map [offset, offset + nbytes) range onto a child of bs to copy data to,
- * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
- * operation if @bs is the leaf and @src has the same BlockDriver. Return
- * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
- *
- * See the comment of bdrv_co_copy_range for the parameter and return value
- * semantics.
- */
- int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs,
- BdrvChild *src,
- uint64_t src_offset,
- BdrvChild *dst,
- uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
- /*
- * Building block for bdrv_block_status[_above] and
- * bdrv_is_allocated[_above]. The driver should answer only
- * according to the current layer, and should only need to set
- * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
- * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
- * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
- * block.h for the overall meaning of the bits. As a hint, the
- * flag want_zero is true if the caller cares more about precise
- * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
- * overall allocation (favor larger *pnum, perhaps by reporting
- * _DATA instead of _ZERO). The block layer guarantees input
- * clamped to bdrv_getlength() and aligned to request_alignment,
- * as well as non-NULL pnum, map, and file; in turn, the driver
- * must return an error or set pnum to an aligned non-zero value.
- */
- int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
- bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
- int64_t *map, BlockDriverState **file);
-
- /*
- * Invalidate any cached meta-data.
- */
- void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs,
- Error **errp);
- int (*bdrv_inactivate)(BlockDriverState *bs);
-
- /*
- * Flushes all data for all layers by calling bdrv_co_flush for underlying
- * layers, if needed. This function is needed for deterministic
- * synchronization of the flush finishing callback.
- */
- int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);
-
- /*
- * Flushes all data that was already written to the OS all the way down to
- * the disk (for example file-posix.c calls fsync()).
- */
- int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);
-
- /*
- * Flushes all internal caches to the OS. The data may still sit in a
- * writeback cache of the host OS, but it will survive a crash of the qemu
- * process.
- */
- int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);
-
- /*
- * Drivers setting this field must be able to work with just a plain
- * filename with '<protocol_name>:' as a prefix, and no other options.
- * Options may be extracted from the filename by implementing
- * bdrv_parse_filename.
- */
- const char *protocol_name;
- int coroutine_fn (*bdrv_co_truncate)(BlockDriverState *bs, int64_t offset,
- PreallocMode prealloc, Error **errp);
-
- int64_t (*bdrv_getlength)(BlockDriverState *bs);
- bool has_variable_length;
- int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
- BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,
- Error **errp);
-
- int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov);
-
- int (*bdrv_snapshot_create)(BlockDriverState *bs,
- QEMUSnapshotInfo *sn_info);
- int (*bdrv_snapshot_goto)(BlockDriverState *bs,
- const char *snapshot_id);
- int (*bdrv_snapshot_delete)(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
- int (*bdrv_snapshot_list)(BlockDriverState *bs,
- QEMUSnapshotInfo **psn_info);
- int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
- int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
- ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);
-
- int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
- QEMUIOVector *qiov,
- int64_t pos);
- int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,
- QEMUIOVector *qiov,
- int64_t pos);
-
- int (*bdrv_change_backing_file)(BlockDriverState *bs,
- const char *backing_file, const char *backing_fmt);
-
- /* removable device specific */
- bool (*bdrv_is_inserted)(BlockDriverState *bs);
- void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
- void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);
-
- /* to control generic scsi devices */
- BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
- unsigned long int req, void *buf,
- BlockCompletionFunc *cb, void *opaque);
- int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs,
- unsigned long int req, void *buf);
-
- /* List of options for creating images, terminated by name == NULL */
- QemuOptsList *create_opts;
-
- /*
- * Returns 0 for completed check, -errno for internal errors.
- * The check results are stored in result.
- */
- int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs,
- BdrvCheckResult *result,
- BdrvCheckMode fix);
-
- int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
- BlockDriverAmendStatusCB *status_cb,
- void *cb_opaque,
- Error **errp);
-
- void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);
-
- /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */
- int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
- const char *tag);
- int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
- const char *tag);
- int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
- bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
-
- void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);
-
- /*
- * Returns 1 if newly created images are guaranteed to contain only
- * zeros, 0 otherwise.
- */
- int (*bdrv_has_zero_init)(BlockDriverState *bs);
-
- /* Remove fd handlers, timers, and other event loop callbacks so the event
- * loop is no longer in use. Called with no in-flight requests and in
- * depth-first traversal order with parents before child nodes.
- */
- void (*bdrv_detach_aio_context)(BlockDriverState *bs);
-
- /* Add fd handlers, timers, and other event loop callbacks so I/O requests
- * can be processed again. Called with no in-flight requests and in
- * depth-first traversal order with child nodes before parent nodes.
- */
- void (*bdrv_attach_aio_context)(BlockDriverState *bs,
- AioContext *new_context);
-
- /* io queue for linux-aio */
- void (*bdrv_io_plug)(BlockDriverState *bs);
- void (*bdrv_io_unplug)(BlockDriverState *bs);
-
- /**
- * Try to get @bs's logical and physical block size.
- * On success, store them in @bsz and return zero.
- * On failure, return negative errno.
- */
- int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
- /**
- * Try to get @bs's geometry (cyls, heads, sectors)
- * On success, store them in @geo and return 0.
- * On failure return -errno.
- * Only drivers that want to override guest geometry implement this
- * callback; see hd_geometry_guess().
- */
- int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);
-
- /**
- * bdrv_co_drain_begin is called if implemented in the beginning of a
- * drain operation to drain and stop any internal sources of requests in
- * the driver.
- * bdrv_co_drain_end is called if implemented at the end of the drain.
- *
- * They should be used by the driver to e.g. manage scheduled I/O
- * requests, or toggle an internal state. After the end of the drain new
- * requests will continue normally.
- */
- void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
- void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);
-
- void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
- Error **errp);
- void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
- Error **errp);
-
- /**
- * Informs the block driver that a permission change is intended. The
- * driver checks whether the change is permissible and may take other
- * preparations for the change (e.g. get file system locks). This operation
- * is always followed either by a call to either .bdrv_set_perm or
- * .bdrv_abort_perm_update.
- *
- * Checks whether the requested set of cumulative permissions in @perm
- * can be granted for accessing @bs and whether no other users are using
- * permissions other than those given in @shared (both arguments take
- * BLK_PERM_* bitmasks).
- *
- * If both conditions are met, 0 is returned. Otherwise, -errno is returned
- * and errp is set to an error describing the conflict.
- */
- int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
- uint64_t shared, Error **errp);
-
- /**
- * Called to inform the driver that the set of cumulative set of used
- * permissions for @bs has changed to @perm, and the set of sharable
- * permission to @shared. The driver can use this to propagate changes to
- * its children (i.e. request permissions only if a parent actually needs
- * them).
- *
- * This function is only invoked after bdrv_check_perm(), so block drivers
- * may rely on preparations made in their .bdrv_check_perm implementation.
- */
- void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared);
-
- /*
- * Called to inform the driver that after a previous bdrv_check_perm()
- * call, the permission update is not performed and any preparations made
- * for it (e.g. taken file locks) need to be undone.
- *
- * This function can be called even for nodes that never saw a
- * bdrv_check_perm() call. It is a no-op then.
- */
- void (*bdrv_abort_perm_update)(BlockDriverState *bs);
-
- /**
- * Returns in @nperm and @nshared the permissions that the driver for @bs
- * needs on its child @c, based on the cumulative permissions requested by
- * the parents in @parent_perm and @parent_shared.
- *
- * If @c is NULL, return the permissions for attaching a new child for the
- * given @role.
- *
- * If @reopen_queue is non-NULL, don't return the currently needed
- * permissions, but those that will be needed after applying the
- * @reopen_queue.
- */
- void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c,
- const BdrvChildRole *role,
- BlockReopenQueue *reopen_queue,
- uint64_t parent_perm, uint64_t parent_shared,
- uint64_t *nperm, uint64_t *nshared);
-
- /**
- * Bitmaps should be marked as 'IN_USE' in the image on reopening image
- * as rw. This handler should realize it. It also should unset readonly
- * field of BlockDirtyBitmap's in case of success.
- */
- int (*bdrv_reopen_bitmaps_rw)(BlockDriverState *bs, Error **errp);
- bool (*bdrv_can_store_new_dirty_bitmap)(BlockDriverState *bs,
- const char *name,
- uint32_t granularity,
- Error **errp);
- void (*bdrv_remove_persistent_dirty_bitmap)(BlockDriverState *bs,
- const char *name,
- Error **errp);
-
- /**
- * Register/unregister a buffer for I/O. For example, when the driver is
- * interested to know the memory areas that will later be used in iovs, so
- * that it can do IOMMU mapping with VFIO etc., in order to get better
- * performance. In the case of VFIO drivers, this callback is used to do
- * DMA mapping for hot buffers.
- */
- void (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size);
- void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host);
- QLIST_ENTRY(BlockDriver) list;
-};
-
-typedef struct BlockLimits {
- /* Alignment requirement, in bytes, for offset/length of I/O
- * requests. Must be a power of 2 less than INT_MAX; defaults to
- * 1 for drivers with modern byte interfaces, and to 512
- * otherwise. */
- uint32_t request_alignment;
-
- /* Maximum number of bytes that can be discarded at once (since it
- * is signed, it must be < 2G, if set). Must be multiple of
- * pdiscard_alignment, but need not be power of 2. May be 0 if no
- * inherent 32-bit limit */
- int32_t max_pdiscard;
-
- /* Optimal alignment for discard requests in bytes. A power of 2
- * is best but not mandatory. Must be a multiple of
- * bl.request_alignment, and must be less than max_pdiscard if
- * that is set. May be 0 if bl.request_alignment is good enough */
- uint32_t pdiscard_alignment;
-
- /* Maximum number of bytes that can zeroized at once (since it is
- * signed, it must be < 2G, if set). Must be multiple of
- * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
- int32_t max_pwrite_zeroes;
-
- /* Optimal alignment for write zeroes requests in bytes. A power
- * of 2 is best but not mandatory. Must be a multiple of
- * bl.request_alignment, and must be less than max_pwrite_zeroes
- * if that is set. May be 0 if bl.request_alignment is good
- * enough */
- uint32_t pwrite_zeroes_alignment;
-
- /* Optimal transfer length in bytes. A power of 2 is best but not
- * mandatory. Must be a multiple of bl.request_alignment, or 0 if
- * no preferred size */
- uint32_t opt_transfer;
-
- /* Maximal transfer length in bytes. Need not be power of 2, but
- * must be multiple of opt_transfer and bl.request_alignment, or 0
- * for no 32-bit limit. For now, anything larger than INT_MAX is
- * clamped down. */
- uint32_t max_transfer;
-
- /* memory alignment, in bytes so that no bounce buffer is needed */
- size_t min_mem_alignment;
-
- /* memory alignment, in bytes, for bounce buffer */
- size_t opt_mem_alignment;
-
- /* maximum number of iovec elements */
- int max_iov;
-} BlockLimits;
-
-typedef struct BdrvOpBlocker BdrvOpBlocker;
-
-typedef struct BdrvAioNotifier {
- void (*attached_aio_context)(AioContext *new_context, void *opaque);
- void (*detach_aio_context)(void *opaque);
-
- void *opaque;
- bool deleted;
-
- QLIST_ENTRY(BdrvAioNotifier) list;
-} BdrvAioNotifier;
-
-struct BdrvChildRole {
- /* If true, bdrv_replace_node() doesn't change the node this BdrvChild
- * points to. */
- bool stay_at_node;
-
- /* If true, the parent is a BlockDriverState and bdrv_next_all_states()
- * will return it. This information is used for drain_all, where every node
- * will be drained separately, so the drain only needs to be propagated to
- * non-BDS parents. */
- bool parent_is_bds;
-
- void (*inherit_options)(int *child_flags, QDict *child_options,
- int parent_flags, QDict *parent_options);
-
- void (*change_media)(BdrvChild *child, bool load);
- void (*resize)(BdrvChild *child);
-
- /* Returns a name that is supposedly more useful for human users than the
- * node name for identifying the node in question (in particular, a BB
- * name), or NULL if the parent can't provide a better name. */
- const char *(*get_name)(BdrvChild *child);
-
- /* Returns a malloced string that describes the parent of the child for a
- * human reader. This could be a node-name, BlockBackend name, qdev ID or
- * QOM path of the device owning the BlockBackend, job type and ID etc. The
- * caller is responsible for freeing the memory. */
- char *(*get_parent_desc)(BdrvChild *child);
-
- /*
- * If this pair of functions is implemented, the parent doesn't issue new
- * requests after returning from .drained_begin() until .drained_end() is
- * called.
- *
- * These functions must not change the graph (and therefore also must not
- * call aio_poll(), which could change the graph indirectly).
- *
- * Note that this can be nested. If drained_begin() was called twice, new
- * I/O is allowed only after drained_end() was called twice, too.
- */
- void (*drained_begin)(BdrvChild *child);
- void (*drained_end)(BdrvChild *child);
-
- /*
- * Returns whether the parent has pending requests for the child. This
- * callback is polled after .drained_begin() has been called until all
- * activity on the child has stopped.
- */
- bool (*drained_poll)(BdrvChild *child);
-
- /* Notifies the parent that the child has been activated/inactivated (e.g.
- * when migration is completing) and it can start/stop requesting
- * permissions and doing I/O on it. */
- void (*activate)(BdrvChild *child, Error **errp);
- int (*inactivate)(BdrvChild *child);
-
- void (*attach)(BdrvChild *child);
- void (*detach)(BdrvChild *child);
-
- /* Notifies the parent that the filename of its child has changed (e.g.
- * because the direct child was removed from the backing chain), so that it
- * can update its reference. */
- int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
- const char *filename, Error **errp);
-};
-
-extern const BdrvChildRole child_file;
-extern const BdrvChildRole child_format;
-extern const BdrvChildRole child_backing;
-
-struct BdrvChild {
- BlockDriverState *bs;
- char *name;
- const BdrvChildRole *role;
- void *opaque;
-
- /**
- * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
- */
- uint64_t perm;
-
- /**
- * Permissions that can still be granted to other users of @bs while this
- * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
- */
- uint64_t shared_perm;
-
- QLIST_ENTRY(BdrvChild) next;
- QLIST_ENTRY(BdrvChild) next_parent;
-};
-
-/*
- * Note: the function bdrv_append() copies and swaps contents of
- * BlockDriverStates, so if you add new fields to this struct, please
- * inspect bdrv_append() to determine if the new fields need to be
- * copied as well.
- */
-struct BlockDriverState {
- /* Protected by big QEMU lock or read-only after opening. No special
- * locking needed during I/O...
- */
- int open_flags; /* flags used to open the file, re-used for re-open */
- bool read_only; /* if true, the media is read only */
- bool encrypted; /* if true, the media is encrypted */
- bool sg; /* if true, the device is a /dev/sg* */
- bool probed; /* if true, format was probed rather than specified */
- bool force_share; /* if true, always allow all shared permissions */
- bool implicit; /* if true, this filter node was automatically inserted */
-
- BlockDriver *drv; /* NULL means no media */
- void *opaque;
-
- AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
- /* long-running tasks intended to always use the same AioContext as this
- * BDS may register themselves in this list to be notified of changes
- * regarding this BDS's context */
- QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
- bool walking_aio_notifiers; /* to make removal during iteration safe */
-
- char filename[PATH_MAX];
- char backing_file[PATH_MAX]; /* if non zero, the image is a diff of
- this file image */
- char backing_format[16]; /* if non-zero and backing_file exists */
-
- QDict *full_open_options;
- char exact_filename[PATH_MAX];
-
- BdrvChild *backing;
- BdrvChild *file;
-
- /* I/O Limits */
- BlockLimits bl;
-
- /* Flags honored during pwrite (so far: BDRV_REQ_FUA,
- * BDRV_REQ_WRITE_UNCHANGED).
- * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
- * writes will be issued as normal writes without the flag set.
- * This is important to note for drivers that do not explicitly
- * request a WRITE permission for their children and instead take
- * the same permissions as their parent did (this is commonly what
- * block filters do). Such drivers have to be aware that the
- * parent may have taken a WRITE_UNCHANGED permission only and is
- * issuing such requests. Drivers either must make sure that
- * these requests do not result in plain WRITE accesses (usually
- * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
- * every incoming write request as-is, including potentially that
- * flag), or they have to explicitly take the WRITE permission for
- * their children. */
- unsigned int supported_write_flags;
- /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
- * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) */
- unsigned int supported_zero_flags;
-
- /* the following member gives a name to every node on the bs graph. */
- char node_name[32];
- /* element of the list of named nodes building the graph */
- QTAILQ_ENTRY(BlockDriverState) node_list;
- /* element of the list of all BlockDriverStates (all_bdrv_states) */
- QTAILQ_ENTRY(BlockDriverState) bs_list;
- /* element of the list of monitor-owned BDS */
- QTAILQ_ENTRY(BlockDriverState) monitor_list;
- int refcnt;
-
- /* operation blockers */
- QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
-
- /* long-running background operation */
- BlockJob *job;
-
- /* The node that this node inherited default options from (and a reopen on
- * which can affect this node by changing these defaults). This is always a
- * parent node of this node. */
- BlockDriverState *inherits_from;
- QLIST_HEAD(, BdrvChild) children;
- QLIST_HEAD(, BdrvChild) parents;
-
- QDict *options;
- QDict *explicit_options;
- BlockdevDetectZeroesOptions detect_zeroes;
-
- /* The error object in use for blocking operations on backing_hd */
- Error *backing_blocker;
-
- /* Protected by AioContext lock */
-
- /* If we are reading a disk image, give its size in sectors.
- * Generally read-only; it is written to by load_snapshot and
- * save_snaphost, but the block layer is quiescent during those.
- */
- int64_t total_sectors;
-
- /* Callback before write request is processed */
- NotifierWithReturnList before_write_notifiers;
-
- /* threshold limit for writes, in bytes. "High water mark". */
- uint64_t write_threshold_offset;
- NotifierWithReturn write_threshold_notifier;
-
- /* Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
- * Reading from the list can be done with either the BQL or the
- * dirty_bitmap_mutex. Modifying a bitmap only requires
- * dirty_bitmap_mutex. */
- QemuMutex dirty_bitmap_mutex;
- QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
-
- /* Offset after the highest byte written to */
- Stat64 wr_highest_offset;
-
- /* If true, copy read backing sectors into image. Can be >1 if more
- * than one client has requested copy-on-read. Accessed with atomic
- * ops.
- */
- int copy_on_read;
-
- /* number of in-flight requests; overall and serialising.
- * Accessed with atomic ops.
- */
- unsigned int in_flight;
- unsigned int serialising_in_flight;
-
- /* counter for nested bdrv_io_plug.
- * Accessed with atomic ops.
- */
- unsigned io_plugged;
-
- /* do we need to tell the quest if we have a volatile write cache? */
- int enable_write_cache;
-
- /* Accessed with atomic ops. */
- int quiesce_counter;
- int recursive_quiesce_counter;
-
- unsigned int write_gen; /* Current data generation */
-
- /* Protected by reqs_lock. */
- CoMutex reqs_lock;
- QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
- CoQueue flush_queue; /* Serializing flush queue */
- bool active_flush_req; /* Flush request in flight? */
-
- /* Only read/written by whoever has set active_flush_req to true. */
- unsigned int flushed_gen; /* Flushed write generation */
-};
-
-struct BlockBackendRootState {
- int open_flags;
- bool read_only;
- BlockdevDetectZeroesOptions detect_zeroes;
-};
-
-typedef enum BlockMirrorBackingMode {
- /* Reuse the existing backing chain from the source for the target.
- * - sync=full: Set backing BDS to NULL.
- * - sync=top: Use source's backing BDS.
- * - sync=none: Use source as the backing BDS. */
- MIRROR_SOURCE_BACKING_CHAIN,
-
- /* Open the target's backing chain completely anew */
- MIRROR_OPEN_BACKING_CHAIN,
-
- /* Do not change the target's backing BDS after job completion */
- MIRROR_LEAVE_BACKING_CHAIN,
-} BlockMirrorBackingMode;
-
-static inline BlockDriverState *backing_bs(BlockDriverState *bs)
-{
- return bs->backing ? bs->backing->bs : NULL;
-}
-
-
-/* Essential block drivers which must always be statically linked into qemu, and
- * which therefore can be accessed without using bdrv_find_format() */
-extern BlockDriver bdrv_file;
-extern BlockDriver bdrv_raw;
-extern BlockDriver bdrv_qcow2;
-
-int coroutine_fn bdrv_co_preadv(BdrvChild *child,
- int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
- int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-
-extern unsigned int bdrv_drain_all_count;
-void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
-void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
-
-int get_tmp_filename(char *filename, int size);
-BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
- const char *filename);
-
-void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
- QDict *options);
-
-
-/**
- * bdrv_add_before_write_notifier:
- *
- * Register a callback that is invoked before write requests are processed but
- * after any throttling or waiting for overlapping requests.
- */
-void bdrv_add_before_write_notifier(BlockDriverState *bs,
- NotifierWithReturn *notifier);
-
-/**
- * bdrv_detach_aio_context:
- *
- * May be called from .bdrv_detach_aio_context() to detach children from the
- * current #AioContext. This is only needed by block drivers that manage their
- * own children. Both ->file and ->backing are automatically handled and
- * block drivers should not call this function on them explicitly.
- */
-void bdrv_detach_aio_context(BlockDriverState *bs);
-
-/**
- * bdrv_attach_aio_context:
- *
- * May be called from .bdrv_attach_aio_context() to attach children to the new
- * #AioContext. This is only needed by block drivers that manage their own
- * children. Both ->file and ->backing are automatically handled and block
- * drivers should not call this function on them explicitly.
- */
-void bdrv_attach_aio_context(BlockDriverState *bs,
- AioContext *new_context);
-
-/**
- * bdrv_add_aio_context_notifier:
- *
- * If a long-running job intends to be always run in the same AioContext as a
- * certain BDS, it may use this function to be notified of changes regarding the
- * association of the BDS to an AioContext.
- *
- * attached_aio_context() is called after the target BDS has been attached to a
- * new AioContext; detach_aio_context() is called before the target BDS is being
- * detached from its old AioContext.
- */
-void bdrv_add_aio_context_notifier(BlockDriverState *bs,
- void (*attached_aio_context)(AioContext *new_context, void *opaque),
- void (*detach_aio_context)(void *opaque), void *opaque);
-
-/**
- * bdrv_remove_aio_context_notifier:
- *
- * Unsubscribe of change notifications regarding the BDS's AioContext. The
- * parameters given here have to be the same as those given to
- * bdrv_add_aio_context_notifier().
- */
-void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
- void (*aio_context_attached)(AioContext *,
- void *),
- void (*aio_context_detached)(void *),
- void *opaque);
-
-/**
- * bdrv_wakeup:
- * @bs: The BlockDriverState for which an I/O operation has been completed.
- *
- * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
- * synchronous I/O on a BlockDriverState that is attached to another
- * I/O thread, the main thread lets the I/O thread's event loop run,
- * waiting for the I/O operation to complete. A bdrv_wakeup will wake
- * up the main thread if necessary.
- *
- * Manual calls to bdrv_wakeup are rarely necessary, because
- * bdrv_dec_in_flight already calls it.
- */
-void bdrv_wakeup(BlockDriverState *bs);
-
-#ifdef _WIN32
-int is_windows_drive(const char *filename);
-#endif
-
-/**
- * stream_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Block device to operate on.
- * @base: Block device that will become the new base, or %NULL to
- * flatten the whole backing file chain onto @bs.
- * @backing_file_str: The file name that will be written to @bs as the
- * the new backing file if the job completes. Ignored if @base is %NULL.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @on_error: The action to take upon error.
- * @errp: Error object.
- *
- * Start a streaming operation on @bs. Clusters that are unallocated
- * in @bs, but allocated in any image between @base and @bs (both
- * exclusive) will be written to @bs. At the end of a successful
- * streaming job, the backing file of @bs will be changed to
- * @backing_file_str in the written image and to @base in the live
- * BlockDriverState.
- */
-void stream_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, const char *backing_file_str,
- int creation_flags, int64_t speed,
- BlockdevOnError on_error, Error **errp);
-
-/**
- * commit_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Active block device.
- * @top: Top block device to be committed.
- * @base: Block device that will be written into, and become the new top.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @on_error: The action to take upon error.
- * @backing_file_str: String to use as the backing file in @top's overlay
- * @filter_node_name: The node name that should be assigned to the filter
- * driver that the commit job inserts into the graph above @top. NULL means
- * that a node name should be autogenerated.
- * @errp: Error object.
- *
- */
-void commit_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, BlockDriverState *top,
- int creation_flags, int64_t speed,
- BlockdevOnError on_error, const char *backing_file_str,
- const char *filter_node_name, Error **errp);
-/**
- * commit_active_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Active block device to be committed.
- * @base: Block device that will be written into, and become the new top.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @on_error: The action to take upon error.
- * @filter_node_name: The node name that should be assigned to the filter
- * driver that the commit job inserts into the graph above @bs. NULL means that
- * a node name should be autogenerated.
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
- * @auto_complete: Auto complete the job.
- * @errp: Error object.
- *
- */
-void commit_active_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, int creation_flags,
- int64_t speed, BlockdevOnError on_error,
- const char *filter_node_name,
- BlockCompletionFunc *cb, void *opaque,
- bool auto_complete, Error **errp);
-/*
- * mirror_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Block device to operate on.
- * @target: Block device to write to.
- * @replaces: Block graph node name to replace once the mirror is done. Can
- * only be used when full mirroring is selected.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @granularity: The chosen granularity for the dirty bitmap.
- * @buf_size: The amount of data that can be in flight at one time.
- * @mode: Whether to collapse all images in the chain to the target.
- * @backing_mode: How to establish the target's backing chain after completion.
- * @on_source_error: The action to take upon error reading from the source.
- * @on_target_error: The action to take upon error writing to the target.
- * @unmap: Whether to unmap target where source sectors only contain zeroes.
- * @filter_node_name: The node name that should be assigned to the filter
- * driver that the mirror job inserts into the graph above @bs. NULL means that
- * a node name should be autogenerated.
- * @copy_mode: When to trigger writes to the target.
- * @errp: Error object.
- *
- * Start a mirroring operation on @bs. Clusters that are allocated
- * in @bs will be written to @target until the job is cancelled or
- * manually completed. At the end of a successful mirroring job,
- * @bs will be switched to read from @target.
- */
-void mirror_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *target, const char *replaces,
- int creation_flags, int64_t speed,
- uint32_t granularity, int64_t buf_size,
- MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
- BlockdevOnError on_source_error,
- BlockdevOnError on_target_error,
- bool unmap, const char *filter_node_name,
- MirrorCopyMode copy_mode, Error **errp);
-
-/*
- * backup_job_create:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Block device to operate on.
- * @target: Block device to write to.
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @sync_mode: What parts of the disk image should be copied to the destination.
- * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
- * @on_source_error: The action to take upon error reading from the source.
- * @on_target_error: The action to take upon error writing to the target.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
- * @txn: Transaction that this job is part of (may be NULL).
- *
- * Create a backup operation on @bs. Clusters in @bs are written to @target
- * until the job is cancelled or manually completed.
- */
-BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
- BlockDriverState *target, int64_t speed,
- MirrorSyncMode sync_mode,
- BdrvDirtyBitmap *sync_bitmap,
- bool compress,
- BlockdevOnError on_source_error,
- BlockdevOnError on_target_error,
- int creation_flags,
- BlockCompletionFunc *cb, void *opaque,
- JobTxn *txn, Error **errp);
-
-void hmp_drive_add_node(Monitor *mon, const char *optstr);
-
-BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
- const char *child_name,
- const BdrvChildRole *child_role,
- uint64_t perm, uint64_t shared_perm,
- void *opaque, Error **errp);
-void bdrv_root_unref_child(BdrvChild *child);
-
-int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
- Error **errp);
-
-/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
- * block filters: Forward CONSISTENT_READ, WRITE, WRITE_UNCHANGED and RESIZE to
- * all children */
-void bdrv_filter_default_perms(BlockDriverState *bs, BdrvChild *c,
- const BdrvChildRole *role,
- BlockReopenQueue *reopen_queue,
- uint64_t perm, uint64_t shared,
- uint64_t *nperm, uint64_t *nshared);
-
-/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
- * (non-raw) image formats: Like above for bs->backing, but for bs->file it
- * requires WRITE | RESIZE for read-write images, always requires
- * CONSISTENT_READ and doesn't share WRITE. */
-void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
- const BdrvChildRole *role,
- BlockReopenQueue *reopen_queue,
- uint64_t perm, uint64_t shared,
- uint64_t *nperm, uint64_t *nshared);
-
-/*
- * Default implementation for drivers to pass bdrv_co_block_status() to
- * their file.
- */
-int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
- bool want_zero,
- int64_t offset,
- int64_t bytes,
- int64_t *pnum,
- int64_t *map,
- BlockDriverState **file);
-/*
- * Default implementation for drivers to pass bdrv_co_block_status() to
- * their backing file.
- */
-int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
- bool want_zero,
- int64_t offset,
- int64_t bytes,
- int64_t *pnum,
- int64_t *map,
- BlockDriverState **file);
-const char *bdrv_get_parent_name(const BlockDriverState *bs);
-void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
-bool blk_dev_has_removable_media(BlockBackend *blk);
-bool blk_dev_has_tray(BlockBackend *blk);
-void blk_dev_eject_request(BlockBackend *blk, bool force);
-bool blk_dev_is_tray_open(BlockBackend *blk);
-bool blk_dev_is_medium_locked(BlockBackend *blk);
-
-void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
-
-void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
-void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);
-
-void bdrv_inc_in_flight(BlockDriverState *bs);
-void bdrv_dec_in_flight(BlockDriverState *bs);
-
-void blockdev_close_all_bdrv_states(void);
-
-int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
- BdrvChild *dst, uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
- BdrvChild *dst, uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
-int refresh_total_sectors(BlockDriverState *bs, int64_t hint);
+/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */
#endif /* BLOCK_INT_H */
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index ede0bd8dcb..7061ab7201 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -26,8 +26,8 @@
#ifndef BLOCKJOB_H
#define BLOCKJOB_H
+#include "qapi/qapi-types-block-core.h"
#include "qemu/job.h"
-#include "block/block.h"
#include "qemu/ratelimit.h"
#define BLOCK_JOB_SLICE_TIME 100000000ULL /* ns */
@@ -40,24 +40,38 @@ typedef struct BlockJobDriver BlockJobDriver;
* Long-running operation on a BlockDriverState.
*/
typedef struct BlockJob {
- /** Data belonging to the generic Job infrastructure */
+ /**
+ * Data belonging to the generic Job infrastructure.
+ * Protected by job mutex.
+ */
Job job;
- /** The block device on which the job is operating. */
- BlockBackend *blk;
-
- /** Status that is published by the query-block-jobs QMP API */
+ /**
+ * Status that is published by the query-block-jobs QMP API.
+ * Protected by job mutex.
+ */
BlockDeviceIoStatus iostatus;
- /** Speed that was set with @block_job_set_speed. */
+ /**
+ * Speed that was set with @block_job_set_speed.
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
+ */
int64_t speed;
- /** Rate limiting data structure for implementing @speed. */
+ /**
+ * Rate limiting data structure for implementing @speed.
+ * RateLimit API is thread-safe.
+ */
RateLimit limit;
- /** Block other operations when block job is running */
+ /**
+ * Block other operations when block job is running.
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
+ */
Error *blocker;
+ /** All notifiers are set once in block_job_create() and never modified. */
+
/** Called when a cancelled job is finalised. */
Notifier finalize_cancelled_notifier;
@@ -73,20 +87,31 @@ typedef struct BlockJob {
/** Called when the job coroutine yields or terminates */
Notifier idle_notifier;
- /** BlockDriverStates that are involved in this block job */
+ /**
+ * BlockDriverStates that are involved in this block job.
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
+ */
GSList *nodes;
} BlockJob;
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
/**
- * block_job_next:
+ * block_job_next_locked:
* @job: A block job, or %NULL.
*
* Get the next element from the list of block jobs after @job, or the
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
+ * Called with job lock held.
*/
-BlockJob *block_job_next(BlockJob *job);
+BlockJob *block_job_next_locked(BlockJob *job);
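[Usage sketch, not part of the patch: iterating all block jobs with the new *_locked API. WITH_JOB_LOCK_GUARD() is assumed to be the job-lock guard from include/qemu/job.h, and the counting helper itself is hypothetical.]

    /* Hypothetical helper: count block jobs while holding the job lock. */
    static int example_count_block_jobs(void)
    {
        BlockJob *job;
        int count = 0;

        WITH_JOB_LOCK_GUARD() {
            for (job = block_job_next_locked(NULL); job;
                 job = block_job_next_locked(job)) {
                count++;
            }
        }
        return count;
    }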
/**
* block_job_get:
@@ -95,9 +120,13 @@ BlockJob *block_job_next(BlockJob *job);
* Get the block job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
+ * Called with job lock *not* held.
*/
BlockJob *block_job_get(const char *id);
+/* Same as block_job_get(), but called with job lock held. */
+BlockJob *block_job_get_locked(const char *id);
+
/**
* block_job_add_bdrv:
* @job: A block job
@@ -109,8 +138,9 @@ BlockJob *block_job_get(const char *id);
* @job. This means that all operations will be blocked on @bs while
* @job exists.
*/
-int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
- uint64_t perm, uint64_t shared_perm, Error **errp);
+int GRAPH_WRLOCK
+block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
+ uint64_t perm, uint64_t shared_perm, Error **errp);
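[Usage sketch, not part of the patch: block_job_add_bdrv() is now annotated GRAPH_WRLOCK, so a main-loop caller is expected to wrap it in the writer lock declared in block/graph-lock.h. The function, node name and permission values below are illustrative assumptions.]

    /* Hypothetical call site; runs in the main loop under the BQL. */
    static int example_attach_target(BlockJob *job, BlockDriverState *target_bs,
                                     Error **errp)
    {
        int ret;

        bdrv_graph_wrlock();
        ret = block_job_add_bdrv(job, "target", target_bs,
                                 0, BLK_PERM_ALL, errp);
        bdrv_graph_wrunlock();
        return ret;
    }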
/**
* block_job_remove_all_bdrv:
@@ -122,32 +152,73 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
void block_job_remove_all_bdrv(BlockJob *job);
/**
- * block_job_set_speed:
+ * block_job_has_bdrv:
+ * @job: The block job
+ * @bs: The node to search for
+ *
+ * Searches for @bs in the list of nodes that are involved in the
+ * job.
+ */
+bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs);
+
+/**
+ * block_job_set_speed_locked:
* @job: The job to set the speed for.
* @speed: The new value
* @errp: Error object.
*
* Set a rate-limiting parameter for the job; the actual meaning may
* vary depending on the job type.
+ *
+ * Called with job lock held, but might release it temporarily.
*/
-void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
+bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp);
/**
- * block_job_query:
+ * block_job_change_locked:
+ * @job: The job to change.
+ * @opts: The new options.
+ * @errp: Error object.
+ *
+ * Change the job according to opts.
+ */
+void block_job_change_locked(BlockJob *job, BlockJobChangeOptions *opts,
+ Error **errp);
+
+/**
+ * block_job_query_locked:
* @job: The job to get information about.
*
* Return information about a job.
+ *
+ * Called with job lock held.
*/
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
+BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp);
/**
- * block_job_iostatus_reset:
+ * block_job_iostatus_reset_locked:
* @job: The job whose I/O status should be reset.
*
* Reset I/O status on @job and on BlockDriverState objects it uses,
* other than job->blk.
+ *
+ * Called with job lock held.
+ */
+void block_job_iostatus_reset_locked(BlockJob *job);
+
+/*
+ * block_job_get_aio_context:
+ *
+ * Returns aio context associated with a block job.
+ */
+AioContext *block_job_get_aio_context(BlockJob *job);
+
+
+/*
+ * Common functions that are neither I/O nor Global State.
+ *
+ * See include/block/block-common.h for more information about
+ * the Common API.
*/
-void block_job_iostatus_reset(BlockJob *job);
/**
* block_job_is_internal:
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index e4a318dd15..4c3d2e25a2 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -27,7 +27,6 @@
#define BLOCKJOB_INT_H
#include "block/blockjob.h"
-#include "block/block.h"
/**
* BlockJobDriver:
@@ -39,6 +38,13 @@ struct BlockJobDriver {
JobDriver job_driver;
/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ /*
* Returns whether the job has pending requests for the child or will
* submit new requests before the next pause point. This callback is polled
* in the context of draining a job node after requesting that the job be
@@ -47,24 +53,41 @@ struct BlockJobDriver {
bool (*drained_poll)(BlockJob *job);
/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+ /*
* If the callback is not NULL, it will be invoked before the job is
* resumed in a new AioContext. This is the place to move any resources
* besides job->blk to the new AioContext.
*/
void (*attached_aio_context)(BlockJob *job, AioContext *new_context);
+ void (*set_speed)(BlockJob *job, int64_t speed);
+
/*
- * If the callback is not NULL, it will be invoked when the job has to be
- * synchronously cancelled or completed; it should drain BlockDriverStates
- * as required to ensure progress.
+ * Change the @job's options according to @opts.
*
- * Block jobs must use the default implementation for job_driver.drain,
- * which will in turn call this callback after doing generic block job
- * stuff.
+ * Note that this can already be called before the job coroutine is running.
+ */
+ void (*change)(BlockJob *job, BlockJobChangeOptions *opts, Error **errp);
+
+ /*
+ * Query information specific to this kind of block job.
*/
- void (*drain)(BlockJob *job);
+ void (*query)(BlockJob *job, BlockJobInfo *info);
};
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
/**
* block_job_create:
* @job_id: The id of the newly-created job, or %NULL to have one
@@ -88,10 +111,11 @@ struct BlockJobDriver {
* This function is not part of the public job interface; it should be
* called from a wrapper that is specific to the job type.
*/
-void *block_job_create(const char *job_id, const BlockJobDriver *driver,
- JobTxn *txn, BlockDriverState *bs, uint64_t perm,
- uint64_t shared_perm, int64_t speed, int flags,
- BlockCompletionFunc *cb, void *opaque, Error **errp);
+void * GRAPH_UNLOCKED
+block_job_create(const char *job_id, const BlockJobDriver *driver,
+ JobTxn *txn, BlockDriverState *bs, uint64_t perm,
+ uint64_t shared_perm, int64_t speed, int flags,
+ BlockCompletionFunc *cb, void *opaque, Error **errp);
/**
* block_job_free:
@@ -107,21 +131,26 @@ void block_job_free(Job *job);
*/
void block_job_user_resume(Job *job);
-/**
- * block_job_drain:
- * Callback to be used for JobDriver.drain in all block jobs. Drains the main
- * block node associated with the block jobs and calls BlockJobDriver.drain for
- * job-specific actions.
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
*/
-void block_job_drain(Job *job);
/**
- * block_job_ratelimit_get_delay:
+ * block_job_ratelimit_processed_bytes:
*
- * Calculate and return delay for the next request in ns. See the documentation
- * of ratelimit_calculate_delay() for details.
+ * To be called after some work has been done. Adjusts the delay for the next
+ * request. See the documentation of ratelimit_calculate_delay() for details.
+ */
+void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n);
+
+/**
+ * Put the job to sleep (assuming that it wasn't canceled) to throttle it to the
+ * right speed according to its rate limiting.
*/
-int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n);
+void block_job_ratelimit_sleep(BlockJob *job);
/**
* block_job_error_action:
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index 04a117fc81..fa956debfb 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -1,34 +1,48 @@
#ifndef BLOCK_DIRTY_BITMAP_H
#define BLOCK_DIRTY_BITMAP_H
-#include "qemu-common.h"
+#include "block/block-common.h"
#include "qapi/qapi-types-block-core.h"
#include "qemu/hbitmap.h"
+typedef enum BitmapCheckFlags {
+ BDRV_BITMAP_BUSY = 1,
+ BDRV_BITMAP_RO = 2,
+ BDRV_BITMAP_INCONSISTENT = 4,
+} BitmapCheckFlags;
+
+#define BDRV_BITMAP_DEFAULT (BDRV_BITMAP_BUSY | BDRV_BITMAP_RO | \
+ BDRV_BITMAP_INCONSISTENT)
+#define BDRV_BITMAP_ALLOW_RO (BDRV_BITMAP_BUSY | BDRV_BITMAP_INCONSISTENT)
+
+#define BDRV_BITMAP_MAX_NAME_SIZE 1023
+
+bool bdrv_supports_persistent_dirty_bitmap(BlockDriverState *bs);
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs,
uint32_t granularity,
const char *name,
Error **errp);
-void bdrv_create_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap,
- int chunk_size);
-void bdrv_release_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap);
-int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs,
- BdrvDirtyBitmap *bitmap,
+int bdrv_dirty_bitmap_create_successor(BdrvDirtyBitmap *bitmap,
Error **errp);
-BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs,
- BdrvDirtyBitmap *bitmap,
+BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BdrvDirtyBitmap *bitmap,
Error **errp);
-BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs,
- BdrvDirtyBitmap *bitmap,
+BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BdrvDirtyBitmap *bitmap,
Error **errp);
void bdrv_dirty_bitmap_enable_successor(BdrvDirtyBitmap *bitmap);
BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs,
const char *name);
-void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap);
+int bdrv_dirty_bitmap_check(const BdrvDirtyBitmap *bitmap, uint32_t flags,
+ Error **errp);
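[Usage sketch, not part of the patch: the new check helper replaces the old frozen/qmp-locked queries, rejecting busy, read-only and inconsistent bitmaps in a single call. The surrounding variables are hypothetical.]

    /* Hypothetical check before modifying a user-visible bitmap. */
    if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_DEFAULT, errp) < 0) {
        return -EINVAL;   /* bitmap is busy, read-only or inconsistent */
    }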
+void bdrv_release_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_release_named_dirty_bitmaps(BlockDriverState *bs);
-void bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs,
- const char *name,
- Error **errp);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
+ Error **errp);
+int co_wrapper_bdrv_rdlock
+bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
+ Error **errp);
+
void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_enable_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap);
@@ -36,21 +50,21 @@ BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs);
uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs);
uint32_t bdrv_dirty_bitmap_granularity(const BdrvDirtyBitmap *bitmap);
bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap);
-bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap);
+bool bdrv_dirty_bitmap_has_successor(BdrvDirtyBitmap *bitmap);
const char *bdrv_dirty_bitmap_name(const BdrvDirtyBitmap *bitmap);
int64_t bdrv_dirty_bitmap_size(const BdrvDirtyBitmap *bitmap);
-DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap);
void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap,
int64_t offset, int64_t bytes);
void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap,
int64_t offset, int64_t bytes);
-BdrvDirtyBitmapIter *bdrv_dirty_meta_iter_new(BdrvDirtyBitmap *bitmap);
BdrvDirtyBitmapIter *bdrv_dirty_iter_new(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter);
uint64_t bdrv_dirty_bitmap_serialization_size(const BdrvDirtyBitmap *bitmap,
uint64_t offset, uint64_t bytes);
uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap);
+uint64_t bdrv_dirty_bitmap_serialization_coverage(int serialized_chunk_size,
+ const BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap,
uint8_t *buf, uint64_t offset,
uint64_t bytes);
@@ -66,18 +80,19 @@ void bdrv_dirty_bitmap_deserialize_ones(BdrvDirtyBitmap *bitmap,
void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_set_readonly(BdrvDirtyBitmap *bitmap, bool value);
-void bdrv_dirty_bitmap_set_persistance(BdrvDirtyBitmap *bitmap,
+void bdrv_dirty_bitmap_set_persistence(BdrvDirtyBitmap *bitmap,
bool persistent);
-void bdrv_dirty_bitmap_set_qmp_locked(BdrvDirtyBitmap *bitmap, bool qmp_locked);
-void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
+void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap);
+void bdrv_dirty_bitmap_set_busy(BdrvDirtyBitmap *bitmap, bool busy);
+bool bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
HBitmap **backup, Error **errp);
-void bdrv_dirty_bitmap_set_migration(BdrvDirtyBitmap *bitmap, bool migration);
+void bdrv_dirty_bitmap_skip_store(BdrvDirtyBitmap *bitmap, bool skip);
+bool bdrv_dirty_bitmap_get(BdrvDirtyBitmap *bitmap, int64_t offset);
/* Functions that require manual locking. */
void bdrv_dirty_bitmap_lock(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_unlock(BdrvDirtyBitmap *bitmap);
-bool bdrv_get_dirty_locked(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
- int64_t offset);
+bool bdrv_dirty_bitmap_get_locked(BdrvDirtyBitmap *bitmap, int64_t offset);
void bdrv_set_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
int64_t offset, int64_t bytes);
void bdrv_reset_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
@@ -85,24 +100,31 @@ void bdrv_reset_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter);
void bdrv_set_dirty_iter(BdrvDirtyBitmapIter *hbi, int64_t offset);
int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap);
-int64_t bdrv_get_meta_dirty_count(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_truncate(BlockDriverState *bs, int64_t bytes);
bool bdrv_dirty_bitmap_readonly(const BdrvDirtyBitmap *bitmap);
bool bdrv_has_readonly_bitmaps(BlockDriverState *bs);
+bool bdrv_has_named_bitmaps(BlockDriverState *bs);
bool bdrv_dirty_bitmap_get_autoload(const BdrvDirtyBitmap *bitmap);
-bool bdrv_dirty_bitmap_get_persistance(BdrvDirtyBitmap *bitmap);
-bool bdrv_dirty_bitmap_qmp_locked(BdrvDirtyBitmap *bitmap);
-bool bdrv_dirty_bitmap_user_locked(BdrvDirtyBitmap *bitmap);
-bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
-BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
- BdrvDirtyBitmap *bitmap);
+bool bdrv_dirty_bitmap_get_persistence(BdrvDirtyBitmap *bitmap);
+bool bdrv_dirty_bitmap_inconsistent(const BdrvDirtyBitmap *bitmap);
+
+BdrvDirtyBitmap *bdrv_dirty_bitmap_first(BlockDriverState *bs);
+BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BdrvDirtyBitmap *bitmap);
+#define FOR_EACH_DIRTY_BITMAP(bs, bitmap) \
+for (bitmap = bdrv_dirty_bitmap_first(bs); bitmap; \
+ bitmap = bdrv_dirty_bitmap_next(bitmap))
+
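[Usage sketch, not part of the patch: the first/next pair and the FOR_EACH_DIRTY_BITMAP() wrapper replace the old bs-plus-bitmap iterator. A hypothetical caller counting named bitmaps on a node might look like this.]

    BdrvDirtyBitmap *bm;
    int named = 0;

    FOR_EACH_DIRTY_BITMAP(bs, bm) {
        if (bdrv_dirty_bitmap_name(bm)) {
            named++;
        }
    }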
char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
- uint64_t bytes);
+int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset,
+ int64_t bytes);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset,
+ int64_t bytes);
bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
- uint64_t *offset, uint64_t *bytes);
-BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BlockDriverState *bs,
- BdrvDirtyBitmap *bitmap,
+ int64_t start, int64_t end, int64_t max_dirty_count,
+ int64_t *dirty_start, int64_t *dirty_count);
+bool bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap, int64_t offset,
+ int64_t bytes, int64_t *count);
+BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
Error **errp);
#endif
diff --git a/include/block/export.h b/include/block/export.h
new file mode 100644
index 0000000000..f2fe0f8078
--- /dev/null
+++ b/include/block/export.h
@@ -0,0 +1,91 @@
+/*
+ * Declarations for block exports
+ *
+ * Copyright (c) 2012, 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Paolo Bonzini <pbonzini@redhat.com>
+ * Kevin Wolf <kwolf@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef BLOCK_EXPORT_H
+#define BLOCK_EXPORT_H
+
+#include "qapi/qapi-types-block-export.h"
+#include "qemu/queue.h"
+
+typedef struct BlockExport BlockExport;
+
+typedef struct BlockExportDriver {
+ /* The export type that this driver services */
+ BlockExportType type;
+
+ /*
+ * The size of the driver-specific state that contains BlockExport as its
+ * first field.
+ */
+ size_t instance_size;
+
+ /* Creates and starts a new block export */
+ int (*create)(BlockExport *, BlockExportOptions *, Error **);
+
+ /*
+ * Frees a removed block export. This function is only called after all
+ * references have been dropped.
+ */
+ void (*delete)(BlockExport *);
+
+ /*
+ * Start to disconnect all clients and drop other references held
+ * internally by the export driver. When the function returns, there may
+ * still be active references while the export is in the process of
+ * shutting down.
+ */
+ void (*request_shutdown)(BlockExport *);
+} BlockExportDriver;
+
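[Sketch, not part of the patch, of how an export backend fills in this vtable. The "dummy" type, its callbacks and the BLOCK_EXPORT_TYPE_DUMMY value are hypothetical; real drivers such as blk_exp_nbd follow the same shape.]

    /* Hypothetical driver definition; callback bodies elided. */
    static const BlockExportDriver blk_exp_dummy = {
        .type             = BLOCK_EXPORT_TYPE_DUMMY,  /* hypothetical QAPI value */
        .instance_size    = sizeof(DummyExport),      /* struct with BlockExport first */
        .create           = dummy_export_create,
        .delete           = dummy_export_delete,
        .request_shutdown = dummy_export_request_shutdown,
    };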
+struct BlockExport {
+ const BlockExportDriver *drv;
+
+ /* Unique identifier for the export */
+ char *id;
+
+ /*
+ * Reference count for this block export. This includes strong references
+ * both from the owner (qemu-nbd or the monitor) and clients connected to
+ * the export.
+ *
+ * Use atomics to access this field.
+ */
+ int refcount;
+
+ /*
+ * True if one of the references in refcount belongs to the user. After the
+ * user has dropped their reference, they may not e.g. remove the same
+ * export a second time (which would decrease the refcount without having
+ * it incremented first).
+ */
+ bool user_owned;
+
+ /* The AioContext whose lock protects this BlockExport object. */
+ AioContext *ctx;
+
+ /* The block device to export */
+ BlockBackend *blk;
+
+ /* List entry for block_exports */
+ QLIST_ENTRY(BlockExport) next;
+};
+
+BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp);
+BlockExport *blk_exp_find(const char *id);
+void blk_exp_ref(BlockExport *exp);
+void blk_exp_unref(BlockExport *exp);
+void blk_exp_request_shutdown(BlockExport *exp);
+void blk_exp_close_all(void);
+void blk_exp_close_all_type(BlockExportType type);
+
+#endif
diff --git a/include/block/fuse.h b/include/block/fuse.h
new file mode 100644
index 0000000000..ffa91fe364
--- /dev/null
+++ b/include/block/fuse.h
@@ -0,0 +1,30 @@
+/*
+ * Present a block device as a raw image through FUSE
+ *
+ * Copyright (c) 2020 Max Reitz <mreitz@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 or later of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef BLOCK_FUSE_H
+#define BLOCK_FUSE_H
+
+#ifdef CONFIG_FUSE
+
+#include "block/export.h"
+
+extern const BlockExportDriver blk_exp_fuse;
+
+#endif /* CONFIG_FUSE */
+
+#endif
diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h
new file mode 100644
index 0000000000..d7545e82d0
--- /dev/null
+++ b/include/block/graph-lock.h
@@ -0,0 +1,278 @@
+/*
+ * Graph lock: rwlock to protect block layer graph manipulations (add/remove
+ * edges and nodes)
+ *
+ * Copyright (c) 2022 Red Hat
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef GRAPH_LOCK_H
+#define GRAPH_LOCK_H
+
+#include "qemu/clang-tsa.h"
+
+/**
+ * Graph Lock API
+ * This API provides a rwlock used to protect block layer
+ * graph modifications like edge (BdrvChild) and node (BlockDriverState)
+ * addition and removal.
+ * Currently there is only one writer, the main loop, and many readers,
+ * mostly coroutines running in other AioContexts and thus in other threads.
+ *
+ * We distinguish between the writer (the main loop, under the BQL), which
+ * modifies the graph, and readers (all other coroutines running in various
+ * AioContexts), which traverse the graph edges, reading
+ * BlockDriverState ->parents and ->children.
+ *
+ * The writer (main loop) has an "exclusive" access, so it first waits for
+ * current read to finish, and then prevents incoming ones from
+ * entering while it has the exclusive access.
+ *
+ * The readers (coroutines in multiple AioContext) are free to
+ * access the graph as long the writer is not modifying the graph.
+ * In case it is, they go in a CoQueue and sleep until the writer
+ * is done.
+ *
+ * If a coroutine changes AioContext, the counters in the original and new
+ * AioContext are left intact, since the writer does not care where the
+ * reader is, only whether there is one.
+ * As a result, some AioContexts might have a negative reader count, to
+ * balance the positive count of the AioContext that took the lock.
+ * This also means that when an AioContext is deleted it may have a nonzero
+ * reader count. In that case we transfer the count to a global shared counter
+ * so that the writer is always aware of all readers.
+ */
+typedef struct BdrvGraphRWlock BdrvGraphRWlock;
+
+/* Dummy lock object to use for Thread Safety Analysis (TSA) */
+typedef struct TSA_CAPABILITY("mutex") BdrvGraphLock {
+} BdrvGraphLock;
+
+extern BdrvGraphLock graph_lock;
+
+/*
+ * clang doesn't check consistency in locking annotations between forward
+ * declarations and the function definition. Having the annotation on the
+ * definition, but not the declaration in a header file, may give the reader
+ * a false sense of security because the condition actually remains unchecked
+ * for callers in other source files.
+ *
+ * Therefore, as a convention, for public functions, GRAPH_RDLOCK and
+ * GRAPH_WRLOCK annotations should be present only in the header file.
+ */
+#define GRAPH_WRLOCK TSA_REQUIRES(graph_lock)
+#define GRAPH_RDLOCK TSA_REQUIRES_SHARED(graph_lock)
+#define GRAPH_UNLOCKED TSA_EXCLUDES(graph_lock)
+
+/*
+ * TSA annotations are not part of function types, so checks are defeated when
+ * using a function pointer. As a workaround, annotate function pointers with
+ * this macro that will require that the lock is at least taken while reading
+ * the pointer. In most cases this is equivalent to actually protecting the
+ * function call.
+ */
+#define GRAPH_RDLOCK_PTR TSA_GUARDED_BY(graph_lock)
+#define GRAPH_WRLOCK_PTR TSA_GUARDED_BY(graph_lock)
+#define GRAPH_UNLOCKED_PTR
+
+/*
+ * register_aiocontext:
+ * Add AioContext @ctx to the list of AioContexts.
+ * This list is used to obtain the total number of readers
+ * currently reading the graph.
+ */
+void register_aiocontext(AioContext *ctx);
+
+/*
+ * unregister_aiocontext:
+ * Removes AioContext @ctx from the list of AioContexts.
+ */
+void unregister_aiocontext(AioContext *ctx);
+
+/*
+ * bdrv_graph_wrlock:
+ * Start an exclusive write operation to modify the graph. This means we are
+ * adding or removing an edge or a node in the block layer graph. Nobody else
+ * is allowed to access the graph.
+ *
+ * Must only be called from outside bdrv_graph_co_rdlock.
+ *
+ * The wrlock can only be taken from the main loop, with BQL held, as only the
+ * main loop is allowed to modify the graph.
+ */
+void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
+bdrv_graph_wrlock(void);
+
+/*
+ * bdrv_graph_wrunlock:
+ * Write finished, reset global has_writer to 0 and restart
+ * all readers that are waiting.
+ */
+void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
+bdrv_graph_wrunlock(void);
+
+/*
+ * bdrv_graph_co_rdlock:
+ * Read the bs graph. This usually means traversing all nodes in
+ * the graph, therefore it can't happen while another thread is
+ * modifying it.
+ * Increases the reader counter of the current aiocontext,
+ * and if has_writer is set, it means that the writer is modifying
+ * the graph, therefore wait in a coroutine queue.
+ * The writer will then wake this coroutine once it is done.
+ *
+ * This lock should be taken from Iothreads (IO_CODE() class of functions)
+ * because it signals the writer that there are some
+ * readers currently running, or waits until the current
+ * write is finished before continuing.
+ * Calling this function from the Main Loop with BQL held
+ * is not necessary, since the Main Loop itself is the only
+ * writer, thus won't be able to read and write at the same time.
+ * The only exception is when we cannot take the lock in the
+ * function/coroutine itself and need to delegate to the caller (usually the
+ * main loop) to take it and wait for the coroutine to end, so that
+ * we always signal that a reader is running.
+ */
+void coroutine_fn TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_co_rdlock(void);
+
+/*
+ * bdrv_graph_co_rdunlock:
+ * Read terminated, decrease the count of readers in the current aiocontext.
+ * If the writer is waiting for reads to finish (has_writer == 1), signal
+ * the writer that we are done via aio_wait_kick() to let it continue.
+ */
+void coroutine_fn TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_co_rdunlock(void);
+
+/*
+ * bdrv_graph_rd{un}lock_main_loop:
+ * Just a placeholder to mark where the graph rdlock should be taken
+ * in the main loop. It is just asserting that we are not
+ * in a coroutine and in GLOBAL_STATE_CODE.
+ */
+void TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_rdlock_main_loop(void);
+
+void TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_rdunlock_main_loop(void);
+
+/*
+ * assert_bdrv_graph_readable:
+ * Make sure that the reader is either the main loop,
+ * or that there is at least one reader holding the rdlock.
+ * In this way an incoming writer is aware of the read and waits.
+ */
+void GRAPH_RDLOCK assert_bdrv_graph_readable(void);
+
+/*
+ * assert_bdrv_graph_writable:
+ * Make sure that the writer is the main loop and has set @has_writer,
+ * so that incoming readers will pause.
+ */
+void GRAPH_WRLOCK assert_bdrv_graph_writable(void);
+
+/*
+ * Calling this function tells TSA that we know that the lock is effectively
+ * taken even though we cannot prove it (yet) with GRAPH_RDLOCK. This can be
+ * useful in intermediate stages of a conversion to using the GRAPH_RDLOCK
+ * macro.
+ */
+static inline void TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
+assume_graph_lock(void)
+{
+}
+
+typedef struct GraphLockable { } GraphLockable;
+
+/*
+ * In C, compound literals have the lifetime of an automatic variable.
+ * In C++ it would be different, but then C++ wouldn't need QemuLockable
+ * either...
+ */
+#define GML_OBJ_() (&(GraphLockable) { })
+
+/*
+ * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the
+ * cleanup attribute and would therefore complain that the graph is never
+ * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
+ * we hold the lock while unlocking is left unchecked.
+ */
+static inline GraphLockable * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA coroutine_fn
+graph_lockable_auto_lock(GraphLockable *x)
+{
+ bdrv_graph_co_rdlock();
+ return x;
+}
+
+static inline void TSA_NO_TSA coroutine_fn
+graph_lockable_auto_unlock(GraphLockable *x)
+{
+ bdrv_graph_co_rdunlock();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockable, graph_lockable_auto_unlock)
+
+#define WITH_GRAPH_RDLOCK_GUARD_(var) \
+ for (g_autoptr(GraphLockable) var = graph_lockable_auto_lock(GML_OBJ_()); \
+ var; \
+ graph_lockable_auto_unlock(var), var = NULL)
+
+#define WITH_GRAPH_RDLOCK_GUARD() \
+ WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__))
+
+#define GRAPH_RDLOCK_GUARD(x) \
+ g_autoptr(GraphLockable) \
+ glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
+ graph_lockable_auto_lock(GML_OBJ_())
+
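[Usage sketch, not part of the patch: a coroutine that walks the graph holds the reader lock for the whole traversal. The helper is hypothetical, and the children list field names are assumed from block_int-common.h.]

    static int coroutine_fn example_co_count_children(BlockDriverState *bs)
    {
        int n = 0;

        WITH_GRAPH_RDLOCK_GUARD() {
            BdrvChild *child;
            /* The graph cannot change while the rdlock is held. */
            QLIST_FOREACH(child, &bs->children, next) {
                n++;
            }
        }
        return n;
    }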
+
+typedef struct GraphLockableMainloop { } GraphLockableMainloop;
+
+/*
+ * In C, compound literals have the lifetime of an automatic variable.
+ * In C++ it would be different, but then C++ wouldn't need QemuLockable
+ * either...
+ */
+#define GMLML_OBJ_() (&(GraphLockableMainloop) { })
+
+/*
+ * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the
+ * cleanup attribute and would therefore complain that the graph is never
+ * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
+ * we hold the lock while unlocking is left unchecked.
+ */
+static inline GraphLockableMainloop * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
+graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x)
+{
+ bdrv_graph_rdlock_main_loop();
+ return x;
+}
+
+static inline void TSA_NO_TSA
+graph_lockable_auto_unlock_mainloop(GraphLockableMainloop *x)
+{
+ bdrv_graph_rdunlock_main_loop();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockableMainloop,
+ graph_lockable_auto_unlock_mainloop)
+
+#define GRAPH_RDLOCK_GUARD_MAINLOOP(x) \
+ g_autoptr(GraphLockableMainloop) \
+ glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
+ graph_lockable_auto_lock_mainloop(GMLML_OBJ_())
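[Usage sketch, not part of the patch: in main-loop code the placeholder guard documents, and asserts, that graph reads are already safe. The helper is hypothetical; bdrv_get_node_name() is assumed from the block API.]

    static const char *example_main_loop_node_name(BlockDriverState *bs)
    {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        /* The main loop is the only writer, so reading the graph here is safe. */
        return bdrv_get_node_name(bs);
    }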
+
+#endif /* GRAPH_LOCK_H */
+
diff --git a/include/block/nbd.h b/include/block/nbd.h
index 1971b55789..4e7bd6342f 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Red Hat, Inc.
+ * Copyright Red Hat
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
*
* Network Block Device
@@ -20,51 +20,70 @@
#ifndef NBD_H
#define NBD_H
-#include "qapi/qapi-types-block.h"
+#include "block/export.h"
#include "io/channel-socket.h"
#include "crypto/tlscreds.h"
+#include "qapi/error.h"
+#include "qemu/bswap.h"
+
+typedef struct NBDExport NBDExport;
+typedef struct NBDClient NBDClient;
+typedef struct NBDClientConnection NBDClientConnection;
+typedef struct NBDMetaContexts NBDMetaContexts;
+
+extern const BlockExportDriver blk_exp_nbd;
/* Handshake phase structs - this struct is passed on the wire */
-struct NBDOption {
+typedef struct NBDOption {
uint64_t magic; /* NBD_OPTS_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t length;
-} QEMU_PACKED;
-typedef struct NBDOption NBDOption;
+} QEMU_PACKED NBDOption;
-struct NBDOptionReply {
+typedef struct NBDOptionReply {
uint64_t magic; /* NBD_REP_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t type; /* NBD_REP_* */
uint32_t length;
-} QEMU_PACKED;
-typedef struct NBDOptionReply NBDOptionReply;
+} QEMU_PACKED NBDOptionReply;
typedef struct NBDOptionReplyMetaContext {
NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
uint32_t context_id;
- /* meta context name follows */
+ /* metadata context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
-/* Transmission phase structs
- *
- * Note: these are _NOT_ the same as the network representation of an NBD
- * request and reply!
+/* Track results of negotiation */
+typedef enum NBDMode {
+ /* Keep this list in a continuum of increasing features. */
+ NBD_MODE_OLDSTYLE, /* server lacks newstyle negotiation */
+ NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
+ NBD_MODE_SIMPLE, /* newstyle but only simple replies */
+ NBD_MODE_STRUCTURED, /* newstyle, structured replies enabled */
+ NBD_MODE_EXTENDED, /* newstyle, extended headers enabled */
+} NBDMode;
+
+/* Transmission phase structs */
+
+/*
+ * Note: NBDRequest is _NOT_ the same as the network representation of an NBD
+ * request!
*/
-struct NBDRequest {
- uint64_t handle;
- uint64_t from;
- uint32_t len;
+typedef struct NBDRequest {
+ uint64_t cookie;
+ uint64_t from; /* Offset touched by the command */
+ uint64_t len; /* Effect length; 32 bit limit without extended headers */
uint16_t flags; /* NBD_CMD_FLAG_* */
- uint16_t type; /* NBD_CMD_* */
-};
-typedef struct NBDRequest NBDRequest;
+ uint16_t type; /* NBD_CMD_* */
+ NBDMode mode; /* Determines which network representation to use */
+ NBDMetaContexts *contexts; /* Used by NBD_CMD_BLOCK_STATUS */
+} NBDRequest;
typedef struct NBDSimpleReply {
uint32_t magic; /* NBD_SIMPLE_REPLY_MAGIC */
uint32_t error;
- uint64_t handle;
+ uint64_t cookie;
} QEMU_PACKED NBDSimpleReply;
/* Header of all structured replies */
@@ -72,72 +91,127 @@ typedef struct NBDStructuredReplyChunk {
uint32_t magic; /* NBD_STRUCTURED_REPLY_MAGIC */
uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
uint16_t type; /* NBD_REPLY_TYPE_* */
- uint64_t handle; /* request handle */
+ uint64_t cookie; /* request handle */
uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
+typedef struct NBDExtendedReplyChunk {
+ uint32_t magic; /* NBD_EXTENDED_REPLY_MAGIC */
+ uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
+ uint16_t type; /* NBD_REPLY_TYPE_* */
+ uint64_t cookie; /* request handle */
+ uint64_t offset; /* request offset */
+ uint64_t length; /* length of payload */
+} QEMU_PACKED NBDExtendedReplyChunk;
+
typedef union NBDReply {
NBDSimpleReply simple;
NBDStructuredReplyChunk structured;
+ NBDExtendedReplyChunk extended;
struct {
- /* @magic and @handle fields have the same offset and size both in
- * simple reply and structured reply chunk, so let them be accessible
- * without ".simple." or ".structured." specification
+ /*
+ * @magic and @cookie fields have the same offset and size in all
+ * forms of replies, so let them be accessible without ".simple.",
+ * ".structured.", or ".extended." specifications.
*/
uint32_t magic;
uint32_t _skip;
- uint64_t handle;
- } QEMU_PACKED;
+ uint64_t cookie;
+ };
} NBDReply;
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, simple.cookie) !=
+ offsetof(NBDReply, cookie));
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, structured.cookie) !=
+ offsetof(NBDReply, cookie));
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, extended.cookie) !=
+ offsetof(NBDReply, cookie));
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
- NBDStructuredReplyChunk h; /* h.length >= 9 */
+ /* header's .length >= 9 */
uint64_t offset;
/* At least one byte of data payload follows, calculated from h.length */
} QEMU_PACKED NBDStructuredReadData;
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
typedef struct NBDStructuredReadHole {
- NBDStructuredReplyChunk h; /* h.length == 12 */
+ /* header's length == 12 */
uint64_t offset;
uint32_t length;
} QEMU_PACKED NBDStructuredReadHole;
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
- NBDStructuredReplyChunk h; /* h.length >= 6 */
+ /* header's length >= 6 */
uint32_t error;
uint16_t message_length;
} QEMU_PACKED NBDStructuredError;
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
- NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
+ /* header's length >= 12 (at least one extent) */
uint32_t context_id;
- /* extents follows */
+ /* NBDExtent32 extents[] follows, array length implied by header */
} QEMU_PACKED NBDStructuredMeta;
-/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
-typedef struct NBDExtent {
+/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS */
+typedef struct NBDExtent32 {
uint32_t length;
uint32_t flags; /* NBD_STATE_* */
-} QEMU_PACKED NBDExtent;
+} QEMU_PACKED NBDExtent32;
+
+/* Header of NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
+typedef struct NBDExtendedMeta {
+ /* header's length >= 24 (at least one extent) */
+ uint32_t context_id;
+ uint32_t count; /* header length must be count * 16 + 8 */
+ /* NBDExtent64 extents[count] follows */
+} QEMU_PACKED NBDExtendedMeta;
+
+/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
+typedef struct NBDExtent64 {
+ uint64_t length;
+ uint64_t flags; /* NBD_STATE_* */
+} QEMU_PACKED NBDExtent64;
+
+/* Client payload for limiting NBD_CMD_BLOCK_STATUS reply */
+typedef struct NBDBlockStatusPayload {
+ uint64_t effect_length;
+ /* uint32_t ids[] follows, array length implied by header */
+} QEMU_PACKED NBDBlockStatusPayload;
/* Transmission (export) flags: sent from server to client during handshake,
but describe what will happen during transmission */
-#define NBD_FLAG_HAS_FLAGS (1 << 0) /* Flags are there */
-#define NBD_FLAG_READ_ONLY (1 << 1) /* Device is read-only */
-#define NBD_FLAG_SEND_FLUSH (1 << 2) /* Send FLUSH */
-#define NBD_FLAG_SEND_FUA (1 << 3) /* Send FUA (Force Unit Access) */
-#define NBD_FLAG_ROTATIONAL (1 << 4) /* Use elevator algorithm -
- rotational media */
-#define NBD_FLAG_SEND_TRIM (1 << 5) /* Send TRIM (discard) */
-#define NBD_FLAG_SEND_WRITE_ZEROES (1 << 6) /* Send WRITE_ZEROES */
-#define NBD_FLAG_SEND_DF (1 << 7) /* Send DF (Do not Fragment) */
-#define NBD_FLAG_CAN_MULTI_CONN (1 << 8) /* Multi-client cache consistent */
-#define NBD_FLAG_SEND_RESIZE (1 << 9) /* Send resize */
-#define NBD_FLAG_SEND_CACHE (1 << 10) /* Send CACHE (prefetch) */
+enum {
+ NBD_FLAG_HAS_FLAGS_BIT = 0, /* Flags are there */
+ NBD_FLAG_READ_ONLY_BIT = 1, /* Device is read-only */
+ NBD_FLAG_SEND_FLUSH_BIT = 2, /* Send FLUSH */
+ NBD_FLAG_SEND_FUA_BIT = 3, /* Send FUA (Force Unit Access) */
+ NBD_FLAG_ROTATIONAL_BIT = 4, /* Use elevator algorithm -
+ rotational media */
+ NBD_FLAG_SEND_TRIM_BIT = 5, /* Send TRIM (discard) */
+ NBD_FLAG_SEND_WRITE_ZEROES_BIT = 6, /* Send WRITE_ZEROES */
+ NBD_FLAG_SEND_DF_BIT = 7, /* Send DF (Do not Fragment) */
+ NBD_FLAG_CAN_MULTI_CONN_BIT = 8, /* Multi-client cache consistent */
+ NBD_FLAG_SEND_RESIZE_BIT = 9, /* Send resize */
+ NBD_FLAG_SEND_CACHE_BIT = 10, /* Send CACHE (prefetch) */
+ NBD_FLAG_SEND_FAST_ZERO_BIT = 11, /* FAST_ZERO flag for WRITE_ZEROES */
+ NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT = 12, /* PAYLOAD flag for BLOCK_STATUS */
+};
+
+#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
+#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
+#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
+#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
+#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
+#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
+#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
+#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
+#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
+#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
+#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
+#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
+#define NBD_FLAG_BLOCK_STAT_PAYLOAD (1 << NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT)
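[Usage sketch, not part of the patch: after negotiation a client tests these per-bit defines against NBDExportInfo.flags before issuing optional commands. The info and request variables are hypothetical.]

    if (!(info->flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        /* Server cannot zero efficiently; fall back to writing zero buffers. */
    }
    if (info->flags & NBD_FLAG_SEND_FAST_ZERO) {
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;  /* fail instead of slow zeroing */
    }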
/* New-style handshake (global) flags, sent from server to client, and
control what will happen during handshake phase. */
@@ -160,6 +234,7 @@ typedef struct NBDExtent {
#define NBD_OPT_STRUCTURED_REPLY (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT (10)
+#define NBD_OPT_EXTENDED_HEADERS (11)
/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))
@@ -177,6 +252,8 @@ typedef struct NBDExtent {
#define NBD_REP_ERR_UNKNOWN NBD_REP_ERR(6) /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN NBD_REP_ERR(7) /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8) /* Need INFO_BLOCK_SIZE */
+#define NBD_REP_ERR_TOO_BIG NBD_REP_ERR(9) /* Payload size overflow */
+#define NBD_REP_ERR_EXT_HEADER_REQD NBD_REP_ERR(10) /* Need extended headers */
/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT 0
@@ -185,11 +262,14 @@ typedef struct NBDExtent {
#define NBD_INFO_BLOCK_SIZE 3
/* Request flags, sent from client to server during transmission phase */
-#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
-#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
-#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
-#define NBD_CMD_FLAG_REQ_ONE (1 << 3) /* only one extent in BLOCK_STATUS
- * reply chunk */
+#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
+#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
+#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
+#define NBD_CMD_FLAG_REQ_ONE (1 << 3) \
+ /* only one extent in BLOCK_STATUS reply chunk */
+#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
+#define NBD_CMD_FLAG_PAYLOAD_LEN (1 << 5) \
+ /* length describes payload, not effect; only with ext header */
/* Supported request types */
enum {
@@ -208,29 +288,38 @@ enum {
/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)
-/* Maximum size of an export name. The NBD spec requires 256 and
- * suggests that servers support up to 4096, but we stick to only the
- * required size so that we can stack-allocate the names, and because
- * going larger would require an audit of more code to make sure we
- * aren't overflowing some other buffer. */
-#define NBD_MAX_NAME_SIZE 256
+/*
+ * Maximum size of a protocol string (export name, metadata context name,
+ * etc.). Use malloc rather than stack allocation for storage of a
+ * string.
+ */
+#define NBD_MAX_STRING_SIZE 4096
+
+/* Two types of request structures; a given client will only use one */
+#define NBD_REQUEST_MAGIC 0x25609513
+#define NBD_EXTENDED_REQUEST_MAGIC 0x21e41c71
-/* Two types of reply structures */
+/*
+ * Three types of reply structures, but what a client expects depends
+ * on NBD_OPT_STRUCTURED_REPLY and NBD_OPT_EXTENDED_HEADERS.
+ */
#define NBD_SIMPLE_REPLY_MAGIC 0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef
+#define NBD_EXTENDED_REPLY_MAGIC 0x6e8a278c
-/* Structured reply flags */
+/* Chunk reply flags (for structured and extended replies) */
#define NBD_REPLY_FLAG_DONE (1 << 0) /* This reply-chunk is last */
-/* Structured reply types */
+/* Chunk reply types */
#define NBD_REPLY_ERR(value) ((1 << 15) | (value))
-#define NBD_REPLY_TYPE_NONE 0
-#define NBD_REPLY_TYPE_OFFSET_DATA 1
-#define NBD_REPLY_TYPE_OFFSET_HOLE 2
-#define NBD_REPLY_TYPE_BLOCK_STATUS 5
-#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
-#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
+#define NBD_REPLY_TYPE_NONE 0
+#define NBD_REPLY_TYPE_OFFSET_DATA 1
+#define NBD_REPLY_TYPE_OFFSET_HOLE 2
+#define NBD_REPLY_TYPE_BLOCK_STATUS 5
+#define NBD_REPLY_TYPE_BLOCK_STATUS_EXT 6
+#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
+#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
@@ -239,6 +328,8 @@ enum {
/* Extent flags for qemu:dirty-bitmap in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_DIRTY (1 << 0)
+/* No flags needed for qemu:allocation-depth in NBD_REPLY_TYPE_BLOCK_STATUS */
+
static inline bool nbd_reply_type_is_error(int type)
{
return type & (1 << 15);
@@ -255,79 +346,115 @@ static inline bool nbd_reply_type_is_error(int type)
#define NBD_EINVAL 22
#define NBD_ENOSPC 28
#define NBD_EOVERFLOW 75
+#define NBD_ENOTSUP 95
#define NBD_ESHUTDOWN 108
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
-struct NBDExportInfo {
+typedef struct NBDExportInfo {
/* Set by client before nbd_receive_negotiate() */
bool request_sizes;
char *x_dirty_bitmap;
+ /* Set by client before nbd_receive_negotiate(), or by server results
+ * during nbd_receive_export_list() */
+ char *name; /* must be non-NULL */
+
/* In-out fields, set by client before nbd_receive_negotiate() and
* updated by server results during nbd_receive_negotiate() */
- bool structured_reply;
+ NBDMode mode; /* input maximum mode tolerated; output actual mode chosen */
bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */
- /* Set by server results during nbd_receive_negotiate() */
+ /* Set by server results during nbd_receive_negotiate() and
+ * nbd_receive_export_list() */
uint64_t size;
uint16_t flags;
uint32_t min_block;
uint32_t opt_block;
uint32_t max_block;
- uint32_t meta_base_allocation_id;
-};
-typedef struct NBDExportInfo NBDExportInfo;
+ uint32_t context_id;
-int nbd_receive_negotiate(QIOChannel *ioc, const char *name,
- QCryptoTLSCreds *tlscreds, const char *hostname,
- QIOChannel **outioc, NBDExportInfo *info,
- Error **errp);
+ /* Set by server results during nbd_receive_export_list() */
+ char *description;
+ int n_contexts;
+ char **contexts;
+} NBDExportInfo;
+
+int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
+ const char *hostname, QIOChannel **outioc,
+ NBDExportInfo *info, Error **errp);
+void nbd_free_export_list(NBDExportInfo *info, int count);
+int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
+ const char *hostname, NBDExportInfo **info,
+ Error **errp);
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
Error **errp);
int nbd_send_request(QIOChannel *ioc, NBDRequest *request);
-int nbd_receive_reply(QIOChannel *ioc, NBDReply *reply, Error **errp);
+int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
+ NBDReply *reply, NBDMode mode,
+ Error **errp);
int nbd_client(int fd);
int nbd_disconnect(int fd);
int nbd_errno_to_system_errno(int err);
-typedef struct NBDExport NBDExport;
-typedef struct NBDClient NBDClient;
-
-NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset, off_t size,
- const char *name, const char *description,
- const char *bitmap, uint16_t nbdflags,
- void (*close)(NBDExport *), bool writethrough,
- BlockBackend *on_eject_blk, Error **errp);
-void nbd_export_close(NBDExport *exp);
-void nbd_export_remove(NBDExport *exp, NbdServerRemoveMode mode, Error **errp);
-void nbd_export_get(NBDExport *exp);
-void nbd_export_put(NBDExport *exp);
-
-BlockBackend *nbd_export_get_blockdev(NBDExport *exp);
+void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);
+AioContext *nbd_export_aio_context(NBDExport *exp);
NBDExport *nbd_export_find(const char *name);
-void nbd_export_close_all(void);
void nbd_client_new(QIOChannelSocket *sioc,
QCryptoTLSCreds *tlscreds,
- const char *tlsaclname,
+ const char *tlsauthz,
void (*close_fn)(NBDClient *, bool));
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);
+void nbd_server_is_qemu_nbd(int max_connections);
+bool nbd_server_is_running(void);
+int nbd_server_max_connections(void);
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
+ const char *tls_authz, uint32_t max_connections,
Error **errp);
+void nbd_server_start_options(NbdServerOptions *arg, Error **errp);
/* nbd_read
* Reads @size bytes from @ioc. Returns 0 on success.
*/
static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
- Error **errp)
+ const char *desc, Error **errp)
{
- return qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;
+ ERRP_GUARD();
+ int ret = qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;
+
+ if (ret < 0) {
+ if (desc) {
+ error_prepend(errp, "Failed to read %s: ", desc);
+ }
+ return ret;
+ }
+
+ return 0;
}
+#define DEF_NBD_READ_N(bits) \
+static inline int nbd_read##bits(QIOChannel *ioc, \
+ uint##bits##_t *val, \
+ const char *desc, Error **errp) \
+{ \
+ int ret = nbd_read(ioc, val, sizeof(*val), desc, errp); \
+ if (ret < 0) { \
+ return ret; \
+ } \
+ *val = be##bits##_to_cpu(*val); \
+ return 0; \
+}
+
+DEF_NBD_READ_N(16) /* Defines nbd_read16(). */
+DEF_NBD_READ_N(32) /* Defines nbd_read32(). */
+DEF_NBD_READ_N(64) /* Defines nbd_read64(). */
+
+#undef DEF_NBD_READ_N
+
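[Usage sketch, not part of the patch: the generated helpers read a big-endian integer, convert it to host order and prepend @desc to any error. The surrounding variables are hypothetical.]

    uint32_t opt_len;

    if (nbd_read32(ioc, &opt_len, "option length", errp) < 0) {
        return -EIO;
    }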
static inline bool nbd_reply_is_simple(NBDReply *reply)
{
return reply->magic == NBD_SIMPLE_REPLY_MAGIC;
@@ -344,5 +471,23 @@ const char *nbd_rep_lookup(uint32_t rep);
const char *nbd_info_lookup(uint16_t info);
const char *nbd_cmd_lookup(uint16_t info);
const char *nbd_err_lookup(int err);
+const char *nbd_mode_lookup(NBDMode mode);
+
+/* nbd/client-connection.c */
+void nbd_client_connection_enable_retry(NBDClientConnection *conn);
+
+NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
+ bool do_negotiation,
+ const char *export_name,
+ const char *x_dirty_bitmap,
+ QCryptoTLSCreds *tlscreds,
+ const char *tlshostname);
+void nbd_client_connection_release(NBDClientConnection *conn);
+
+QIOChannel *coroutine_fn
+nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
+ bool blocking, Error **errp);
+
+void nbd_co_establish_connection_cancel(NBDClientConnection *conn);
#endif
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 849a6f3fa3..bb231d0b9a 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -1,32 +1,95 @@
#ifndef BLOCK_NVME_H
#define BLOCK_NVME_H
-typedef struct NvmeBar {
+#include "hw/registerfields.h"
+
+typedef struct QEMU_PACKED NvmeBar {
uint64_t cap;
uint32_t vs;
uint32_t intms;
uint32_t intmc;
uint32_t cc;
- uint32_t rsvd1;
+ uint8_t rsvd24[4];
uint32_t csts;
- uint32_t nssrc;
+ uint32_t nssr;
uint32_t aqa;
uint64_t asq;
uint64_t acq;
uint32_t cmbloc;
uint32_t cmbsz;
+ uint32_t bpinfo;
+ uint32_t bprsel;
+ uint64_t bpmbl;
+ uint64_t cmbmsc;
+ uint32_t cmbsts;
+ uint8_t rsvd92[3492];
+ uint32_t pmrcap;
+ uint32_t pmrctl;
+ uint32_t pmrsts;
+ uint32_t pmrebs;
+ uint32_t pmrswtp;
+ uint32_t pmrmscl;
+ uint32_t pmrmscu;
+ uint8_t css[484];
} NvmeBar;
+enum NvmeBarRegs {
+ NVME_REG_CAP = offsetof(NvmeBar, cap),
+ NVME_REG_VS = offsetof(NvmeBar, vs),
+ NVME_REG_INTMS = offsetof(NvmeBar, intms),
+ NVME_REG_INTMC = offsetof(NvmeBar, intmc),
+ NVME_REG_CC = offsetof(NvmeBar, cc),
+ NVME_REG_CSTS = offsetof(NvmeBar, csts),
+ NVME_REG_NSSR = offsetof(NvmeBar, nssr),
+ NVME_REG_AQA = offsetof(NvmeBar, aqa),
+ NVME_REG_ASQ = offsetof(NvmeBar, asq),
+ NVME_REG_ACQ = offsetof(NvmeBar, acq),
+ NVME_REG_CMBLOC = offsetof(NvmeBar, cmbloc),
+ NVME_REG_CMBSZ = offsetof(NvmeBar, cmbsz),
+ NVME_REG_BPINFO = offsetof(NvmeBar, bpinfo),
+ NVME_REG_BPRSEL = offsetof(NvmeBar, bprsel),
+ NVME_REG_BPMBL = offsetof(NvmeBar, bpmbl),
+ NVME_REG_CMBMSC = offsetof(NvmeBar, cmbmsc),
+ NVME_REG_CMBSTS = offsetof(NvmeBar, cmbsts),
+ NVME_REG_PMRCAP = offsetof(NvmeBar, pmrcap),
+ NVME_REG_PMRCTL = offsetof(NvmeBar, pmrctl),
+ NVME_REG_PMRSTS = offsetof(NvmeBar, pmrsts),
+ NVME_REG_PMREBS = offsetof(NvmeBar, pmrebs),
+ NVME_REG_PMRSWTP = offsetof(NvmeBar, pmrswtp),
+ NVME_REG_PMRMSCL = offsetof(NvmeBar, pmrmscl),
+ NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu),
+};
+
+typedef struct QEMU_PACKED NvmeEndGrpLog {
+ uint8_t critical_warning;
+ uint8_t rsvd[2];
+ uint8_t avail_spare;
+ uint8_t avail_spare_thres;
+ uint8_t percet_used;
+ uint8_t rsvd1[26];
+ uint64_t end_estimate[2];
+ uint64_t data_units_read[2];
+ uint64_t data_units_written[2];
+ uint64_t media_units_written[2];
+ uint64_t host_read_commands[2];
+ uint64_t host_write_commands[2];
+ uint64_t media_integrity_errors[2];
+ uint64_t no_err_info_log_entries[2];
+ uint8_t rsvd2[352];
+} NvmeEndGrpLog;
+
enum NvmeCapShift {
CAP_MQES_SHIFT = 0,
CAP_CQR_SHIFT = 16,
CAP_AMS_SHIFT = 17,
CAP_TO_SHIFT = 24,
CAP_DSTRD_SHIFT = 32,
- CAP_NSSRS_SHIFT = 33,
+ CAP_NSSRS_SHIFT = 36,
CAP_CSS_SHIFT = 37,
CAP_MPSMIN_SHIFT = 48,
CAP_MPSMAX_SHIFT = 52,
+ CAP_PMRS_SHIFT = 56,
+ CAP_CMBS_SHIFT = 57,
};
enum NvmeCapMask {
@@ -39,6 +102,8 @@ enum NvmeCapMask {
CAP_CSS_MASK = 0xff,
CAP_MPSMIN_MASK = 0xf,
CAP_MPSMAX_MASK = 0xf,
+ CAP_PMRS_MASK = 0x1,
+ CAP_CMBS_MASK = 0x1,
};
#define NVME_CAP_MQES(cap) (((cap) >> CAP_MQES_SHIFT) & CAP_MQES_MASK)
@@ -50,25 +115,37 @@ enum NvmeCapMask {
#define NVME_CAP_CSS(cap) (((cap) >> CAP_CSS_SHIFT) & CAP_CSS_MASK)
#define NVME_CAP_MPSMIN(cap)(((cap) >> CAP_MPSMIN_SHIFT) & CAP_MPSMIN_MASK)
#define NVME_CAP_MPSMAX(cap)(((cap) >> CAP_MPSMAX_SHIFT) & CAP_MPSMAX_MASK)
+#define NVME_CAP_PMRS(cap) (((cap) >> CAP_PMRS_SHIFT) & CAP_PMRS_MASK)
+#define NVME_CAP_CMBS(cap) (((cap) >> CAP_CMBS_SHIFT) & CAP_CMBS_MASK)
-#define NVME_CAP_SET_MQES(cap, val) (cap |= (uint64_t)(val & CAP_MQES_MASK) \
- << CAP_MQES_SHIFT)
-#define NVME_CAP_SET_CQR(cap, val) (cap |= (uint64_t)(val & CAP_CQR_MASK) \
- << CAP_CQR_SHIFT)
-#define NVME_CAP_SET_AMS(cap, val) (cap |= (uint64_t)(val & CAP_AMS_MASK) \
- << CAP_AMS_SHIFT)
-#define NVME_CAP_SET_TO(cap, val) (cap |= (uint64_t)(val & CAP_TO_MASK) \
- << CAP_TO_SHIFT)
-#define NVME_CAP_SET_DSTRD(cap, val) (cap |= (uint64_t)(val & CAP_DSTRD_MASK) \
- << CAP_DSTRD_SHIFT)
-#define NVME_CAP_SET_NSSRS(cap, val) (cap |= (uint64_t)(val & CAP_NSSRS_MASK) \
- << CAP_NSSRS_SHIFT)
-#define NVME_CAP_SET_CSS(cap, val) (cap |= (uint64_t)(val & CAP_CSS_MASK) \
- << CAP_CSS_SHIFT)
-#define NVME_CAP_SET_MPSMIN(cap, val) (cap |= (uint64_t)(val & CAP_MPSMIN_MASK)\
- << CAP_MPSMIN_SHIFT)
-#define NVME_CAP_SET_MPSMAX(cap, val) (cap |= (uint64_t)(val & CAP_MPSMAX_MASK)\
- << CAP_MPSMAX_SHIFT)
+#define NVME_CAP_SET_MQES(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_MQES_MASK) << CAP_MQES_SHIFT)
+#define NVME_CAP_SET_CQR(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_CQR_MASK) << CAP_CQR_SHIFT)
+#define NVME_CAP_SET_AMS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_AMS_MASK) << CAP_AMS_SHIFT)
+#define NVME_CAP_SET_TO(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_TO_MASK) << CAP_TO_SHIFT)
+#define NVME_CAP_SET_DSTRD(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_DSTRD_MASK) << CAP_DSTRD_SHIFT)
+#define NVME_CAP_SET_NSSRS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_NSSRS_MASK) << CAP_NSSRS_SHIFT)
+#define NVME_CAP_SET_CSS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_CSS_MASK) << CAP_CSS_SHIFT)
+#define NVME_CAP_SET_MPSMIN(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_MPSMIN_MASK) << CAP_MPSMIN_SHIFT)
+#define NVME_CAP_SET_MPSMAX(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_MPSMAX_MASK) << CAP_MPSMAX_SHIFT)
+#define NVME_CAP_SET_PMRS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_PMRS_MASK) << CAP_PMRS_SHIFT)
+#define NVME_CAP_SET_CMBS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_CMBS_MASK) << CAP_CMBS_SHIFT)
+
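[Usage sketch, not part of the patch: a controller model composes the read-only CAP register with the SET macros. The values below are illustrative only.]

    uint64_t cap = 0;

    NVME_CAP_SET_MQES(cap, 0x7ff);   /* up to 2048 queue entries (0-based) */
    NVME_CAP_SET_CQR(cap, 1);        /* contiguous queues required */
    NVME_CAP_SET_TO(cap, 0xf);       /* 7.5 s ready timeout (500 ms units) */
    NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM | NVME_CAP_CSS_CSI_SUPP);
    NVME_CAP_SET_MPSMAX(cap, 4);     /* 64 KiB maximum memory page size */
    NVME_CAP_SET_CMBS(cap, 1);       /* controller memory buffer supported */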
+enum NvmeCapCss {
+ NVME_CAP_CSS_NVM = 1 << 0,
+ NVME_CAP_CSS_CSI_SUPP = 1 << 6,
+ NVME_CAP_CSS_ADMIN_ONLY = 1 << 7,
+};
enum NvmeCcShift {
CC_EN_SHIFT = 0,
@@ -98,6 +175,27 @@ enum NvmeCcMask {
#define NVME_CC_IOSQES(cc) ((cc >> CC_IOSQES_SHIFT) & CC_IOSQES_MASK)
#define NVME_CC_IOCQES(cc) ((cc >> CC_IOCQES_SHIFT) & CC_IOCQES_MASK)
+enum NvmeCcCss {
+ NVME_CC_CSS_NVM = 0x0,
+ NVME_CC_CSS_CSI = 0x6,
+ NVME_CC_CSS_ADMIN_ONLY = 0x7,
+};
+
+#define NVME_SET_CC_EN(cc, val) \
+ (cc |= (uint32_t)((val) & CC_EN_MASK) << CC_EN_SHIFT)
+#define NVME_SET_CC_CSS(cc, val) \
+ (cc |= (uint32_t)((val) & CC_CSS_MASK) << CC_CSS_SHIFT)
+#define NVME_SET_CC_MPS(cc, val) \
+ (cc |= (uint32_t)((val) & CC_MPS_MASK) << CC_MPS_SHIFT)
+#define NVME_SET_CC_AMS(cc, val) \
+ (cc |= (uint32_t)((val) & CC_AMS_MASK) << CC_AMS_SHIFT)
+#define NVME_SET_CC_SHN(cc, val) \
+ (cc |= (uint32_t)((val) & CC_SHN_MASK) << CC_SHN_SHIFT)
+#define NVME_SET_CC_IOSQES(cc, val) \
+ (cc |= (uint32_t)((val) & CC_IOSQES_MASK) << CC_IOSQES_SHIFT)
+#define NVME_SET_CC_IOCQES(cc, val) \
+ (cc |= (uint32_t)((val) & CC_IOCQES_MASK) << CC_IOCQES_SHIFT)
+
enum NvmeCstsShift {
CSTS_RDY_SHIFT = 0,
CSTS_CFS_SHIFT = 1,
@@ -140,25 +238,64 @@ enum NvmeAqaMask {
#define NVME_AQA_ACQS(aqa) ((aqa >> AQA_ACQS_SHIFT) & AQA_ACQS_MASK)
enum NvmeCmblocShift {
- CMBLOC_BIR_SHIFT = 0,
- CMBLOC_OFST_SHIFT = 12,
+ CMBLOC_BIR_SHIFT = 0,
+ CMBLOC_CQMMS_SHIFT = 3,
+ CMBLOC_CQPDS_SHIFT = 4,
+ CMBLOC_CDPMLS_SHIFT = 5,
+ CMBLOC_CDPCILS_SHIFT = 6,
+ CMBLOC_CDMMMS_SHIFT = 7,
+ CMBLOC_CQDA_SHIFT = 8,
+ CMBLOC_OFST_SHIFT = 12,
};
enum NvmeCmblocMask {
- CMBLOC_BIR_MASK = 0x7,
- CMBLOC_OFST_MASK = 0xfffff,
+ CMBLOC_BIR_MASK = 0x7,
+ CMBLOC_CQMMS_MASK = 0x1,
+ CMBLOC_CQPDS_MASK = 0x1,
+ CMBLOC_CDPMLS_MASK = 0x1,
+ CMBLOC_CDPCILS_MASK = 0x1,
+ CMBLOC_CDMMMS_MASK = 0x1,
+ CMBLOC_CQDA_MASK = 0x1,
+ CMBLOC_OFST_MASK = 0xfffff,
};
-#define NVME_CMBLOC_BIR(cmbloc) ((cmbloc >> CMBLOC_BIR_SHIFT) & \
- CMBLOC_BIR_MASK)
-#define NVME_CMBLOC_OFST(cmbloc)((cmbloc >> CMBLOC_OFST_SHIFT) & \
- CMBLOC_OFST_MASK)
+#define NVME_CMBLOC_BIR(cmbloc) \
+ ((cmbloc >> CMBLOC_BIR_SHIFT) & CMBLOC_BIR_MASK)
+#define NVME_CMBLOC_CQMMS(cmbloc) \
+ ((cmbloc >> CMBLOC_CQMMS_SHIFT) & CMBLOC_CQMMS_MASK)
+#define NVME_CMBLOC_CQPDS(cmbloc) \
+ ((cmbloc >> CMBLOC_CQPDS_SHIFT) & CMBLOC_CQPDS_MASK)
+#define NVME_CMBLOC_CDPMLS(cmbloc) \
+ ((cmbloc >> CMBLOC_CDPMLS_SHIFT) & CMBLOC_CDPMLS_MASK)
+#define NVME_CMBLOC_CDPCILS(cmbloc) \
+ ((cmbloc >> CMBLOC_CDPCILS_SHIFT) & CMBLOC_CDPCILS_MASK)
+#define NVME_CMBLOC_CDMMMS(cmbloc) \
+ ((cmbloc >> CMBLOC_CDMMMS_SHIFT) & CMBLOC_CDMMMS_MASK)
+#define NVME_CMBLOC_CQDA(cmbloc) \
+ ((cmbloc >> CMBLOC_CQDA_SHIFT) & CMBLOC_CQDA_MASK)
+#define NVME_CMBLOC_OFST(cmbloc) \
+ ((cmbloc >> CMBLOC_OFST_SHIFT) & CMBLOC_OFST_MASK)
-#define NVME_CMBLOC_SET_BIR(cmbloc, val) \
+#define NVME_CMBLOC_SET_BIR(cmbloc, val) \
(cmbloc |= (uint64_t)(val & CMBLOC_BIR_MASK) << CMBLOC_BIR_SHIFT)
+#define NVME_CMBLOC_SET_CQMMS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CQMMS_MASK) << CMBLOC_CQMMS_SHIFT)
+#define NVME_CMBLOC_SET_CQPDS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CQPDS_MASK) << CMBLOC_CQPDS_SHIFT)
+#define NVME_CMBLOC_SET_CDPMLS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CDPMLS_MASK) << CMBLOC_CDPMLS_SHIFT)
+#define NVME_CMBLOC_SET_CDPCILS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CDPCILS_MASK) << CMBLOC_CDPCILS_SHIFT)
+#define NVME_CMBLOC_SET_CDMMMS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CDMMMS_MASK) << CMBLOC_CDMMMS_SHIFT)
+#define NVME_CMBLOC_SET_CQDA(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CQDA_MASK) << CMBLOC_CQDA_SHIFT)
#define NVME_CMBLOC_SET_OFST(cmbloc, val) \
(cmbloc |= (uint64_t)(val & CMBLOC_OFST_MASK) << CMBLOC_OFST_SHIFT)
+
enum NvmeCmbszShift {
CMBSZ_SQS_SHIFT = 0,
CMBSZ_CQS_SHIFT = 1,
@@ -205,15 +342,254 @@ enum NvmeCmbszMask {
#define NVME_CMBSZ_GETSIZE(cmbsz) \
(NVME_CMBSZ_SZ(cmbsz) * (1 << (12 + 4 * NVME_CMBSZ_SZU(cmbsz))))
-typedef struct NvmeCmd {
+enum NvmeCmbmscShift {
+ CMBMSC_CRE_SHIFT = 0,
+ CMBMSC_CMSE_SHIFT = 1,
+ CMBMSC_CBA_SHIFT = 12,
+};
+
+enum NvmeCmbmscMask {
+ CMBMSC_CRE_MASK = 0x1,
+ CMBMSC_CMSE_MASK = 0x1,
+ CMBMSC_CBA_MASK = ((1ULL << 52) - 1),
+};
+
+#define NVME_CMBMSC_CRE(cmbmsc) \
+ ((cmbmsc >> CMBMSC_CRE_SHIFT) & CMBMSC_CRE_MASK)
+#define NVME_CMBMSC_CMSE(cmbmsc) \
+ ((cmbmsc >> CMBMSC_CMSE_SHIFT) & CMBMSC_CMSE_MASK)
+#define NVME_CMBMSC_CBA(cmbmsc) \
+ ((cmbmsc >> CMBMSC_CBA_SHIFT) & CMBMSC_CBA_MASK)
+
+#define NVME_CMBMSC_SET_CRE(cmbmsc, val) \
+ (cmbmsc |= (uint64_t)(val & CMBMSC_CRE_MASK) << CMBMSC_CRE_SHIFT)
+#define NVME_CMBMSC_SET_CMSE(cmbmsc, val) \
+ (cmbmsc |= (uint64_t)(val & CMBMSC_CMSE_MASK) << CMBMSC_CMSE_SHIFT)
+#define NVME_CMBMSC_SET_CBA(cmbmsc, val) \
+ (cmbmsc |= (uint64_t)(val & CMBMSC_CBA_MASK) << CMBMSC_CBA_SHIFT)
+
+enum NvmeCmbstsShift {
+ CMBSTS_CBAI_SHIFT = 0,
+};
+enum NvmeCmbstsMask {
+ CMBSTS_CBAI_MASK = 0x1,
+};
+
+#define NVME_CMBSTS_CBAI(cmbsts) \
+ ((cmbsts >> CMBSTS_CBAI_SHIFT) & CMBSTS_CBAI_MASK)
+
+#define NVME_CMBSTS_SET_CBAI(cmbsts, val) \
+ (cmbsts |= (uint64_t)(val & CMBSTS_CBAI_MASK) << CMBSTS_CBAI_SHIFT)
+
+enum NvmePmrcapShift {
+ PMRCAP_RDS_SHIFT = 3,
+ PMRCAP_WDS_SHIFT = 4,
+ PMRCAP_BIR_SHIFT = 5,
+ PMRCAP_PMRTU_SHIFT = 8,
+ PMRCAP_PMRWBM_SHIFT = 10,
+ PMRCAP_PMRTO_SHIFT = 16,
+ PMRCAP_CMSS_SHIFT = 24,
+};
+
+enum NvmePmrcapMask {
+ PMRCAP_RDS_MASK = 0x1,
+ PMRCAP_WDS_MASK = 0x1,
+ PMRCAP_BIR_MASK = 0x7,
+ PMRCAP_PMRTU_MASK = 0x3,
+ PMRCAP_PMRWBM_MASK = 0xf,
+ PMRCAP_PMRTO_MASK = 0xff,
+ PMRCAP_CMSS_MASK = 0x1,
+};
+
+#define NVME_PMRCAP_RDS(pmrcap) \
+ ((pmrcap >> PMRCAP_RDS_SHIFT) & PMRCAP_RDS_MASK)
+#define NVME_PMRCAP_WDS(pmrcap) \
+ ((pmrcap >> PMRCAP_WDS_SHIFT) & PMRCAP_WDS_MASK)
+#define NVME_PMRCAP_BIR(pmrcap) \
+ ((pmrcap >> PMRCAP_BIR_SHIFT) & PMRCAP_BIR_MASK)
+#define NVME_PMRCAP_PMRTU(pmrcap) \
+ ((pmrcap >> PMRCAP_PMRTU_SHIFT) & PMRCAP_PMRTU_MASK)
+#define NVME_PMRCAP_PMRWBM(pmrcap) \
+ ((pmrcap >> PMRCAP_PMRWBM_SHIFT) & PMRCAP_PMRWBM_MASK)
+#define NVME_PMRCAP_PMRTO(pmrcap) \
+ ((pmrcap >> PMRCAP_PMRTO_SHIFT) & PMRCAP_PMRTO_MASK)
+#define NVME_PMRCAP_CMSS(pmrcap) \
+ ((pmrcap >> PMRCAP_CMSS_SHIFT) & PMRCAP_CMSS_MASK)
+
+#define NVME_PMRCAP_SET_RDS(pmrcap, val) \
+ (pmrcap |= (uint64_t)(val & PMRCAP_RDS_MASK) << PMRCAP_RDS_SHIFT)
+#define NVME_PMRCAP_SET_WDS(pmrcap, val) \
+ (pmrcap |= (uint64_t)(val & PMRCAP_WDS_MASK) << PMRCAP_WDS_SHIFT)
+#define NVME_PMRCAP_SET_BIR(pmrcap, val) \
+ (pmrcap |= (uint64_t)(val & PMRCAP_BIR_MASK) << PMRCAP_BIR_SHIFT)
+#define NVME_PMRCAP_SET_PMRTU(pmrcap, val) \
+ (pmrcap |= (uint64_t)(val & PMRCAP_PMRTU_MASK) << PMRCAP_PMRTU_SHIFT)
+#define NVME_PMRCAP_SET_PMRWBM(pmrcap, val) \
+ (pmrcap |= (uint64_t)(val & PMRCAP_PMRWBM_MASK) << PMRCAP_PMRWBM_SHIFT)
+#define NVME_PMRCAP_SET_PMRTO(pmrcap, val) \
+ (pmrcap |= (uint64_t)(val & PMRCAP_PMRTO_MASK) << PMRCAP_PMRTO_SHIFT)
+#define NVME_PMRCAP_SET_CMSS(pmrcap, val) \
+ (pmrcap |= (uint64_t)(val & PMRCAP_CMSS_MASK) << PMRCAP_CMSS_SHIFT)
+
+enum NvmePmrctlShift {
+ PMRCTL_EN_SHIFT = 0,
+};
+
+enum NvmePmrctlMask {
+ PMRCTL_EN_MASK = 0x1,
+};
+
+#define NVME_PMRCTL_EN(pmrctl) ((pmrctl >> PMRCTL_EN_SHIFT) & PMRCTL_EN_MASK)
+
+#define NVME_PMRCTL_SET_EN(pmrctl, val) \
+ (pmrctl |= (uint64_t)(val & PMRCTL_EN_MASK) << PMRCTL_EN_SHIFT)
+
+enum NvmePmrstsShift {
+ PMRSTS_ERR_SHIFT = 0,
+ PMRSTS_NRDY_SHIFT = 8,
+ PMRSTS_HSTS_SHIFT = 9,
+ PMRSTS_CBAI_SHIFT = 12,
+};
+
+enum NvmePmrstsMask {
+ PMRSTS_ERR_MASK = 0xff,
+ PMRSTS_NRDY_MASK = 0x1,
+ PMRSTS_HSTS_MASK = 0x7,
+ PMRSTS_CBAI_MASK = 0x1,
+};
+
+#define NVME_PMRSTS_ERR(pmrsts) \
+ ((pmrsts >> PMRSTS_ERR_SHIFT) & PMRSTS_ERR_MASK)
+#define NVME_PMRSTS_NRDY(pmrsts) \
+ ((pmrsts >> PMRSTS_NRDY_SHIFT) & PMRSTS_NRDY_MASK)
+#define NVME_PMRSTS_HSTS(pmrsts) \
+ ((pmrsts >> PMRSTS_HSTS_SHIFT) & PMRSTS_HSTS_MASK)
+#define NVME_PMRSTS_CBAI(pmrsts) \
+ ((pmrsts >> PMRSTS_CBAI_SHIFT) & PMRSTS_CBAI_MASK)
+
+#define NVME_PMRSTS_SET_ERR(pmrsts, val) \
+ (pmrsts |= (uint64_t)(val & PMRSTS_ERR_MASK) << PMRSTS_ERR_SHIFT)
+#define NVME_PMRSTS_SET_NRDY(pmrsts, val) \
+ (pmrsts |= (uint64_t)(val & PMRSTS_NRDY_MASK) << PMRSTS_NRDY_SHIFT)
+#define NVME_PMRSTS_SET_HSTS(pmrsts, val) \
+ (pmrsts |= (uint64_t)(val & PMRSTS_HSTS_MASK) << PMRSTS_HSTS_SHIFT)
+#define NVME_PMRSTS_SET_CBAI(pmrsts, val) \
+ (pmrsts |= (uint64_t)(val & PMRSTS_CBAI_MASK) << PMRSTS_CBAI_SHIFT)
+
+enum NvmePmrebsShift {
+ PMREBS_PMRSZU_SHIFT = 0,
+ PMREBS_RBB_SHIFT = 4,
+ PMREBS_PMRWBZ_SHIFT = 8,
+};
+
+enum NvmePmrebsMask {
+ PMREBS_PMRSZU_MASK = 0xf,
+ PMREBS_RBB_MASK = 0x1,
+ PMREBS_PMRWBZ_MASK = 0xffffff,
+};
+
+#define NVME_PMREBS_PMRSZU(pmrebs) \
+ ((pmrebs >> PMREBS_PMRSZU_SHIFT) & PMREBS_PMRSZU_MASK)
+#define NVME_PMREBS_RBB(pmrebs) \
+ ((pmrebs >> PMREBS_RBB_SHIFT) & PMREBS_RBB_MASK)
+#define NVME_PMREBS_PMRWBZ(pmrebs) \
+ ((pmrebs >> PMREBS_PMRWBZ_SHIFT) & PMREBS_PMRWBZ_MASK)
+
+#define NVME_PMREBS_SET_PMRSZU(pmrebs, val) \
+ (pmrebs |= (uint64_t)(val & PMREBS_PMRSZU_MASK) << PMREBS_PMRSZU_SHIFT)
+#define NVME_PMREBS_SET_RBB(pmrebs, val) \
+ (pmrebs |= (uint64_t)(val & PMREBS_RBB_MASK) << PMREBS_RBB_SHIFT)
+#define NVME_PMREBS_SET_PMRWBZ(pmrebs, val) \
+ (pmrebs |= (uint64_t)(val & PMREBS_PMRWBZ_MASK) << PMREBS_PMRWBZ_SHIFT)
+
+enum NvmePmrswtpShift {
+ PMRSWTP_PMRSWTU_SHIFT = 0,
+ PMRSWTP_PMRSWTV_SHIFT = 8,
+};
+
+enum NvmePmrswtpMask {
+ PMRSWTP_PMRSWTU_MASK = 0xf,
+ PMRSWTP_PMRSWTV_MASK = 0xffffff,
+};
+
+#define NVME_PMRSWTP_PMRSWTU(pmrswtp) \
+ ((pmrswtp >> PMRSWTP_PMRSWTU_SHIFT) & PMRSWTP_PMRSWTU_MASK)
+#define NVME_PMRSWTP_PMRSWTV(pmrswtp) \
+ ((pmrswtp >> PMRSWTP_PMRSWTV_SHIFT) & PMRSWTP_PMRSWTV_MASK)
+
+#define NVME_PMRSWTP_SET_PMRSWTU(pmrswtp, val) \
+ (pmrswtp |= (uint64_t)(val & PMRSWTP_PMRSWTU_MASK) << PMRSWTP_PMRSWTU_SHIFT)
+#define NVME_PMRSWTP_SET_PMRSWTV(pmrswtp, val) \
+ (pmrswtp |= (uint64_t)(val & PMRSWTP_PMRSWTV_MASK) << PMRSWTP_PMRSWTV_SHIFT)
+
+enum NvmePmrmsclShift {
+ PMRMSCL_CMSE_SHIFT = 1,
+ PMRMSCL_CBA_SHIFT = 12,
+};
+
+enum NvmePmrmsclMask {
+ PMRMSCL_CMSE_MASK = 0x1,
+ PMRMSCL_CBA_MASK = 0xfffff,
+};
+
+#define NVME_PMRMSCL_CMSE(pmrmscl) \
+ ((pmrmscl >> PMRMSCL_CMSE_SHIFT) & PMRMSCL_CMSE_MASK)
+#define NVME_PMRMSCL_CBA(pmrmscl) \
+ ((pmrmscl >> PMRMSCL_CBA_SHIFT) & PMRMSCL_CBA_MASK)
+
+#define NVME_PMRMSCL_SET_CMSE(pmrmscl, val) \
+ (pmrmscl |= (uint32_t)(val & PMRMSCL_CMSE_MASK) << PMRMSCL_CMSE_SHIFT)
+#define NVME_PMRMSCL_SET_CBA(pmrmscl, val) \
+ (pmrmscl |= (uint32_t)(val & PMRMSCL_CBA_MASK) << PMRMSCL_CBA_SHIFT)
+
+enum NvmeSglDescriptorType {
+ NVME_SGL_DESCR_TYPE_DATA_BLOCK = 0x0,
+ NVME_SGL_DESCR_TYPE_BIT_BUCKET = 0x1,
+ NVME_SGL_DESCR_TYPE_SEGMENT = 0x2,
+ NVME_SGL_DESCR_TYPE_LAST_SEGMENT = 0x3,
+ NVME_SGL_DESCR_TYPE_KEYED_DATA_BLOCK = 0x4,
+
+ NVME_SGL_DESCR_TYPE_VENDOR_SPECIFIC = 0xf,
+};
+
+enum NvmeSglDescriptorSubtype {
+ NVME_SGL_DESCR_SUBTYPE_ADDRESS = 0x0,
+};
+
+typedef struct QEMU_PACKED NvmeSglDescriptor {
+ uint64_t addr;
+ uint32_t len;
+ uint8_t rsvd[3];
+ uint8_t type;
+} NvmeSglDescriptor;
+
+#define NVME_SGL_TYPE(type) ((type >> 4) & 0xf)
+#define NVME_SGL_SUBTYPE(type) (type & 0xf)
+
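As an illustration of the type/subtype split encoded by the two macros above (the helper name is hypothetical; assumes this header is included):

    static bool example_sgl_is_plain_data_block(const NvmeSglDescriptor *sgl)
    {
        /* the upper nibble of the last byte holds the descriptor type,
         * the lower nibble the subtype */
        return NVME_SGL_TYPE(sgl->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK &&
               NVME_SGL_SUBTYPE(sgl->type) == NVME_SGL_DESCR_SUBTYPE_ADDRESS;
    }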
+typedef union NvmeCmdDptr {
+ struct {
+ uint64_t prp1;
+ uint64_t prp2;
+ };
+
+ NvmeSglDescriptor sgl;
+} NvmeCmdDptr;
+
+enum NvmePsdt {
+ NVME_PSDT_PRP = 0x0,
+ NVME_PSDT_SGL_MPTR_CONTIGUOUS = 0x1,
+ NVME_PSDT_SGL_MPTR_SGL = 0x2,
+};
+
+typedef struct QEMU_PACKED NvmeCmd {
uint8_t opcode;
- uint8_t fuse;
+ uint8_t flags;
uint16_t cid;
uint32_t nsid;
uint64_t res1;
uint64_t mptr;
- uint64_t prp1;
- uint64_t prp2;
+ NvmeCmdDptr dptr;
uint32_t cdw10;
uint32_t cdw11;
uint32_t cdw12;
@@ -222,6 +598,9 @@ typedef struct NvmeCmd {
uint32_t cdw15;
} NvmeCmd;
+#define NVME_CMD_FLAGS_FUSE(flags) (flags & 0x3)
+#define NVME_CMD_FLAGS_PSDT(flags) ((flags >> 6) & 0x3)
+
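A short sketch of how the PSDT field selects the interpretation of the new dptr union (illustrative helper only; endianness conversion of the pointers is omitted):

    static void example_describe_dptr(const NvmeCmd *cmd)
    {
        switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
        case NVME_PSDT_PRP:
            /* cmd->dptr.prp1 and cmd->dptr.prp2 hold PRP entries */
            break;
        case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
        case NVME_PSDT_SGL_MPTR_SGL:
            /* cmd->dptr.sgl holds the first SGL descriptor */
            break;
        }
    }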
enum NvmeAdminCommands {
NVME_ADM_CMD_DELETE_SQ = 0x00,
NVME_ADM_CMD_CREATE_SQ = 0x01,
@@ -235,6 +614,11 @@ enum NvmeAdminCommands {
NVME_ADM_CMD_ASYNC_EV_REQ = 0x0c,
NVME_ADM_CMD_ACTIVATE_FW = 0x10,
NVME_ADM_CMD_DOWNLOAD_FW = 0x11,
+ NVME_ADM_CMD_NS_ATTACHMENT = 0x15,
+ NVME_ADM_CMD_DIRECTIVE_SEND = 0x19,
+ NVME_ADM_CMD_DIRECTIVE_RECV = 0x1a,
+ NVME_ADM_CMD_VIRT_MNGMT = 0x1c,
+ NVME_ADM_CMD_DBBUF_CONFIG = 0x7c,
NVME_ADM_CMD_FORMAT_NVM = 0x80,
NVME_ADM_CMD_SECURITY_SEND = 0x81,
NVME_ADM_CMD_SECURITY_RECV = 0x82,
@@ -246,11 +630,18 @@ enum NvmeIoCommands {
NVME_CMD_READ = 0x02,
NVME_CMD_WRITE_UNCOR = 0x04,
NVME_CMD_COMPARE = 0x05,
- NVME_CMD_WRITE_ZEROS = 0x08,
+ NVME_CMD_WRITE_ZEROES = 0x08,
NVME_CMD_DSM = 0x09,
+ NVME_CMD_VERIFY = 0x0c,
+ NVME_CMD_IO_MGMT_RECV = 0x12,
+ NVME_CMD_COPY = 0x19,
+ NVME_CMD_IO_MGMT_SEND = 0x1d,
+ NVME_CMD_ZONE_MGMT_SEND = 0x79,
+ NVME_CMD_ZONE_MGMT_RECV = 0x7a,
+ NVME_CMD_ZONE_APPEND = 0x7d,
};
-typedef struct NvmeDeleteQ {
+typedef struct QEMU_PACKED NvmeDeleteQ {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -260,7 +651,7 @@ typedef struct NvmeDeleteQ {
uint32_t rsvd11[5];
} NvmeDeleteQ;
-typedef struct NvmeCreateCq {
+typedef struct QEMU_PACKED NvmeCreateCq {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -277,7 +668,12 @@ typedef struct NvmeCreateCq {
#define NVME_CQ_FLAGS_PC(cq_flags) (cq_flags & 0x1)
#define NVME_CQ_FLAGS_IEN(cq_flags) ((cq_flags >> 1) & 0x1)
-typedef struct NvmeCreateSq {
+enum NvmeFlagsCq {
+ NVME_CQ_PC = 1,
+ NVME_CQ_IEN = 2,
+};
+
+typedef struct QEMU_PACKED NvmeCreateSq {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -294,15 +690,16 @@ typedef struct NvmeCreateSq {
#define NVME_SQ_FLAGS_PC(sq_flags) (sq_flags & 0x1)
#define NVME_SQ_FLAGS_QPRIO(sq_flags) ((sq_flags >> 1) & 0x3)
-enum NvmeQueueFlags {
- NVME_Q_PC = 1,
- NVME_Q_PRIO_URGENT = 0,
- NVME_Q_PRIO_HIGH = 1,
- NVME_Q_PRIO_NORMAL = 2,
- NVME_Q_PRIO_LOW = 3,
+enum NvmeFlagsSq {
+ NVME_SQ_PC = 1,
+
+ NVME_SQ_PRIO_URGENT = 0,
+ NVME_SQ_PRIO_HIGH = 1,
+ NVME_SQ_PRIO_NORMAL = 2,
+ NVME_SQ_PRIO_LOW = 3,
};
-typedef struct NvmeIdentify {
+typedef struct QEMU_PACKED NvmeIdentify {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -310,23 +707,30 @@ typedef struct NvmeIdentify {
uint64_t rsvd2[2];
uint64_t prp1;
uint64_t prp2;
- uint32_t cns;
- uint32_t rsvd11[5];
+ uint8_t cns;
+ uint8_t rsvd10;
+ uint16_t ctrlid;
+ uint16_t nvmsetid;
+ uint8_t rsvd11;
+ uint8_t csi;
+ uint32_t rsvd12[4];
} NvmeIdentify;
-typedef struct NvmeRwCmd {
+typedef struct QEMU_PACKED NvmeRwCmd {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
uint32_t nsid;
- uint64_t rsvd2;
+ uint32_t cdw2;
+ uint32_t cdw3;
uint64_t mptr;
- uint64_t prp1;
- uint64_t prp2;
+ NvmeCmdDptr dptr;
uint64_t slba;
uint16_t nlb;
uint16_t control;
- uint32_t dsmgmt;
+ uint8_t dsmgmt;
+ uint8_t rsvd;
+ uint16_t dspec;
uint32_t reftag;
uint16_t apptag;
uint16_t appmask;
@@ -350,20 +754,31 @@ enum {
NVME_RW_DSM_LATENCY_LOW = 3 << 4,
NVME_RW_DSM_SEQ_REQ = 1 << 6,
NVME_RW_DSM_COMPRESSED = 1 << 7,
+ NVME_RW_PIREMAP = 1 << 9,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
+ NVME_RW_PRINFO_PRCHK_MASK = 7 << 10,
};
-typedef struct NvmeDsmCmd {
+#define NVME_RW_PRINFO(control) ((control >> 10) & 0xf)
+
+enum {
+ NVME_PRINFO_PRACT = 1 << 3,
+ NVME_PRINFO_PRCHK_GUARD = 1 << 2,
+ NVME_PRINFO_PRCHK_APP = 1 << 1,
+ NVME_PRINFO_PRCHK_REF = 1 << 0,
+ NVME_PRINFO_PRCHK_MASK = 7 << 0,
+};
+
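A minimal sketch showing how the shifted NVME_PRINFO_* values relate to the in-command NVME_RW_PRINFO_* bits (the helper name is hypothetical; le16_to_cpu is the usual QEMU byte-swap helper):

    static bool example_pract_enabled(const NvmeRwCmd *rw)
    {
        uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));

        return prinfo & NVME_PRINFO_PRACT;
    }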
+typedef struct QEMU_PACKED NvmeDsmCmd {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
uint32_t nsid;
uint64_t rsvd2[2];
- uint64_t prp1;
- uint64_t prp2;
+ NvmeCmdDptr dptr;
uint32_t nr;
uint32_t attributes;
uint32_t rsvd12[4];
@@ -375,19 +790,64 @@ enum {
NVME_DSMGMT_AD = 1 << 2,
};
-typedef struct NvmeDsmRange {
+typedef struct QEMU_PACKED NvmeDsmRange {
uint32_t cattr;
uint32_t nlb;
uint64_t slba;
} NvmeDsmRange;
+enum {
+ NVME_COPY_FORMAT_0 = 0x0,
+ NVME_COPY_FORMAT_1 = 0x1,
+};
+
+typedef struct QEMU_PACKED NvmeCopyCmd {
+ uint8_t opcode;
+ uint8_t flags;
+ uint16_t cid;
+ uint32_t nsid;
+ uint32_t cdw2;
+ uint32_t cdw3;
+ uint32_t rsvd2[2];
+ NvmeCmdDptr dptr;
+ uint64_t sdlba;
+ uint8_t nr;
+ uint8_t control[3];
+ uint16_t rsvd13;
+ uint16_t dspec;
+ uint32_t reftag;
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopyCmd;
+
+typedef struct QEMU_PACKED NvmeCopySourceRangeFormat0 {
+ uint8_t rsvd0[8];
+ uint64_t slba;
+ uint16_t nlb;
+ uint8_t rsvd18[6];
+ uint32_t reftag;
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopySourceRangeFormat0;
+
+typedef struct QEMU_PACKED NvmeCopySourceRangeFormat1 {
+ uint8_t rsvd0[8];
+ uint64_t slba;
+ uint16_t nlb;
+ uint8_t rsvd18[8];
+ uint8_t sr[10];
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopySourceRangeFormat1;
+
enum NvmeAsyncEventRequest {
NVME_AER_TYPE_ERROR = 0,
NVME_AER_TYPE_SMART = 1,
+ NVME_AER_TYPE_NOTICE = 2,
NVME_AER_TYPE_IO_SPECIFIC = 6,
NVME_AER_TYPE_VENDOR_SPECIFIC = 7,
- NVME_AER_INFO_ERR_INVALID_SQ = 0,
- NVME_AER_INFO_ERR_INVALID_DB = 1,
+ NVME_AER_INFO_ERR_INVALID_DB_REGISTER = 0,
+ NVME_AER_INFO_ERR_INVALID_DB_VALUE = 1,
NVME_AER_INFO_ERR_DIAG_FAIL = 2,
NVME_AER_INFO_ERR_PERS_INTERNAL_ERR = 3,
NVME_AER_INFO_ERR_TRANS_INTERNAL_ERR = 4,
@@ -395,18 +855,23 @@ enum NvmeAsyncEventRequest {
NVME_AER_INFO_SMART_RELIABILITY = 0,
NVME_AER_INFO_SMART_TEMP_THRESH = 1,
NVME_AER_INFO_SMART_SPARE_THRESH = 2,
+ NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED = 0,
};
-typedef struct NvmeAerResult {
+typedef struct QEMU_PACKED NvmeAerResult {
uint8_t event_type;
uint8_t event_info;
uint8_t log_page;
uint8_t resv;
} NvmeAerResult;
-typedef struct NvmeCqe {
+typedef struct QEMU_PACKED NvmeZonedResult {
+ uint64_t slba;
+} NvmeZonedResult;
+
+typedef struct QEMU_PACKED NvmeCqe {
uint32_t result;
- uint32_t rsvd;
+ uint32_t dw1;
uint16_t sq_head;
uint16_t sq_id;
uint16_t cid;
@@ -427,10 +892,22 @@ enum NvmeStatusCodes {
NVME_CMD_ABORT_MISSING_FUSE = 0x000a,
NVME_INVALID_NSID = 0x000b,
NVME_CMD_SEQ_ERROR = 0x000c,
+ NVME_INVALID_SGL_SEG_DESCR = 0x000d,
+ NVME_INVALID_NUM_SGL_DESCRS = 0x000e,
+ NVME_DATA_SGL_LEN_INVALID = 0x000f,
+ NVME_MD_SGL_LEN_INVALID = 0x0010,
+ NVME_SGL_DESCR_TYPE_INVALID = 0x0011,
+ NVME_INVALID_USE_OF_CMB = 0x0012,
+ NVME_INVALID_PRP_OFFSET = 0x0013,
+ NVME_FDP_DISABLED = 0x0029,
+ NVME_INVALID_PHID_LIST = 0x002a,
+ NVME_CMD_SET_CMB_REJECTED = 0x002b,
+ NVME_INVALID_CMD_SET = 0x002c,
NVME_LBA_RANGE = 0x0080,
NVME_CAP_EXCEEDED = 0x0081,
NVME_NS_NOT_READY = 0x0082,
NVME_NS_RESV_CONFLICT = 0x0083,
+ NVME_FORMAT_IN_PROGRESS = 0x0084,
NVME_INVALID_CQID = 0x0100,
NVME_INVALID_QID = 0x0101,
NVME_MAX_QSIZE_EXCEEDED = 0x0102,
@@ -445,11 +922,31 @@ enum NvmeStatusCodes {
NVME_FW_REQ_RESET = 0x010b,
NVME_INVALID_QUEUE_DEL = 0x010c,
NVME_FID_NOT_SAVEABLE = 0x010d,
- NVME_FID_NOT_NSID_SPEC = 0x010f,
+ NVME_FEAT_NOT_CHANGEABLE = 0x010e,
+ NVME_FEAT_NOT_NS_SPEC = 0x010f,
NVME_FW_REQ_SUSYSTEM_RESET = 0x0110,
+ NVME_NS_ALREADY_ATTACHED = 0x0118,
+ NVME_NS_PRIVATE = 0x0119,
+ NVME_NS_NOT_ATTACHED = 0x011a,
+ NVME_NS_CTRL_LIST_INVALID = 0x011c,
+ NVME_INVALID_CTRL_ID = 0x011f,
+ NVME_INVALID_SEC_CTRL_STATE = 0x0120,
+ NVME_INVALID_NUM_RESOURCES = 0x0121,
+ NVME_INVALID_RESOURCE_ID = 0x0122,
NVME_CONFLICTING_ATTRS = 0x0180,
NVME_INVALID_PROT_INFO = 0x0181,
NVME_WRITE_TO_RO = 0x0182,
+ NVME_CMD_SIZE_LIMIT = 0x0183,
+ NVME_INVALID_ZONE_OP = 0x01b6,
+ NVME_NOZRWA = 0x01b7,
+ NVME_ZONE_BOUNDARY_ERROR = 0x01b8,
+ NVME_ZONE_FULL = 0x01b9,
+ NVME_ZONE_READ_ONLY = 0x01ba,
+ NVME_ZONE_OFFLINE = 0x01bb,
+ NVME_ZONE_INVALID_WRITE = 0x01bc,
+ NVME_ZONE_TOO_MANY_ACTIVE = 0x01bd,
+ NVME_ZONE_TOO_MANY_OPEN = 0x01be,
+ NVME_ZONE_INVAL_TRANSITION = 0x01bf,
NVME_WRITE_FAULT = 0x0280,
NVME_UNRECOVERED_READ = 0x0281,
NVME_E2E_GUARD_ERROR = 0x0282,
@@ -457,12 +954,14 @@ enum NvmeStatusCodes {
NVME_E2E_REF_ERROR = 0x0284,
NVME_CMP_FAILURE = 0x0285,
NVME_ACCESS_DENIED = 0x0286,
+ NVME_DULB = 0x0287,
+ NVME_E2E_STORAGE_TAG_ERROR = 0x0288,
NVME_MORE = 0x2000,
NVME_DNR = 0x4000,
NVME_NO_COMPLETE = 0xffff,
};
-typedef struct NvmeFwSlotInfoLog {
+typedef struct QEMU_PACKED NvmeFwSlotInfoLog {
uint8_t afi;
uint8_t reserved1[7];
uint8_t frs1[8];
@@ -475,7 +974,7 @@ typedef struct NvmeFwSlotInfoLog {
uint8_t reserved2[448];
} NvmeFwSlotInfoLog;
-typedef struct NvmeErrorLog {
+typedef struct QEMU_PACKED NvmeErrorLog {
uint64_t error_count;
uint16_t sqid;
uint16_t cid;
@@ -487,9 +986,9 @@ typedef struct NvmeErrorLog {
uint8_t resv[35];
} NvmeErrorLog;
-typedef struct NvmeSmartLog {
+typedef struct QEMU_PACKED NvmeSmartLog {
uint8_t critical_warning;
- uint8_t temperature[2];
+ uint16_t temperature;
uint8_t available_spare;
uint8_t available_spare_threshold;
uint8_t percentage_used;
@@ -507,21 +1006,46 @@ typedef struct NvmeSmartLog {
uint8_t reserved2[320];
} NvmeSmartLog;
+#define NVME_SMART_WARN_MAX 6
enum NvmeSmartWarn {
NVME_SMART_SPARE = 1 << 0,
NVME_SMART_TEMPERATURE = 1 << 1,
NVME_SMART_RELIABILITY = 1 << 2,
NVME_SMART_MEDIA_READ_ONLY = 1 << 3,
NVME_SMART_FAILED_VOLATILE_MEDIA = 1 << 4,
+ NVME_SMART_PMR_UNRELIABLE = 1 << 5,
};
-enum LogIdentifier {
- NVME_LOG_ERROR_INFO = 0x01,
- NVME_LOG_SMART_INFO = 0x02,
- NVME_LOG_FW_SLOT_INFO = 0x03,
+typedef struct NvmeEffectsLog {
+ uint32_t acs[256];
+ uint32_t iocs[256];
+ uint8_t resv[2048];
+} NvmeEffectsLog;
+
+enum {
+ NVME_CMD_EFF_CSUPP = 1 << 0,
+ NVME_CMD_EFF_LBCC = 1 << 1,
+ NVME_CMD_EFF_NCC = 1 << 2,
+ NVME_CMD_EFF_NIC = 1 << 3,
+ NVME_CMD_EFF_CCC = 1 << 4,
+ NVME_CMD_EFF_CSE_MASK = 3 << 16,
+ NVME_CMD_EFF_UUID_SEL = 1 << 19,
};
-typedef struct NvmePSD {
+enum NvmeLogIdentifier {
+ NVME_LOG_ERROR_INFO = 0x01,
+ NVME_LOG_SMART_INFO = 0x02,
+ NVME_LOG_FW_SLOT_INFO = 0x03,
+ NVME_LOG_CHANGED_NSLIST = 0x04,
+ NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_ENDGRP = 0x09,
+ NVME_LOG_FDP_CONFS = 0x20,
+ NVME_LOG_FDP_RUH_USAGE = 0x21,
+ NVME_LOG_FDP_STATS = 0x22,
+ NVME_LOG_FDP_EVENTS = 0x23,
+};
+
+typedef struct QEMU_PACKED NvmePSD {
uint16_t mp;
uint16_t reserved;
uint32_t enlat;
@@ -533,7 +1057,29 @@ typedef struct NvmePSD {
uint8_t resv[16];
} NvmePSD;
-typedef struct NvmeIdCtrl {
+#define NVME_CONTROLLER_LIST_SIZE 2048
+#define NVME_IDENTIFY_DATA_SIZE 4096
+
+enum NvmeIdCns {
+ NVME_ID_CNS_NS = 0x00,
+ NVME_ID_CNS_CTRL = 0x01,
+ NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
+ NVME_ID_CNS_NS_DESCR_LIST = 0x03,
+ NVME_ID_CNS_CS_NS = 0x05,
+ NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07,
+ NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
+ NVME_ID_CNS_NS_PRESENT = 0x11,
+ NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12,
+ NVME_ID_CNS_CTRL_LIST = 0x13,
+ NVME_ID_CNS_PRIMARY_CTRL_CAP = 0x14,
+ NVME_ID_CNS_SECONDARY_CTRL_LIST = 0x15,
+ NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a,
+ NVME_ID_CNS_CS_NS_PRESENT = 0x1b,
+ NVME_ID_CNS_IO_COMMAND_SET = 0x1c,
+};
+
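A hedged host-side sketch of filling an Identify command with one of the CNS values above (the helper name and the chosen CNS are illustrative; NVME_ADM_CMD_IDENTIFY is defined earlier in this header):

    static void example_fill_identify(NvmeIdentify *cmd, uint32_t nsid)
    {
        memset(cmd, 0, sizeof(*cmd));
        cmd->opcode = NVME_ADM_CMD_IDENTIFY;
        cmd->nsid = cpu_to_le32(nsid);
        cmd->cns = NVME_ID_CNS_NS;   /* Identify Namespace data structure */
    }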
+typedef struct QEMU_PACKED NvmeIdCtrl {
uint16_t vid;
uint16_t ssvid;
uint8_t sn[20];
@@ -543,7 +1089,16 @@ typedef struct NvmeIdCtrl {
uint8_t ieee[3];
uint8_t cmic;
uint8_t mdts;
- uint8_t rsvd255[178];
+ uint16_t cntlid;
+ uint32_t ver;
+ uint32_t rtd3r;
+ uint32_t rtd3e;
+ uint32_t oaes;
+ uint32_t ctratt;
+ uint8_t rsvd100[11];
+ uint8_t cntrltype;
+ uint8_t fguid[16];
+ uint8_t rsvd128[128];
uint16_t oacs;
uint8_t acl;
uint8_t aerl;
@@ -551,10 +1106,31 @@ typedef struct NvmeIdCtrl {
uint8_t lpa;
uint8_t elpe;
uint8_t npss;
- uint8_t rsvd511[248];
+ uint8_t avscc;
+ uint8_t apsta;
+ uint16_t wctemp;
+ uint16_t cctemp;
+ uint16_t mtfa;
+ uint32_t hmpre;
+ uint32_t hmmin;
+ uint8_t tnvmcap[16];
+ uint8_t unvmcap[16];
+ uint32_t rpmbs;
+ uint16_t edstt;
+ uint8_t dsto;
+ uint8_t fwug;
+ uint16_t kas;
+ uint16_t hctma;
+ uint16_t mntmt;
+ uint16_t mxtmt;
+ uint32_t sanicap;
+ uint8_t rsvd332[6];
+ uint16_t nsetidmax;
+ uint16_t endgidmax;
+ uint8_t rsvd342[170];
uint8_t sqes;
uint8_t cqes;
- uint16_t rsvd515;
+ uint16_t maxcmd;
uint32_t nn;
uint16_t oncs;
uint16_t fuses;
@@ -562,25 +1138,94 @@ typedef struct NvmeIdCtrl {
uint8_t vwc;
uint16_t awun;
uint16_t awupf;
- uint8_t rsvd703[174];
- uint8_t rsvd2047[1344];
+ uint8_t nvscc;
+ uint8_t rsvd531;
+ uint16_t acwu;
+ uint16_t ocfs;
+ uint32_t sgls;
+ uint8_t rsvd540[228];
+ uint8_t subnqn[256];
+ uint8_t rsvd1024[1024];
NvmePSD psd[32];
uint8_t vs[1024];
} NvmeIdCtrl;
+typedef struct NvmeIdCtrlZoned {
+ uint8_t zasl;
+ uint8_t rsvd1[4095];
+} NvmeIdCtrlZoned;
+
+typedef struct NvmeIdCtrlNvm {
+ uint8_t vsl;
+ uint8_t wzsl;
+ uint8_t wusl;
+ uint8_t dmrl;
+ uint32_t dmrsl;
+ uint64_t dmsl;
+ uint8_t rsvd16[4080];
+} NvmeIdCtrlNvm;
+
+enum NvmeIdCtrlOaes {
+ NVME_OAES_NS_ATTR = 1 << 8,
+};
+
+enum NvmeIdCtrlCtratt {
+ NVME_CTRATT_ENDGRPS = 1 << 4,
+ NVME_CTRATT_ELBAS = 1 << 15,
+ NVME_CTRATT_FDPS = 1 << 19,
+};
+
enum NvmeIdCtrlOacs {
- NVME_OACS_SECURITY = 1 << 0,
- NVME_OACS_FORMAT = 1 << 1,
- NVME_OACS_FW = 1 << 2,
+ NVME_OACS_SECURITY = 1 << 0,
+ NVME_OACS_FORMAT = 1 << 1,
+ NVME_OACS_FW = 1 << 2,
+ NVME_OACS_NS_MGMT = 1 << 3,
+ NVME_OACS_DIRECTIVES = 1 << 5,
+ NVME_OACS_DBBUF = 1 << 8,
};
enum NvmeIdCtrlOncs {
NVME_ONCS_COMPARE = 1 << 0,
NVME_ONCS_WRITE_UNCORR = 1 << 1,
NVME_ONCS_DSM = 1 << 2,
- NVME_ONCS_WRITE_ZEROS = 1 << 3,
+ NVME_ONCS_WRITE_ZEROES = 1 << 3,
NVME_ONCS_FEATURES = 1 << 4,
NVME_ONCS_RESRVATIONS = 1 << 5,
+ NVME_ONCS_TIMESTAMP = 1 << 6,
+ NVME_ONCS_VERIFY = 1 << 7,
+ NVME_ONCS_COPY = 1 << 8,
+};
+
+enum NvmeIdCtrlOcfs {
+ NVME_OCFS_COPY_FORMAT_0 = 1 << NVME_COPY_FORMAT_0,
+ NVME_OCFS_COPY_FORMAT_1 = 1 << NVME_COPY_FORMAT_1,
+};
+
+enum NvmeIdctrlVwc {
+ NVME_VWC_PRESENT = 1 << 0,
+ NVME_VWC_NSID_BROADCAST_NO_SUPPORT = 0 << 1,
+ NVME_VWC_NSID_BROADCAST_RESERVED = 1 << 1,
+ NVME_VWC_NSID_BROADCAST_CTRL_SPEC = 2 << 1,
+ NVME_VWC_NSID_BROADCAST_SUPPORT = 3 << 1,
+};
+
+enum NvmeIdCtrlFrmw {
+ NVME_FRMW_SLOT1_RO = 1 << 0,
+};
+
+enum NvmeIdCtrlLpa {
+ NVME_LPA_NS_SMART = 1 << 0,
+ NVME_LPA_CSE = 1 << 1,
+ NVME_LPA_EXTENDED = 1 << 2,
+};
+
+enum NvmeIdCtrlCmic {
+ NVME_CMIC_MULTI_CTRL = 1 << 1,
+};
+
+enum NvmeNsAttachmentOperation {
+ NVME_NS_ATTACHMENT_ATTACH = 0x0,
+ NVME_NS_ATTACHMENT_DETACH = 0x1,
};
#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
@@ -588,21 +1233,18 @@ enum NvmeIdCtrlOncs {
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
#define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf)
-typedef struct NvmeFeatureVal {
- uint32_t arbitration;
- uint32_t power_mgmt;
- uint32_t temp_thresh;
- uint32_t err_rec;
- uint32_t volatile_wc;
- uint32_t num_queues;
- uint32_t int_coalescing;
- uint32_t *int_vector_config;
- uint32_t write_atomicity;
- uint32_t async_config;
- uint32_t sw_prog_marker;
-} NvmeFeatureVal;
+#define NVME_CTRL_SGLS_SUPPORT_MASK (0x3 << 0)
+#define NVME_CTRL_SGLS_SUPPORT_NO_ALIGN (0x1 << 0)
+#define NVME_CTRL_SGLS_SUPPORT_DWORD_ALIGN (0x1 << 1)
+#define NVME_CTRL_SGLS_KEYED (0x1 << 2)
+#define NVME_CTRL_SGLS_BITBUCKET (0x1 << 16)
+#define NVME_CTRL_SGLS_MPTR_CONTIGUOUS (0x1 << 17)
+#define NVME_CTRL_SGLS_EXCESS_LENGTH (0x1 << 18)
+#define NVME_CTRL_SGLS_MPTR_SGL (0x1 << 19)
+#define NVME_CTRL_SGLS_ADDR_OFFSET (0x1 << 20)
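For illustration, a hedged check of the SGLS field using the masks above (the helper name is hypothetical; assumes this header is included):

    static bool example_ctrl_supports_sgls(const NvmeIdCtrl *id)
    {
        /* a non-zero support field means SGLs may be used in I/O commands */
        return le32_to_cpu(id->sgls) & NVME_CTRL_SGLS_SUPPORT_MASK;
    }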
#define NVME_ARB_AB(arb) (arb & 0x7)
+#define NVME_ARB_AB_NOLIMIT 0x7
#define NVME_ARB_LPW(arb) ((arb >> 8) & 0xff)
#define NVME_ARB_MPW(arb) ((arb >> 16) & 0xff)
#define NVME_ARB_HPW(arb) ((arb >> 24) & 0xff)
@@ -610,6 +1252,25 @@ typedef struct NvmeFeatureVal {
#define NVME_INTC_THR(intc) (intc & 0xff)
#define NVME_INTC_TIME(intc) ((intc >> 8) & 0xff)
+#define NVME_INTVC_NOCOALESCING (0x1 << 16)
+
+#define NVME_TEMP_THSEL(temp) ((temp >> 20) & 0x3)
+#define NVME_TEMP_THSEL_OVER 0x0
+#define NVME_TEMP_THSEL_UNDER 0x1
+
+#define NVME_TEMP_TMPSEL(temp) ((temp >> 16) & 0xf)
+#define NVME_TEMP_TMPSEL_COMPOSITE 0x0
+
+#define NVME_TEMP_TMPTH(temp) (temp & 0xffff)
+
+#define NVME_AEC_SMART(aec) (aec & 0xff)
+#define NVME_AEC_NS_ATTR(aec) ((aec >> 8) & 0x1)
+#define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1)
+#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1)
+
+#define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff)
+#define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000)
+
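A small sketch of decoding a Temperature Threshold feature value (cdw11) with the macros above; the helper name is illustrative only:

    static bool example_is_under_temp_threshold(uint32_t dw11)
    {
        uint16_t tmpth = NVME_TEMP_TMPTH(dw11);    /* threshold, in kelvin */
        uint8_t sensor = NVME_TEMP_TMPSEL(dw11);   /* 0 selects the composite sensor */

        (void)tmpth;
        (void)sensor;

        return NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_UNDER;
    }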
enum NvmeFeatureIds {
NVME_ARBITRATION = 0x1,
NVME_POWER_MANAGEMENT = 0x2,
@@ -622,10 +1283,42 @@ enum NvmeFeatureIds {
NVME_INTERRUPT_VECTOR_CONF = 0x9,
NVME_WRITE_ATOMICITY = 0xa,
NVME_ASYNCHRONOUS_EVENT_CONF = 0xb,
- NVME_SOFTWARE_PROGRESS_MARKER = 0x80
+ NVME_TIMESTAMP = 0xe,
+ NVME_HOST_BEHAVIOR_SUPPORT = 0x16,
+ NVME_COMMAND_SET_PROFILE = 0x19,
+ NVME_FDP_MODE = 0x1d,
+ NVME_FDP_EVENTS = 0x1e,
+ NVME_SOFTWARE_PROGRESS_MARKER = 0x80,
+ NVME_FID_MAX = 0x100,
};
-typedef struct NvmeRangeType {
+typedef enum NvmeFeatureCap {
+ NVME_FEAT_CAP_SAVE = 1 << 0,
+ NVME_FEAT_CAP_NS = 1 << 1,
+ NVME_FEAT_CAP_CHANGE = 1 << 2,
+} NvmeFeatureCap;
+
+typedef enum NvmeGetFeatureSelect {
+ NVME_GETFEAT_SELECT_CURRENT = 0x0,
+ NVME_GETFEAT_SELECT_DEFAULT = 0x1,
+ NVME_GETFEAT_SELECT_SAVED = 0x2,
+ NVME_GETFEAT_SELECT_CAP = 0x3,
+} NvmeGetFeatureSelect;
+
+#define NVME_GETSETFEAT_FID_MASK 0xff
+#define NVME_GETSETFEAT_FID(dw10) (dw10 & NVME_GETSETFEAT_FID_MASK)
+
+#define NVME_GETFEAT_SELECT_SHIFT 8
+#define NVME_GETFEAT_SELECT_MASK 0x7
+#define NVME_GETFEAT_SELECT(dw10) \
+ ((dw10 >> NVME_GETFEAT_SELECT_SHIFT) & NVME_GETFEAT_SELECT_MASK)
+
+#define NVME_SETFEAT_SAVE_SHIFT 31
+#define NVME_SETFEAT_SAVE_MASK 0x1
+#define NVME_SETFEAT_SAVE(dw10) \
+ ((dw10 >> NVME_SETFEAT_SAVE_SHIFT) & NVME_SETFEAT_SAVE_MASK)
+
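A hedged sketch of parsing cdw10 of a Get Features command with these helpers (illustrative function name only):

    static bool example_getfeat_wants_capabilities(uint32_t dw10)
    {
        uint8_t fid = NVME_GETSETFEAT_FID(dw10);               /* feature identifier */
        NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);

        (void)fid;

        /* SEL == 011b asks for the feature's capabilities (NvmeFeatureCap bits)
         * rather than its current, default or saved value */
        return sel == NVME_GETFEAT_SELECT_CAP;
    }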
+typedef struct QEMU_PACKED NvmeRangeType {
uint8_t type;
uint8_t attributes;
uint8_t rsvd2[14];
@@ -635,13 +1328,29 @@ typedef struct NvmeRangeType {
uint8_t rsvd48[16];
} NvmeRangeType;
-typedef struct NvmeLBAF {
+typedef struct NvmeHostBehaviorSupport {
+ uint8_t acre;
+ uint8_t etdas;
+ uint8_t lbafee;
+ uint8_t rsvd3[509];
+} NvmeHostBehaviorSupport;
+
+typedef struct QEMU_PACKED NvmeLBAF {
uint16_t ms;
uint8_t ds;
uint8_t rp;
} NvmeLBAF;
-typedef struct NvmeIdNs {
+typedef struct QEMU_PACKED NvmeLBAFE {
+ uint64_t zsze;
+ uint8_t zdes;
+ uint8_t rsvd9[7];
+} NvmeLBAFE;
+
+#define NVME_NSID_BROADCAST 0xffffffff
+#define NVME_MAX_NLBAF 64
+
+typedef struct QEMU_PACKED NvmeIdNs {
uint64_t nsze;
uint64_t ncap;
uint64_t nuse;
@@ -651,13 +1360,116 @@ typedef struct NvmeIdNs {
uint8_t mc;
uint8_t dpc;
uint8_t dps;
- uint8_t res30[98];
- NvmeLBAF lbaf[16];
- uint8_t res192[192];
+ uint8_t nmic;
+ uint8_t rescap;
+ uint8_t fpi;
+ uint8_t dlfeat;
+ uint16_t nawun;
+ uint16_t nawupf;
+ uint16_t nacwu;
+ uint16_t nabsn;
+ uint16_t nabo;
+ uint16_t nabspf;
+ uint16_t noiob;
+ uint8_t nvmcap[16];
+ uint16_t npwg;
+ uint16_t npwa;
+ uint16_t npdg;
+ uint16_t npda;
+ uint16_t nows;
+ uint16_t mssrl;
+ uint32_t mcl;
+ uint8_t msrc;
+ uint8_t rsvd81[18];
+ uint8_t nsattr;
+ uint16_t nvmsetid;
+ uint16_t endgid;
+ uint8_t nguid[16];
+ uint64_t eui64;
+ NvmeLBAF lbaf[NVME_MAX_NLBAF];
uint8_t vs[3712];
} NvmeIdNs;
+#define NVME_ID_NS_NVM_ELBAF_PIF(elbaf) (((elbaf) >> 7) & 0x3)
+
+typedef struct QEMU_PACKED NvmeIdNsNvm {
+ uint64_t lbstm;
+ uint8_t pic;
+ uint8_t rsvd9[3];
+ uint32_t elbaf[NVME_MAX_NLBAF];
+ uint8_t rsvd268[3828];
+} NvmeIdNsNvm;
+
+typedef struct QEMU_PACKED NvmeIdNsDescr {
+ uint8_t nidt;
+ uint8_t nidl;
+ uint8_t rsvd2[2];
+} NvmeIdNsDescr;
+
+enum NvmeNsIdentifierLength {
+ NVME_NIDL_EUI64 = 8,
+ NVME_NIDL_NGUID = 16,
+ NVME_NIDL_UUID = 16,
+ NVME_NIDL_CSI = 1,
+};
+
+enum NvmeNsIdentifierType {
+ NVME_NIDT_EUI64 = 0x01,
+ NVME_NIDT_NGUID = 0x02,
+ NVME_NIDT_UUID = 0x03,
+ NVME_NIDT_CSI = 0x04,
+};
+
+enum NvmeIdNsNmic {
+ NVME_NMIC_NS_SHARED = 1 << 0,
+};
+
+enum NvmeCsi {
+ NVME_CSI_NVM = 0x00,
+ NVME_CSI_ZONED = 0x02,
+};
+
+#define NVME_SET_CSI(vec, csi) (vec |= (uint8_t)(1 << (csi)))
+
+typedef struct QEMU_PACKED NvmeIdNsZoned {
+ uint16_t zoc;
+ uint16_t ozcs;
+ uint32_t mar;
+ uint32_t mor;
+ uint32_t rrl;
+ uint32_t frl;
+ uint8_t rsvd12[24];
+ uint32_t numzrwa;
+ uint16_t zrwafg;
+ uint16_t zrwas;
+ uint8_t zrwacap;
+ uint8_t rsvd53[2763];
+ NvmeLBAFE lbafe[16];
+ uint8_t rsvd3072[768];
+ uint8_t vs[256];
+} NvmeIdNsZoned;
+
+enum NvmeIdNsZonedOzcs {
+ NVME_ID_NS_ZONED_OZCS_RAZB = 1 << 0,
+ NVME_ID_NS_ZONED_OZCS_ZRWASUP = 1 << 1,
+};
+
+enum NvmeIdNsZonedZrwacap {
+ NVME_ID_NS_ZONED_ZRWACAP_EXPFLUSHSUP = 1 << 0,
+};
+
+/* Deallocate Logical Block Features */
+#define NVME_ID_NS_DLFEAT_GUARD_CRC(dlfeat) ((dlfeat) & 0x10)
+#define NVME_ID_NS_DLFEAT_WRITE_ZEROES(dlfeat) ((dlfeat) & 0x08)
+
+#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR(dlfeat) ((dlfeat) & 0x7)
+#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR_UNDEFINED 0
+#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES 1
+#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ONES 2
+
+
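A short illustration of the DLFEAT read-behavior decoding above (hypothetical helper; assumes this header is included):

    static bool example_deallocated_reads_are_zeroes(const NvmeIdNs *id_ns)
    {
        return NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id_ns->dlfeat) ==
               NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES;
    }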
#define NVME_ID_NS_NSFEAT_THIN(nsfeat) ((nsfeat & 0x1))
+#define NVME_ID_NS_NSFEAT_DULBE(nsfeat) ((nsfeat >> 2) & 0x1)
#define NVME_ID_NS_FLBAS_EXTENDED(flbas) ((flbas >> 4) & 0x1)
#define NVME_ID_NS_FLBAS_INDEX(flbas) ((flbas & 0xf))
#define NVME_ID_NS_MC_SEPARATE(mc) ((mc >> 1) & 0x1)
@@ -670,19 +1482,358 @@ typedef struct NvmeIdNs {
#define NVME_ID_NS_DPC_TYPE_MASK 0x7
enum NvmeIdNsDps {
- DPS_TYPE_NONE = 0,
- DPS_TYPE_1 = 1,
- DPS_TYPE_2 = 2,
- DPS_TYPE_3 = 3,
- DPS_TYPE_MASK = 0x7,
- DPS_FIRST_EIGHT = 8,
+ NVME_ID_NS_DPS_TYPE_NONE = 0,
+ NVME_ID_NS_DPS_TYPE_1 = 1,
+ NVME_ID_NS_DPS_TYPE_2 = 2,
+ NVME_ID_NS_DPS_TYPE_3 = 3,
+ NVME_ID_NS_DPS_TYPE_MASK = 0x7,
+ NVME_ID_NS_DPS_FIRST_EIGHT = 8,
+};
+
+enum NvmeIdNsFlbas {
+ NVME_ID_NS_FLBAS_EXTENDED = 1 << 4,
+};
+
+enum NvmeIdNsMc {
+ NVME_ID_NS_MC_EXTENDED = 1 << 0,
+ NVME_ID_NS_MC_SEPARATE = 1 << 1,
+};
+
+#define NVME_ID_NS_DPS_TYPE(dps) (dps & NVME_ID_NS_DPS_TYPE_MASK)
+
+enum NvmePIFormat {
+ NVME_PI_GUARD_16 = 0,
+ NVME_PI_GUARD_64 = 2,
+};
+
+typedef union NvmeDifTuple {
+ struct {
+ uint16_t guard;
+ uint16_t apptag;
+ uint32_t reftag;
+ } g16;
+
+ struct {
+ uint64_t guard;
+ uint16_t apptag;
+ uint8_t sr[6];
+ } g64;
+} NvmeDifTuple;
+
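A minimal sketch of selecting the protection-information tuple layout from the protection information format (PIF); the helper is illustrative only:

    static size_t example_pi_tuple_size(uint8_t pif)
    {
        NvmeDifTuple t;

        /* 16-bit guards use the 8-byte layout, 64-bit guards the 16-byte one */
        return pif == NVME_PI_GUARD_64 ? sizeof(t.g64) : sizeof(t.g16);
    }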
+enum NvmeZoneAttr {
+ NVME_ZA_FINISHED_BY_CTLR = 1 << 0,
+ NVME_ZA_FINISH_RECOMMENDED = 1 << 1,
+ NVME_ZA_RESET_RECOMMENDED = 1 << 2,
+ NVME_ZA_ZRWA_VALID = 1 << 3,
+ NVME_ZA_ZD_EXT_VALID = 1 << 7,
+};
+
+typedef struct QEMU_PACKED NvmeZoneReportHeader {
+ uint64_t nr_zones;
+ uint8_t rsvd[56];
+} NvmeZoneReportHeader;
+
+enum NvmeZoneReceiveAction {
+ NVME_ZONE_REPORT = 0,
+ NVME_ZONE_REPORT_EXTENDED = 1,
+};
+
+enum NvmeZoneReportType {
+ NVME_ZONE_REPORT_ALL = 0,
+ NVME_ZONE_REPORT_EMPTY = 1,
+ NVME_ZONE_REPORT_IMPLICITLY_OPEN = 2,
+ NVME_ZONE_REPORT_EXPLICITLY_OPEN = 3,
+ NVME_ZONE_REPORT_CLOSED = 4,
+ NVME_ZONE_REPORT_FULL = 5,
+ NVME_ZONE_REPORT_READ_ONLY = 6,
+ NVME_ZONE_REPORT_OFFLINE = 7,
+};
+
+enum NvmeZoneType {
+ NVME_ZONE_TYPE_RESERVED = 0x00,
+ NVME_ZONE_TYPE_SEQ_WRITE = 0x02,
+};
+
+typedef struct QEMU_PACKED NvmeZoneSendCmd {
+ uint8_t opcode;
+ uint8_t flags;
+ uint16_t cid;
+ uint32_t nsid;
+ uint32_t rsvd8[4];
+ NvmeCmdDptr dptr;
+ uint64_t slba;
+ uint32_t rsvd48;
+ uint8_t zsa;
+ uint8_t zsflags;
+ uint8_t rsvd54[2];
+ uint32_t rsvd56[2];
+} NvmeZoneSendCmd;
+
+enum NvmeZoneSendAction {
+ NVME_ZONE_ACTION_RSD = 0x00,
+ NVME_ZONE_ACTION_CLOSE = 0x01,
+ NVME_ZONE_ACTION_FINISH = 0x02,
+ NVME_ZONE_ACTION_OPEN = 0x03,
+ NVME_ZONE_ACTION_RESET = 0x04,
+ NVME_ZONE_ACTION_OFFLINE = 0x05,
+ NVME_ZONE_ACTION_SET_ZD_EXT = 0x10,
+ NVME_ZONE_ACTION_ZRWA_FLUSH = 0x11,
+};
+
+enum {
+ NVME_ZSFLAG_SELECT_ALL = 1 << 0,
+ NVME_ZSFLAG_ZRWA_ALLOC = 1 << 1,
+};
+
+typedef struct QEMU_PACKED NvmeZoneDescr {
+ uint8_t zt;
+ uint8_t zs;
+ uint8_t za;
+ uint8_t rsvd3[5];
+ uint64_t zcap;
+ uint64_t zslba;
+ uint64_t wp;
+ uint8_t rsvd32[32];
+} NvmeZoneDescr;
+
+typedef enum NvmeZoneState {
+ NVME_ZONE_STATE_RESERVED = 0x00,
+ NVME_ZONE_STATE_EMPTY = 0x01,
+ NVME_ZONE_STATE_IMPLICITLY_OPEN = 0x02,
+ NVME_ZONE_STATE_EXPLICITLY_OPEN = 0x03,
+ NVME_ZONE_STATE_CLOSED = 0x04,
+ NVME_ZONE_STATE_READ_ONLY = 0x0d,
+ NVME_ZONE_STATE_FULL = 0x0e,
+ NVME_ZONE_STATE_OFFLINE = 0x0f,
+} NvmeZoneState;
+
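A hedged sketch of walking a Zone Management Receive report built from the header and descriptors above (the helper name and buffer handling are illustrative; in the zoned layout the zone state occupies the upper nibble of the zs byte):

    static uint64_t example_count_full_zones(const uint8_t *buf, size_t len)
    {
        const NvmeZoneReportHeader *hdr = (const NvmeZoneReportHeader *)buf;
        const NvmeZoneDescr *zd = (const NvmeZoneDescr *)(buf + sizeof(*hdr));
        uint64_t nr = le64_to_cpu(hdr->nr_zones);
        uint64_t full = 0;

        for (uint64_t i = 0;
             i < nr && sizeof(*hdr) + (i + 1) * sizeof(*zd) <= len; i++) {
            if ((zd[i].zs >> 4) == NVME_ZONE_STATE_FULL) {
                full++;
            }
        }

        return full;
    }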
+typedef struct QEMU_PACKED NvmePriCtrlCap {
+ uint16_t cntlid;
+ uint16_t portid;
+ uint8_t crt;
+ uint8_t rsvd5[27];
+ uint32_t vqfrt;
+ uint32_t vqrfa;
+ uint16_t vqrfap;
+ uint16_t vqprt;
+ uint16_t vqfrsm;
+ uint16_t vqgran;
+ uint8_t rsvd48[16];
+ uint32_t vifrt;
+ uint32_t virfa;
+ uint16_t virfap;
+ uint16_t viprt;
+ uint16_t vifrsm;
+ uint16_t vigran;
+ uint8_t rsvd80[4016];
+} NvmePriCtrlCap;
+
+typedef enum NvmePriCtrlCapCrt {
+ NVME_CRT_VQ = 1 << 0,
+ NVME_CRT_VI = 1 << 1,
+} NvmePriCtrlCapCrt;
+
+typedef struct QEMU_PACKED NvmeSecCtrlEntry {
+ uint16_t scid;
+ uint16_t pcid;
+ uint8_t scs;
+ uint8_t rsvd5[3];
+ uint16_t vfn;
+ uint16_t nvq;
+ uint16_t nvi;
+ uint8_t rsvd14[18];
+} NvmeSecCtrlEntry;
+
+typedef struct QEMU_PACKED NvmeSecCtrlList {
+ uint8_t numcntl;
+ uint8_t rsvd1[31];
+ NvmeSecCtrlEntry sec[127];
+} NvmeSecCtrlList;
+
+typedef enum NvmeVirtMngmtAction {
+ NVME_VIRT_MNGMT_ACTION_PRM_ALLOC = 0x01,
+ NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE = 0x07,
+ NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN = 0x08,
+ NVME_VIRT_MNGMT_ACTION_SEC_ONLINE = 0x09,
+} NvmeVirtMngmtAction;
+
+typedef enum NvmeVirtualResourceType {
+ NVME_VIRT_RES_QUEUE = 0x00,
+ NVME_VIRT_RES_INTERRUPT = 0x01,
+} NvmeVirtualResourceType;
+
+typedef struct NvmeDirectiveIdentify {
+ uint8_t supported;
+ uint8_t unused1[31];
+ uint8_t enabled;
+ uint8_t unused33[31];
+ uint8_t persistent;
+ uint8_t unused65[31];
+ uint8_t rsvd64[4000];
+} NvmeDirectiveIdentify;
+
+enum NvmeDirectiveTypes {
+ NVME_DIRECTIVE_IDENTIFY = 0x0,
+ NVME_DIRECTIVE_DATA_PLACEMENT = 0x2,
+};
+
+enum NvmeDirectiveOperations {
+ NVME_DIRECTIVE_RETURN_PARAMS = 0x1,
+};
+
+typedef struct QEMU_PACKED NvmeFdpConfsHdr {
+ uint16_t num_confs;
+ uint8_t version;
+ uint8_t rsvd3;
+ uint32_t size;
+ uint8_t rsvd8[8];
+} NvmeFdpConfsHdr;
+
+REG8(FDPA, 0x0)
+ FIELD(FDPA, RGIF, 0, 4)
+ FIELD(FDPA, VWC, 4, 1)
+ FIELD(FDPA, VALID, 7, 1);
+
+typedef struct QEMU_PACKED NvmeFdpDescrHdr {
+ uint16_t descr_size;
+ uint8_t fdpa;
+ uint8_t vss;
+ uint32_t nrg;
+ uint16_t nruh;
+ uint16_t maxpids;
+ uint32_t nnss;
+ uint64_t runs;
+ uint32_t erutl;
+ uint8_t rsvd28[36];
+} NvmeFdpDescrHdr;
+
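Since the FDPA byte is described with registerfields above, its fields can be extracted with the usual FIELD_EX helpers from hw/registerfields.h; a hedged one-liner (helper name is hypothetical):

    static unsigned example_fdpa_rgif(const NvmeFdpDescrHdr *hdr)
    {
        /* number of bits used for reclaim group identifiers */
        return FIELD_EX8(hdr->fdpa, FDPA, RGIF);
    }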
+enum NvmeRuhType {
+ NVME_RUHT_INITIALLY_ISOLATED = 1,
+ NVME_RUHT_PERSISTENTLY_ISOLATED = 2,
+};
+
+typedef struct QEMU_PACKED NvmeRuhDescr {
+ uint8_t ruht;
+ uint8_t rsvd1[3];
+} NvmeRuhDescr;
+
+typedef struct QEMU_PACKED NvmeRuhuLog {
+ uint16_t nruh;
+ uint8_t rsvd2[6];
+} NvmeRuhuLog;
+
+enum NvmeRuhAttributes {
+ NVME_RUHA_UNUSED = 0,
+ NVME_RUHA_HOST = 1,
+ NVME_RUHA_CTRL = 2,
+};
+
+typedef struct QEMU_PACKED NvmeRuhuDescr {
+ uint8_t ruha;
+ uint8_t rsvd1[7];
+} NvmeRuhuDescr;
+
+typedef struct QEMU_PACKED NvmeFdpStatsLog {
+ uint64_t hbmw[2];
+ uint64_t mbmw[2];
+ uint64_t mbe[2];
+ uint8_t rsvd48[16];
+} NvmeFdpStatsLog;
+
+typedef struct QEMU_PACKED NvmeFdpEventsLog {
+ uint32_t num_events;
+ uint8_t rsvd4[60];
+} NvmeFdpEventsLog;
+
+enum NvmeFdpEventType {
+ FDP_EVT_RU_NOT_FULLY_WRITTEN = 0x0,
+ FDP_EVT_RU_ATL_EXCEEDED = 0x1,
+ FDP_EVT_CTRL_RESET_RUH = 0x2,
+ FDP_EVT_INVALID_PID = 0x3,
+ FDP_EVT_MEDIA_REALLOC = 0x80,
+ FDP_EVT_RUH_IMPLICIT_RU_CHANGE = 0x81,
+};
+
+enum NvmeFdpEventFlags {
+ FDPEF_PIV = 1 << 0,
+ FDPEF_NSIDV = 1 << 1,
+ FDPEF_LV = 1 << 2,
+};
+
+typedef struct QEMU_PACKED NvmeFdpEvent {
+ uint8_t type;
+ uint8_t flags;
+ uint16_t pid;
+ uint64_t timestamp;
+ uint32_t nsid;
+ uint64_t type_specific[2];
+ uint16_t rgid;
+ uint8_t ruhid;
+ uint8_t rsvd35[5];
+ uint64_t vendor[3];
+} NvmeFdpEvent;
+
+typedef struct QEMU_PACKED NvmePhidList {
+ uint16_t nnruhd;
+ uint8_t rsvd2[6];
+} NvmePhidList;
+
+typedef struct QEMU_PACKED NvmePhidDescr {
+ uint8_t ruht;
+ uint8_t rsvd1;
+ uint16_t ruhid;
+} NvmePhidDescr;
+
+REG32(FEAT_FDP, 0x0)
+ FIELD(FEAT_FDP, FDPE, 0, 1)
+ FIELD(FEAT_FDP, CONF_NDX, 8, 8);
+
+typedef struct QEMU_PACKED NvmeFdpEventDescr {
+ uint8_t evt;
+ uint8_t evta;
+} NvmeFdpEventDescr;
+
+REG32(NVME_IOMR, 0x0)
+ FIELD(NVME_IOMR, MO, 0, 8)
+ FIELD(NVME_IOMR, MOS, 16, 16);
+
+enum NvmeIomr2Mo {
+ NVME_IOMR_MO_NOP = 0x0,
+ NVME_IOMR_MO_RUH_STATUS = 0x1,
+ NVME_IOMR_MO_VENDOR_SPECIFIC = 0x255,
+};
+
+typedef struct QEMU_PACKED NvmeRuhStatus {
+ uint8_t rsvd0[14];
+ uint16_t nruhsd;
+} NvmeRuhStatus;
+
+typedef struct QEMU_PACKED NvmeRuhStatusDescr {
+ uint16_t pid;
+ uint16_t ruhid;
+ uint32_t earutr;
+ uint64_t ruamw;
+ uint8_t rsvd16[16];
+} NvmeRuhStatusDescr;
+
+REG32(NVME_IOMS, 0x0)
+ FIELD(NVME_IOMS, MO, 0, 8)
+ FIELD(NVME_IOMS, MOS, 16, 16);
+
+enum NvmeIoms2Mo {
+ NVME_IOMS_MO_NOP = 0x0,
+ NVME_IOMS_MO_RUH_UPDATE = 0x1,
};
static inline void _nvme_check_size(void)
{
+ QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeAerResult) != 4);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeZonedResult) != 8);
QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat0) != 32);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat1) != 40);
QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64);
@@ -690,11 +1841,29 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopyCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeHostBehaviorSupport) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZoned) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlNvm) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsNvm) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsZoned) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsDescr) != 4);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeZoneDescr) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeDifTuple) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeDirectiveIdentify) != 4096);
}
#endif
diff --git a/include/block/qapi.h b/include/block/qapi.h
index 83bdb098bd..54c48de26a 100644
--- a/include/block/qapi.h
+++ b/include/block/qapi.h
@@ -25,22 +25,28 @@
#ifndef BLOCK_QAPI_H
#define BLOCK_QAPI_H
-#include "block/block.h"
+#include "block/graph-lock.h"
#include "block/snapshot.h"
+#include "qapi/qapi-types-block-core.h"
-BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
- BlockDriverState *bs, Error **errp);
-int bdrv_query_snapshot_info_list(BlockDriverState *bs,
- SnapshotInfoList **p_list,
- Error **errp);
-void bdrv_query_image_info(BlockDriverState *bs,
- ImageInfo **p_info,
- Error **errp);
+BlockDeviceInfo * GRAPH_RDLOCK
+bdrv_block_device_info(BlockBackend *blk, BlockDriverState *bs,
+ bool flat, Error **errp);
-void bdrv_snapshot_dump(fprintf_function func_fprintf, void *f,
- QEMUSnapshotInfo *sn);
-void bdrv_image_info_specific_dump(fprintf_function func_fprintf, void *f,
- ImageInfoSpecific *info_spec);
-void bdrv_image_info_dump(fprintf_function func_fprintf, void *f,
- ImageInfo *info);
+int GRAPH_RDLOCK
+bdrv_query_snapshot_info_list(BlockDriverState *bs,
+ SnapshotInfoList **p_list,
+ Error **errp);
+void GRAPH_RDLOCK
+bdrv_query_image_info(BlockDriverState *bs, ImageInfo **p_info, bool flat,
+ bool skip_implicit_filters, Error **errp);
+void GRAPH_RDLOCK
+bdrv_query_block_graph_info(BlockDriverState *bs, BlockGraphInfo **p_info,
+ Error **errp);
+
+void bdrv_snapshot_dump(QEMUSnapshotInfo *sn);
+void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec,
+ const char *prefix,
+ int indentation);
+void bdrv_node_info_dump(BlockNodeInfo *info, int indentation, bool protocol);
#endif
diff --git a/include/block/qdict.h b/include/block/qdict.h
index d8cb502d7d..b4c28d96a9 100644
--- a/include/block/qdict.h
+++ b/include/block/qdict.h
@@ -12,6 +12,9 @@
#include "qapi/qmp/qdict.h"
+QObject *qdict_crumple(const QDict *src, Error **errp);
+void qdict_flatten(QDict *qdict);
+
void qdict_copy_default(QDict *dst, QDict *src, const char *key);
void qdict_set_default_str(QDict *dst, const char *key, const char *val);
@@ -20,8 +23,6 @@ void qdict_join(QDict *dest, QDict *src, bool overwrite);
void qdict_extract_subqdict(QDict *src, QDict **dst, const char *start);
void qdict_array_split(QDict *src, QList **dst);
int qdict_array_entries(QDict *src, const char *subqdict);
-QObject *qdict_crumple(const QDict *src, Error **errp);
-void qdict_flatten(QDict *qdict);
typedef struct QDictRenames {
const char *from;
diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h
index 6799614e56..20e000b8ef 100644
--- a/include/block/raw-aio.h
+++ b/include/block/raw-aio.h
@@ -12,10 +12,11 @@
* Contributions after 2012-01-13 are licensed under the terms of the
* GNU GPL, version 2 or (at your option) any later version.
*/
+
#ifndef QEMU_RAW_AIO_H
#define QEMU_RAW_AIO_H
-#include "qemu/coroutine.h"
+#include "block/aio.h"
#include "qemu/iov.h"
/* AIO request types */
@@ -27,6 +28,9 @@
#define QEMU_AIO_WRITE_ZEROES 0x0020
#define QEMU_AIO_COPY_RANGE 0x0040
#define QEMU_AIO_TRUNCATE 0x0080
+#define QEMU_AIO_ZONE_REPORT 0x0100
+#define QEMU_AIO_ZONE_MGMT 0x0200
+#define QEMU_AIO_ZONE_APPEND 0x0400
#define QEMU_AIO_TYPE_MASK \
(QEMU_AIO_READ | \
QEMU_AIO_WRITE | \
@@ -35,11 +39,15 @@
QEMU_AIO_DISCARD | \
QEMU_AIO_WRITE_ZEROES | \
QEMU_AIO_COPY_RANGE | \
- QEMU_AIO_TRUNCATE)
+ QEMU_AIO_TRUNCATE | \
+ QEMU_AIO_ZONE_REPORT | \
+ QEMU_AIO_ZONE_MGMT | \
+ QEMU_AIO_ZONE_APPEND)
/* AIO flags */
#define QEMU_AIO_MISALIGNED 0x1000
#define QEMU_AIO_BLKDEV 0x2000
+#define QEMU_AIO_NO_FALLBACK 0x4000
/* linux-aio.c - Linux native implementation */
@@ -47,15 +55,24 @@
typedef struct LinuxAioState LinuxAioState;
LinuxAioState *laio_init(Error **errp);
void laio_cleanup(LinuxAioState *s);
-int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
- uint64_t offset, QEMUIOVector *qiov, int type);
-BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque, int type);
+
+/* laio_co_submit: submit I/O requests in the thread's current AioContext. */
+int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
+ int type, uint64_t dev_max_batch);
+
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context);
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context);
-void laio_io_plug(BlockDriverState *bs, LinuxAioState *s);
-void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s);
+#endif
+/* io_uring.c - Linux io_uring implementation */
+#ifdef CONFIG_LINUX_IO_URING
+LuringState *luring_init(Error **errp);
+void luring_cleanup(LuringState *s);
+
+/* luring_co_submit: submit I/O requests in the thread's current AioContext. */
+int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
+ QEMUIOVector *qiov, int type);
+void luring_detach_aio_context(LuringState *s, AioContext *old_context);
+void luring_attach_aio_context(LuringState *s, AioContext *new_context);
#endif
#ifdef _WIN32
diff --git a/include/block/replication.h b/include/block/replication.h
new file mode 100644
index 0000000000..21931b4f0c
--- /dev/null
+++ b/include/block/replication.h
@@ -0,0 +1,175 @@
+/*
+ * Replication filter
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ * Copyright (c) 2016 Intel Corporation
+ * Copyright (c) 2016 FUJITSU LIMITED
+ *
+ * Author:
+ * Changlong Xie <xiecl.fnst@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef REPLICATION_H
+#define REPLICATION_H
+
+#include "qapi/qapi-types-block-core.h"
+#include "qemu/module.h"
+#include "qemu/queue.h"
+
+typedef struct ReplicationOps ReplicationOps;
+typedef struct ReplicationState ReplicationState;
+
+/**
+ * SECTION:block/replication.h
+ * @title:Base Replication System
+ * @short_description: interfaces for handling replication
+ *
+ * The Replication Model provides a framework for handling replication.
+ *
+ * <example>
+ * <title>How to use replication interfaces</title>
+ * <programlisting>
+ * #include "block/replication.h"
+ *
+ * typedef struct BDRVReplicationState {
+ * ReplicationState *rs;
+ * } BDRVReplicationState;
+ *
+ * static void replication_start(ReplicationState *rs, ReplicationMode mode,
+ * Error **errp);
+ * static void replication_do_checkpoint(ReplicationState *rs, Error **errp);
+ * static void replication_get_error(ReplicationState *rs, Error **errp);
+ * static void replication_stop(ReplicationState *rs, bool failover,
+ * Error **errp);
+ *
+ * static ReplicationOps replication_ops = {
+ * .start = replication_start,
+ * .checkpoint = replication_do_checkpoint,
+ * .get_error = replication_get_error,
+ * .stop = replication_stop,
+ * }
+ *
+ * static int replication_open(BlockDriverState *bs, QDict *options,
+ * int flags, Error **errp)
+ * {
+ * BDRVReplicationState *s = bs->opaque;
+ * s->rs = replication_new(bs, &replication_ops);
+ * return 0;
+ * }
+ *
+ * static void replication_close(BlockDriverState *bs)
+ * {
+ * BDRVReplicationState *s = bs->opaque;
+ * replication_remove(s->rs);
+ * }
+ *
+ * BlockDriver bdrv_replication = {
+ * .format_name = "replication",
+ * .instance_size = sizeof(BDRVReplicationState),
+ *
+ * .bdrv_open = replication_open,
+ * .bdrv_close = replication_close,
+ * };
+ *
+ * static void bdrv_replication_init(void)
+ * {
+ * bdrv_register(&bdrv_replication);
+ * }
+ *
+ * block_init(bdrv_replication_init);
+ * </programlisting>
+ * </example>
+ *
+ * We create an example about how to use replication interfaces in above.
+ * Then in migration, we can use replication_(start/stop/do_checkpoint/
+ * get_error)_all to handle all replication operations.
+ */
+
+/**
+ * ReplicationState:
+ * @opaque: opaque pointer value passed to this ReplicationState
+ * @ops: replication operation of this ReplicationState
+ * @node: node that we will insert into @replication_states QLIST
+ */
+struct ReplicationState {
+ void *opaque;
+ ReplicationOps *ops;
+ QLIST_ENTRY(ReplicationState) node;
+};
+
+/**
+ * ReplicationOps:
+ * @start: callback to start replication
+ * @stop: callback to stop replication
+ * @checkpoint: callback to do checkpoint
+ * @get_error: callback to check if error occurred during replication
+ */
+struct ReplicationOps {
+ void (*start)(ReplicationState *rs, ReplicationMode mode, Error **errp);
+ void (*stop)(ReplicationState *rs, bool failover, Error **errp);
+ void (*checkpoint)(ReplicationState *rs, Error **errp);
+ void (*get_error)(ReplicationState *rs, Error **errp);
+};
+
+/**
+ * replication_new:
+ * @opaque: opaque pointer value passed to ReplicationState
+ * @ops: replication operation of the new relevant ReplicationState
+ *
+ * Called to create a new ReplicationState instance, and then insert it
+ * into @replication_states QLIST
+ *
+ * Returns: the new ReplicationState instance
+ */
+ReplicationState *replication_new(void *opaque, ReplicationOps *ops);
+
+/**
+ * replication_remove:
+ * @rs: the ReplicationState instance to remove
+ *
+ * Called to remove a ReplicationState instance, and then delete it from
+ * @replication_states QLIST
+ */
+void replication_remove(ReplicationState *rs);
+
+/**
+ * replication_start_all:
+ * @mode: replication mode that could be "primary" or "secondary"
+ * @errp: returns an error if this function fails
+ *
+ * Start replication, called in migration/checkpoint thread
+ *
+ * Note: the caller of this function MUST make sure the VM is stopped
+ */
+void replication_start_all(ReplicationMode mode, Error **errp);
+
+/**
+ * replication_do_checkpoint_all:
+ * @errp: returns an error if this function fails
+ *
+ * This interface is called after all VM state has been transferred to the
+ * Secondary QEMU
+ */
+void replication_do_checkpoint_all(Error **errp);
+
+/**
+ * replication_get_error_all:
+ * @errp: returns an error if this function fails
+ *
+ * This interface is called to check if an error occurred during replication
+ */
+void replication_get_error_all(Error **errp);
+
+/**
+ * replication_stop_all:
+ * @failover: boolean value that indicates whether failover is needed
+ * @errp: returns an error if this function fails
+ *
+ * It is called on failover. The VM should be stopped before calling it if this
+ * API is used to shut down the guest or for anything other than failover.
+ */
+void replication_stop_all(bool failover, Error **errp);
+
+#endif /* REPLICATION_H */
diff --git a/include/block/reqlist.h b/include/block/reqlist.h
new file mode 100644
index 0000000000..5253497bae
--- /dev/null
+++ b/include/block/reqlist.h
@@ -0,0 +1,75 @@
+/*
+ * reqlist API
+ *
+ * Copyright (C) 2013 Proxmox Server Solutions
+ * Copyright (c) 2021 Virtuozzo International GmbH.
+ *
+ * Authors:
+ * Dietmar Maurer (dietmar@proxmox.com)
+ * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef REQLIST_H
+#define REQLIST_H
+
+#include "qemu/coroutine.h"
+
+/*
+ * The API is not thread-safe and is not meant to be. The struct is public so
+ * that it can be embedded in other structures and protected by third-party
+ * locks; see block/block-copy.c for an example.
+ */
+
+typedef struct BlockReq {
+ int64_t offset;
+ int64_t bytes;
+
+ CoQueue wait_queue; /* coroutines blocked on this req */
+ QLIST_ENTRY(BlockReq) list;
+} BlockReq;
+
+typedef QLIST_HEAD(, BlockReq) BlockReqList;
+
+/*
+ * Initialize a new request and add it to the list. The caller must make sure
+ * that there are no conflicting requests in the list.
+ */
+void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset,
+ int64_t bytes);
+/* Search for request in the list intersecting with @offset/@bytes area. */
+BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset,
+ int64_t bytes);
+
+/*
+ * If there are no intersecting requests return false. Otherwise, wait for the
+ * first found intersecting request to finish and return true.
+ *
+ * @lock is passed to qemu_co_queue_wait()
+ * False return value proves that lock was released at no point.
+ */
+bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset,
+ int64_t bytes, CoMutex *lock);
+
+/*
+ * Wait for all intersecting requests. This simply calls reqlist_wait_one() in
+ * a loop; the caller is responsible for not producing new requests in this
+ * region in parallel, otherwise reqlist_wait_all() may never return.
+ */
+void coroutine_fn reqlist_wait_all(BlockReqList *reqs, int64_t offset,
+ int64_t bytes, CoMutex *lock);
+
+/*
+ * Shrink the request and wake all waiting coroutines (some of which may no
+ * longer intersect with the shrunk request).
+ */
+void coroutine_fn reqlist_shrink_req(BlockReq *req, int64_t new_bytes);
+
+/*
+ * Remove request and wake all waiting coroutines. Do not release any memory.
+ */
+void coroutine_fn reqlist_remove_req(BlockReq *req);
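A minimal usage sketch, assuming a caller-owned BlockReqList and CoMutex as in block/block-copy.c (all names in the snippet are illustrative):

    #include "block/reqlist.h"

    static void coroutine_fn example_serialized_io(BlockReqList *reqs, CoMutex *lock,
                                                   int64_t offset, int64_t bytes)
    {
        BlockReq req;

        qemu_co_mutex_lock(lock);
        /* wait until no in-flight request intersects offset/bytes */
        while (reqlist_wait_one(reqs, offset, bytes, lock)) {
            /* lock was dropped and re-taken; re-check for new conflicts */
        }
        reqlist_init_req(reqs, &req, offset, bytes);
        qemu_co_mutex_unlock(lock);

        /* ... perform the I/O covering offset/bytes ... */

        qemu_co_mutex_lock(lock);
        reqlist_remove_req(&req);
        qemu_co_mutex_unlock(lock);
    }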
+
+#endif /* REQLIST_H */
diff --git a/include/block/snapshot.h b/include/block/snapshot.h
index f73d1094af..304cc6ea61 100644
--- a/include/block/snapshot.h
+++ b/include/block/snapshot.h
@@ -25,7 +25,8 @@
#ifndef SNAPSHOT_H
#define SNAPSHOT_H
-
+#include "block/graph-lock.h"
+#include "qapi/qapi-builtin-types.h"
#define SNAPSHOT_OPT_BASE "snapshot."
#define SNAPSHOT_OPT_ID "snapshot.id"
@@ -42,8 +43,16 @@ typedef struct QEMUSnapshotInfo {
uint32_t date_sec; /* UTC date of the snapshot */
uint32_t date_nsec;
uint64_t vm_clock_nsec; /* VM clock relative to boot */
+ uint64_t icount; /* record/replay step */
} QEMUSnapshotInfo;
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
int bdrv_snapshot_find(BlockDriverState *bs, QEMUSnapshotInfo *sn_info,
const char *name);
bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
@@ -51,19 +60,19 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
const char *name,
QEMUSnapshotInfo *sn_info,
Error **errp);
-int bdrv_can_snapshot(BlockDriverState *bs);
-int bdrv_snapshot_create(BlockDriverState *bs,
- QEMUSnapshotInfo *sn_info);
-int bdrv_snapshot_goto(BlockDriverState *bs,
- const char *snapshot_id,
- Error **errp);
-int bdrv_snapshot_delete(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
-int bdrv_snapshot_delete_by_id_or_name(BlockDriverState *bs,
- const char *id_or_name,
- Error **errp);
+
+int GRAPH_RDLOCK bdrv_can_snapshot(BlockDriverState *bs);
+
+int GRAPH_RDLOCK
+bdrv_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
+
+int GRAPH_UNLOCKED
+bdrv_snapshot_goto(BlockDriverState *bs, const char *snapshot_id, Error **errp);
+
+int GRAPH_RDLOCK
+bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id,
+ const char *name, Error **errp);
+
int bdrv_snapshot_list(BlockDriverState *bs,
QEMUSnapshotInfo **psn_info);
int bdrv_snapshot_load_tmp(BlockDriverState *bs,
@@ -75,21 +84,30 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs,
Error **errp);
-/* Group operations. All block drivers are involved.
- * These functions will properly handle dataplane (take aio_context_acquire
- * when appropriate for appropriate block drivers */
+/*
+ * Group operations. All block drivers are involved.
+ */
-bool bdrv_all_can_snapshot(BlockDriverState **first_bad_bs);
-int bdrv_all_delete_snapshot(const char *name, BlockDriverState **first_bsd_bs,
- Error **err);
-int bdrv_all_goto_snapshot(const char *name, BlockDriverState **first_bad_bs,
+bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
+ Error **errp);
+int bdrv_all_delete_snapshot(const char *name,
+ bool has_devices, strList *devices,
+ Error **errp);
+int bdrv_all_goto_snapshot(const char *name,
+ bool has_devices, strList *devices,
Error **errp);
-int bdrv_all_find_snapshot(const char *name, BlockDriverState **first_bad_bs);
+int bdrv_all_has_snapshot(const char *name,
+ bool has_devices, strList *devices,
+ Error **errp);
int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
BlockDriverState *vm_state_bs,
uint64_t vm_state_size,
- BlockDriverState **first_bad_bs);
+ bool has_devices,
+ strList *devices,
+ Error **errp);
-BlockDriverState *bdrv_all_find_vmstate_bs(void);
+BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
+ bool has_devices, strList *devices,
+ Error **errp);
#endif
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
index 7dd7d730a0..948ff5f30c 100644
--- a/include/block/thread-pool.h
+++ b/include/block/thread-pool.h
@@ -18,7 +18,9 @@
#ifndef QEMU_THREAD_POOL_H
#define QEMU_THREAD_POOL_H
-#include "block/block.h"
+#include "block/aio.h"
+
+#define THREAD_POOL_MAX_THREADS_DEFAULT 64
typedef int ThreadPoolFunc(void *opaque);
@@ -27,11 +29,15 @@ typedef struct ThreadPool ThreadPool;
ThreadPool *thread_pool_new(struct AioContext *ctx);
void thread_pool_free(ThreadPool *pool);
-BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
- ThreadPoolFunc *func, void *arg,
- BlockCompletionFunc *cb, void *opaque);
-int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
- ThreadPoolFunc *func, void *arg);
-void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
+/*
+ * thread_pool_submit* API: submit I/O requests in the thread's
+ * current AioContext.
+ */
+BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
+ BlockCompletionFunc *cb, void *opaque);
+int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
+void thread_pool_submit(ThreadPoolFunc *func, void *arg);
+
+void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
#endif
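
As an illustration (not code from this patch), the pool-less submission style
looks roughly like this; the fsync wrapper and its job struct are invented, and
the worker runs on a pool taken from the calling thread's current AioContext:

    typedef struct FlushJob {
        int fd;
    } FlushJob;

    /* Runs in a worker thread; only blocking, thread-safe work belongs here. */
    static int flush_worker(void *opaque)
    {
        FlushJob *job = opaque;
        return fsync(job->fd) < 0 ? -errno : 0;
    }

    /* Coroutine caller: yields until the worker completes. */
    static int coroutine_fn co_flush_fd(int fd)
    {
        FlushJob job = { .fd = fd };
        return thread_pool_submit_co(flush_worker, &job);
    }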
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
index e2fd0513c4..2355e8d9de 100644
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ -25,8 +25,9 @@
#ifndef THROTTLE_GROUPS_H
#define THROTTLE_GROUPS_H
+#include "qemu/coroutine.h"
#include "qemu/throttle.h"
-#include "block/block_int.h"
+#include "qom/object.h"
/* The ThrottleGroupMember structure indicates membership in a ThrottleGroup
* and holds related data.
@@ -36,25 +37,30 @@ typedef struct ThrottleGroupMember {
AioContext *aio_context;
/* throttled_reqs_lock protects the CoQueues for throttled requests. */
CoMutex throttled_reqs_lock;
- CoQueue throttled_reqs[2];
+ CoQueue throttled_reqs[THROTTLE_MAX];
/* Nonzero if the I/O limits are currently being ignored; generally
* it is zero. Accessed with atomic operations.
*/
unsigned int io_limits_disabled;
+ /* Number of pending throttle_group_restart_queue_entry() coroutines.
+ * Accessed with atomic operations.
+ */
+ unsigned int restart_pending;
+
/* The following fields are protected by the ThrottleGroup lock.
* See the ThrottleGroup documentation for details.
* throttle_state tells us if I/O limits are configured. */
ThrottleState *throttle_state;
ThrottleTimers throttle_timers;
- unsigned pending_reqs[2];
+ unsigned pending_reqs[THROTTLE_MAX];
QLIST_ENTRY(ThrottleGroupMember) round_robin;
} ThrottleGroupMember;
#define TYPE_THROTTLE_GROUP "throttle-group"
-#define THROTTLE_GROUP(obj) OBJECT_CHECK(ThrottleGroup, (obj), TYPE_THROTTLE_GROUP)
+OBJECT_DECLARE_SIMPLE_TYPE(ThrottleGroup, THROTTLE_GROUP)
const char *throttle_group_get_name(ThrottleGroupMember *tgm);
@@ -71,8 +77,8 @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm);
void throttle_group_restart_tgm(ThrottleGroupMember *tgm);
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
- unsigned int bytes,
- bool is_write);
+ int64_t bytes,
+ ThrottleDirection direction);
void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
AioContext *new_context);
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm);
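
As an illustration (not code from this patch), an I/O path would use the retyped
intercept roughly like this; the request function and do_preadv() are invented:

    static int coroutine_fn
    throttled_co_preadv(ThrottleGroupMember *tgm, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov)
    {
        /* May yield until the group's limits admit 'bytes' more read traffic. */
        throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_READ);

        return do_preadv(offset, bytes, qiov);    /* hypothetical backend call */
    }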
diff --git a/include/block/ufs.h b/include/block/ufs.h
new file mode 100644
index 0000000000..d61598b8f3
--- /dev/null
+++ b/include/block/ufs.h
@@ -0,0 +1,1090 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef BLOCK_UFS_H
+#define BLOCK_UFS_H
+
+#include "hw/registerfields.h"
+
+typedef struct QEMU_PACKED UfsReg {
+ uint32_t cap;
+ uint32_t rsvd0;
+ uint32_t ver;
+ uint32_t rsvd1;
+ uint32_t hcpid;
+ uint32_t hcmid;
+ uint32_t ahit;
+ uint32_t rsvd2;
+ uint32_t is;
+ uint32_t ie;
+ uint32_t rsvd3[2];
+ uint32_t hcs;
+ uint32_t hce;
+ uint32_t uecpa;
+ uint32_t uecdl;
+ uint32_t uecn;
+ uint32_t uect;
+ uint32_t uecdme;
+ uint32_t utriacr;
+ uint32_t utrlba;
+ uint32_t utrlbau;
+ uint32_t utrldbr;
+ uint32_t utrlclr;
+ uint32_t utrlrsr;
+ uint32_t utrlcnr;
+ uint32_t rsvd4[2];
+ uint32_t utmrlba;
+ uint32_t utmrlbau;
+ uint32_t utmrldbr;
+ uint32_t utmrlclr;
+ uint32_t utmrlrsr;
+ uint32_t rsvd5[3];
+ uint32_t uiccmd;
+ uint32_t ucmdarg1;
+ uint32_t ucmdarg2;
+ uint32_t ucmdarg3;
+ uint32_t rsvd6[4];
+ uint32_t rsvd7[4];
+ uint32_t rsvd8[16];
+ uint32_t ccap;
+} UfsReg;
+
+REG32(CAP, offsetof(UfsReg, cap))
+ FIELD(CAP, NUTRS, 0, 5)
+ FIELD(CAP, RTT, 8, 8)
+ FIELD(CAP, NUTMRS, 16, 3)
+ FIELD(CAP, AUTOH8, 23, 1)
+ FIELD(CAP, 64AS, 24, 1)
+ FIELD(CAP, OODDS, 25, 1)
+ FIELD(CAP, UICDMETMS, 26, 1)
+ FIELD(CAP, CS, 28, 1)
+REG32(VER, offsetof(UfsReg, ver))
+REG32(HCPID, offsetof(UfsReg, hcpid))
+REG32(HCMID, offsetof(UfsReg, hcmid))
+REG32(AHIT, offsetof(UfsReg, ahit))
+REG32(IS, offsetof(UfsReg, is))
+ FIELD(IS, UTRCS, 0, 1)
+ FIELD(IS, UDEPRI, 1, 1)
+ FIELD(IS, UE, 2, 1)
+ FIELD(IS, UTMS, 3, 1)
+ FIELD(IS, UPMS, 4, 1)
+ FIELD(IS, UHXS, 5, 1)
+ FIELD(IS, UHES, 6, 1)
+ FIELD(IS, ULLS, 7, 1)
+ FIELD(IS, ULSS, 8, 1)
+ FIELD(IS, UTMRCS, 9, 1)
+ FIELD(IS, UCCS, 10, 1)
+ FIELD(IS, DFES, 11, 1)
+ FIELD(IS, UTPES, 12, 1)
+ FIELD(IS, HCFES, 16, 1)
+ FIELD(IS, SBFES, 17, 1)
+ FIELD(IS, CEFES, 18, 1)
+REG32(IE, offsetof(UfsReg, ie))
+ FIELD(IE, UTRCE, 0, 1)
+ FIELD(IE, UDEPRIE, 1, 1)
+ FIELD(IE, UEE, 2, 1)
+ FIELD(IE, UTMSE, 3, 1)
+ FIELD(IE, UPMSE, 4, 1)
+ FIELD(IE, UHXSE, 5, 1)
+ FIELD(IE, UHESE, 6, 1)
+ FIELD(IE, ULLSE, 7, 1)
+ FIELD(IE, ULSSE, 8, 1)
+ FIELD(IE, UTMRCE, 9, 1)
+ FIELD(IE, UCCE, 10, 1)
+ FIELD(IE, DFEE, 11, 1)
+ FIELD(IE, UTPEE, 12, 1)
+ FIELD(IE, HCFEE, 16, 1)
+ FIELD(IE, SBFEE, 17, 1)
+ FIELD(IE, CEFEE, 18, 1)
+REG32(HCS, offsetof(UfsReg, hcs))
+ FIELD(HCS, DP, 0, 1)
+ FIELD(HCS, UTRLRDY, 1, 1)
+ FIELD(HCS, UTMRLRDY, 2, 1)
+ FIELD(HCS, UCRDY, 3, 1)
+ FIELD(HCS, UPMCRS, 8, 3)
+REG32(HCE, offsetof(UfsReg, hce))
+ FIELD(HCE, HCE, 0, 1)
+ FIELD(HCE, CGE, 1, 1)
+REG32(UECPA, offsetof(UfsReg, uecpa))
+REG32(UECDL, offsetof(UfsReg, uecdl))
+REG32(UECN, offsetof(UfsReg, uecn))
+REG32(UECT, offsetof(UfsReg, uect))
+REG32(UECDME, offsetof(UfsReg, uecdme))
+REG32(UTRIACR, offsetof(UfsReg, utriacr))
+REG32(UTRLBA, offsetof(UfsReg, utrlba))
+ FIELD(UTRLBA, UTRLBA, 10, 22)
+REG32(UTRLBAU, offsetof(UfsReg, utrlbau))
+REG32(UTRLDBR, offsetof(UfsReg, utrldbr))
+REG32(UTRLCLR, offsetof(UfsReg, utrlclr))
+REG32(UTRLRSR, offsetof(UfsReg, utrlrsr))
+REG32(UTRLCNR, offsetof(UfsReg, utrlcnr))
+REG32(UTMRLBA, offsetof(UfsReg, utmrlba))
+ FIELD(UTMRLBA, UTMRLBA, 10, 22)
+REG32(UTMRLBAU, offsetof(UfsReg, utmrlbau))
+REG32(UTMRLDBR, offsetof(UfsReg, utmrldbr))
+REG32(UTMRLCLR, offsetof(UfsReg, utmrlclr))
+REG32(UTMRLRSR, offsetof(UfsReg, utmrlrsr))
+REG32(UICCMD, offsetof(UfsReg, uiccmd))
+REG32(UCMDARG1, offsetof(UfsReg, ucmdarg1))
+REG32(UCMDARG2, offsetof(UfsReg, ucmdarg2))
+REG32(UCMDARG3, offsetof(UfsReg, ucmdarg3))
+REG32(CCAP, offsetof(UfsReg, ccap))
+
+#define UFS_INTR_MASK \
+ ((1 << R_IS_CEFES_SHIFT) | (1 << R_IS_SBFES_SHIFT) | \
+ (1 << R_IS_HCFES_SHIFT) | (1 << R_IS_UTPES_SHIFT) | \
+ (1 << R_IS_DFES_SHIFT) | (1 << R_IS_UCCS_SHIFT) | \
+ (1 << R_IS_UTMRCS_SHIFT) | (1 << R_IS_ULSS_SHIFT) | \
+ (1 << R_IS_ULLS_SHIFT) | (1 << R_IS_UHES_SHIFT) | \
+ (1 << R_IS_UHXS_SHIFT) | (1 << R_IS_UPMS_SHIFT) | \
+ (1 << R_IS_UTMS_SHIFT) | (1 << R_IS_UE_SHIFT) | \
+ (1 << R_IS_UDEPRI_SHIFT) | (1 << R_IS_UTRCS_SHIFT))
+
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT 24
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK 0xff
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE(dword0) \
+ ((be32_to_cpu(dword0) >> UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT) & \
+ UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK)
+
+#define UFS_UPIU_HEADER_QUERY_FUNC_SHIFT 16
+#define UFS_UPIU_HEADER_QUERY_FUNC_MASK 0xff
+#define UFS_UPIU_HEADER_QUERY_FUNC(dword1) \
+ ((be32_to_cpu(dword1) >> UFS_UPIU_HEADER_QUERY_FUNC_SHIFT) & \
+ UFS_UPIU_HEADER_QUERY_FUNC_MASK)
+
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT 0
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK 0xffff
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH(dword2) \
+ ((be32_to_cpu(dword2) >> UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT) & \
+ UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK)
+
+typedef struct QEMU_PACKED DeviceDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t device;
+ uint8_t device_class;
+ uint8_t device_sub_class;
+ uint8_t protocol;
+ uint8_t number_lu;
+ uint8_t number_wlu;
+ uint8_t boot_enable;
+ uint8_t descr_access_en;
+ uint8_t init_power_mode;
+ uint8_t high_priority_lun;
+ uint8_t secure_removal_type;
+ uint8_t security_lu;
+ uint8_t background_ops_term_lat;
+ uint8_t init_active_icc_level;
+ uint16_t spec_version;
+ uint16_t manufacture_date;
+ uint8_t manufacturer_name;
+ uint8_t product_name;
+ uint8_t serial_number;
+ uint8_t oem_id;
+ uint16_t manufacturer_id;
+ uint8_t ud_0_base_offset;
+ uint8_t ud_config_p_length;
+ uint8_t device_rtt_cap;
+ uint16_t periodic_rtc_update;
+ uint8_t ufs_features_support;
+ uint8_t ffu_timeout;
+ uint8_t queue_depth;
+ uint16_t device_version;
+ uint8_t num_secure_wp_area;
+ uint32_t psa_max_data_size;
+ uint8_t psa_state_timeout;
+ uint8_t product_revision_level;
+ uint8_t reserved[36];
+ uint32_t extended_ufs_features_support;
+ uint8_t write_booster_buffer_preserve_user_space_en;
+ uint8_t write_booster_buffer_type;
+ uint32_t num_shared_write_booster_buffer_alloc_units;
+} DeviceDescriptor;
+
+typedef struct QEMU_PACKED GeometryDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t media_technology;
+ uint8_t reserved;
+ uint64_t total_raw_device_capacity;
+ uint8_t max_number_lu;
+ uint32_t segment_size;
+ uint8_t allocation_unit_size;
+ uint8_t min_addr_block_size;
+ uint8_t optimal_read_block_size;
+ uint8_t optimal_write_block_size;
+ uint8_t max_in_buffer_size;
+ uint8_t max_out_buffer_size;
+ uint8_t rpmb_read_write_size;
+ uint8_t dynamic_capacity_resource_policy;
+ uint8_t data_ordering;
+ uint8_t max_context_id_number;
+ uint8_t sys_data_tag_unit_size;
+ uint8_t sys_data_tag_res_size;
+ uint8_t supported_sec_r_types;
+ uint16_t supported_memory_types;
+ uint32_t system_code_max_n_alloc_u;
+ uint16_t system_code_cap_adj_fac;
+ uint32_t non_persist_max_n_alloc_u;
+ uint16_t non_persist_cap_adj_fac;
+ uint32_t enhanced_1_max_n_alloc_u;
+ uint16_t enhanced_1_cap_adj_fac;
+ uint32_t enhanced_2_max_n_alloc_u;
+ uint16_t enhanced_2_cap_adj_fac;
+ uint32_t enhanced_3_max_n_alloc_u;
+ uint16_t enhanced_3_cap_adj_fac;
+ uint32_t enhanced_4_max_n_alloc_u;
+ uint16_t enhanced_4_cap_adj_fac;
+ uint32_t optimal_logical_block_size;
+ uint8_t reserved2[7];
+ uint32_t write_booster_buffer_max_n_alloc_units;
+ uint8_t device_max_write_booster_l_us;
+ uint8_t write_booster_buffer_cap_adj_fac;
+ uint8_t supported_write_booster_buffer_user_space_reduction_types;
+ uint8_t supported_write_booster_buffer_types;
+} GeometryDescriptor;
+
+#define UFS_GEOMETRY_CAPACITY_SHIFT 9
+
+typedef struct QEMU_PACKED UnitDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t unit_index;
+ uint8_t lu_enable;
+ uint8_t boot_lun_id;
+ uint8_t lu_write_protect;
+ uint8_t lu_queue_depth;
+ uint8_t psa_sensitive;
+ uint8_t memory_type;
+ uint8_t data_reliability;
+ uint8_t logical_block_size;
+ uint64_t logical_block_count;
+ uint32_t erase_block_size;
+ uint8_t provisioning_type;
+ uint64_t phy_mem_resource_count;
+ uint16_t context_capabilities;
+ uint8_t large_unit_granularity_m1;
+ uint8_t reserved[6];
+ uint32_t lu_num_write_booster_buffer_alloc_units;
+} UnitDescriptor;
+
+typedef struct QEMU_PACKED RpmbUnitDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t unit_index;
+ uint8_t lu_enable;
+ uint8_t boot_lun_id;
+ uint8_t lu_write_protect;
+ uint8_t lu_queue_depth;
+ uint8_t psa_sensitive;
+ uint8_t memory_type;
+ uint8_t reserved;
+ uint8_t logical_block_size;
+ uint64_t logical_block_count;
+ uint32_t erase_block_size;
+ uint8_t provisioning_type;
+ uint64_t phy_mem_resource_count;
+ uint8_t reserved2[3];
+} RpmbUnitDescriptor;
+
+typedef struct QEMU_PACKED PowerParametersDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint16_t active_icc_levels_vcc[16];
+ uint16_t active_icc_levels_vccq[16];
+ uint16_t active_icc_levels_vccq_2[16];
+} PowerParametersDescriptor;
+
+typedef struct QEMU_PACKED InterconnectDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint16_t bcd_unipro_version;
+ uint16_t bcd_mphy_version;
+} InterconnectDescriptor;
+
+typedef struct QEMU_PACKED StringDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint16_t UC[126];
+} StringDescriptor;
+
+typedef struct QEMU_PACKED DeviceHealthDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t pre_eol_info;
+ uint8_t device_life_time_est_a;
+ uint8_t device_life_time_est_b;
+ uint8_t vendor_prop_info[32];
+ uint32_t refresh_total_count;
+ uint32_t refresh_progress;
+} DeviceHealthDescriptor;
+
+typedef struct QEMU_PACKED Flags {
+ uint8_t reserved;
+ uint8_t device_init;
+ uint8_t permanent_wp_en;
+ uint8_t power_on_wp_en;
+ uint8_t background_ops_en;
+ uint8_t device_life_span_mode_en;
+ uint8_t purge_enable;
+ uint8_t refresh_enable;
+ uint8_t phy_resource_removal;
+ uint8_t busy_rtc;
+ uint8_t reserved2;
+ uint8_t permanently_disable_fw_update;
+ uint8_t reserved3[2];
+ uint8_t wb_en;
+ uint8_t wb_buffer_flush_en;
+ uint8_t wb_buffer_flush_during_hibernate;
+ uint8_t reserved4[2];
+} Flags;
+
+typedef struct Attributes {
+ uint8_t boot_lun_en;
+ uint8_t reserved;
+ uint8_t current_power_mode;
+ uint8_t active_icc_level;
+ uint8_t out_of_order_data_en;
+ uint8_t background_op_status;
+ uint8_t purge_status;
+ uint8_t max_data_in_size;
+ uint8_t max_data_out_size;
+ uint32_t dyn_cap_needed;
+ uint8_t ref_clk_freq;
+ uint8_t config_descr_lock;
+ uint8_t max_num_of_rtt;
+ uint16_t exception_event_control;
+ uint16_t exception_event_status;
+ uint32_t seconds_passed;
+ uint16_t context_conf;
+ uint8_t device_ffu_status;
+ uint8_t psa_state;
+ uint32_t psa_data_size;
+ uint8_t ref_clk_gating_wait_time;
+ uint8_t device_case_rough_temperature;
+ uint8_t device_too_high_temp_boundary;
+ uint8_t device_too_low_temp_boundary;
+ uint8_t throttling_status;
+ uint8_t wb_buffer_flush_status;
+ uint8_t available_wb_buffer_size;
+ uint8_t wb_buffer_life_time_est;
+ uint32_t current_wb_buffer_size;
+ uint8_t refresh_status;
+ uint8_t refresh_freq;
+ uint8_t refresh_unit;
+ uint8_t refresh_method;
+} Attributes;
+
+#define UFS_TRANSACTION_SPECIFIC_FIELD_SIZE 20
+#define UFS_MAX_QUERY_DATA_SIZE 256
+
+/* Command response result code */
+typedef enum CommandRespCode {
+ UFS_COMMAND_RESULT_SUCCESS = 0x00,
+ UFS_COMMAND_RESULT_FAIL = 0x01,
+} CommandRespCode;
+
+enum {
+ UFS_UPIU_FLAG_UNDERFLOW = 0x20,
+ UFS_UPIU_FLAG_OVERFLOW = 0x40,
+};
+
+typedef struct QEMU_PACKED UtpUpiuHeader {
+ uint8_t trans_type;
+ uint8_t flags;
+ uint8_t lun;
+ uint8_t task_tag;
+ uint8_t iid_cmd_set_type;
+ uint8_t query_func;
+ uint8_t response;
+ uint8_t scsi_status;
+ uint8_t ehs_len;
+ uint8_t device_inf;
+ uint16_t data_segment_length;
+} UtpUpiuHeader;
+
+/*
+ * The code below is copied from the Linux kernel
+ * ("include/uapi/scsi/scsi_bsg_ufs.h") and modified to fit the QEMU style.
+ */
+
+typedef struct QEMU_PACKED UtpUpiuQuery {
+ uint8_t opcode;
+ uint8_t idn;
+ uint8_t index;
+ uint8_t selector;
+ uint16_t reserved_osf;
+ uint16_t length;
+ uint32_t value;
+ uint32_t reserved[2];
+ /* EHS length should be 0. We don't have to worry about EHS area. */
+ uint8_t data[UFS_MAX_QUERY_DATA_SIZE];
+} UtpUpiuQuery;
+
+#define UFS_CDB_SIZE 16
+
+/*
+ * struct UtpUpiuCmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuCmd {
+ uint32_t exp_data_transfer_len;
+ uint8_t cdb[UFS_CDB_SIZE];
+} UtpUpiuCmd;
+
+/*
+ * struct UtpUpiuReq - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuReq {
+ UtpUpiuHeader header;
+ union {
+ UtpUpiuCmd sc;
+ UtpUpiuQuery qr;
+ };
+} UtpUpiuReq;
+
+/*
+ * The code below is copied from the Linux kernel ("include/ufs/ufshci.h") and
+ * modified to fit the QEMU style.
+ */
+
+enum {
+ UFS_PWR_OK = 0x0,
+ UFS_PWR_LOCAL = 0x01,
+ UFS_PWR_REMOTE = 0x02,
+ UFS_PWR_BUSY = 0x03,
+ UFS_PWR_ERROR_CAP = 0x04,
+ UFS_PWR_FATAL_ERROR = 0x05,
+};
+
+/* UIC Commands */
+enum uic_cmd_dme {
+ UFS_UIC_CMD_DME_GET = 0x01,
+ UFS_UIC_CMD_DME_SET = 0x02,
+ UFS_UIC_CMD_DME_PEER_GET = 0x03,
+ UFS_UIC_CMD_DME_PEER_SET = 0x04,
+ UFS_UIC_CMD_DME_POWERON = 0x10,
+ UFS_UIC_CMD_DME_POWEROFF = 0x11,
+ UFS_UIC_CMD_DME_ENABLE = 0x12,
+ UFS_UIC_CMD_DME_RESET = 0x14,
+ UFS_UIC_CMD_DME_END_PT_RST = 0x15,
+ UFS_UIC_CMD_DME_LINK_STARTUP = 0x16,
+ UFS_UIC_CMD_DME_HIBER_ENTER = 0x17,
+ UFS_UIC_CMD_DME_HIBER_EXIT = 0x18,
+ UFS_UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+ UFS_UIC_CMD_RESULT_SUCCESS = 0x00,
+ UFS_UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+ UFS_UIC_CMD_RESULT_FAILURE = 0x01,
+ UFS_UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+ UFS_UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+ UFS_UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+ UFS_UIC_CMD_RESULT_BAD_INDEX = 0x05,
+ UFS_UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+ UFS_UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+ UFS_UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+ UFS_UIC_CMD_RESULT_BUSY = 0x09,
+ UFS_UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define UFS_MASK_UIC_COMMAND_RESULT 0xFF
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+ UFS_UTP_CMD_TYPE_SCSI = 0x0,
+ UFS_UTP_CMD_TYPE_UFS = 0x1,
+ UFS_UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+/* To accommodate UFS2.0 required Command type */
+enum {
+ UFS_UTP_CMD_TYPE_UFS_STORAGE = 0x1,
+};
+
+enum {
+ UFS_UTP_SCSI_COMMAND = 0x00000000,
+ UFS_UTP_NATIVE_UFS_COMMAND = 0x10000000,
+ UFS_UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+ UFS_UTP_REQ_DESC_INT_CMD = 0x01000000,
+ UFS_UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+ UFS_UTP_NO_DATA_TRANSFER = 0x00000000,
+ UFS_UTP_HOST_TO_DEVICE = 0x02000000,
+ UFS_UTP_DEVICE_TO_HOST = 0x04000000,
+};
+
+/* Overall command status values */
+enum UtpOcsCodes {
+ UFS_OCS_SUCCESS = 0x0,
+ UFS_OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+ UFS_OCS_INVALID_PRDT_ATTR = 0x2,
+ UFS_OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+ UFS_OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+ UFS_OCS_PEER_COMM_FAILURE = 0x5,
+ UFS_OCS_ABORTED = 0x6,
+ UFS_OCS_FATAL_ERROR = 0x7,
+ UFS_OCS_DEVICE_FATAL_ERROR = 0x8,
+ UFS_OCS_INVALID_CRYPTO_CONFIG = 0x9,
+ UFS_OCS_GENERAL_CRYPTO_ERROR = 0xa,
+ UFS_OCS_INVALID_COMMAND_STATUS = 0xf,
+};
+
+enum {
+ UFS_MASK_OCS = 0x0F,
+};
+
+/*
+ * struct UfshcdSgEntry - UFSHCI PRD Entry
+ * @addr: Physical address; DW-0 and DW-1.
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+typedef struct QEMU_PACKED UfshcdSgEntry {
+ uint64_t addr;
+ uint32_t reserved;
+ uint32_t size;
+ /*
+ * followed by variant-specific fields if
+ * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined.
+ */
+} UfshcdSgEntry;
+
+/*
+ * struct RequestDescHeader - Descriptor Header common to both UTRD and UTMRD
+ * @dword0: Descriptor Header DW0
+ * @dword1: Descriptor Header DW1
+ * @dword2: Descriptor Header DW2
+ * @dword3: Descriptor Header DW3
+ */
+typedef struct QEMU_PACKED RequestDescHeader {
+ uint32_t dword_0;
+ uint32_t dword_1;
+ uint32_t dword_2;
+ uint32_t dword_3;
+} RequestDescHeader;
+
+/*
+ * struct UtpTransferReqDesc - UTP Transfer Request Descriptor (UTRD)
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+typedef struct QEMU_PACKED UtpTransferReqDesc {
+ /* DW 0-3 */
+ RequestDescHeader header;
+
+ /* DW 4-5*/
+ uint32_t command_desc_base_addr_lo;
+ uint32_t command_desc_base_addr_hi;
+
+ /* DW 6 */
+ uint16_t response_upiu_length;
+ uint16_t response_upiu_offset;
+
+ /* DW 7 */
+ uint16_t prd_table_length;
+ uint16_t prd_table_offset;
+} UtpTransferReqDesc;
+
+/*
+ * UTMRD structure.
+ */
+typedef struct QEMU_PACKED UtpTaskReqDesc {
+ /* DW 0-3 */
+ RequestDescHeader header;
+
+ /* DW 4-11 - Task request UPIU structure */
+ struct {
+ UtpUpiuHeader req_header;
+ uint32_t input_param1;
+ uint32_t input_param2;
+ uint32_t input_param3;
+ uint32_t reserved1[2];
+ } upiu_req;
+
+ /* DW 12-19 - Task Management Response UPIU structure */
+ struct {
+ UtpUpiuHeader rsp_header;
+ uint32_t output_param1;
+ uint32_t output_param2;
+ uint32_t reserved2[3];
+ } upiu_rsp;
+} UtpTaskReqDesc;
+
+/*
+ * The code below is copied from the Linux kernel ("include/ufs/ufs.h") and
+ * modified to fit the QEMU style.
+ */
+
+#define UFS_GENERAL_UPIU_REQUEST_SIZE (sizeof(UtpUpiuReq))
+#define UFS_QUERY_DESC_MAX_SIZE 255
+#define UFS_QUERY_DESC_MIN_SIZE 2
+#define UFS_QUERY_DESC_HDR_SIZE 2
+#define UFS_QUERY_OSF_SIZE (UFS_GENERAL_UPIU_REQUEST_SIZE - (sizeof(UtpUpiuHeader)))
+#define UFS_SENSE_SIZE 18
+
+/*
+ * UFS device may have standard LUs and LUN id could be from 0x00 to
+ * 0x7F. Standard LUs use "Peripheral Device Addressing Format".
+ * UFS device may also have the Well Known LUs (also referred as W-LU)
+ * which again could be from 0x00 to 0x7F. For W-LUs, the device only uses
+ * the "Extended Addressing Format" which means the W-LUNs would be
+ * from 0xc100 (SCSI_W_LUN_BASE) onwards.
+ * This means max. LUN number reported from UFS device could be 0xC17F.
+ */
+#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
+#define UFS_UPIU_WLUN_ID (1 << 7)
+
+/* WriteBooster buffer is available only for logical units 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
+/*
+ * WriteBooster buffer lifetime has a limit set by vendor.
+ * If it is over the limit, WriteBooster feature will be disabled.
+ */
+#define UFS_WB_EXCEED_LIFETIME 0x0B
+
+/*
+ * In UFS Spec, the Extra Header Segment (EHS) starts from byte 32 in UPIU
+ * request/response packet
+ */
+#define UFS_EHS_OFFSET_IN_RESPONSE 32
+
+/* Well known logical unit id in LUN field of UPIU */
+enum {
+ UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
+ UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
+ UFS_UPIU_BOOT_WLUN = 0xB0,
+ UFS_UPIU_RPMB_WLUN = 0xC4,
+};
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+ UFS_ABORT_TASK = 0x01,
+ UFS_ABORT_TASK_SET = 0x02,
+ UFS_CLEAR_TASK_SET = 0x04,
+ UFS_LOGICAL_RESET = 0x08,
+ UFS_QUERY_TASK = 0x80,
+ UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+ UFS_UPIU_TRANSACTION_NOP_OUT = 0x00,
+ UFS_UPIU_TRANSACTION_COMMAND = 0x01,
+ UFS_UPIU_TRANSACTION_DATA_OUT = 0x02,
+ UFS_UPIU_TRANSACTION_TASK_REQ = 0x04,
+ UFS_UPIU_TRANSACTION_QUERY_REQ = 0x16,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+ UFS_UPIU_TRANSACTION_NOP_IN = 0x20,
+ UFS_UPIU_TRANSACTION_RESPONSE = 0x21,
+ UFS_UPIU_TRANSACTION_DATA_IN = 0x22,
+ UFS_UPIU_TRANSACTION_TASK_RSP = 0x24,
+ UFS_UPIU_TRANSACTION_READY_XFER = 0x31,
+ UFS_UPIU_TRANSACTION_QUERY_RSP = 0x36,
+ UFS_UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
+};
+
+/* UPIU Read/Write flags */
+enum {
+ UFS_UPIU_CMD_FLAGS_NONE = 0x00,
+ UFS_UPIU_CMD_FLAGS_WRITE = 0x20,
+ UFS_UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+ UFS_UPIU_TASK_ATTR_SIMPLE = 0x00,
+ UFS_UPIU_TASK_ATTR_ORDERED = 0x01,
+ UFS_UPIU_TASK_ATTR_HEADQ = 0x02,
+ UFS_UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UPIU Query request function */
+enum {
+ UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+/* Flag idn for Query Requests */
+enum flag_idn {
+ UFS_QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ UFS_QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
+ UFS_QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
+ UFS_QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+ UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
+ UFS_QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
+ UFS_QUERY_FLAG_IDN_REFRESH_ENABLE = 0x07,
+ UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
+ UFS_QUERY_FLAG_IDN_BUSY_RTC = 0x09,
+ UFS_QUERY_FLAG_IDN_RESERVED3 = 0x0A,
+ UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ UFS_QUERY_FLAG_IDN_WB_EN = 0x0E,
+ UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
+ UFS_QUERY_FLAG_IDN_HPB_RESET = 0x11,
+ UFS_QUERY_FLAG_IDN_HPB_EN = 0x12,
+ UFS_QUERY_FLAG_IDN_COUNT,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+ UFS_QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
+ UFS_QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
+ UFS_QUERY_ATTR_IDN_POWER_MODE = 0x02,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
+ UFS_QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
+ UFS_QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+ UFS_QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
+ UFS_QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
+ UFS_QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
+ UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
+ UFS_QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
+ UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
+ UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
+ UFS_QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+ UFS_QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+ UFS_QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
+ UFS_QUERY_ATTR_IDN_CNTX_CONF = 0x10,
+ UFS_QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
+ UFS_QUERY_ATTR_IDN_RESERVED2 = 0x12,
+ UFS_QUERY_ATTR_IDN_RESERVED3 = 0x13,
+ UFS_QUERY_ATTR_IDN_FFU_STATUS = 0x14,
+ UFS_QUERY_ATTR_IDN_PSA_STATE = 0x15,
+ UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+ UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
+ UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
+ UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
+ UFS_QUERY_ATTR_IDN_THROTTLING_STATUS = 0x1B,
+ UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
+ UFS_QUERY_ATTR_IDN_REFRESH_STATUS = 0x2C,
+ UFS_QUERY_ATTR_IDN_REFRESH_FREQ = 0x2D,
+ UFS_QUERY_ATTR_IDN_REFRESH_UNIT = 0x2E,
+ UFS_QUERY_ATTR_IDN_COUNT,
+};
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+ UFS_QUERY_DESC_IDN_DEVICE = 0x0,
+ UFS_QUERY_DESC_IDN_CONFIGURATION = 0x1,
+ UFS_QUERY_DESC_IDN_UNIT = 0x2,
+ UFS_QUERY_DESC_IDN_RFU_0 = 0x3,
+ UFS_QUERY_DESC_IDN_INTERCONNECT = 0x4,
+ UFS_QUERY_DESC_IDN_STRING = 0x5,
+ UFS_QUERY_DESC_IDN_RFU_1 = 0x6,
+ UFS_QUERY_DESC_IDN_GEOMETRY = 0x7,
+ UFS_QUERY_DESC_IDN_POWER = 0x8,
+ UFS_QUERY_DESC_IDN_HEALTH = 0x9,
+ UFS_QUERY_DESC_IDN_MAX,
+};
+
+enum desc_header_offset {
+ UFS_QUERY_DESC_LENGTH_OFFSET = 0x00,
+ UFS_QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+};
+
+/* Unit descriptor parameters offsets in bytes */
+enum unit_desc_param {
+ UFS_UNIT_DESC_PARAM_LEN = 0x0,
+ UFS_UNIT_DESC_PARAM_TYPE = 0x1,
+ UFS_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ UFS_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ UFS_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ UFS_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ UFS_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ UFS_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+ UFS_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ UFS_UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
+ UFS_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ UFS_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ UFS_UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
+ UFS_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ UFS_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+ UFS_UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
+ UFS_UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UFS_UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
+ UFS_UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
+ UFS_UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
+ UFS_UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
+};
+
+/* RPMB Unit descriptor parameters offsets in bytes */
+enum rpmb_unit_desc_param {
+ UFS_RPMB_UNIT_DESC_PARAM_LEN = 0x0,
+ UFS_RPMB_UNIT_DESC_PARAM_TYPE = 0x1,
+ UFS_RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ UFS_RPMB_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ UFS_RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ UFS_RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ UFS_RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ UFS_RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+ UFS_RPMB_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION_EN = 0x9,
+ UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 0x13,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 0x14,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 0x15,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 0x16,
+ UFS_RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ UFS_RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+};
+
+/* Device descriptor parameters offsets in bytes */
+enum device_desc_param {
+ UFS_DEVICE_DESC_PARAM_LEN = 0x0,
+ UFS_DEVICE_DESC_PARAM_TYPE = 0x1,
+ UFS_DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
+ UFS_DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
+ UFS_DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
+ UFS_DEVICE_DESC_PARAM_PRTCL = 0x5,
+ UFS_DEVICE_DESC_PARAM_NUM_LU = 0x6,
+ UFS_DEVICE_DESC_PARAM_NUM_WLU = 0x7,
+ UFS_DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
+ UFS_DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
+ UFS_DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
+ UFS_DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
+ UFS_DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
+ UFS_DEVICE_DESC_PARAM_SEC_LU = 0xD,
+ UFS_DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
+ UFS_DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
+ UFS_DEVICE_DESC_PARAM_SPEC_VER = 0x10,
+ UFS_DEVICE_DESC_PARAM_MANF_DATE = 0x12,
+ UFS_DEVICE_DESC_PARAM_MANF_NAME = 0x14,
+ UFS_DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
+ UFS_DEVICE_DESC_PARAM_SN = 0x16,
+ UFS_DEVICE_DESC_PARAM_OEM_ID = 0x17,
+ UFS_DEVICE_DESC_PARAM_MANF_ID = 0x18,
+ UFS_DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
+ UFS_DEVICE_DESC_PARAM_UD_LEN = 0x1B,
+ UFS_DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
+ UFS_DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
+ UFS_DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
+ UFS_DEVICE_DESC_PARAM_FFU_TMT = 0x20,
+ UFS_DEVICE_DESC_PARAM_Q_DPTH = 0x21,
+ UFS_DEVICE_DESC_PARAM_DEV_VER = 0x22,
+ UFS_DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
+ UFS_DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
+ UFS_DEVICE_DESC_PARAM_PSA_TMT = 0x29,
+ UFS_DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ UFS_DEVICE_DESC_PARAM_HPB_VER = 0x40,
+ UFS_DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
+ UFS_DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ UFS_DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ UFS_DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ UFS_DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
+};
+
+/* Interconnect descriptor parameters offsets in bytes */
+enum interconnect_desc_param {
+ UFS_INTERCONNECT_DESC_PARAM_LEN = 0x0,
+ UFS_INTERCONNECT_DESC_PARAM_TYPE = 0x1,
+ UFS_INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2,
+ UFS_INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4,
+};
+
+/* Geometry descriptor parameters offsets in bytes */
+enum geometry_desc_param {
+ UFS_GEOMETRY_DESC_PARAM_LEN = 0x0,
+ UFS_GEOMETRY_DESC_PARAM_TYPE = 0x1,
+ UFS_GEOMETRY_DESC_PARAM_DEV_CAP = 0x4,
+ UFS_GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC,
+ UFS_GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD,
+ UFS_GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11,
+ UFS_GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12,
+ UFS_GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13,
+ UFS_GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14,
+ UFS_GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15,
+ UFS_GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16,
+ UFS_GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17,
+ UFS_GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18,
+ UFS_GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19,
+ UFS_GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A,
+ UFS_GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B,
+ UFS_GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C,
+ UFS_GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D,
+ UFS_GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E,
+ UFS_GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20,
+ UFS_GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24,
+ UFS_GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26,
+ UFS_GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A,
+ UFS_GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C,
+ UFS_GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30,
+ UFS_GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32,
+ UFS_GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36,
+ UFS_GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38,
+ UFS_GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C,
+ UFS_GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
+ UFS_GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
+ UFS_GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ UFS_GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
+ UFS_GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
+ UFS_GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
+ UFS_GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
+ UFS_GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ UFS_GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ UFS_GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ UFS_GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ UFS_GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
+};
+
+/* Health descriptor parameters offsets in bytes */
+enum health_desc_param {
+ UFS_HEALTH_DESC_PARAM_LEN = 0x0,
+ UFS_HEALTH_DESC_PARAM_TYPE = 0x1,
+ UFS_HEALTH_DESC_PARAM_EOL_INFO = 0x2,
+ UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
+ UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
+};
+
+/* WriteBooster buffer mode */
+enum {
+ UFS_WB_BUF_MODE_LU_DEDICATED = 0x0,
+ UFS_WB_BUF_MODE_SHARED = 0x1,
+};
+
+/*
+ * Logical Unit Write Protect
+ * 00h: LU not write protected
+ * 01h: LU write protected when fPowerOnWPEn =1
+ * 02h: LU permanently write protected when fPermanentWPEn =1
+ */
+enum ufs_lu_wp_type {
+ UFS_LU_NO_WP = 0x00,
+ UFS_LU_POWER_ON_WP = 0x01,
+ UFS_LU_PERM_WP = 0x02,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+ UFS_UPIU_QUERY_OPCODE_NOP = 0x0,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+ UFS_UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+ UFS_UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+ UFS_UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+ UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+ UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* Query response result code */
+typedef enum QueryRespCode {
+ UFS_QUERY_RESULT_SUCCESS = 0x00,
+ UFS_QUERY_RESULT_NOT_READABLE = 0xF6,
+ UFS_QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+ UFS_QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+ UFS_QUERY_RESULT_INVALID_LENGTH = 0xF9,
+ UFS_QUERY_RESULT_INVALID_VALUE = 0xFA,
+ UFS_QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+ UFS_QUERY_RESULT_INVALID_INDEX = 0xFC,
+ UFS_QUERY_RESULT_INVALID_IDN = 0xFD,
+ UFS_QUERY_RESULT_INVALID_OPCODE = 0xFE,
+ UFS_QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+} QueryRespCode;
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+ UFS_UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+ UFS_UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+ UFS_UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+/* Task management service response */
+enum {
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
+ UFS_UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
+};
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+ UFS_ACTIVE_PWR_MODE = 1,
+ UFS_SLEEP_PWR_MODE = 2,
+ UFS_POWERDOWN_PWR_MODE = 3,
+ UFS_DEEPSLEEP_PWR_MODE = 4,
+};
+
+/*
+ * struct UtpCmdRsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+typedef struct QEMU_PACKED UtpCmdRsp {
+ uint32_t residual_transfer_count;
+ uint32_t reserved[4];
+ uint16_t sense_data_len;
+ uint8_t sense_data[UFS_SENSE_SIZE];
+} UtpCmdRsp;
+
+/*
+ * struct UtpUpiuRsp - general upiu response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for scsi command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuRsp {
+ UtpUpiuHeader header;
+ union {
+ UtpCmdRsp sr;
+ UtpUpiuQuery qr;
+ };
+} UtpUpiuRsp;
+
+static inline void _ufs_check_size(void)
+{
+ QEMU_BUILD_BUG_ON(sizeof(UfsReg) != 0x104);
+ QEMU_BUILD_BUG_ON(sizeof(DeviceDescriptor) != 89);
+ QEMU_BUILD_BUG_ON(sizeof(GeometryDescriptor) != 87);
+ QEMU_BUILD_BUG_ON(sizeof(UnitDescriptor) != 45);
+ QEMU_BUILD_BUG_ON(sizeof(RpmbUnitDescriptor) != 35);
+ QEMU_BUILD_BUG_ON(sizeof(PowerParametersDescriptor) != 98);
+ QEMU_BUILD_BUG_ON(sizeof(InterconnectDescriptor) != 6);
+ QEMU_BUILD_BUG_ON(sizeof(StringDescriptor) != 254);
+ QEMU_BUILD_BUG_ON(sizeof(DeviceHealthDescriptor) != 45);
+ QEMU_BUILD_BUG_ON(sizeof(Flags) != 0x13);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuHeader) != 12);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuQuery) != 276);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuCmd) != 20);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuReq) != 288);
+ QEMU_BUILD_BUG_ON(sizeof(UfshcdSgEntry) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(RequestDescHeader) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(UtpTransferReqDesc) != 32);
+ QEMU_BUILD_BUG_ON(sizeof(UtpTaskReqDesc) != 80);
+ QEMU_BUILD_BUG_ON(sizeof(UtpCmdRsp) != 40);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuRsp) != 288);
+}
+#endif
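
As an illustration (not code from this patch), the REG32()/FIELD() definitions
above are consumed with the hw/registerfields.h helpers; the function and the
register values are invented:

    static void ufs_decode_example(uint32_t cap, uint32_t is)
    {
        /* 5-bit NUTRS field of CAP: UTP transfer request slots. */
        unsigned nutrs = FIELD_EX32(cap, CAP, NUTRS);

        /* Has a UTP transfer request completed? */
        bool utrcs = FIELD_EX32(is, IS, UTRCS);

        /* Only the architected interrupt bits may ever be raised. */
        uint32_t pending = is & UFS_INTR_MASK;

        (void)nutrs; (void)utrcs; (void)pending;
    }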
diff --git a/include/block/write-threshold.h b/include/block/write-threshold.h
index 234d2193e0..63d1583887 100644
--- a/include/block/write-threshold.h
+++ b/include/block/write-threshold.h
@@ -9,11 +9,10 @@
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
* See the COPYING.LIB file in the top-level directory.
*/
+
#ifndef BLOCK_WRITE_THRESHOLD_H
#define BLOCK_WRITE_THRESHOLD_H
-#include "qemu-common.h"
-
/*
* bdrv_write_threshold_set:
*
@@ -35,27 +34,12 @@ void bdrv_write_threshold_set(BlockDriverState *bs, uint64_t threshold_bytes);
uint64_t bdrv_write_threshold_get(const BlockDriverState *bs);
/*
- * bdrv_write_threshold_is_set
- *
- * Tell if a write threshold is set for a given BDS.
- */
-bool bdrv_write_threshold_is_set(const BlockDriverState *bs);
-
-/*
- * bdrv_write_threshold_exceeded
- *
- * Return the extent of a write request that exceeded the threshold,
- * or zero if the request is below the threshold.
- * Return zero also if the threshold was not set.
- *
- * NOTE: here we assume the following holds for each request this code
- * deals with:
- *
- * assert((req->offset + req->bytes) <= UINT64_MAX)
+ * bdrv_write_threshold_check_write
*
- * Please not there is *not* an actual C assert().
+ * Check whether the specified request exceeds the write threshold.
+ * If so, send a corresponding event and disable write threshold checking.
*/
-uint64_t bdrv_write_threshold_exceeded(const BlockDriverState *bs,
- const BdrvTrackedRequest *req);
+void bdrv_write_threshold_check_write(BlockDriverState *bs, int64_t offset,
+ int64_t bytes);
#endif
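
As an illustration (not code from this patch), the intended call pattern after
this simplification; the wrapper is invented and GiB comes from qemu/units.h:

    static void track_allocation(BlockDriverState *bs,
                                 int64_t req_offset, int64_t req_bytes)
    {
        if (!bdrv_write_threshold_get(bs)) {
            bdrv_write_threshold_set(bs, 10 * GiB);    /* arm once */
        }
        /* If the request crosses the threshold, this sends the event and
         * disarms the threshold, so it fires at most once per arming. */
        bdrv_write_threshold_check_write(bs, req_offset, req_bytes);
    }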
diff --git a/include/chardev/char-fd.h b/include/chardev/char-fd.h
index e7c2b176f9..9de0e440de 100644
--- a/include/chardev/char-fd.h
+++ b/include/chardev/char-fd.h
@@ -26,17 +26,20 @@
#include "io/channel.h"
#include "chardev/char.h"
+#include "qom/object.h"
-typedef struct FDChardev {
+struct FDChardev {
Chardev parent;
QIOChannel *ioc_in, *ioc_out;
int max_size;
-} FDChardev;
+};
+typedef struct FDChardev FDChardev;
#define TYPE_CHARDEV_FD "chardev-fd"
-#define FD_CHARDEV(obj) OBJECT_CHECK(FDChardev, (obj), TYPE_CHARDEV_FD)
+DECLARE_INSTANCE_CHECKER(FDChardev, FD_CHARDEV,
+ TYPE_CHARDEV_FD)
void qemu_chr_open_fd(Chardev *chr, int fd_in, int fd_out);
int qmp_chardev_open_file_source(char *src, int flags, Error **errp);
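
As an illustration (not code from this patch), DECLARE_INSTANCE_CHECKER()
provides the same FD_CHARDEV() cast that the old OBJECT_CHECK() wrapper did;
the caller below is invented:

    static void fd_chardev_example(Chardev *chr)
    {
        if (object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_FD)) {
            FDChardev *fc = FD_CHARDEV(chr);
            /* fc->ioc_in and fc->ioc_out are the underlying QIOChannels */
            (void)fc;
        }
    }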
diff --git a/include/chardev/char-fe.h b/include/chardev/char-fe.h
index 46c997d352..ecef182835 100644
--- a/include/chardev/char-fe.h
+++ b/include/chardev/char-fe.h
@@ -2,12 +2,17 @@
#define QEMU_CHAR_FE_H
#include "chardev/char.h"
+#include "qemu/main-loop.h"
-typedef void IOEventHandler(void *opaque, int event);
+typedef void IOEventHandler(void *opaque, QEMUChrEvent event);
typedef int BackendChangeHandler(void *opaque);
-/* This is the backend as seen by frontend, the actual backend is
- * Chardev */
+/**
+ * struct CharBackend - back end as seen by front end
+ * @fe_is_open: the front end is ready for IO
+ *
+ * The actual backend is Chardev
+ */
struct CharBackend {
Chardev *chr;
IOEventHandler *chr_event;
@@ -16,7 +21,7 @@ struct CharBackend {
BackendChangeHandler *chr_be_change;
void *opaque;
int tag;
- int fe_open;
+ bool fe_is_open;
};
/**
@@ -67,7 +72,7 @@ bool qemu_chr_fe_backend_connected(CharBackend *be);
bool qemu_chr_fe_backend_open(CharBackend *be);
/**
- * qemu_chr_fe_set_handlers:
+ * qemu_chr_fe_set_handlers_full:
* @b: a CharBackend
* @fd_can_read: callback to get the amount of data the frontend may
* receive
@@ -77,14 +82,30 @@ bool qemu_chr_fe_backend_open(CharBackend *be);
* is not supported and will not be attempted
* @opaque: an opaque pointer for the callbacks
* @context: a main loop context or NULL for the default
- * @set_open: whether to call qemu_chr_fe_set_open() implicitely when
+ * @set_open: whether to call qemu_chr_fe_set_open() implicitly when
+ * any of the handlers is non-NULL
+ * @sync_state: whether to issue event callback with updated state
*
* Set the front end char handlers. The front end takes the focus if
* any of the handler is non-NULL.
*
* Without associated Chardev, nothing is changed.
*/
+void qemu_chr_fe_set_handlers_full(CharBackend *b,
+ IOCanReadHandler *fd_can_read,
+ IOReadHandler *fd_read,
+ IOEventHandler *fd_event,
+ BackendChangeHandler *be_change,
+ void *opaque,
+ GMainContext *context,
+ bool set_open,
+ bool sync_state);
+
+/**
+ * qemu_chr_fe_set_handlers:
+ *
+ * Version of qemu_chr_fe_set_handlers_full() with sync_state = true.
+ */
void qemu_chr_fe_set_handlers(CharBackend *b,
IOCanReadHandler *fd_can_read,
IOReadHandler *fd_read,
@@ -121,7 +142,7 @@ void qemu_chr_fe_disconnect(CharBackend *be);
/**
* qemu_chr_fe_wait_connected:
*
- * Wait for characted backend to be connected, return < 0 on error or
+ * Wait for character backend to be connected, return < 0 on error or
* if no associated Chardev.
*/
int qemu_chr_fe_wait_connected(CharBackend *be, Error **errp);
@@ -139,12 +160,13 @@ void qemu_chr_fe_set_echo(CharBackend *be, bool echo);
/**
* qemu_chr_fe_set_open:
+ * @be: a CharBackend
+ * @is_open: the front end open status
*
- * Set character frontend open status. This is an indication that the
- * front end is ready (or not) to begin doing I/O.
- * Without associated Chardev, do nothing.
+ * This is an indication that the front end is ready (or not) to begin
+ * doing I/O. Without associated Chardev, do nothing.
*/
-void qemu_chr_fe_set_open(CharBackend *be, int fe_open);
+void qemu_chr_fe_set_open(CharBackend *be, bool is_open);
/**
* qemu_chr_fe_printf:
@@ -155,7 +177,24 @@ void qemu_chr_fe_set_open(CharBackend *be, int fe_open);
* Chardev.
*/
void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
+
+
+/**
+ * FEWatchFunc: a #GSourceFunc called when any conditions requested by
+ * qemu_chr_fe_add_watch() is satisfied.
+ * @do_not_use: depending on the underlying chardev, a GIOChannel or a
+ * QIOChannel. DO NOT USE!
+ * @cond: bitwise combination of conditions watched and satisfied
+ * before calling this callback.
+ * @data: user data passed at creation to qemu_chr_fe_add_watch(). Can
+ * be NULL.
+ *
+ * Returns: G_SOURCE_REMOVE if the GSource should be removed from the
+ * main loop, or G_SOURCE_CONTINUE to leave the GSource in
+ * the main loop.
+ */
+typedef gboolean (*FEWatchFunc)(void *do_not_use, GIOCondition condition, void *data);
/**
* qemu_chr_fe_add_watch:
@@ -168,10 +207,16 @@ void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...)
* is active; return the #GSource's tag. If it is disconnected,
* or without associated Chardev, return 0.
*
+ * Note that you are responsible for updating the front-end sources if
+ * you are switching the main context with qemu_chr_fe_set_handlers().
+ *
+ * Warning: DO NOT use the first callback argument (it may be either
+ * a GIOChannel or a QIOChannel, depending on the underlying chardev)
+ *
* Returns: the source tag
*/
guint qemu_chr_fe_add_watch(CharBackend *be, GIOCondition cond,
- GIOFunc func, void *user_data);
+ FEWatchFunc func, void *user_data);
/**
* qemu_chr_fe_write:
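
As an illustration (not code from this patch), a front end wires itself up with
the updated signatures roughly as follows; the device struct and handler bodies
are invented:

    typedef struct MyFrontend {
        CharBackend be;
    } MyFrontend;

    static int my_can_read(void *opaque)
    {
        return 64;                          /* willing to accept 64 bytes */
    }

    static void my_read(void *opaque, const uint8_t *buf, int size)
    {
        /* consume 'size' bytes from 'buf' */
    }

    static void my_event(void *opaque, QEMUChrEvent event)
    {
        if (event == CHR_EVENT_OPENED) {
            /* the backend (re)connected */
        }
    }

    static gboolean my_writable(void *do_not_use, GIOCondition cond, void *opaque)
    {
        /* flush buffered output with qemu_chr_fe_write() ... */
        return G_SOURCE_REMOVE;             /* one-shot watch */
    }

    static void my_frontend_init(MyFrontend *f)
    {
        qemu_chr_fe_set_handlers(&f->be, my_can_read, my_read, my_event,
                                 NULL, f, NULL, true);
        qemu_chr_fe_add_watch(&f->be, G_IO_OUT | G_IO_HUP, my_writable, f);
    }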
diff --git a/include/chardev/char-io.h b/include/chardev/char-io.h
index 9638da5100..ac379ea70e 100644
--- a/include/chardev/char-io.h
+++ b/include/chardev/char-io.h
@@ -24,9 +24,9 @@
#ifndef CHAR_IO_H
#define CHAR_IO_H
-#include "qemu-common.h"
#include "io/channel.h"
#include "chardev/char.h"
+#include "qemu/main-loop.h"
/* Can only be used for read */
GSource *io_add_watch_poll(Chardev *chr,
diff --git a/include/chardev/char-mux.h b/include/chardev/char-mux.h
deleted file mode 100644
index 1e13187767..0000000000
--- a/include/chardev/char-mux.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef CHAR_MUX_H
-#define CHAR_MUX_H
-
-#include "chardev/char.h"
-#include "chardev/char-fe.h"
-
-#define MAX_MUX 4
-#define MUX_BUFFER_SIZE 32 /* Must be a power of 2. */
-#define MUX_BUFFER_MASK (MUX_BUFFER_SIZE - 1)
-typedef struct MuxChardev {
- Chardev parent;
- CharBackend *backends[MAX_MUX];
- CharBackend chr;
- int focus;
- int mux_cnt;
- int term_got_escape;
- int max_size;
- /* Intermediate input buffer catches escape sequences even if the
- currently active device is not accepting any input - but only until it
- is full as well. */
- unsigned char buffer[MAX_MUX][MUX_BUFFER_SIZE];
- int prod[MAX_MUX];
- int cons[MAX_MUX];
- int timestamps;
-
- /* Protected by the Chardev chr_write_lock. */
- int linestart;
- int64_t timestamps_start;
-} MuxChardev;
-
-#define MUX_CHARDEV(obj) OBJECT_CHECK(MuxChardev, (obj), TYPE_CHARDEV_MUX)
-#define CHARDEV_IS_MUX(chr) \
- object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_MUX)
-
-void mux_chr_set_handlers(Chardev *chr, GMainContext *context);
-void mux_set_focus(Chardev *chr, int focus);
-void mux_chr_send_all_event(Chardev *chr, int event);
-
-#endif /* CHAR_MUX_H */
diff --git a/include/chardev/char-socket.h b/include/chardev/char-socket.h
new file mode 100644
index 0000000000..0708ca6fa9
--- /dev/null
+++ b/include/chardev/char-socket.h
@@ -0,0 +1,87 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef CHAR_SOCKET_H
+#define CHAR_SOCKET_H
+
+#include "io/channel-socket.h"
+#include "io/channel-tls.h"
+#include "io/net-listener.h"
+#include "chardev/char.h"
+#include "qom/object.h"
+
+#define TCP_MAX_FDS 16
+
+typedef struct {
+ char buf[21];
+ size_t buflen;
+} TCPChardevTelnetInit;
+
+typedef enum {
+ TCP_CHARDEV_STATE_DISCONNECTED,
+ TCP_CHARDEV_STATE_CONNECTING,
+ TCP_CHARDEV_STATE_CONNECTED,
+} TCPChardevState;
+
+typedef ChardevClass SocketChardevClass;
+
+struct SocketChardev {
+ Chardev parent;
+ QIOChannel *ioc; /* Client I/O channel */
+ QIOChannelSocket *sioc; /* Client master channel */
+ QIONetListener *listener;
+ GSource *hup_source;
+ QCryptoTLSCreds *tls_creds;
+ char *tls_authz;
+ TCPChardevState state;
+ int max_size;
+ int do_telnetopt;
+ int do_nodelay;
+ int *read_msgfds;
+ size_t read_msgfds_num;
+ int *write_msgfds;
+ size_t write_msgfds_num;
+ bool registered_yank;
+
+ SocketAddress *addr;
+ bool is_listen;
+ bool is_telnet;
+ bool is_tn3270;
+ GSource *telnet_source;
+ TCPChardevTelnetInit *telnet_init;
+
+ bool is_websock;
+
+ GSource *reconnect_timer;
+ int64_t reconnect_time;
+ bool connect_err_reported;
+
+ QIOTask *connect_task;
+};
+typedef struct SocketChardev SocketChardev;
+
+DECLARE_INSTANCE_CHECKER(SocketChardev, SOCKET_CHARDEV,
+ TYPE_CHARDEV_SOCKET)
+
+#endif /* CHAR_SOCKET_H */
diff --git a/include/chardev/char-win.h b/include/chardev/char-win.h
index fa59e9e423..485521469c 100644
--- a/include/chardev/char-win.h
+++ b/include/chardev/char-win.h
@@ -25,8 +25,9 @@
#define CHAR_WIN_H
#include "chardev/char.h"
+#include "qom/object.h"
-typedef struct {
+struct WinChardev {
Chardev parent;
bool keep_open; /* console do not close file */
@@ -36,13 +37,15 @@ typedef struct {
/* Protected by the Chardev chr_write_lock. */
OVERLAPPED osend;
-} WinChardev;
+};
+typedef struct WinChardev WinChardev;
#define NSENDBUF 2048
#define NRECVBUF 2048
#define TYPE_CHARDEV_WIN "chardev-win"
-#define WIN_CHARDEV(obj) OBJECT_CHECK(WinChardev, (obj), TYPE_CHARDEV_WIN)
+DECLARE_INSTANCE_CHECKER(WinChardev, WIN_CHARDEV,
+ TYPE_CHARDEV_WIN)
void win_chr_set_file(Chardev *chr, HANDLE file, bool keep_open);
int win_chr_serial_init(Chardev *chr, const char *filename, Error **errp);
diff --git a/include/chardev/char.h b/include/chardev/char.h
index 014566c3de..01df55f9e8 100644
--- a/include/chardev/char.h
+++ b/include/chardev/char.h
@@ -2,8 +2,8 @@
#define QEMU_CHAR_H
#include "qapi/qapi-types-char.h"
-#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
+#include "qemu/thread.h"
#include "qom/object.h"
#define IAC_EOR 239
@@ -65,6 +65,8 @@ struct Chardev {
char *filename;
int logfd;
int be_open;
+ /* used to coordinate the chardev-change special-case: */
+ bool handover_yank_instance;
GSource *gsource;
GMainContext *gcontext;
DECLARE_BITMAP(features, QEMU_CHAR_FEATURE_LAST);
@@ -73,6 +75,7 @@ struct Chardev {
/**
* qemu_chr_new_from_opts:
* @opts: see qemu-config.c for a list of valid options
+ * @context: the #GMainContext to be used at initialization time
*
* Create a new character backend from a QemuOpts list.
*
@@ -81,6 +84,7 @@ struct Chardev {
* or left untouched in case of help option
*/
Chardev *qemu_chr_new_from_opts(QemuOpts *opts,
+ GMainContext *context,
Error **errp);
/**
@@ -106,25 +110,29 @@ ChardevBackend *qemu_chr_parse_opts(QemuOpts *opts,
* qemu_chr_new:
* @label: the name of the backend
* @filename: the URI
+ * @context: the #GMainContext to be used at initialization time
*
* Create a new character backend from a URI.
* Do not implicitly initialize a monitor if the chardev is muxed.
*
* Returns: a new character backend
*/
-Chardev *qemu_chr_new(const char *label, const char *filename);
+Chardev *qemu_chr_new(const char *label, const char *filename,
+ GMainContext *context);
/**
* qemu_chr_new_mux_mon:
* @label: the name of the backend
* @filename: the URI
+ * @context: the #GMainContext to be used at initialization time
*
* Create a new character backend from a URI.
* Implicitly initialize a monitor if the chardev is muxed.
*
* Returns: a new character backend
*/
-Chardev *qemu_chr_new_mux_mon(const char *label, const char *filename);
+Chardev *qemu_chr_new_mux_mon(const char *label, const char *filename,
+ GMainContext *context);
/**
* qemu_chr_change:
@@ -146,6 +154,7 @@ void qemu_chr_cleanup(void);
* @label: the name of the backend
* @filename: the URI
* @permit_mux_mon: if chardev is muxed, initialize a monitor
+ * @context: the #GMainContext to be used at initialization time
*
* Create a new character backend from a URI.
* Character device communications are not written
@@ -154,7 +163,7 @@ void qemu_chr_cleanup(void);
* Returns: a new character backend
*/
Chardev *qemu_chr_new_noreplay(const char *label, const char *filename,
- bool permit_mux_mon);
+ bool permit_mux_mon, GMainContext *context);
/**
* qemu_chr_be_can_write:
@@ -177,7 +186,7 @@ int qemu_chr_be_can_write(Chardev *s);
* the caller should call @qemu_chr_be_can_write to determine how much data
* the front end can currently accept.
*/
-void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len);
+void qemu_chr_be_write(Chardev *s, const uint8_t *buf, int len);
/**
* qemu_chr_be_write_impl:
@@ -186,7 +195,7 @@ void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len);
*
* Implementation of back end writing. Used by replay module.
*/
-void qemu_chr_be_write_impl(Chardev *s, uint8_t *buf, int len);
+void qemu_chr_be_write_impl(Chardev *s, const uint8_t *buf, int len);
/**
* qemu_chr_be_update_read_handlers:
@@ -203,7 +212,7 @@ void qemu_chr_be_update_read_handlers(Chardev *s,
*
* Send an event from the back end to the front end.
*/
-void qemu_chr_be_event(Chardev *s, int event);
+void qemu_chr_be_event(Chardev *s, QEMUChrEvent event);
int qemu_chr_add_client(Chardev *s, int fd);
Chardev *qemu_chr_find(const char *name);
@@ -219,11 +228,7 @@ int qemu_chr_write(Chardev *s, const uint8_t *buf, int len, bool write_all);
int qemu_chr_wait_connected(Chardev *chr, Error **errp);
#define TYPE_CHARDEV "chardev"
-#define CHARDEV(obj) OBJECT_CHECK(Chardev, (obj), TYPE_CHARDEV)
-#define CHARDEV_CLASS(klass) \
- OBJECT_CLASS_CHECK(ChardevClass, (klass), TYPE_CHARDEV)
-#define CHARDEV_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ChardevClass, (obj), TYPE_CHARDEV)
+OBJECT_DECLARE_TYPE(Chardev, ChardevClass, CHARDEV)
#define TYPE_CHARDEV_NULL "chardev-null"
#define TYPE_CHARDEV_MUX "chardev-mux"
@@ -244,42 +249,75 @@ int qemu_chr_wait_connected(Chardev *chr, Error **errp);
#define CHARDEV_IS_PTY(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_PTY)
-typedef struct ChardevClass {
+struct ChardevClass {
ObjectClass parent_class;
bool internal; /* TODO: eventually use TYPE_USER_CREATABLE */
+ bool supports_yank;
+
+ /* parse command line options and populate QAPI @backend */
void (*parse)(QemuOpts *opts, ChardevBackend *backend, Error **errp);
+ /* called after construction, open/starts the backend */
void (*open)(Chardev *chr, ChardevBackend *backend,
bool *be_opened, Error **errp);
+ /* write buf to the backend */
int (*chr_write)(Chardev *s, const uint8_t *buf, int len);
+
+ /*
+ * Read from the backend (blocking). A typical front-end will instead rely
+ * on chr_can_read/chr_read being called when polling/looping.
+ */
int (*chr_sync_read)(Chardev *s, const uint8_t *buf, int len);
+
+ /* create a watch on the backend */
GSource *(*chr_add_watch)(Chardev *s, GIOCondition cond);
+
+ /* update the backend internal sources */
void (*chr_update_read_handler)(Chardev *s);
+
+ /* send an ioctl to the backend */
int (*chr_ioctl)(Chardev *s, int cmd, void *arg);
+
+ /* get ancillary-received fds during last read */
int (*get_msgfds)(Chardev *s, int* fds, int num);
+
+ /* set ancillary fds to be sent with next write */
int (*set_msgfds)(Chardev *s, int *fds, int num);
+
+ /* accept the given fd */
int (*chr_add_client)(Chardev *chr, int fd);
+
+ /* wait for a connection */
int (*chr_wait_connected)(Chardev *chr, Error **errp);
+
+ /* disconnect a connection */
void (*chr_disconnect)(Chardev *chr);
+
+ /* called by frontend when it can read */
void (*chr_accept_input)(Chardev *chr);
+
+ /* set terminal echo */
void (*chr_set_echo)(Chardev *chr, bool echo);
+
+ /* notify the backend of frontend open state */
void (*chr_set_fe_open)(Chardev *chr, int fe_open);
- void (*chr_be_event)(Chardev *s, int event);
- /* Return 0 if succeeded, 1 if failed */
- int (*chr_machine_done)(Chardev *chr);
-} ChardevClass;
+
+ /* handle various events */
+ void (*chr_be_event)(Chardev *s, QEMUChrEvent event);
+};
Chardev *qemu_chardev_new(const char *id, const char *typename,
- ChardevBackend *backend, Error **errp);
+ ChardevBackend *backend, GMainContext *context,
+ Error **errp);
extern int term_escape_char;
GSource *qemu_chr_timeout_add_ms(Chardev *chr, guint ms,
GSourceFunc func, void *private);
-/* console.c */
-void qemu_chr_parse_vc(QemuOpts *opts, ChardevBackend *backend, Error **errp);
+void suspend_mux_open(void);
+void resume_mux_open(void);
#endif
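
As an illustration (not code from this patch), a backend class fills in the
hooks documented above roughly like this; the type name and write behaviour are
invented, and type_init() registration is omitted:

    static int sink_chr_write(Chardev *chr, const uint8_t *buf, int len)
    {
        return len;                 /* swallow everything "successfully" */
    }

    static void sink_chr_class_init(ObjectClass *oc, void *data)
    {
        ChardevClass *cc = CHARDEV_CLASS(oc);

        cc->chr_write = sink_chr_write;
        /* most other hooks are optional and may stay NULL */
    }

    static const TypeInfo sink_chr_info = {
        .name          = "chardev-sink-example",
        .parent        = TYPE_CHARDEV,
        .instance_size = sizeof(Chardev),
        .class_init    = sink_chr_class_init,
    };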
diff --git a/include/chardev/spice.h b/include/chardev/spice.h
new file mode 100644
index 0000000000..58e5b727e9
--- /dev/null
+++ b/include/chardev/spice.h
@@ -0,0 +1,26 @@
+#ifndef CHARDEV_SPICE_H
+#define CHARDEV_SPICE_H
+
+#include <spice.h>
+#include "chardev/char-fe.h"
+#include "qom/object.h"
+
+struct SpiceChardev {
+ Chardev parent;
+
+ SpiceCharDeviceInstance sin;
+ bool active;
+ bool blocked;
+ const uint8_t *datapos;
+ int datalen;
+};
+typedef struct SpiceChardev SpiceChardev;
+
+#define TYPE_CHARDEV_SPICE "chardev-spice"
+#define TYPE_CHARDEV_SPICEVMC "chardev-spicevmc"
+#define TYPE_CHARDEV_SPICEPORT "chardev-spiceport"
+
+DECLARE_INSTANCE_CHECKER(SpiceChardev, SPICE_CHARDEV,
+ TYPE_CHARDEV_SPICE)
+
+#endif
diff --git a/include/crypto/aes-round.h b/include/crypto/aes-round.h
new file mode 100644
index 0000000000..854fb0966a
--- /dev/null
+++ b/include/crypto/aes-round.h
@@ -0,0 +1,164 @@
+/*
+ * AES round fragments, generic version
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_AES_ROUND_H
+#define CRYPTO_AES_ROUND_H
+
+/* Hosts with acceleration will usually need a 16-byte vector type. */
+typedef uint8_t AESStateVec __attribute__((vector_size(16)));
+
+typedef union {
+ uint8_t b[16];
+ uint32_t w[4];
+ uint64_t d[2];
+ AESStateVec v;
+} AESState;
+
+#include "host/crypto/aes-round.h"
+
+/*
+ * Perform MixColumns.
+ */
+
+void aesenc_MC_gen(AESState *ret, const AESState *st);
+void aesenc_MC_genrev(AESState *ret, const AESState *st);
+
+static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesenc_MC_accel(r, st, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesenc_MC_gen(r, st);
+ } else {
+ aesenc_MC_genrev(r, st);
+ }
+}
+
+/*
+ * Perform SubBytes + ShiftRows + AddRoundKey.
+ */
+
+void aesenc_SB_SR_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesenc_SB_SR_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesenc_SB_SR_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesenc_SB_SR_AK_gen(r, st, rk);
+ } else {
+ aesenc_SB_SR_AK_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+
+void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesenc_SB_SR_MC_AK_gen(r, st, rk);
+ } else {
+ aesenc_SB_SR_MC_AK_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform InvMixColumns.
+ */
+
+void aesdec_IMC_gen(AESState *ret, const AESState *st);
+void aesdec_IMC_genrev(AESState *ret, const AESState *st);
+
+static inline void aesdec_IMC(AESState *r, const AESState *st, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_IMC_accel(r, st, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_IMC_gen(r, st);
+ } else {
+ aesdec_IMC_genrev(r, st);
+ }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_AK_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_AK_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + AddRoundKey + InvMixColumns.
+ */
+
+void aesdec_ISB_ISR_AK_IMC_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_AK_IMC_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_AK_IMC(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_AK_IMC_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_AK_IMC_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_AK_IMC_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + InvMixColumns + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_IMC_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_IMC_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_IMC_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_IMC_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_IMC_AK_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_IMC_AK_genrev(r, st, rk);
+ }
+}
+
+#endif /* CRYPTO_AES_ROUND_H */
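As a usage sketch (assuming the round keys were expanded elsewhere by the caller), one middle AES encryption round dispatches through the helpers above like this:

#include "qemu/osdep.h"
#include "crypto/aes-round.h"

/* One full middle round of AES encryption on a little-endian state. */
static void aes_middle_round_example(AESState *state, const AESState *rk)
{
    AESState out;

    /* SubBytes + ShiftRows + MixColumns + AddRoundKey; 'false' selects
     * the little-endian state layout, and the helper takes the
     * accelerated path when HAVE_AES_ACCEL is true. */
    aesenc_SB_SR_MC_AK(&out, state, rk, false);
    *state = out;
}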
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 12fb321b89..381f24c902 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -16,52 +16,25 @@ typedef struct aes_key_st AES_KEY;
#define AES_set_decrypt_key QEMU_AES_set_decrypt_key
#define AES_encrypt QEMU_AES_encrypt
#define AES_decrypt QEMU_AES_decrypt
-#define AES_cbc_encrypt QEMU_AES_cbc_encrypt
int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY *key);
+ AES_KEY *key);
int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY *key);
+ AES_KEY *key);
void AES_encrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY *key);
+ const AES_KEY *key);
void AES_decrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY *key);
-void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
- const unsigned long length, const AES_KEY *key,
- unsigned char *ivec, const int enc);
+ const AES_KEY *key);
extern const uint8_t AES_sbox[256];
extern const uint8_t AES_isbox[256];
-/* AES ShiftRows and InvShiftRows */
-extern const uint8_t AES_shifts[16];
-extern const uint8_t AES_ishifts[16];
-
-/* AES InvMixColumns */
-/* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */
-/* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */
-/* AES_imc[x][2] = [x].[0d, 0b, 0e, 09]; */
-/* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */
-extern const uint32_t AES_imc[256][4];
-
/*
AES_Te0[x] = S [x].[02, 01, 01, 03];
-AES_Te1[x] = S [x].[03, 02, 01, 01];
-AES_Te2[x] = S [x].[01, 03, 02, 01];
-AES_Te3[x] = S [x].[01, 01, 03, 02];
-AES_Te4[x] = S [x].[01, 01, 01, 01];
-
AES_Td0[x] = Si[x].[0e, 09, 0d, 0b];
-AES_Td1[x] = Si[x].[0b, 0e, 09, 0d];
-AES_Td2[x] = Si[x].[0d, 0b, 0e, 09];
-AES_Td3[x] = Si[x].[09, 0d, 0b, 0e];
-AES_Td4[x] = Si[x].[01, 01, 01, 01];
*/
-extern const uint32_t AES_Te0[256], AES_Te1[256], AES_Te2[256],
- AES_Te3[256], AES_Te4[256];
-extern const uint32_t AES_Td0[256], AES_Td1[256], AES_Td2[256],
- AES_Td3[256], AES_Td4[256];
+extern const uint32_t AES_Te0[256], AES_Td0[256];
#endif
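A minimal sketch of the surviving public API after this trim; the helper name is illustrative:

#include "qemu/osdep.h"
#include "crypto/aes.h"

/* Encrypt a single 16-byte block with AES-128. */
static int aes128_encrypt_block_example(const unsigned char key[16],
                                        const unsigned char in[16],
                                        unsigned char out[16])
{
    AES_KEY ks;

    if (AES_set_encrypt_key(key, 128, &ks) != 0) {
        return -1;
    }
    AES_encrypt(in, out, &ks);
    return 0;
}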
diff --git a/include/crypto/afsplit.h b/include/crypto/afsplit.h
index 7dd21f0a67..4894d64330 100644
--- a/include/crypto/afsplit.h
+++ b/include/crypto/afsplit.h
@@ -3,19 +3,18 @@
*
* Copyright (c) 2015-2016 Red Hat, Inc.
*
- * This library is free software; you can redistribute it and/or
+ * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ * General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef QCRYPTO_AFSPLIT_H
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
new file mode 100644
index 0000000000..8756105f22
--- /dev/null
+++ b/include/crypto/akcipher.h
@@ -0,0 +1,179 @@
+/*
+ * QEMU Crypto asymmetric algorithms
+ *
+ * Copyright (c) 2022 Bytedance
+ * Author: zhenwei pi <pizhenwei@bytedance.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QCRYPTO_AKCIPHER_H
+#define QCRYPTO_AKCIPHER_H
+
+#include "qapi/qapi-types-crypto.h"
+
+typedef struct QCryptoAkCipher QCryptoAkCipher;
+
+/**
+ * qcrypto_akcipher_supports:
+ * @opts: the asymmetric key algorithm and related options
+ *
+ * Determine if the asymmetric key cipher described by @opts is
+ * supported by the current build configuration
+ *
+ * Returns: true if it is supported, false otherwise.
+ */
+bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts);
+
+/**
+ * qcrypto_akcipher_new:
+ * @opts: specify the algorithm and the related arguments
+ * @type: private or public key type
+ * @key: buffer to store the key
+ * @key_len: the length of key buffer
+ * @errp: error pointer
+ *
+ * Create akcipher context
+ *
+ * Returns: On success, a new QCryptoAkCipher initialized with @opts
+ * is created and returned, otherwise NULL is returned.
+ */
+
+QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts,
+ QCryptoAkCipherKeyType type,
+ const uint8_t *key, size_t key_len,
+ Error **errp);
+
+/**
+ * qcrypto_akcipher_encrypt:
+ * @akcipher: akcipher context
+ * @in: plaintext pending to be encrypted
+ * @in_len: length of plaintext, less or equal to the size reported
+ * by a call to qcrypto_akcipher_max_plaintext_len()
+ * @out: buffer to store the ciphertext
+ * @out_len: length of ciphertext, less or equal to the size reported
+ * by a call to qcrypto_akcipher_max_ciphertext_len()
+ * @errp: error pointer
+ *
+ * Encrypt @in and write ciphertext into @out
+ *
+ * Returns: length of the ciphertext if encryption succeeds,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_encrypt(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ void *out, size_t out_len, Error **errp);
+
+/**
+ * qcrypto_akcipher_decrypt:
+ * @akcipher: akcipher context
+ * @in: ciphertext to be decrypted
+ * @in_len: the length of ciphertext, less or equal to the size reported
+ * by a call to qcrypto_akcipher_max_ciphertext_len()
+ * @out: buffer to store the plaintext
+ * @out_len: length of the plaintext buffer, less or equal to the size
+ * reported by a call to qcrypto_akcipher_max_plaintext_len()
+ * @errp: error pointer
+ *
+ * Decrypt @in and write plaintext into @out
+ *
+ * Returns: length of the plaintext if decryption succeeds,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_decrypt(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ void *out, size_t out_len, Error **errp);
+
+/**
+ * qcrypto_akcipher_sign:
+ * @akcipher: akcipher context
+ * @in: data to be signed
+ * @in_len: the length of data, less or equal to the size reported
+ * by a call to qcrypto_akcipher_max_dgst_len()
+ * @out: buffer to store the signature
+ * @out_len: length of the signature buffer, less or equal to the size
+ * reported by a call to qcrypto_akcipher_max_signature_len()
+ * @errp: error pointer
+ *
+ * Generate signature for @in, write into @out
+ *
+ * Returns: length of the signature if signing succeeds,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_sign(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ void *out, size_t out_len, Error **errp);
+
+/**
+ * qcrypto_akcipher_verify:
+ * @akcipher: akcipher context
+ * @in: pointer to the signature
+ * @in_len: length of signature, less or equal to the size reported
+ * by a call to qcrypto_akcipher_max_signature_len()
+ * @in2: pointer to original data
+ * @in2_len: the length of original data, less or equal to the size
+ * reported by a call to qcrypto_akcipher_max_dgst_len()
+ * @errp: error pointer
+ *
+ * Verify whether the signature @in matches the original data @in2
+ *
+ * Returns: 0 if the verification succeeds,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_verify(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ const void *in2, size_t in2_len, Error **errp);
+
+int qcrypto_akcipher_max_plaintext_len(QCryptoAkCipher *akcipher);
+
+int qcrypto_akcipher_max_ciphertext_len(QCryptoAkCipher *akcipher);
+
+int qcrypto_akcipher_max_signature_len(QCryptoAkCipher *akcipher);
+
+int qcrypto_akcipher_max_dgst_len(QCryptoAkCipher *akcipher);
+
+/**
+ * qcrypto_akcipher_free:
+ * @akcipher: akcipher context
+ *
+ * Free the akcipher context
+ *
+ */
+void qcrypto_akcipher_free(QCryptoAkCipher *akcipher);
+
+/**
+ * qcrypto_akcipher_export_p8info:
+ * @opts: the options of the akcipher to be exported.
+ * @key: the original key of the akcipher to be exported.
+ * @keylen: length of @key
+ * @dst: output parameter; if the export succeeds, *dst is set to the
+ * PKCS#8 encoded private key, which the caller MUST free with
+ * g_free after use.
+ * @dst_len: output parameter, indicates the length of the PKCS#8 encoded
+ * key.
+ *
+ * Export the akcipher into DER encoded PKCS#8 private key info; @key is
+ * expected to hold a valid asymmetric PRIVATE key.
+ *
+ * Returns: 0 on success, otherwise -1 is returned.
+ */
+int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts,
+ uint8_t *key, size_t keylen,
+ uint8_t **dst, size_t *dst_len,
+ Error **errp);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoAkCipher, qcrypto_akcipher_free)
+
+#endif /* QCRYPTO_AKCIPHER_H */
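A hedged usage sketch of the new akcipher API; the key-type enum constant is assumed to be the QAPI-generated value from qapi/crypto.json:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/akcipher.h"

/* Encrypt msg with an already-parsed public key (illustrative only). */
static int akcipher_encrypt_example(const QCryptoAkCipherOptions *opts,
                                    const uint8_t *pubkey, size_t pubkey_len,
                                    const void *msg, size_t msg_len,
                                    void *out, size_t out_len, Error **errp)
{
    g_autoptr(QCryptoAkCipher) ak = NULL;
    int max;

    ak = qcrypto_akcipher_new(opts, QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC,
                              pubkey, pubkey_len, errp);
    if (!ak) {
        return -1;
    }
    max = qcrypto_akcipher_max_plaintext_len(ak);
    if (max < 0 || msg_len > (size_t)max) {
        error_setg(errp, "plaintext too large for this key");
        return -1;
    }
    return qcrypto_akcipher_encrypt(ak, msg, msg_len, out, out_len, errp);
}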
diff --git a/include/crypto/block.h b/include/crypto/block.h
index e729d5bd66..92e823c9f2 100644
--- a/include/crypto/block.h
+++ b/include/crypto/block.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -29,24 +29,24 @@ typedef struct QCryptoBlock QCryptoBlock;
/* See also QCryptoBlockFormat, QCryptoBlockCreateOptions
* and QCryptoBlockOpenOptions in qapi/crypto.json */
-typedef ssize_t (*QCryptoBlockReadFunc)(QCryptoBlock *block,
- size_t offset,
- uint8_t *buf,
- size_t buflen,
- void *opaque,
- Error **errp);
+typedef int (*QCryptoBlockReadFunc)(QCryptoBlock *block,
+ size_t offset,
+ uint8_t *buf,
+ size_t buflen,
+ void *opaque,
+ Error **errp);
-typedef ssize_t (*QCryptoBlockInitFunc)(QCryptoBlock *block,
- size_t headerlen,
- void *opaque,
- Error **errp);
+typedef int (*QCryptoBlockInitFunc)(QCryptoBlock *block,
+ size_t headerlen,
+ void *opaque,
+ Error **errp);
-typedef ssize_t (*QCryptoBlockWriteFunc)(QCryptoBlock *block,
- size_t offset,
- const uint8_t *buf,
- size_t buflen,
- void *opaque,
- Error **errp);
+typedef int (*QCryptoBlockWriteFunc)(QCryptoBlock *block,
+ size_t offset,
+ const uint8_t *buf,
+ size_t buflen,
+ void *opaque,
+ Error **errp);
/**
* qcrypto_block_has_format:
@@ -66,6 +66,7 @@ bool qcrypto_block_has_format(QCryptoBlockFormat format,
typedef enum {
QCRYPTO_BLOCK_OPEN_NO_IO = (1 << 0),
+ QCRYPTO_BLOCK_OPEN_DETACHED = (1 << 1),
} QCryptoBlockOpenFlags;
/**
@@ -95,6 +96,10 @@ typedef enum {
* metadata such as the payload offset. There will be
* no cipher or ivgen objects available.
*
+ * If @flags contains QCRYPTO_BLOCK_OPEN_DETACHED then
+ * the open process will be optimized to skip the LUKS
+ * payload overlap check.
+ *
* If any part of initializing the encryption context
* fails an error will be returned. This could be due
* to the volume being in the wrong format, a cipher
@@ -111,6 +116,10 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options,
size_t n_threads,
Error **errp);
+typedef enum {
+ QCRYPTO_BLOCK_CREATE_DETACHED = (1 << 0),
+} QCryptoBlockCreateFlags;
+
/**
* qcrypto_block_create:
* @options: the encryption options
@@ -118,6 +127,7 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options,
* @initfunc: callback for initializing volume header
* @writefunc: callback for writing data to the volume header
* @opaque: data to pass to @initfunc and @writefunc
+ * @flags: bitmask of QCryptoBlockCreateFlags values
* @errp: pointer to a NULL-initialized error object
*
* Create a new block encryption object for initializing
@@ -129,6 +139,11 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options,
* generating new master keys, etc as required. Any existing
* data present on the volume will be irrevocably destroyed.
*
+ * If @flags contains QCRYPTO_BLOCK_CREATE_DETACHED then
+ * the create process will set the payload_offset_sector to 0
+ * to specify the starting point for the read/write of a
+ * detached LUKS header image.
+ *
* If any part of initializing the encryption context
* fails an error will be returned. This could be due
* to the volume being in the wrong format, a cipher
@@ -142,8 +157,51 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options,
QCryptoBlockInitFunc initfunc,
QCryptoBlockWriteFunc writefunc,
void *opaque,
+ unsigned int flags,
Error **errp);
+/**
+ * qcrypto_block_amend_options:
+ * @block: the block encryption object
+ *
+ * @readfunc: callback for reading data from the volume header
+ * @writefunc: callback for writing data to the volume header
+ * @opaque: data to pass to @readfunc and @writefunc
+ * @options: the new/amended encryption options
+ * @force: hint for the driver to allow unsafe operation
+ * @errp: error pointer
+ *
+ * Changes the crypto options of the encryption format
+ *
+ */
+int qcrypto_block_amend_options(QCryptoBlock *block,
+ QCryptoBlockReadFunc readfunc,
+ QCryptoBlockWriteFunc writefunc,
+ void *opaque,
+ QCryptoBlockAmendOptions *options,
+ bool force,
+ Error **errp);
+
+
+/**
+ * qcrypto_block_calculate_payload_offset:
+ * @create_opts: the encryption options
+ * @optprefix: name prefix for options
+ * @len: output for number of header bytes before payload
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Calculate the number of header bytes before the payload in an encrypted
+ * storage volume. The header is an area before the payload that is reserved
+ * for encryption metadata.
+ *
+ * Returns: true on success, false on error
+ */
+bool
+qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts,
+ const char *optprefix,
+ size_t *len,
+ Error **errp);
+
/**
* qcrypto_block_get_info:
@@ -268,4 +326,6 @@ uint64_t qcrypto_block_get_sector_size(QCryptoBlock *block);
*/
void qcrypto_block_free(QCryptoBlock *block);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlock, qcrypto_block_free)
+
#endif /* QCRYPTO_BLOCK_H */
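For context, a sketch of a read callback matching the new int-returning typedef; MyVolume and my_volume_pread() are placeholders for the caller's own storage layer:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/block.h"

typedef struct MyVolume MyVolume;                 /* hypothetical */
int my_volume_pread(MyVolume *v, size_t off,
                    uint8_t *buf, size_t len);    /* hypothetical */

static int my_block_read(QCryptoBlock *block, size_t offset,
                         uint8_t *buf, size_t buflen,
                         void *opaque, Error **errp)
{
    MyVolume *vol = opaque;

    if (my_volume_pread(vol, offset, buf, buflen) < 0) {
        error_setg(errp, "read of %zu bytes at offset %zu failed",
                   buflen, offset);
        return -1;
    }
    return 0;   /* the new contract: 0 on success, -1 on error */
}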
diff --git a/include/crypto/cipher.h b/include/crypto/cipher.h
index bce2d4c8e4..083e12a7d9 100644
--- a/include/crypto/cipher.h
+++ b/include/crypto/cipher.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,6 +24,7 @@
#include "qapi/qapi-types-crypto.h"
typedef struct QCryptoCipher QCryptoCipher;
+typedef struct QCryptoCipherDriver QCryptoCipherDriver;
/* See also "QCryptoCipherAlgorithm" and "QCryptoCipherMode"
* enums defined in qapi/crypto.json */
@@ -79,8 +80,7 @@ typedef struct QCryptoCipher QCryptoCipher;
struct QCryptoCipher {
QCryptoCipherAlgorithm alg;
QCryptoCipherMode mode;
- void *opaque;
- void *driver;
+ const QCryptoCipherDriver *driver;
};
/**
@@ -170,6 +170,8 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
*/
void qcrypto_cipher_free(QCryptoCipher *cipher);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoCipher, qcrypto_cipher_free)
+
/**
* qcrypto_cipher_encrypt:
* @cipher: the cipher object
diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
new file mode 100644
index 0000000000..446931fe05
--- /dev/null
+++ b/include/crypto/clmul.h
@@ -0,0 +1,83 @@
+/*
+ * Carry-less multiply operations.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_CLMUL_H
+#define CRYPTO_CLMUL_H
+
+#include "qemu/int128.h"
+#include "host/crypto/clmul.h"
+
+/**
+ * clmul_8x8_low:
+ *
+ * Perform eight 8x8->8 carry-less multiplies.
+ */
+uint64_t clmul_8x8_low(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_even:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The odd bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_even(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_odd:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The even bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_odd(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_packed:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ */
+uint64_t clmul_8x4_packed(uint32_t, uint32_t);
+
+/**
+ * clmul_16x2_even:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_even(uint64_t, uint64_t);
+
+/**
+ * clmul_16x2_odd:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_odd(uint64_t, uint64_t);
+
+/**
+ * clmul_32:
+ *
+ * Perform a 32x32->64 carry-less multiply.
+ */
+uint64_t clmul_32(uint32_t, uint32_t);
+
+/**
+ * clmul_64:
+ *
+ * Perform a 64x64->128 carry-less multiply.
+ */
+Int128 clmul_64_gen(uint64_t, uint64_t);
+
+static inline Int128 clmul_64(uint64_t a, uint64_t b)
+{
+ if (HAVE_CLMUL_ACCEL) {
+ return clmul_64_accel(a, b);
+ } else {
+ return clmul_64_gen(a, b);
+ }
+}
+
+#endif /* CRYPTO_CLMUL_H */
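A small sketch showing the dispatching clmul_64() helper together with the Int128 accessors from qemu/int128.h:

#include "qemu/osdep.h"
#include "crypto/clmul.h"

/* Carry-less multiply two 64-bit polynomials, returning both halves. */
static void clmul64_example(uint64_t a, uint64_t b,
                            uint64_t *lo, uint64_t *hi)
{
    Int128 r = clmul_64(a, b);   /* accelerated when HAVE_CLMUL_ACCEL */

    *lo = int128_getlo(r);
    *hi = int128_gethi(r);
}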
diff --git a/include/crypto/desrfb.h b/include/crypto/desrfb.h
index 7ca596c387..af8d12234d 100644
--- a/include/crypto/desrfb.h
+++ b/include/crypto/desrfb.h
@@ -15,30 +15,30 @@
/* d3des.h -
*
- * Headers and defines for d3des.c
- * Graven Imagery, 1992.
+ * Headers and defines for d3des.c
+ * Graven Imagery, 1992.
*
* Copyright (c) 1988,1989,1990,1991,1992 by Richard Outerbridge
- * (GEnie : OUTER; CIS : [71755,204])
+ * (GEnie : OUTER; CIS : [71755,204])
*/
-#define EN0 0 /* MODE == encrypt */
-#define DE1 1 /* MODE == decrypt */
+#define EN0 0 /* MODE == encrypt */
+#define DE1 1 /* MODE == decrypt */
void deskey(unsigned char *, int);
-/* hexkey[8] MODE
+/* hexkey[8] MODE
* Sets the internal key register according to the hexadecimal
* key contained in the 8 bytes of hexkey, according to the DES,
* for encryption or decryption according to MODE.
*/
void usekey(unsigned long *);
-/* cookedkey[32]
+/* cookedkey[32]
* Loads the internal key register with the data in cookedkey.
*/
void des(unsigned char *, unsigned char *);
-/* from[8] to[8]
+/* from[8] to[8]
* Encrypts/Decrypts (according to the key currently loaded in the
* internal key register) one block of eight bytes at address 'from'
* into the block at address 'to'. They can be the same.
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 077ac7bea0..54d87aa2a1 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h
index aa3c97a2ff..ad4d778416 100644
--- a/include/crypto/hmac.h
+++ b/include/crypto/hmac.h
@@ -65,6 +65,8 @@ QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
*/
void qcrypto_hmac_free(QCryptoHmac *hmac);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoHmac, qcrypto_hmac_free)
+
/**
* qcrypto_hmac_bytesv:
* @hmac: the hmac object
diff --git a/include/crypto/init.h b/include/crypto/init.h
index f79c02266b..00e0f637ce 100644
--- a/include/crypto/init.h
+++ b/include/crypto/init.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/crypto/ivgen.h b/include/crypto/ivgen.h
index 0350cd2a93..a09d5732da 100644
--- a/include/crypto/ivgen.h
+++ b/include/crypto/ivgen.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -32,7 +32,7 @@
* sector.
*
* <example>
- * <title>Encrypting block data with initialiation vectors</title>
+ * <title>Encrypting block data with initialization vectors</title>
* <programlisting>
* uint8_t *data = ....data to encrypt...
* size_t ndata = XXX;
@@ -147,7 +147,7 @@ QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg,
* @niv: the number of bytes in @iv
* @errp: pointer to a NULL-initialized error object
*
- * Calculate a new initialiation vector for the data
+ * Calculate a new initialization vector for the data
* to be stored in sector @sector. The IV will be
* written into the buffer @iv of size @niv.
*
@@ -203,4 +203,6 @@ QCryptoHashAlgorithm qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen);
*/
void qcrypto_ivgen_free(QCryptoIVGen *ivgen);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoIVGen, qcrypto_ivgen_free)
+
#endif /* QCRYPTO_IVGEN_H */
diff --git a/include/crypto/pbkdf.h b/include/crypto/pbkdf.h
index ef209b3e03..2c31a44a27 100644
--- a/include/crypto/pbkdf.h
+++ b/include/crypto/pbkdf.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/crypto/random.h b/include/crypto/random.h
index 8764ca0562..325ff075d8 100644
--- a/include/crypto/random.h
+++ b/include/crypto/random.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,7 +21,6 @@
#ifndef QCRYPTO_RANDOM_H
#define QCRYPTO_RANDOM_H
-#include "qemu-common.h"
/**
* qcrypto_random_bytes:
@@ -34,7 +33,7 @@
*
* Returns 0 on success, -1 on error
*/
-int qcrypto_random_bytes(uint8_t *buf,
+int qcrypto_random_bytes(void *buf,
size_t buflen,
Error **errp);
diff --git a/include/crypto/secret.h b/include/crypto/secret.h
index edd0e13236..5d20ae6d2f 100644
--- a/include/crypto/secret.h
+++ b/include/crypto/secret.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,12 +23,13 @@
#include "qapi/qapi-types-crypto.h"
#include "qom/object.h"
+#include "crypto/secret_common.h"
#define TYPE_QCRYPTO_SECRET "secret"
-#define QCRYPTO_SECRET(obj) \
- OBJECT_CHECK(QCryptoSecret, (obj), TYPE_QCRYPTO_SECRET)
-
typedef struct QCryptoSecret QCryptoSecret;
+DECLARE_INSTANCE_CHECKER(QCryptoSecret, QCRYPTO_SECRET,
+ TYPE_QCRYPTO_SECRET)
+
typedef struct QCryptoSecretClass QCryptoSecretClass;
/**
@@ -119,29 +120,14 @@ typedef struct QCryptoSecretClass QCryptoSecretClass;
*/
struct QCryptoSecret {
- Object parent_obj;
- uint8_t *rawdata;
- size_t rawlen;
- QCryptoSecretFormat format;
+ QCryptoSecretCommon parent_obj;
char *data;
char *file;
- char *keyid;
- char *iv;
};
struct QCryptoSecretClass {
- ObjectClass parent_class;
+ QCryptoSecretCommonClass parent_class;
};
-
-extern int qcrypto_secret_lookup(const char *secretid,
- uint8_t **data,
- size_t *datalen,
- Error **errp);
-extern char *qcrypto_secret_lookup_as_utf8(const char *secretid,
- Error **errp);
-extern char *qcrypto_secret_lookup_as_base64(const char *secretid,
- Error **errp);
-
#endif /* QCRYPTO_SECRET_H */
diff --git a/include/crypto/secret_common.h b/include/crypto/secret_common.h
new file mode 100644
index 0000000000..a0a22e1abd
--- /dev/null
+++ b/include/crypto/secret_common.h
@@ -0,0 +1,58 @@
+/*
+ * QEMU crypto secret support
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QCRYPTO_SECRET_COMMON_H
+#define QCRYPTO_SECRET_COMMON_H
+
+#include "qapi/qapi-types-crypto.h"
+#include "qom/object.h"
+
+#define TYPE_QCRYPTO_SECRET_COMMON "secret_common"
+OBJECT_DECLARE_TYPE(QCryptoSecretCommon, QCryptoSecretCommonClass,
+ QCRYPTO_SECRET_COMMON)
+
+
+struct QCryptoSecretCommon {
+ Object parent_obj;
+ uint8_t *rawdata;
+ size_t rawlen;
+ QCryptoSecretFormat format;
+ char *keyid;
+ char *iv;
+};
+
+
+struct QCryptoSecretCommonClass {
+ ObjectClass parent_class;
+ void (*load_data)(QCryptoSecretCommon *secret,
+ uint8_t **output,
+ size_t *outputlen,
+ Error **errp);
+};
+
+
+int qcrypto_secret_lookup(const char *secretid,
+ uint8_t **data,
+ size_t *datalen,
+ Error **errp);
+char *qcrypto_secret_lookup_as_utf8(const char *secretid, Error **errp);
+char *qcrypto_secret_lookup_as_base64(const char *secretid, Error **errp);
+
+#endif /* QCRYPTO_SECRET_COMMON_H */
diff --git a/include/crypto/secret_keyring.h b/include/crypto/secret_keyring.h
new file mode 100644
index 0000000000..3758852cb8
--- /dev/null
+++ b/include/crypto/secret_keyring.h
@@ -0,0 +1,40 @@
+/*
+ * QEMU crypto secret support
+ *
+ * Copyright 2020 Yandex N.V.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QCRYPTO_SECRET_KEYRING_H
+#define QCRYPTO_SECRET_KEYRING_H
+
+#include "qapi/qapi-types-crypto.h"
+#include "qom/object.h"
+#include "crypto/secret_common.h"
+
+#define TYPE_QCRYPTO_SECRET_KEYRING "secret_keyring"
+OBJECT_DECLARE_SIMPLE_TYPE(QCryptoSecretKeyring,
+ QCRYPTO_SECRET_KEYRING)
+
+
+struct QCryptoSecretKeyring {
+ QCryptoSecretCommon parent;
+ int32_t serial;
+};
+
+
+
+#endif /* QCRYPTO_SECRET_KEYRING_H */
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
new file mode 100644
index 0000000000..382b26d922
--- /dev/null
+++ b/include/crypto/sm4.h
@@ -0,0 +1,15 @@
+#ifndef QEMU_SM4_H
+#define QEMU_SM4_H
+
+extern const uint8_t sm4_sbox[256];
+extern const uint32_t sm4_ck[32];
+
+static inline uint32_t sm4_subword(uint32_t word)
+{
+ return sm4_sbox[word & 0xff] |
+ sm4_sbox[(word >> 8) & 0xff] << 8 |
+ sm4_sbox[(word >> 16) & 0xff] << 16 |
+ sm4_sbox[(word >> 24) & 0xff] << 24;
+}
+
+#endif
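For illustration (not part of the patch), sm4_subword() combines with the SM4 linear transform L to build the data-path round function; rol32() is the rotate-left helper from qemu/bitops.h, and the rotation constants 2, 10, 18, 24 are assumed from the SM4 specification:

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "crypto/sm4.h"

/* T(x) = L(tau(x)) for the SM4 encryption data path (sketch). */
static inline uint32_t sm4_round_T_example(uint32_t x)
{
    uint32_t b = sm4_subword(x);

    return b ^ rol32(b, 2) ^ rol32(b, 10) ^ rol32(b, 18) ^ rol32(b, 24);
}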
diff --git a/include/crypto/tls-cipher-suites.h b/include/crypto/tls-cipher-suites.h
new file mode 100644
index 0000000000..3bd2003f32
--- /dev/null
+++ b/include/crypto/tls-cipher-suites.h
@@ -0,0 +1,34 @@
+/*
+ * QEMU TLS Cipher Suites Registry (RFC8447)
+ *
+ * Copyright (c) 2018-2020 Red Hat, Inc.
+ *
+ * Author: Philippe Mathieu-Daudé <philmd@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QCRYPTO_TLS_CIPHER_SUITES_H
+#define QCRYPTO_TLS_CIPHER_SUITES_H
+
+#include "qom/object.h"
+#include "crypto/tlscreds.h"
+
+#define TYPE_QCRYPTO_TLS_CIPHER_SUITES "tls-cipher-suites"
+typedef struct QCryptoTLSCipherSuites QCryptoTLSCipherSuites;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCipherSuites, QCRYPTO_TLS_CIPHER_SUITES,
+ TYPE_QCRYPTO_TLS_CIPHER_SUITES)
+
+/**
+ * qcrypto_tls_cipher_suites_get_data:
+ * @obj: pointer to a TLS cipher suites object
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Returns: reference to a byte array containing the data.
+ * The caller should release the reference when no longer
+ * required.
+ */
+GByteArray *qcrypto_tls_cipher_suites_get_data(QCryptoTLSCipherSuites *obj,
+ Error **errp);
+
+#endif /* QCRYPTO_TLS_CIPHER_SUITES_H */
diff --git a/include/crypto/tlscreds.h b/include/crypto/tlscreds.h
index 6b011e1dbc..2a8a857010 100644
--- a/include/crypto/tlscreds.h
+++ b/include/crypto/tlscreds.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,20 +24,17 @@
#include "qapi/qapi-types-crypto.h"
#include "qom/object.h"
-#ifdef CONFIG_GNUTLS
-#include <gnutls/gnutls.h>
-#endif
-
#define TYPE_QCRYPTO_TLS_CREDS "tls-creds"
-#define QCRYPTO_TLS_CREDS(obj) \
- OBJECT_CHECK(QCryptoTLSCreds, (obj), TYPE_QCRYPTO_TLS_CREDS)
-
typedef struct QCryptoTLSCreds QCryptoTLSCreds;
typedef struct QCryptoTLSCredsClass QCryptoTLSCredsClass;
+DECLARE_OBJ_CHECKERS(QCryptoTLSCreds, QCryptoTLSCredsClass, QCRYPTO_TLS_CREDS,
+ TYPE_QCRYPTO_TLS_CREDS)
+
#define QCRYPTO_TLS_CREDS_DH_PARAMS "dh-params.pem"
+typedef bool (*CryptoTLSCredsReload)(QCryptoTLSCreds *, Error **);
/**
* QCryptoTLSCreds:
*
@@ -47,21 +44,24 @@ typedef struct QCryptoTLSCredsClass QCryptoTLSCredsClass;
* certificate credentials.
*/
-struct QCryptoTLSCreds {
- Object parent_obj;
- char *dir;
- QCryptoTLSCredsEndpoint endpoint;
-#ifdef CONFIG_GNUTLS
- gnutls_dh_params_t dh_params;
-#endif
- bool verifyPeer;
- char *priority;
-};
-
-
struct QCryptoTLSCredsClass {
ObjectClass parent_class;
+ CryptoTLSCredsReload reload;
};
+/**
+ * qcrypto_tls_creds_check_endpoint:
+ * @creds: pointer to a TLS credentials object
+ * @endpoint: type of network endpoint that will be using the credentials
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Check whether the credentials are set up according to
+ * the type of the @endpoint argument.
+ *
+ * Returns true if the credentials are set up for the endpoint, false otherwise
+ */
+bool qcrypto_tls_creds_check_endpoint(QCryptoTLSCreds *creds,
+ QCryptoTLSCredsEndpoint endpoint,
+ Error **errp);
#endif /* QCRYPTO_TLSCREDS_H */
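A minimal sketch of the new check helper as seen from a server-side caller; the endpoint constant is assumed to be the QAPI-generated enum value:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/tlscreds.h"

/* Reject credentials objects that were not created with endpoint=server. */
static bool check_server_creds_example(QCryptoTLSCreds *creds, Error **errp)
{
    return qcrypto_tls_creds_check_endpoint(creds,
                                            QCRYPTO_TLS_CREDS_ENDPOINT_SERVER,
                                            errp);
}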
diff --git a/include/crypto/tlscredsanon.h b/include/crypto/tlscredsanon.h
index 4d6b7e4d29..bd3023f9ea 100644
--- a/include/crypto/tlscredsanon.h
+++ b/include/crypto/tlscredsanon.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,13 +22,14 @@
#define QCRYPTO_TLSCREDSANON_H
#include "crypto/tlscreds.h"
+#include "qom/object.h"
#define TYPE_QCRYPTO_TLS_CREDS_ANON "tls-creds-anon"
-#define QCRYPTO_TLS_CREDS_ANON(obj) \
- OBJECT_CHECK(QCryptoTLSCredsAnon, (obj), TYPE_QCRYPTO_TLS_CREDS_ANON)
+typedef struct QCryptoTLSCredsAnon QCryptoTLSCredsAnon;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCredsAnon, QCRYPTO_TLS_CREDS_ANON,
+ TYPE_QCRYPTO_TLS_CREDS_ANON)
-typedef struct QCryptoTLSCredsAnon QCryptoTLSCredsAnon;
typedef struct QCryptoTLSCredsAnonClass QCryptoTLSCredsAnonClass;
/**
@@ -91,18 +92,6 @@ typedef struct QCryptoTLSCredsAnonClass QCryptoTLSCredsAnonClass;
*
*/
-
-struct QCryptoTLSCredsAnon {
- QCryptoTLSCreds parent_obj;
-#ifdef CONFIG_GNUTLS
- union {
- gnutls_anon_server_credentials_t server;
- gnutls_anon_client_credentials_t client;
- } data;
-#endif
-};
-
-
struct QCryptoTLSCredsAnonClass {
QCryptoTLSCredsClass parent_class;
};
diff --git a/include/crypto/tlscredspsk.h b/include/crypto/tlscredspsk.h
index 306d36c67d..bcd07dc4f6 100644
--- a/include/crypto/tlscredspsk.h
+++ b/include/crypto/tlscredspsk.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,13 @@
#define QCRYPTO_TLSCREDSPSK_H
#include "crypto/tlscreds.h"
+#include "qom/object.h"
#define TYPE_QCRYPTO_TLS_CREDS_PSK "tls-creds-psk"
-#define QCRYPTO_TLS_CREDS_PSK(obj) \
- OBJECT_CHECK(QCryptoTLSCredsPSK, (obj), TYPE_QCRYPTO_TLS_CREDS_PSK)
-
typedef struct QCryptoTLSCredsPSK QCryptoTLSCredsPSK;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCredsPSK, QCRYPTO_TLS_CREDS_PSK,
+ TYPE_QCRYPTO_TLS_CREDS_PSK)
+
typedef struct QCryptoTLSCredsPSKClass QCryptoTLSCredsPSKClass;
#define QCRYPTO_TLS_CREDS_PSKFILE "keys.psk"
@@ -86,18 +87,6 @@ typedef struct QCryptoTLSCredsPSKClass QCryptoTLSCredsPSKClass;
* The PSK file can be created and managed using psktool.
*/
-struct QCryptoTLSCredsPSK {
- QCryptoTLSCreds parent_obj;
- char *username;
-#ifdef CONFIG_GNUTLS
- union {
- gnutls_psk_server_credentials_t server;
- gnutls_psk_client_credentials_t client;
- } data;
-#endif
-};
-
-
struct QCryptoTLSCredsPSKClass {
QCryptoTLSCredsClass parent_class;
};
diff --git a/include/crypto/tlscredsx509.h b/include/crypto/tlscredsx509.h
index 66ad6a7486..c4daba21a6 100644
--- a/include/crypto/tlscredsx509.h
+++ b/include/crypto/tlscredsx509.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,13 @@
#define QCRYPTO_TLSCREDSX509_H
#include "crypto/tlscreds.h"
+#include "qom/object.h"
#define TYPE_QCRYPTO_TLS_CREDS_X509 "tls-creds-x509"
-#define QCRYPTO_TLS_CREDS_X509(obj) \
- OBJECT_CHECK(QCryptoTLSCredsX509, (obj), TYPE_QCRYPTO_TLS_CREDS_X509)
-
typedef struct QCryptoTLSCredsX509 QCryptoTLSCredsX509;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCredsX509, QCRYPTO_TLS_CREDS_X509,
+ TYPE_QCRYPTO_TLS_CREDS_X509)
+
typedef struct QCryptoTLSCredsX509Class QCryptoTLSCredsX509Class;
#define QCRYPTO_TLS_CREDS_X509_CA_CERT "ca-cert.pem"
@@ -95,16 +96,6 @@ typedef struct QCryptoTLSCredsX509Class QCryptoTLSCredsX509Class;
*
*/
-struct QCryptoTLSCredsX509 {
- QCryptoTLSCreds parent_obj;
-#ifdef CONFIG_GNUTLS
- gnutls_certificate_credentials_t data;
-#endif
- bool sanityCheck;
- char *passwordid;
-};
-
-
struct QCryptoTLSCredsX509Class {
QCryptoTLSCredsClass parent_class;
};
diff --git a/include/crypto/tlssession.h b/include/crypto/tlssession.h
index 1c7414e4ff..571049bd0e 100644
--- a/include/crypto/tlssession.h
+++ b/include/crypto/tlssession.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -56,7 +56,7 @@
*
* static int mysock_run_tls(int sockfd,
* QCryptoTLSCreds *creds,
- * Error *errp)
+ * Error **errp)
* {
* QCryptoTLSSession *sess;
*
@@ -160,6 +160,8 @@ QCryptoTLSSession *qcrypto_tls_session_new(QCryptoTLSCreds *creds,
*/
void qcrypto_tls_session_free(QCryptoTLSSession *sess);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoTLSSession, qcrypto_tls_session_free)
+
/**
* qcrypto_tls_session_check_credentials:
* @sess: the TLS session object
@@ -247,6 +249,17 @@ ssize_t qcrypto_tls_session_read(QCryptoTLSSession *sess,
size_t len);
/**
+ * qcrypto_tls_session_check_pending:
+ * @sess: the TLS session object
+ *
+ * Check if the TLS buffers hold data that has already been read from
+ * the underlying data source but not yet consumed by the caller.
+ *
+ * Returns: the number of bytes available or zero
+ */
+size_t qcrypto_tls_session_check_pending(QCryptoTLSSession *sess);
+
+/**
* qcrypto_tls_session_handshake:
* @sess: the TLS session object
* @errp: pointer to a NULL-initialized error object
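A sketch of how a caller might drain plaintext already buffered inside the TLS layer using the new check_pending helper before going back to poll; it assumes qcrypto_tls_session_read() takes (sess, buf, len), and buffer handling is left to the caller:

#include "qemu/osdep.h"
#include "crypto/tlssession.h"

static void drain_pending_example(QCryptoTLSSession *sess,
                                  char *buf, size_t buflen)
{
    /* Data may already sit decrypted inside the TLS layer even though
     * the underlying socket will not poll readable again. */
    while (qcrypto_tls_session_check_pending(sess) > 0) {
        ssize_t n = qcrypto_tls_session_read(sess, buf, buflen);
        if (n <= 0) {
            break;
        }
        /* ...hand n bytes from buf to the frontend... */
    }
}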
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 3c8967ac6c..f267b7824a 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -26,7 +26,6 @@
#ifndef QCRYPTO_XTS_H
#define QCRYPTO_XTS_H
-#include "qemu-common.h"
#define XTS_BLOCK_SIZE 16
diff --git a/include/disas/capstone.h b/include/disas/capstone.h
index 84e214956d..e29068dd97 100644
--- a/include/disas/capstone.h
+++ b/include/disas/capstone.h
@@ -1,5 +1,5 @@
#ifndef QEMU_CAPSTONE_H
-#define QEMU_CAPSTONE_H 1
+#define QEMU_CAPSTONE_H
#ifdef CONFIG_CAPSTONE
diff --git a/include/disas/bfd.h b/include/disas/dis-asm.h
index 41b61c85f9..a1d26ce903 100644
--- a/include/disas/bfd.h
+++ b/include/disas/dis-asm.h
@@ -6,10 +6,10 @@
interface, for making instruction-processing programs more independent
of the instruction set being processed. */
-#ifndef DISAS_BFD_H
-#define DISAS_BFD_H
+#ifndef DISAS_DIS_ASM_H
+#define DISAS_DIS_ASM_H
-#include "qemu/fprintf-fn.h"
+#include "qemu/bswap.h"
typedef void *PTR;
typedef uint64_t bfd_vma;
@@ -188,20 +188,20 @@ enum bfd_architecture
#define bfd_mach_alpha_ev5 0x20
#define bfd_mach_alpha_ev6 0x30
bfd_arch_arm, /* Advanced Risc Machines ARM */
-#define bfd_mach_arm_unknown 0
-#define bfd_mach_arm_2 1
-#define bfd_mach_arm_2a 2
-#define bfd_mach_arm_3 3
-#define bfd_mach_arm_3M 4
-#define bfd_mach_arm_4 5
-#define bfd_mach_arm_4T 6
-#define bfd_mach_arm_5 7
-#define bfd_mach_arm_5T 8
-#define bfd_mach_arm_5TE 9
-#define bfd_mach_arm_XScale 10
-#define bfd_mach_arm_ep9312 11
-#define bfd_mach_arm_iWMMXt 12
-#define bfd_mach_arm_iWMMXt2 13
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+#define bfd_mach_arm_iWMMXt2 13
bfd_arch_ns32k, /* National Semiconductors ns32000 */
bfd_arch_w65, /* WDC 65816 */
bfd_arch_tic30, /* Texas Instruments TMS320C30 */
@@ -213,6 +213,25 @@ enum bfd_architecture
#define bfd_mach_m32r 0 /* backwards compatibility */
bfd_arch_mn10200, /* Matsushita MN10200 */
bfd_arch_mn10300, /* Matsushita MN10300 */
+ bfd_arch_avr, /* AVR microcontrollers */
+#define bfd_mach_avr1 1
+#define bfd_mach_avr2 2
+#define bfd_mach_avr25 25
+#define bfd_mach_avr3 3
+#define bfd_mach_avr31 31
+#define bfd_mach_avr35 35
+#define bfd_mach_avr4 4
+#define bfd_mach_avr5 5
+#define bfd_mach_avr51 51
+#define bfd_mach_avr6 6
+#define bfd_mach_avrtiny 100
+#define bfd_mach_avrxmega1 101
+#define bfd_mach_avrxmega2 102
+#define bfd_mach_avrxmega3 103
+#define bfd_mach_avrxmega4 104
+#define bfd_mach_avrxmega5 105
+#define bfd_mach_avrxmega6 106
+#define bfd_mach_avrxmega7 107
bfd_arch_cris, /* Axis CRIS */
#define bfd_mach_cris_v0_v10 255
#define bfd_mach_cris_v32 32
@@ -222,12 +241,11 @@ enum bfd_architecture
bfd_arch_ia64, /* HP/Intel ia64 */
#define bfd_mach_ia64_elf64 64
#define bfd_mach_ia64_elf32 32
- bfd_arch_nios2, /* Nios II */
-#define bfd_mach_nios2 0
-#define bfd_mach_nios2r1 1
-#define bfd_mach_nios2r2 2
- bfd_arch_lm32, /* Lattice Mico32 */
-#define bfd_mach_lm32 1
+ bfd_arch_rx, /* Renesas RX */
+#define bfd_mach_rx 0x75
+#define bfd_mach_rx_v2 0x76
+#define bfd_mach_rx_v3 0x77
+ bfd_arch_loongarch,
bfd_arch_last
};
#define bfd_mach_s390_31 31
@@ -243,15 +261,18 @@ typedef struct symbol_cache_entry
} udata;
} asymbol;
+typedef int (*fprintf_function)(FILE *f, const char *fmt, ...)
+ G_GNUC_PRINTF(2, 3);
+
enum dis_insn_type {
- dis_noninsn, /* Not a valid instruction */
- dis_nonbranch, /* Not a branch instruction */
- dis_branch, /* Unconditional branch */
- dis_condbranch, /* Conditional branch */
- dis_jsr, /* Jump to subroutine */
- dis_condjsr, /* Conditional jump to subroutine */
- dis_dref, /* Data reference instruction */
- dis_dref2 /* Two data references in instruction */
+ dis_noninsn, /* Not a valid instruction */
+ dis_nonbranch, /* Not a branch instruction */
+ dis_branch, /* Unconditional branch */
+ dis_condbranch, /* Conditional branch */
+ dis_jsr, /* Jump to subroutine */
+ dis_condjsr, /* Conditional jump to subroutine */
+ dis_dref, /* Data reference instruction */
+ dis_dref2 /* Two data references in instruction */
};
/* This struct is passed into the instruction decoding routine,
@@ -294,8 +315,8 @@ typedef struct disassemble_info {
The top 16 bits are reserved for public use (and are documented here).
The bottom 16 bits are for the internal use of the disassembler. */
unsigned long flags;
-#define INSN_HAS_RELOC 0x80000000
-#define INSN_ARM_BE32 0x00010000
+#define INSN_HAS_RELOC 0x80000000
+#define INSN_ARM_BE32 0x00010000
PTR private_data;
/* Function used to get bytes to disassemble. MEMADDR is the
@@ -305,7 +326,7 @@ typedef struct disassemble_info {
Returns an errno value or 0 for success. */
int (*read_memory_func)
(bfd_vma memaddr, bfd_byte *myaddr, int length,
- struct disassemble_info *info);
+ struct disassemble_info *info);
/* Function which should be called if we get an error that we can't
recover from. STATUS is the errno value from read_memory_func and
@@ -334,7 +355,7 @@ typedef struct disassemble_info {
(bfd_vma addr, struct disassemble_info * info);
/* These are for buffer_read_memory. */
- bfd_byte *buffer;
+ const bfd_byte *buffer;
bfd_vma buffer_vma;
int buffer_length;
@@ -359,18 +380,29 @@ typedef struct disassemble_info {
To determine whether this decoder supports this information, set
insn_info_valid to 0, decode an instruction, then check it. */
- char insn_info_valid; /* Branch info has been set. */
- char branch_delay_insns; /* How many sequential insn's will run before
- a branch takes effect. (0 = normal) */
- char data_size; /* Size of data reference in insn, in bytes */
- enum dis_insn_type insn_type; /* Type of instruction */
- bfd_vma target; /* Target address of branch or dref, if known;
- zero if unknown. */
- bfd_vma target2; /* Second target address for dref2 */
+ char insn_info_valid; /* Branch info has been set. */
+ char branch_delay_insns; /* How many sequential insn's will run before
+ a branch takes effect. (0 = normal) */
+ char data_size; /* Size of data reference in insn, in bytes */
+ enum dis_insn_type insn_type; /* Type of instruction */
+ bfd_vma target; /* Target address of branch or dref, if known;
+ zero if unknown. */
+ bfd_vma target2; /* Second target address for dref2 */
/* Command line options specific to the target disassembler. */
char * disassembler_options;
+ /*
+ * When true, instruct the disassembler that it may preface the
+ * disassembly with the opcode values if it wants to. This is mainly
+ * for the benefit of the plugin interface, which doesn't want
+ * that.
+ */
+ bool show_opcodes;
+
+ /* Field intended to be used by targets in any way they deem suitable. */
+ void *target_info;
+
/* Options for Capstone disassembly. */
int cap_arch;
int cap_mode;
@@ -379,7 +411,6 @@ typedef struct disassemble_info {
} disassemble_info;
-
/* Standard disassemblers. Disassemble one instruction at the given
target address. Return number of bytes processed. */
typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *);
@@ -388,7 +419,6 @@ int print_insn_tci(bfd_vma, disassemble_info*);
int print_insn_big_mips (bfd_vma, disassemble_info*);
int print_insn_little_mips (bfd_vma, disassemble_info*);
int print_insn_nanomips (bfd_vma, disassemble_info*);
-int print_insn_i386 (bfd_vma, disassemble_info*);
int print_insn_m68k (bfd_vma, disassemble_info*);
int print_insn_z8001 (bfd_vma, disassemble_info*);
int print_insn_z8002 (bfd_vma, disassemble_info*);
@@ -399,7 +429,6 @@ int print_insn_h8500 (bfd_vma, disassemble_info*);
int print_insn_arm_a64 (bfd_vma, disassemble_info*);
int print_insn_alpha (bfd_vma, disassemble_info*);
disassembler_ftype arc_get_disassembler (int, int);
-int print_insn_arm (bfd_vma, disassemble_info*);
int print_insn_sparc (bfd_vma, disassemble_info*);
int print_insn_big_a29k (bfd_vma, disassemble_info*);
int print_insn_little_a29k (bfd_vma, disassemble_info*);
@@ -411,7 +440,6 @@ int print_insn_m32r (bfd_vma, disassemble_info*);
int print_insn_m88k (bfd_vma, disassemble_info*);
int print_insn_mn10200 (bfd_vma, disassemble_info*);
int print_insn_mn10300 (bfd_vma, disassemble_info*);
-int print_insn_moxie (bfd_vma, disassemble_info*);
int print_insn_ns32k (bfd_vma, disassemble_info*);
int print_insn_big_powerpc (bfd_vma, disassemble_info*);
int print_insn_little_powerpc (bfd_vma, disassemble_info*);
@@ -420,79 +448,29 @@ int print_insn_w65 (bfd_vma, disassemble_info*);
int print_insn_d10v (bfd_vma, disassemble_info*);
int print_insn_v850 (bfd_vma, disassemble_info*);
int print_insn_tic30 (bfd_vma, disassemble_info*);
-int print_insn_ppc (bfd_vma, disassemble_info*);
-int print_insn_s390 (bfd_vma, disassemble_info*);
int print_insn_crisv32 (bfd_vma, disassemble_info*);
int print_insn_crisv10 (bfd_vma, disassemble_info*);
int print_insn_microblaze (bfd_vma, disassemble_info*);
int print_insn_ia64 (bfd_vma, disassemble_info*);
-int print_insn_lm32 (bfd_vma, disassemble_info*);
-int print_insn_big_nios2 (bfd_vma, disassemble_info*);
-int print_insn_little_nios2 (bfd_vma, disassemble_info*);
int print_insn_xtensa (bfd_vma, disassemble_info*);
int print_insn_riscv32 (bfd_vma, disassemble_info*);
int print_insn_riscv64 (bfd_vma, disassemble_info*);
-
-#if 0
-/* Fetch the disassembler for a given BFD, if that support is available. */
-disassembler_ftype disassembler(bfd *);
-#endif
-
-
-/* This block of definitions is for particular callers who read instructions
- into a buffer before calling the instruction decoder. */
-
-/* Here is a function which callers may wish to use for read_memory_func.
- It gets bytes from a buffer. */
-int buffer_read_memory(bfd_vma, bfd_byte *, int, struct disassemble_info *);
-
-/* This function goes with buffer_read_memory.
- It prints a message using info->fprintf_func and info->stream. */
-void perror_memory(int, bfd_vma, struct disassemble_info *);
-
-
-/* Just print the address in hex. This is included for completeness even
- though both GDB and objdump provide their own (to print symbolic
- addresses). */
-void generic_print_address(bfd_vma, struct disassemble_info *);
-
-/* Always true. */
-int generic_symbol_at_address(bfd_vma, struct disassemble_info *);
-
-/* Macro to initialize a disassemble_info struct. This should be called
- by all applications creating such a struct. */
-#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \
- (INFO).flavour = bfd_target_unknown_flavour, \
- (INFO).arch = bfd_arch_unknown, \
- (INFO).mach = 0, \
- (INFO).endian = BFD_ENDIAN_UNKNOWN, \
- INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC)
-
-/* Call this macro to initialize only the internal variables for the
- disassembler. Architecture dependent things such as byte order, or machine
- variant are not touched by this macro. This makes things much easier for
- GDB which must initialize these things separately. */
-
-#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \
- (INFO).fprintf_func = (FPRINTF_FUNC), \
- (INFO).stream = (STREAM), \
- (INFO).symbols = NULL, \
- (INFO).num_symbols = 0, \
- (INFO).private_data = NULL, \
- (INFO).buffer = NULL, \
- (INFO).buffer_vma = 0, \
- (INFO).buffer_length = 0, \
- (INFO).read_memory_func = buffer_read_memory, \
- (INFO).memory_error_func = perror_memory, \
- (INFO).print_address_func = generic_print_address, \
- (INFO).print_insn = NULL, \
- (INFO).symbol_at_address_func = generic_symbol_at_address, \
- (INFO).flags = 0, \
- (INFO).bytes_per_line = 0, \
- (INFO).bytes_per_chunk = 0, \
- (INFO).display_endian = BFD_ENDIAN_UNKNOWN, \
- (INFO).disassembler_options = NULL, \
- (INFO).insn_info_valid = 0
+int print_insn_riscv128 (bfd_vma, disassemble_info*);
+int print_insn_rx(bfd_vma, disassemble_info *);
+int print_insn_hexagon(bfd_vma, disassemble_info *);
+int print_insn_loongarch(bfd_vma, disassemble_info *);
+
+#ifdef CONFIG_CAPSTONE
+bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size);
+bool cap_disas_host(disassemble_info *info, const void *code, size_t size);
+bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count);
+bool cap_disas_plugin(disassemble_info *info, uint64_t pc, size_t size);
+#else
+# define cap_disas_target(i, p, s) false
+# define cap_disas_host(i, p, s) false
+# define cap_disas_monitor(i, p, c) false
+# define cap_disas_plugin(i, p, c) false
+#endif /* CONFIG_CAPSTONE */
#ifndef ATTRIBUTE_UNUSED
#define ATTRIBUTE_UNUSED __attribute__((unused))
@@ -500,11 +478,31 @@ int generic_symbol_at_address(bfd_vma, struct disassemble_info *);
/* from libbfd */
-bfd_vma bfd_getl64 (const bfd_byte *addr);
-bfd_vma bfd_getl32 (const bfd_byte *addr);
-bfd_vma bfd_getb32 (const bfd_byte *addr);
-bfd_vma bfd_getl16 (const bfd_byte *addr);
-bfd_vma bfd_getb16 (const bfd_byte *addr);
+static inline bfd_vma bfd_getl64(const bfd_byte *addr)
+{
+ return ldq_le_p(addr);
+}
+
+static inline bfd_vma bfd_getl32(const bfd_byte *addr)
+{
+ return (uint32_t)ldl_le_p(addr);
+}
+
+static inline bfd_vma bfd_getl16(const bfd_byte *addr)
+{
+ return lduw_le_p(addr);
+}
+
+static inline bfd_vma bfd_getb32(const bfd_byte *addr)
+{
+ return (uint32_t)ldl_be_p(addr);
+}
+
+static inline bfd_vma bfd_getb16(const bfd_byte *addr)
+{
+ return lduw_be_p(addr);
+}
+
typedef bool bfd_boolean;
-#endif /* DISAS_BFD_H */
+#endif /* DISAS_DIS_ASM_H */
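The rewritten dis-asm.h above turns the former extern bfd_getl*/bfd_getb* declarations into static inline wrappers around QEMU's byte-order load helpers (ldq_le_p(), ldl_le_p(), lduw_le_p() and their big-endian counterparts), and stubs the cap_disas_* entry points out to constant false when CONFIG_CAPSTONE is not set. As a rough standalone sketch of the byte ordering those accessors implement — the demo_* names are invented here for illustration; the real header simply delegates to the bswap helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only: byte-for-byte equivalents of bfd_getl32()/bfd_getb32()
 * above, written without QEMU's ldl_le_p()/ldl_be_p(). */
static uint32_t demo_getl32(const uint8_t *addr)
{
    /* Little endian: least-significant byte first. */
    return (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
           ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
}

static uint32_t demo_getb32(const uint8_t *addr)
{
    /* Big endian: most-significant byte first. */
    return ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
           ((uint32_t)addr[2] << 8) | (uint32_t)addr[3];
}

int main(void)
{
    const uint8_t bytes[4] = { 0x13, 0x05, 0x10, 0x00 };
    /* Prints getl32 -> 0x00100513, getb32 -> 0x13051000. */
    printf("getl32 -> 0x%08" PRIx32 ", getb32 -> 0x%08" PRIx32 "\n",
           demo_getl32(bytes), demo_getb32(bytes));
    return 0;
}
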
diff --git a/include/disas/disas.h b/include/disas/disas.h
index 4d48c13c65..176775eff7 100644
--- a/include/disas/disas.h
+++ b/include/disas/disas.h
@@ -1,32 +1,23 @@
#ifndef QEMU_DISAS_H
#define QEMU_DISAS_H
-#include "qemu-common.h"
-
-#ifdef NEED_CPU_H
-#include "cpu.h"
-
/* Disassemble this for me please... (debugging). */
-void disas(FILE *out, void *code, unsigned long size);
-void target_disas(FILE *out, CPUState *cpu, target_ulong code,
- target_ulong size);
+void disas(FILE *out, const void *code, size_t size);
+void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size);
+
+void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc,
+ int nb_insn, bool is_physical);
-void monitor_disas(Monitor *mon, CPUState *cpu,
- target_ulong pc, int nb_insn, int is_physical);
+char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size);
/* Look up symbol for debugging purpose. Returns "" if unknown. */
-const char *lookup_symbol(target_ulong orig_addr);
-#endif
+const char *lookup_symbol(uint64_t orig_addr);
struct syminfo;
struct elf32_sym;
struct elf64_sym;
-#if defined(CONFIG_USER_ONLY)
-typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_ulong orig_addr);
-#else
-typedef const char *(*lookup_symbol_t)(struct syminfo *s, hwaddr orig_addr);
-#endif
+typedef const char *(*lookup_symbol_t)(struct syminfo *s, uint64_t orig_addr);
struct syminfo {
lookup_symbol_t lookup_symbol;
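On the disas.h side, the qemu-common.h include and the NEED_CPU_H/target_ulong plumbing are gone: the entry points now take plain uint64_t addresses and size_t lengths, plugin_disas() is added, and lookup_symbol() keeps its documented contract of returning "" for unknown addresses via the syminfo/lookup_symbol_t hook. A minimal sketch of that lookup contract, with invented demo_* names (the real lookup_symbol() walks the registered syminfo list built from loaded ELF symbol tables):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only: a toy table mimicking the lookup_symbol_t shape
 * ("" means unknown), using the plain uint64_t addresses the header now
 * takes. demo_sym/demo_lookup are not part of include/disas/disas.h. */
struct demo_sym {
    uint64_t addr;
    uint64_t size;
    const char *name;
};

static const struct demo_sym demo_table[] = {
    { 0x1000, 0x40, "reset_handler" },
    { 0x1040, 0x80, "main_loop" },
};

static const char *demo_lookup(uint64_t orig_addr)
{
    for (size_t i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
        if (orig_addr >= demo_table[i].addr &&
            orig_addr < demo_table[i].addr + demo_table[i].size) {
            return demo_table[i].name;
        }
    }
    return "";   /* same convention as lookup_symbol(): "" if unknown */
}

int main(void)
{
    printf("%#" PRIx64 " -> '%s'\n", (uint64_t)0x1044, demo_lookup(0x1044));
    printf("%#" PRIx64 " -> '%s'\n", (uint64_t)0x2000, demo_lookup(0x2000));
    return 0;
}
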
diff --git a/include/elf.h b/include/elf.h
index e816fb4d76..e7259ec366 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -11,23 +11,28 @@ typedef uint32_t Elf32_Word;
/* 64-bit ELF base types. */
typedef uint64_t Elf64_Addr;
typedef uint16_t Elf64_Half;
-typedef int16_t Elf64_SHalf;
+typedef int16_t Elf64_SHalf;
typedef uint64_t Elf64_Off;
-typedef int32_t Elf64_Sword;
+typedef int32_t Elf64_Sword;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Xword;
typedef int64_t Elf64_Sxword;
/* These constants are for the segment types stored in the image headers */
-#define PT_NULL 0
-#define PT_LOAD 1
-#define PT_DYNAMIC 2
-#define PT_INTERP 3
-#define PT_NOTE 4
-#define PT_SHLIB 5
-#define PT_PHDR 6
-#define PT_LOPROC 0x70000000
-#define PT_HIPROC 0x7fffffff
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+#define PT_LOOS 0x60000000
+#define PT_HIOS 0x6fffffff
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
+
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
+#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
#define PT_MIPS_REGINFO 0x70000000
#define PT_MIPS_RTPROC 0x70000001
@@ -36,34 +41,34 @@ typedef int64_t Elf64_Sxword;
/* Flags in the e_flags field of the header */
/* MIPS architecture level. */
-#define EF_MIPS_ARCH 0xf0000000
+#define EF_MIPS_ARCH 0xf0000000
/* Legal values for MIPS architecture level. */
-#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
-#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
-#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
-#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
-#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
-#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
-#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
-#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */
-#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */
-#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */
-#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
+#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */
+#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */
+#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */
+#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */
/* The ABI of a file. */
-#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
-#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
-
-#define EF_MIPS_NOREORDER 0x00000001
-#define EF_MIPS_PIC 0x00000002
-#define EF_MIPS_CPIC 0x00000004
-#define EF_MIPS_ABI2 0x00000020
-#define EF_MIPS_OPTIONS_FIRST 0x00000080
-#define EF_MIPS_32BITMODE 0x00000100
-#define EF_MIPS_ABI 0x0000f000
-#define EF_MIPS_FP64 0x00000200
-#define EF_MIPS_NAN2008 0x00000400
+#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
+#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
+
+#define EF_MIPS_NOREORDER 0x00000001
+#define EF_MIPS_PIC 0x00000002
+#define EF_MIPS_CPIC 0x00000004
+#define EF_MIPS_ABI2 0x00000020
+#define EF_MIPS_OPTIONS_FIRST 0x00000080
+#define EF_MIPS_32BITMODE 0x00000100
+#define EF_MIPS_ABI 0x0000f000
+#define EF_MIPS_FP64 0x00000200
+#define EF_MIPS_NAN2008 0x00000400
/* MIPS machine variant */
#define EF_MIPS_MACH_NONE 0x00000000 /* A standard MIPS implementation */
@@ -124,159 +129,162 @@ typedef struct mips_elf_abiflags_v0 {
#define ET_HIPROC 0xffff
/* These constants define the various ELF target machines */
-#define EM_NONE 0
-#define EM_M32 1
-#define EM_SPARC 2
-#define EM_386 3
-#define EM_68K 4
-#define EM_88K 5
-#define EM_486 6 /* Perhaps disused */
-#define EM_860 7
+#define EM_NONE 0
+#define EM_M32 1
+#define EM_SPARC 2
+#define EM_386 3
+#define EM_68K 4
+#define EM_88K 5
+#define EM_486 6 /* Perhaps disused */
+#define EM_860 7
-#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
+#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
-#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
-#define EM_PARISC 15 /* HPPA */
+#define EM_PARISC 15 /* HPPA */
-#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
+#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
-#define EM_PPC 20 /* PowerPC */
-#define EM_PPC64 21 /* PowerPC64 */
+#define EM_PPC 20 /* PowerPC */
+#define EM_PPC64 21 /* PowerPC64 */
-#define EM_ARM 40 /* ARM */
+#define EM_ARM 40 /* ARM */
-#define EM_SH 42 /* SuperH */
+#define EM_SH 42 /* SuperH */
-#define EM_SPARCV9 43 /* SPARC v9 64-bit */
+#define EM_SPARCV9 43 /* SPARC v9 64-bit */
-#define EM_TRICORE 44 /* Infineon TriCore */
+#define EM_TRICORE 44 /* Infineon TriCore */
-#define EM_IA_64 50 /* HP/Intel IA-64 */
+#define EM_IA_64 50 /* HP/Intel IA-64 */
-#define EM_X86_64 62 /* AMD x86-64 */
+#define EM_X86_64 62 /* AMD x86-64 */
-#define EM_S390 22 /* IBM S/390 */
+#define EM_S390 22 /* IBM S/390 */
-#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
-#define EM_V850 87 /* NEC v850 */
+#define EM_AVR 83 /* AVR 8-bit microcontroller */
-#define EM_H8_300H 47 /* Hitachi H8/300H */
-#define EM_H8S 48 /* Hitachi H8S */
-#define EM_LATTICEMICO32 138 /* LatticeMico32 */
+#define EM_V850 87 /* NEC v850 */
-#define EM_OPENRISC 92 /* OpenCores OpenRISC */
+#define EM_H8_300H 47 /* Hitachi H8/300H */
+#define EM_H8S 48 /* Hitachi H8S */
+#define EM_LATTICEMICO32 138 /* LatticeMico32 */
-#define EM_UNICORE32 110 /* UniCore32 */
+#define EM_OPENRISC 92 /* OpenCores OpenRISC */
-#define EM_RISCV 243 /* RISC-V */
+#define EM_HEXAGON 164 /* Qualcomm Hexagon */
-#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */
+#define EM_RX 173 /* Renesas RX family */
+
+#define EM_RISCV 243 /* RISC-V */
+
+#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */
+
+#define EM_LOONGARCH 258 /* LoongArch */
/*
* This is an interim value that we will use until the committee comes
* up with a final number.
*/
-#define EM_ALPHA 0x9026
+#define EM_ALPHA 0x9026
/* Bogus old v850 magic number, used by old tools. */
-#define EM_CYGNUS_V850 0x9080
+#define EM_CYGNUS_V850 0x9080
/*
* This is the old interim value for S/390 architecture
*/
-#define EM_S390_OLD 0xA390
+#define EM_S390_OLD 0xA390
-#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
+#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
-#define EM_MICROBLAZE 189
-#define EM_MICROBLAZE_OLD 0xBAAB
+#define EM_MICROBLAZE 189
+#define EM_MICROBLAZE_OLD 0xBAAB
-#define EM_XTENSA 94 /* Tensilica Xtensa */
+#define EM_XTENSA 94 /* Tensilica Xtensa */
-#define EM_AARCH64 183
+#define EM_AARCH64 183
-#define EM_TILEGX 191 /* TILE-Gx */
-
-#define EM_MOXIE 223 /* Moxie processor family */
-#define EM_MOXIE_OLD 0xFEED
+#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */
/* This is the info that is needed to parse the dynamic section of the file */
-#define DT_NULL 0
-#define DT_NEEDED 1
-#define DT_PLTRELSZ 2
-#define DT_PLTGOT 3
-#define DT_HASH 4
-#define DT_STRTAB 5
-#define DT_SYMTAB 6
-#define DT_RELA 7
-#define DT_RELASZ 8
-#define DT_RELAENT 9
-#define DT_STRSZ 10
-#define DT_SYMENT 11
-#define DT_INIT 12
-#define DT_FINI 13
-#define DT_SONAME 14
-#define DT_RPATH 15
-#define DT_SYMBOLIC 16
-#define DT_REL 17
-#define DT_RELSZ 18
-#define DT_RELENT 19
-#define DT_PLTREL 20
-#define DT_DEBUG 21
-#define DT_TEXTREL 22
-#define DT_JMPREL 23
-#define DT_BINDNOW 24
-#define DT_INIT_ARRAY 25
-#define DT_FINI_ARRAY 26
-#define DT_INIT_ARRAYSZ 27
-#define DT_FINI_ARRAYSZ 28
-#define DT_RUNPATH 29
-#define DT_FLAGS 30
-#define DT_LOOS 0x6000000d
-#define DT_HIOS 0x6ffff000
-#define DT_LOPROC 0x70000000
-#define DT_HIPROC 0x7fffffff
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+#define DT_BINDNOW 24
+#define DT_INIT_ARRAY 25
+#define DT_FINI_ARRAY 26
+#define DT_INIT_ARRAYSZ 27
+#define DT_FINI_ARRAYSZ 28
+#define DT_RUNPATH 29
+#define DT_FLAGS 30
+#define DT_LOOS 0x6000000d
+#define DT_HIOS 0x6ffff000
+#define DT_LOPROC 0x70000000
+#define DT_HIPROC 0x7fffffff
/* DT_ entries which fall between DT_VALRNGLO and DT_VALRNDHI use
the d_val field of the Elf*_Dyn structure. I.e. they contain scalars. */
-#define DT_VALRNGLO 0x6ffffd00
-#define DT_VALRNGHI 0x6ffffdff
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_VALRNGHI 0x6ffffdff
/* DT_ entries which fall between DT_ADDRRNGLO and DT_ADDRRNGHI use
the d_ptr field of the Elf*_Dyn structure. I.e. they contain pointers. */
-#define DT_ADDRRNGLO 0x6ffffe00
-#define DT_ADDRRNGHI 0x6ffffeff
-
-#define DT_VERSYM 0x6ffffff0
-#define DT_RELACOUNT 0x6ffffff9
-#define DT_RELCOUNT 0x6ffffffa
-#define DT_FLAGS_1 0x6ffffffb
-#define DT_VERDEF 0x6ffffffc
-#define DT_VERDEFNUM 0x6ffffffd
-#define DT_VERNEED 0x6ffffffe
-#define DT_VERNEEDNUM 0x6fffffff
-
-#define DT_MIPS_RLD_VERSION 0x70000001
-#define DT_MIPS_TIME_STAMP 0x70000002
-#define DT_MIPS_ICHECKSUM 0x70000003
-#define DT_MIPS_IVERSION 0x70000004
-#define DT_MIPS_FLAGS 0x70000005
- #define RHF_NONE 0
- #define RHF_HARDWAY 1
- #define RHF_NOTPOT 2
-#define DT_MIPS_BASE_ADDRESS 0x70000006
-#define DT_MIPS_CONFLICT 0x70000008
-#define DT_MIPS_LIBLIST 0x70000009
-#define DT_MIPS_LOCAL_GOTNO 0x7000000a
-#define DT_MIPS_CONFLICTNO 0x7000000b
-#define DT_MIPS_LIBLISTNO 0x70000010
-#define DT_MIPS_SYMTABNO 0x70000011
-#define DT_MIPS_UNREFEXTNO 0x70000012
-#define DT_MIPS_GOTSYM 0x70000013
-#define DT_MIPS_HIPAGENO 0x70000014
-#define DT_MIPS_RLD_MAP 0x70000016
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_ADDRRNGHI 0x6ffffeff
+
+#define DT_VERSYM 0x6ffffff0
+#define DT_RELACOUNT 0x6ffffff9
+#define DT_RELCOUNT 0x6ffffffa
+#define DT_FLAGS_1 0x6ffffffb
+#define DT_VERDEF 0x6ffffffc
+#define DT_VERDEFNUM 0x6ffffffd
+#define DT_VERNEED 0x6ffffffe
+#define DT_VERNEEDNUM 0x6fffffff
+
+#define DT_MIPS_RLD_VERSION 0x70000001
+#define DT_MIPS_TIME_STAMP 0x70000002
+#define DT_MIPS_ICHECKSUM 0x70000003
+#define DT_MIPS_IVERSION 0x70000004
+#define DT_MIPS_FLAGS 0x70000005
+ #define RHF_NONE 0
+ #define RHF_HARDWAY 1
+ #define RHF_NOTPOT 2
+#define DT_MIPS_BASE_ADDRESS 0x70000006
+#define DT_MIPS_CONFLICT 0x70000008
+#define DT_MIPS_LIBLIST 0x70000009
+#define DT_MIPS_LOCAL_GOTNO 0x7000000a
+#define DT_MIPS_CONFLICTNO 0x7000000b
+#define DT_MIPS_LIBLISTNO 0x70000010
+#define DT_MIPS_SYMTABNO 0x70000011
+#define DT_MIPS_UNREFEXTNO 0x70000012
+#define DT_MIPS_GOTSYM 0x70000013
+#define DT_MIPS_HIPAGENO 0x70000014
+#define DT_MIPS_RLD_MAP 0x70000016
/* This info is needed when parsing the symbol table */
#define STB_LOCAL 0
@@ -289,61 +297,61 @@ typedef struct mips_elf_abiflags_v0 {
#define STT_SECTION 3
#define STT_FILE 4
-#define ELF_ST_BIND(x) ((x) >> 4)
-#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF_ST_BIND(x) ((x) >> 4)
+#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
#define ELF_ST_INFO(bind, type) (((bind) << 4) | ((type) & 0xf))
-#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
-#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
-#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
-#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
/* Symbolic values for the entries in the auxiliary table
put on the initial stack */
-#define AT_NULL 0 /* end of vector */
-#define AT_IGNORE 1 /* entry should be ignored */
-#define AT_EXECFD 2 /* file descriptor of program */
-#define AT_PHDR 3 /* program headers for program */
-#define AT_PHENT 4 /* size of program header entry */
-#define AT_PHNUM 5 /* number of program headers */
-#define AT_PAGESZ 6 /* system page size */
-#define AT_BASE 7 /* base address of interpreter */
-#define AT_FLAGS 8 /* flags */
-#define AT_ENTRY 9 /* entry point of program */
-#define AT_NOTELF 10 /* program is not ELF */
-#define AT_UID 11 /* real uid */
-#define AT_EUID 12 /* effective uid */
-#define AT_GID 13 /* real gid */
-#define AT_EGID 14 /* effective gid */
-#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
-#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
-#define AT_CLKTCK 17 /* frequency at which times() increments */
-#define AT_FPUCW 18 /* info about fpu initialization by kernel */
-#define AT_DCACHEBSIZE 19 /* data cache block size */
-#define AT_ICACHEBSIZE 20 /* instruction cache block size */
-#define AT_UCACHEBSIZE 21 /* unified cache block size */
-#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */
-#define AT_SECURE 23 /* boolean, was exec suid-like? */
-#define AT_BASE_PLATFORM 24 /* string identifying real platforms */
-#define AT_RANDOM 25 /* address of 16 random bytes */
-#define AT_HWCAP2 26 /* extension of AT_HWCAP */
-#define AT_EXECFN 31 /* filename of the executable */
-#define AT_SYSINFO 32 /* address of kernel entry point */
-#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */
-#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */
-#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */
-#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */
-#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */
+#define AT_NULL 0 /* end of vector */
+#define AT_IGNORE 1 /* entry should be ignored */
+#define AT_EXECFD 2 /* file descriptor of program */
+#define AT_PHDR 3 /* program headers for program */
+#define AT_PHENT 4 /* size of program header entry */
+#define AT_PHNUM 5 /* number of program headers */
+#define AT_PAGESZ 6 /* system page size */
+#define AT_BASE 7 /* base address of interpreter */
+#define AT_FLAGS 8 /* flags */
+#define AT_ENTRY 9 /* entry point of program */
+#define AT_NOTELF 10 /* program is not ELF */
+#define AT_UID 11 /* real uid */
+#define AT_EUID 12 /* effective uid */
+#define AT_GID 13 /* real gid */
+#define AT_EGID 14 /* effective gid */
+#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
+#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
+#define AT_CLKTCK 17 /* frequency at which times() increments */
+#define AT_FPUCW 18 /* info about fpu initialization by kernel */
+#define AT_DCACHEBSIZE 19 /* data cache block size */
+#define AT_ICACHEBSIZE 20 /* instruction cache block size */
+#define AT_UCACHEBSIZE 21 /* unified cache block size */
+#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */
+#define AT_SECURE 23 /* boolean, was exec suid-like? */
+#define AT_BASE_PLATFORM 24 /* string identifying real platforms */
+#define AT_RANDOM 25 /* address of 16 random bytes */
+#define AT_HWCAP2 26 /* extension of AT_HWCAP */
+#define AT_EXECFN 31 /* filename of the executable */
+#define AT_SYSINFO 32 /* address of kernel entry point */
+#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */
+#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */
+#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */
+#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */
+#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */
typedef struct dynamic{
Elf32_Sword d_tag;
union{
- Elf32_Sword d_val;
- Elf32_Addr d_ptr;
+ Elf32_Sword d_val;
+ Elf32_Addr d_ptr;
} d_un;
} Elf32_Dyn;
typedef struct {
- Elf64_Sxword d_tag; /* entry tag value */
+ Elf64_Sxword d_tag; /* entry tag value */
union {
Elf64_Xword d_val;
Elf64_Addr d_ptr;
@@ -354,72 +362,72 @@ typedef struct {
#define ELF32_R_SYM(x) ((x) >> 8)
#define ELF32_R_TYPE(x) ((x) & 0xff)
-#define ELF64_R_SYM(i) ((i) >> 32)
-#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
+#define ELF64_R_SYM(i) ((i) >> 32)
+#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
#define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000)
-#define R_386_NONE 0
-#define R_386_32 1
-#define R_386_PC32 2
-#define R_386_GOT32 3
-#define R_386_PLT32 4
-#define R_386_COPY 5
-#define R_386_GLOB_DAT 6
-#define R_386_JMP_SLOT 7
-#define R_386_RELATIVE 8
-#define R_386_GOTOFF 9
-#define R_386_GOTPC 10
-#define R_386_NUM 11
+#define R_386_NONE 0
+#define R_386_32 1
+#define R_386_PC32 2
+#define R_386_GOT32 3
+#define R_386_PLT32 4
+#define R_386_COPY 5
+#define R_386_GLOB_DAT 6
+#define R_386_JMP_SLOT 7
+#define R_386_RELATIVE 8
+#define R_386_GOTOFF 9
+#define R_386_GOTPC 10
+#define R_386_NUM 11
/* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */
-#define R_386_PC8 23
-
-#define R_MIPS_NONE 0
-#define R_MIPS_16 1
-#define R_MIPS_32 2
-#define R_MIPS_REL32 3
-#define R_MIPS_26 4
-#define R_MIPS_HI16 5
-#define R_MIPS_LO16 6
-#define R_MIPS_GPREL16 7
-#define R_MIPS_LITERAL 8
-#define R_MIPS_GOT16 9
-#define R_MIPS_PC16 10
-#define R_MIPS_CALL16 11
-#define R_MIPS_GPREL32 12
+#define R_386_PC8 23
+
+#define R_MIPS_NONE 0
+#define R_MIPS_16 1
+#define R_MIPS_32 2
+#define R_MIPS_REL32 3
+#define R_MIPS_26 4
+#define R_MIPS_HI16 5
+#define R_MIPS_LO16 6
+#define R_MIPS_GPREL16 7
+#define R_MIPS_LITERAL 8
+#define R_MIPS_GOT16 9
+#define R_MIPS_PC16 10
+#define R_MIPS_CALL16 11
+#define R_MIPS_GPREL32 12
/* The remaining relocs are defined on Irix, although they are not
in the MIPS ELF ABI. */
-#define R_MIPS_UNUSED1 13
-#define R_MIPS_UNUSED2 14
-#define R_MIPS_UNUSED3 15
-#define R_MIPS_SHIFT5 16
-#define R_MIPS_SHIFT6 17
-#define R_MIPS_64 18
-#define R_MIPS_GOT_DISP 19
-#define R_MIPS_GOT_PAGE 20
-#define R_MIPS_GOT_OFST 21
+#define R_MIPS_UNUSED1 13
+#define R_MIPS_UNUSED2 14
+#define R_MIPS_UNUSED3 15
+#define R_MIPS_SHIFT5 16
+#define R_MIPS_SHIFT6 17
+#define R_MIPS_64 18
+#define R_MIPS_GOT_DISP 19
+#define R_MIPS_GOT_PAGE 20
+#define R_MIPS_GOT_OFST 21
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
-#define R_MIPS_GOTHI16 22
-#define R_MIPS_GOTLO16 23
-#define R_MIPS_SUB 24
-#define R_MIPS_INSERT_A 25
-#define R_MIPS_INSERT_B 26
-#define R_MIPS_DELETE 27
-#define R_MIPS_HIGHER 28
-#define R_MIPS_HIGHEST 29
+#define R_MIPS_GOTHI16 22
+#define R_MIPS_GOTLO16 23
+#define R_MIPS_SUB 24
+#define R_MIPS_INSERT_A 25
+#define R_MIPS_INSERT_B 26
+#define R_MIPS_DELETE 27
+#define R_MIPS_HIGHER 28
+#define R_MIPS_HIGHEST 29
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
-#define R_MIPS_CALLHI16 30
-#define R_MIPS_CALLLO16 31
+#define R_MIPS_CALLHI16 30
+#define R_MIPS_CALLLO16 31
/*
* This range is reserved for vendor specific relocations.
*/
-#define R_MIPS_LOVENDOR 100
-#define R_MIPS_HIVENDOR 127
+#define R_MIPS_LOVENDOR 100
+#define R_MIPS_HIVENDOR 127
/* SUN SPARC specific definitions. */
@@ -440,48 +448,48 @@ typedef struct {
/*
* Sparc ELF relocation types
*/
-#define R_SPARC_NONE 0
-#define R_SPARC_8 1
-#define R_SPARC_16 2
-#define R_SPARC_32 3
-#define R_SPARC_DISP8 4
-#define R_SPARC_DISP16 5
-#define R_SPARC_DISP32 6
-#define R_SPARC_WDISP30 7
-#define R_SPARC_WDISP22 8
-#define R_SPARC_HI22 9
-#define R_SPARC_22 10
-#define R_SPARC_13 11
-#define R_SPARC_LO10 12
-#define R_SPARC_GOT10 13
-#define R_SPARC_GOT13 14
-#define R_SPARC_GOT22 15
-#define R_SPARC_PC10 16
-#define R_SPARC_PC22 17
-#define R_SPARC_WPLT30 18
-#define R_SPARC_COPY 19
-#define R_SPARC_GLOB_DAT 20
-#define R_SPARC_JMP_SLOT 21
-#define R_SPARC_RELATIVE 22
-#define R_SPARC_UA32 23
-#define R_SPARC_PLT32 24
-#define R_SPARC_HIPLT22 25
-#define R_SPARC_LOPLT10 26
-#define R_SPARC_PCPLT32 27
-#define R_SPARC_PCPLT22 28
-#define R_SPARC_PCPLT10 29
-#define R_SPARC_10 30
-#define R_SPARC_11 31
-#define R_SPARC_64 32
-#define R_SPARC_OLO10 33
-#define R_SPARC_HH22 34
-#define R_SPARC_HM10 35
-#define R_SPARC_LM22 36
-#define R_SPARC_WDISP16 40
-#define R_SPARC_WDISP19 41
-#define R_SPARC_7 43
-#define R_SPARC_5 44
-#define R_SPARC_6 45
+#define R_SPARC_NONE 0
+#define R_SPARC_8 1
+#define R_SPARC_16 2
+#define R_SPARC_32 3
+#define R_SPARC_DISP8 4
+#define R_SPARC_DISP16 5
+#define R_SPARC_DISP32 6
+#define R_SPARC_WDISP30 7
+#define R_SPARC_WDISP22 8
+#define R_SPARC_HI22 9
+#define R_SPARC_22 10
+#define R_SPARC_13 11
+#define R_SPARC_LO10 12
+#define R_SPARC_GOT10 13
+#define R_SPARC_GOT13 14
+#define R_SPARC_GOT22 15
+#define R_SPARC_PC10 16
+#define R_SPARC_PC22 17
+#define R_SPARC_WPLT30 18
+#define R_SPARC_COPY 19
+#define R_SPARC_GLOB_DAT 20
+#define R_SPARC_JMP_SLOT 21
+#define R_SPARC_RELATIVE 22
+#define R_SPARC_UA32 23
+#define R_SPARC_PLT32 24
+#define R_SPARC_HIPLT22 25
+#define R_SPARC_LOPLT10 26
+#define R_SPARC_PCPLT32 27
+#define R_SPARC_PCPLT22 28
+#define R_SPARC_PCPLT10 29
+#define R_SPARC_10 30
+#define R_SPARC_11 31
+#define R_SPARC_64 32
+#define R_SPARC_OLO10 33
+#define R_SPARC_HH22 34
+#define R_SPARC_HM10 35
+#define R_SPARC_LM22 36
+#define R_SPARC_WDISP16 40
+#define R_SPARC_WDISP19 41
+#define R_SPARC_7 43
+#define R_SPARC_5 44
+#define R_SPARC_6 45
/* Bits present in AT_HWCAP for ARM. */
@@ -554,6 +562,7 @@ typedef struct {
#define PPC_FEATURE2_HTM_NOSC 0x01000000
#define PPC_FEATURE2_ARCH_3_00 0x00800000
#define PPC_FEATURE2_HAS_IEEE128 0x00400000
+#define PPC_FEATURE2_ARCH_3_10 0x00040000
/* Bits present in AT_HWCAP for Sparc. */
@@ -587,17 +596,53 @@ typedef struct {
/* Bits present in AT_HWCAP for s390. */
-#define HWCAP_S390_ESAN3 1
-#define HWCAP_S390_ZARCH 2
-#define HWCAP_S390_STFLE 4
-#define HWCAP_S390_MSA 8
-#define HWCAP_S390_LDISP 16
-#define HWCAP_S390_EIMM 32
-#define HWCAP_S390_DFP 64
-#define HWCAP_S390_HPAGE 128
-#define HWCAP_S390_ETF3EH 256
-#define HWCAP_S390_HIGH_GPRS 512
-#define HWCAP_S390_TE 1024
+#define HWCAP_S390_NR_ESAN3 0
+#define HWCAP_S390_NR_ZARCH 1
+#define HWCAP_S390_NR_STFLE 2
+#define HWCAP_S390_NR_MSA 3
+#define HWCAP_S390_NR_LDISP 4
+#define HWCAP_S390_NR_EIMM 5
+#define HWCAP_S390_NR_DFP 6
+#define HWCAP_S390_NR_HPAGE 7
+#define HWCAP_S390_NR_ETF3EH 8
+#define HWCAP_S390_NR_HIGH_GPRS 9
+#define HWCAP_S390_NR_TE 10
+#define HWCAP_S390_NR_VXRS 11
+#define HWCAP_S390_NR_VXRS_BCD 12
+#define HWCAP_S390_NR_VXRS_EXT 13
+#define HWCAP_S390_NR_GS 14
+#define HWCAP_S390_NR_VXRS_EXT2 15
+#define HWCAP_S390_NR_VXRS_PDE 16
+#define HWCAP_S390_NR_SORT 17
+#define HWCAP_S390_NR_DFLT 18
+#define HWCAP_S390_NR_VXRS_PDE2 19
+#define HWCAP_S390_NR_NNPA 20
+#define HWCAP_S390_NR_PCI_MIO 21
+#define HWCAP_S390_NR_SIE 22
+
+#define HWCAP_S390_ESAN3 (1 << HWCAP_S390_NR_ESAN3)
+#define HWCAP_S390_ZARCH (1 << HWCAP_S390_NR_ZARCH)
+#define HWCAP_S390_STFLE (1 << HWCAP_S390_NR_STFLE)
+#define HWCAP_S390_MSA (1 << HWCAP_S390_NR_MSA)
+#define HWCAP_S390_LDISP (1 << HWCAP_S390_NR_LDISP)
+#define HWCAP_S390_EIMM (1 << HWCAP_S390_NR_EIMM)
+#define HWCAP_S390_DFP (1 << HWCAP_S390_NR_DFP)
+#define HWCAP_S390_HPAGE (1 << HWCAP_S390_NR_HPAGE)
+#define HWCAP_S390_ETF3EH (1 << HWCAP_S390_NR_ETF3EH)
+#define HWCAP_S390_HIGH_GPRS (1 << HWCAP_S390_NR_HIGH_GPRS)
+#define HWCAP_S390_TE (1 << HWCAP_S390_NR_TE)
+#define HWCAP_S390_VXRS (1 << HWCAP_S390_NR_VXRS)
+#define HWCAP_S390_VXRS_BCD (1 << HWCAP_S390_NR_VXRS_BCD)
+#define HWCAP_S390_VXRS_EXT (1 << HWCAP_S390_NR_VXRS_EXT)
+#define HWCAP_S390_GS (1 << HWCAP_S390_NR_GS)
+#define HWCAP_S390_VXRS_EXT2 (1 << HWCAP_S390_NR_VXRS_EXT2)
+#define HWCAP_S390_VXRS_PDE (1 << HWCAP_S390_NR_VXRS_PDE)
+#define HWCAP_S390_SORT (1 << HWCAP_S390_NR_SORT)
+#define HWCAP_S390_DFLT (1 << HWCAP_S390_NR_DFLT)
+#define HWCAP_S390_VXRS_PDE2 (1 << HWCAP_S390_NR_VXRS_PDE2)
+#define HWCAP_S390_NNPA (1 << HWCAP_S390_NR_NNPA)
+#define HWCAP_S390_PCI_MIO (1 << HWCAP_S390_NR_PCI_MIO)
+#define HWCAP_S390_SIE (1 << HWCAP_S390_NR_SIE)
/* M68K specific definitions. */
/* We use the top 24 bits to encode information about the
@@ -630,29 +675,29 @@ typedef struct {
/*
* 68k ELF relocation types
*/
-#define R_68K_NONE 0
-#define R_68K_32 1
-#define R_68K_16 2
-#define R_68K_8 3
-#define R_68K_PC32 4
-#define R_68K_PC16 5
-#define R_68K_PC8 6
-#define R_68K_GOT32 7
-#define R_68K_GOT16 8
-#define R_68K_GOT8 9
-#define R_68K_GOT32O 10
-#define R_68K_GOT16O 11
-#define R_68K_GOT8O 12
-#define R_68K_PLT32 13
-#define R_68K_PLT16 14
-#define R_68K_PLT8 15
-#define R_68K_PLT32O 16
-#define R_68K_PLT16O 17
-#define R_68K_PLT8O 18
-#define R_68K_COPY 19
-#define R_68K_GLOB_DAT 20
-#define R_68K_JMP_SLOT 21
-#define R_68K_RELATIVE 22
+#define R_68K_NONE 0
+#define R_68K_32 1
+#define R_68K_16 2
+#define R_68K_8 3
+#define R_68K_PC32 4
+#define R_68K_PC16 5
+#define R_68K_PC8 6
+#define R_68K_GOT32 7
+#define R_68K_GOT16 8
+#define R_68K_GOT8 9
+#define R_68K_GOT32O 10
+#define R_68K_GOT16O 11
+#define R_68K_GOT8O 12
+#define R_68K_PLT32 13
+#define R_68K_PLT16 14
+#define R_68K_PLT8 15
+#define R_68K_PLT32O 16
+#define R_68K_PLT16O 17
+#define R_68K_PLT8O 18
+#define R_68K_COPY 19
+#define R_68K_GLOB_DAT 20
+#define R_68K_JMP_SLOT 21
+#define R_68K_RELATIVE 22
/*
* Alpha ELF relocation types
@@ -676,7 +721,7 @@ typedef struct {
#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
-#define R_ALPHA_BRSGP 28
+#define R_ALPHA_BRSGP 28
#define R_ALPHA_TLSGD 29
#define R_ALPHA_TLS_LDM 30
#define R_ALPHA_DTPMOD64 31
@@ -691,78 +736,78 @@ typedef struct {
#define R_ALPHA_TPRELLO 40
#define R_ALPHA_TPREL16 41
-#define SHF_ALPHA_GPREL 0x10000000
+#define SHF_ALPHA_GPREL 0x10000000
/* PowerPC specific definitions. */
/* Processor specific flags for the ELF header e_flags field. */
-#define EF_PPC64_ABI 0x3
+#define EF_PPC64_ABI 0x3
/* PowerPC relocations defined by the ABIs */
-#define R_PPC_NONE 0
-#define R_PPC_ADDR32 1 /* 32bit absolute address */
-#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
-#define R_PPC_ADDR16 3 /* 16bit absolute address */
-#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
-#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
-#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
-#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
-#define R_PPC_ADDR14_BRTAKEN 8
-#define R_PPC_ADDR14_BRNTAKEN 9
-#define R_PPC_REL24 10 /* PC relative 26 bit */
-#define R_PPC_REL14 11 /* PC relative 16 bit */
-#define R_PPC_REL14_BRTAKEN 12
-#define R_PPC_REL14_BRNTAKEN 13
-#define R_PPC_GOT16 14
-#define R_PPC_GOT16_LO 15
-#define R_PPC_GOT16_HI 16
-#define R_PPC_GOT16_HA 17
-#define R_PPC_PLTREL24 18
-#define R_PPC_COPY 19
-#define R_PPC_GLOB_DAT 20
-#define R_PPC_JMP_SLOT 21
-#define R_PPC_RELATIVE 22
-#define R_PPC_LOCAL24PC 23
-#define R_PPC_UADDR32 24
-#define R_PPC_UADDR16 25
-#define R_PPC_REL32 26
-#define R_PPC_PLT32 27
-#define R_PPC_PLTREL32 28
-#define R_PPC_PLT16_LO 29
-#define R_PPC_PLT16_HI 30
-#define R_PPC_PLT16_HA 31
-#define R_PPC_SDAREL16 32
-#define R_PPC_SECTOFF 33
-#define R_PPC_SECTOFF_LO 34
-#define R_PPC_SECTOFF_HI 35
-#define R_PPC_SECTOFF_HA 36
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
/* Keep this the last entry. */
#ifndef R_PPC_NUM
-#define R_PPC_NUM 37
+#define R_PPC_NUM 37
#endif
/* ARM specific declarations */
/* Processor specific flags for the ELF header e_flags field. */
-#define EF_ARM_RELEXEC 0x01
-#define EF_ARM_HASENTRY 0x02
-#define EF_ARM_INTERWORK 0x04
-#define EF_ARM_APCS_26 0x08
-#define EF_ARM_APCS_FLOAT 0x10
-#define EF_ARM_PIC 0x20
-#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */
-#define EF_NEW_ABI 0x80
-#define EF_OLD_ABI 0x100
-#define EF_ARM_SOFT_FLOAT 0x200
-#define EF_ARM_VFP_FLOAT 0x400
+#define EF_ARM_RELEXEC 0x01
+#define EF_ARM_HASENTRY 0x02
+#define EF_ARM_INTERWORK 0x04
+#define EF_ARM_APCS_26 0x08
+#define EF_ARM_APCS_FLOAT 0x10
+#define EF_ARM_PIC 0x20
+#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */
+#define EF_NEW_ABI 0x80
+#define EF_OLD_ABI 0x100
+#define EF_ARM_SOFT_FLOAT 0x200
+#define EF_ARM_VFP_FLOAT 0x400
#define EF_ARM_MAVERICK_FLOAT 0x800
/* Other constants defined in the ARM ELF spec. version B-01. */
-#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */
-#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */
-#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */
-#define EF_ARM_EABIMASK 0xFF000000
+#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */
+#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */
+#define EF_ARM_EABIMASK 0xFF000000
/* Constants defined in AAELF. */
#define EF_ARM_BE8 0x00800000
@@ -789,46 +834,46 @@ typedef struct {
addressed by the static base */
/* ARM relocs. */
-#define R_ARM_NONE 0 /* No reloc */
-#define R_ARM_PC24 1 /* PC relative 26 bit branch */
-#define R_ARM_ABS32 2 /* Direct 32 bit */
-#define R_ARM_REL32 3 /* PC relative 32 bit */
-#define R_ARM_PC13 4
-#define R_ARM_ABS16 5 /* Direct 16 bit */
-#define R_ARM_ABS12 6 /* Direct 12 bit */
-#define R_ARM_THM_ABS5 7
-#define R_ARM_ABS8 8 /* Direct 8 bit */
-#define R_ARM_SBREL32 9
-#define R_ARM_THM_PC22 10
-#define R_ARM_THM_PC8 11
-#define R_ARM_AMP_VCALL9 12
-#define R_ARM_SWI24 13
-#define R_ARM_THM_SWI8 14
-#define R_ARM_XPC25 15
-#define R_ARM_THM_XPC22 16
-#define R_ARM_COPY 20 /* Copy symbol at runtime */
-#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
-#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
-#define R_ARM_RELATIVE 23 /* Adjust by program base */
-#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
-#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
-#define R_ARM_GOT32 26 /* 32 bit GOT entry */
-#define R_ARM_PLT32 27 /* 32 bit PLT address */
+#define R_ARM_NONE 0 /* No reloc */
+#define R_ARM_PC24 1 /* PC relative 26 bit branch */
+#define R_ARM_ABS32 2 /* Direct 32 bit */
+#define R_ARM_REL32 3 /* PC relative 32 bit */
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5 /* Direct 16 bit */
+#define R_ARM_ABS12 6 /* Direct 12 bit */
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8 /* Direct 8 bit */
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+#define R_ARM_COPY 20 /* Copy symbol at runtime */
+#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
+#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
+#define R_ARM_RELATIVE 23 /* Adjust by program base */
+#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
+#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
+#define R_ARM_GOT32 26 /* 32 bit GOT entry */
+#define R_ARM_PLT32 27 /* 32 bit PLT address */
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
-#define R_ARM_GNU_VTENTRY 100
-#define R_ARM_GNU_VTINHERIT 101
-#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
-#define R_ARM_THM_PC9 103 /* thumb conditional branch */
-#define R_ARM_RXPC25 249
-#define R_ARM_RSBREL32 250
-#define R_ARM_THM_RPC22 251
-#define R_ARM_RREL32 252
-#define R_ARM_RABS22 253
-#define R_ARM_RPC24 254
-#define R_ARM_RBASE 255
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
+#define R_ARM_THM_PC9 103 /* thumb conditional branch */
+#define R_ARM_RXPC25 249
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS22 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
/* Keep this the last entry. */
-#define R_ARM_NUM 256
+#define R_ARM_NUM 256
/* ARM Aarch64 relocation types */
#define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */
@@ -958,385 +1003,385 @@ typedef struct {
#define R_AARCH64_TLS_TPREL32 1033
/* s390 relocations defined by the ABIs */
-#define R_390_NONE 0 /* No reloc. */
-#define R_390_8 1 /* Direct 8 bit. */
-#define R_390_12 2 /* Direct 12 bit. */
-#define R_390_16 3 /* Direct 16 bit. */
-#define R_390_32 4 /* Direct 32 bit. */
-#define R_390_PC32 5 /* PC relative 32 bit. */
-#define R_390_GOT12 6 /* 12 bit GOT offset. */
-#define R_390_GOT32 7 /* 32 bit GOT offset. */
-#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
-#define R_390_COPY 9 /* Copy symbol at runtime. */
-#define R_390_GLOB_DAT 10 /* Create GOT entry. */
-#define R_390_JMP_SLOT 11 /* Create PLT entry. */
-#define R_390_RELATIVE 12 /* Adjust by program base. */
-#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
-#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
-#define R_390_GOT16 15 /* 16 bit GOT offset. */
-#define R_390_PC16 16 /* PC relative 16 bit. */
-#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
-#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
-#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
-#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
-#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
-#define R_390_64 22 /* Direct 64 bit. */
-#define R_390_PC64 23 /* PC relative 64 bit. */
-#define R_390_GOT64 24 /* 64 bit GOT offset. */
-#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
-#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
-#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
-#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
-#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
-#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
-#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
-#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
-#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
-#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
-#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
-#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */
-#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
-#define R_390_TLS_GDCALL 38 /* Tag for function call in general
- dynamic TLS code. */
-#define R_390_TLS_LDCALL 39 /* Tag for function call in local
- dynamic TLS code. */
-#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
- thread local data. */
-#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
- thread local data. */
-#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
- thread local data in LD code. */
-#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
- thread local data in LD code. */
-#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
- static TLS block. */
-#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
- static TLS block. */
-#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
- block. */
-#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
- block. */
-#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
-#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
-#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS
+#define R_390_NONE 0 /* No reloc. */
+#define R_390_8 1 /* Direct 8 bit. */
+#define R_390_12 2 /* Direct 12 bit. */
+#define R_390_16 3 /* Direct 16 bit. */
+#define R_390_32 4 /* Direct 32 bit. */
+#define R_390_PC32 5 /* PC relative 32 bit. */
+#define R_390_GOT12 6 /* 12 bit GOT offset. */
+#define R_390_GOT32 7 /* 32 bit GOT offset. */
+#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
+#define R_390_COPY 9 /* Copy symbol at runtime. */
+#define R_390_GLOB_DAT 10 /* Create GOT entry. */
+#define R_390_JMP_SLOT 11 /* Create PLT entry. */
+#define R_390_RELATIVE 12 /* Adjust by program base. */
+#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
+#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
+#define R_390_GOT16 15 /* 16 bit GOT offset. */
+#define R_390_PC16 16 /* PC relative 16 bit. */
+#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
+#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
+#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
+#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
+#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
+#define R_390_64 22 /* Direct 64 bit. */
+#define R_390_PC64 23 /* PC relative 64 bit. */
+#define R_390_GOT64 24 /* 64 bit GOT offset. */
+#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
+#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
+#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
+#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
+#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
+#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
+#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
+#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
+#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
+#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
+#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
+#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
+#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
+#define R_390_TLS_GDCALL 38 /* Tag for function call in general
+ dynamic TLS code. */
+#define R_390_TLS_LDCALL 39 /* Tag for function call in local
+ dynamic TLS code. */
+#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
+ block. */
+#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
+ block. */
+#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
+#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
+#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS
block. */
#define R_390_20 57
/* Keep this the last entry. */
#define R_390_NUM 58
/* x86-64 relocation types */
-#define R_X86_64_NONE 0 /* No reloc */
-#define R_X86_64_64 1 /* Direct 64 bit */
-#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
-#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
-#define R_X86_64_PLT32 4 /* 32 bit PLT address */
-#define R_X86_64_COPY 5 /* Copy symbol at runtime */
-#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
-#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
-#define R_X86_64_RELATIVE 8 /* Adjust by program base */
-#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
offset to GOT */
-#define R_X86_64_32 10 /* Direct 32 bit zero extended */
-#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
-#define R_X86_64_16 12 /* Direct 16 bit zero extended */
-#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
-#define R_X86_64_8 14 /* Direct 8 bit sign extended */
-#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-#define R_X86_64_NUM 16
+#define R_X86_64_NUM 16
/* Legal values for e_flags field of Elf64_Ehdr. */
-#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
+#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
/* HPPA specific definitions. */
/* Legal values for e_flags field of Elf32_Ehdr. */
-#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
-#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
-#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
-#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
-#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
+#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
+#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
+#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
+#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
+#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
prediction. */
-#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
-#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
+#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
+#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
-#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
-#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
-#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
+#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
+#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
+#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
-/* Additional section indeces. */
+/* Additional section indices. */
-#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared
+#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
symbols in ANSI C. */
-#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
+#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
/* Legal values for sh_type field of Elf32_Shdr. */
-#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
-#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
-#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
+#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
+#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
+#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
/* Legal values for sh_flags field of Elf32_Shdr. */
-#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
-#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
-#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
+#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
+#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
+#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
/* Legal values for ST_TYPE subfield of st_info (symbol type). */
-#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
+#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
-#define STT_HP_OPAQUE (STT_LOOS + 0x1)
-#define STT_HP_STUB (STT_LOOS + 0x2)
+#define STT_HP_OPAQUE (STT_LOOS + 0x1)
+#define STT_HP_STUB (STT_LOOS + 0x2)
/* HPPA relocs. */
-#define R_PARISC_NONE 0 /* No reloc. */
-#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
-#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
-#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
-#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
-#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
-#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
-#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
-#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
-#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
-#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
-#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
-#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
-#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
-#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
-#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
-#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
-#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
-#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
-#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
-#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
-#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
-#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
-#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
-#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
-#define R_PARISC_FPTR64 64 /* 64 bits function address. */
-#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
-#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
-#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
-#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
-#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
-#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
-#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
-#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
-#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
-#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
-#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
-#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
-#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
-#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
-#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
-#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
-#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
-#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
-#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
-#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
-#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
-#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
-#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
-#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
-#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
-#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
-#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
-#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
-#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
-#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
-#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
-#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
-#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
-#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LORESERVE 128
-#define R_PARISC_COPY 128 /* Copy relocation. */
-#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
-#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
-#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
-#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
-#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
-#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
-#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
-#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
-#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
-#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
-#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_HIRESERVE 255
+#define R_PARISC_NONE 0 /* No reloc. */
+#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
+#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
+#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
+#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
+#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
+#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
+#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
+#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
+#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
+#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
+#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
+#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
+#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
+#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
+#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
+#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
+#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
+#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
+#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
+#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
+#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
+#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
+#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
+#define R_PARISC_FPTR64 64 /* 64 bits function address. */
+#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
+#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
+#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
+#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
+#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
+#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
+#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
+#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
+#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
+#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
+#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
+#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
+#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
+#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
+#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
+#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
+#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF16F 117 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
+#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LORESERVE 128
+#define R_PARISC_COPY 128 /* Copy relocation. */
+#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
+#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
+#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
+#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
+#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
+#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
+#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
+#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
+#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_HIRESERVE 255
/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
-#define PT_HP_TLS (PT_LOOS + 0x0)
-#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
-#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
-#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
-#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
-#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
-#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
-#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
-#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
-#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
-#define PT_HP_PARALLEL (PT_LOOS + 0x10)
-#define PT_HP_FASTBIND (PT_LOOS + 0x11)
-#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
-#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
-#define PT_HP_STACK (PT_LOOS + 0x14)
-
-#define PT_PARISC_ARCHEXT 0x70000000
-#define PT_PARISC_UNWIND 0x70000001
+#define PT_HP_TLS (PT_LOOS + 0x0)
+#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
+#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
+#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
+#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
+#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
+#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
+#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
+#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
+#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
+#define PT_HP_PARALLEL (PT_LOOS + 0x10)
+#define PT_HP_FASTBIND (PT_LOOS + 0x11)
+#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
+#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
+#define PT_HP_STACK (PT_LOOS + 0x14)
+
+#define PT_PARISC_ARCHEXT 0x70000000
+#define PT_PARISC_UNWIND 0x70000001
/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
-#define PF_PARISC_SBP 0x08000000
+#define PF_PARISC_SBP 0x08000000
-#define PF_HP_PAGE_SIZE 0x00100000
-#define PF_HP_FAR_SHARED 0x00200000
-#define PF_HP_NEAR_SHARED 0x00400000
-#define PF_HP_CODE 0x01000000
-#define PF_HP_MODIFY 0x02000000
-#define PF_HP_LAZYSWAP 0x04000000
-#define PF_HP_SBP 0x08000000
+#define PF_HP_PAGE_SIZE 0x00100000
+#define PF_HP_FAR_SHARED 0x00200000
+#define PF_HP_NEAR_SHARED 0x00400000
+#define PF_HP_CODE 0x01000000
+#define PF_HP_MODIFY 0x02000000
+#define PF_HP_LAZYSWAP 0x04000000
+#define PF_HP_SBP 0x08000000
/* IA-64 specific declarations. */
/* Processor specific flags for the Ehdr e_flags field. */
-#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
-#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
-#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
+#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
+#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
+#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
/* Processor specific values for the Phdr p_type field. */
-#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
-#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
+#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
+#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
/* Processor specific flags for the Phdr p_flags field. */
-#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
+#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
/* Processor specific values for the Shdr sh_type field. */
-#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
-#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
+#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
+#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
/* Processor specific flags for the Shdr sh_flags field. */
-#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
-#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
+#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
/* Processor specific values for the Dyn d_tag field. */
-#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
-#define DT_IA_64_NUM 1
+#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
+#define DT_IA_64_NUM 1
/* IA-64 relocations. */
-#define R_IA64_NONE 0x00 /* none */
-#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
-#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
-#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
-#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
-#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
-#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
-#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
-#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
-#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
-#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
-#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
-#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
-#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
-#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
-#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
-#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
-#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
-#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
-#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
-#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
-#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
-#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
-#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
-#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
-#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
-#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
-#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
-#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
-#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
-#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
-#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
-#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
-#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
-#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
-#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
-#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
-#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
-#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
-#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
-#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
-#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
-#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
-#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
-#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
-#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
-#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
-#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
-#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
-#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
-#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
-#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
-#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
-#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
-#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
-#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
-#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
-#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
-#define R_IA64_COPY 0x84 /* copy relocation */
-#define R_IA64_SUB 0x85 /* Addend and symbol difference */
-#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
-#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
-#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
-#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
-#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
-#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
-#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */
-#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
-#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
-#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
-#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
-#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
-#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
-#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
-#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
-#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
-#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* copy relocation */
+#define R_IA64_SUB 0x85 /* Addend and symbol difference */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm22 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
/* RISC-V relocations. */
#define R_RISCV_NONE 0
@@ -1393,48 +1438,58 @@ typedef struct {
#define R_RISCV_SET16 55
#define R_RISCV_SET32 56
+/* RISC-V ELF Flags. */
+#define EF_RISCV_RVC 0x0001
+#define EF_RISCV_FLOAT_ABI 0x0006
+#define EF_RISCV_FLOAT_ABI_SOFT 0x0000
+#define EF_RISCV_FLOAT_ABI_SINGLE 0x0002
+#define EF_RISCV_FLOAT_ABI_DOUBLE 0x0004
+#define EF_RISCV_FLOAT_ABI_QUAD 0x0006
+#define EF_RISCV_RVE 0x0008
+#define EF_RISCV_TSO 0x0010
+
typedef struct elf32_rel {
- Elf32_Addr r_offset;
- Elf32_Word r_info;
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
} Elf32_Rel;
typedef struct elf64_rel {
- Elf64_Addr r_offset; /* Location at which to apply the action */
- Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
} Elf64_Rel;
typedef struct elf32_rela{
- Elf32_Addr r_offset;
- Elf32_Word r_info;
- Elf32_Sword r_addend;
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
} Elf32_Rela;
typedef struct elf64_rela {
- Elf64_Addr r_offset; /* Location at which to apply the action */
- Elf64_Xword r_info; /* index and type of relocation */
- Elf64_Sxword r_addend; /* Constant addend used to compute value */
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Sxword r_addend; /* Constant addend used to compute value */
} Elf64_Rela;
typedef struct elf32_sym{
- Elf32_Word st_name;
- Elf32_Addr st_value;
- Elf32_Word st_size;
- unsigned char st_info;
- unsigned char st_other;
- Elf32_Half st_shndx;
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
} Elf32_Sym;
typedef struct elf64_sym {
- Elf64_Word st_name; /* Symbol name, index in string tbl */
- unsigned char st_info; /* Type and binding attributes */
- unsigned char st_other; /* No defined meaning, 0 */
- Elf64_Half st_shndx; /* Associated section index */
- Elf64_Addr st_value; /* Value of the symbol */
- Elf64_Xword st_size; /* Associated symbol size */
+ Elf64_Word st_name; /* Symbol name, index in string tbl */
+ unsigned char st_info; /* Type and binding attributes */
+ unsigned char st_other; /* No defined meaning, 0 */
+ Elf64_Half st_shndx; /* Associated section index */
+ Elf64_Addr st_value; /* Value of the symbol */
+ Elf64_Xword st_size; /* Associated symbol size */
} Elf64_Sym;
-#define EI_NIDENT 16
+#define EI_NIDENT 16
/* Special value for e_phnum. This indicates that the real number of
program headers is too large to fit into e_phnum. Instead the real
@@ -1442,30 +1497,30 @@ typedef struct elf64_sym {
#define PN_XNUM 0xffff
typedef struct elf32_hdr{
- unsigned char e_ident[EI_NIDENT];
- Elf32_Half e_type;
- Elf32_Half e_machine;
- Elf32_Word e_version;
- Elf32_Addr e_entry; /* Entry point */
- Elf32_Off e_phoff;
- Elf32_Off e_shoff;
- Elf32_Word e_flags;
- Elf32_Half e_ehsize;
- Elf32_Half e_phentsize;
- Elf32_Half e_phnum;
- Elf32_Half e_shentsize;
- Elf32_Half e_shnum;
- Elf32_Half e_shstrndx;
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry; /* Entry point */
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
} Elf32_Ehdr;
typedef struct elf64_hdr {
- unsigned char e_ident[16]; /* ELF "magic number" */
+ unsigned char e_ident[16]; /* ELF "magic number" */
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
- Elf64_Addr e_entry; /* Entry point virtual address */
- Elf64_Off e_phoff; /* Program header table file offset */
- Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
@@ -1477,107 +1532,107 @@ typedef struct elf64_hdr {
/* These constants define the permissions on sections in the program
header, p_flags. */
-#define PF_R 0x4
-#define PF_W 0x2
-#define PF_X 0x1
+#define PF_R 0x4
+#define PF_W 0x2
+#define PF_X 0x1
typedef struct elf32_phdr{
- Elf32_Word p_type;
- Elf32_Off p_offset;
- Elf32_Addr p_vaddr;
- Elf32_Addr p_paddr;
- Elf32_Word p_filesz;
- Elf32_Word p_memsz;
- Elf32_Word p_flags;
- Elf32_Word p_align;
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
} Elf32_Phdr;
typedef struct elf64_phdr {
Elf64_Word p_type;
Elf64_Word p_flags;
- Elf64_Off p_offset; /* Segment file offset */
- Elf64_Addr p_vaddr; /* Segment virtual address */
- Elf64_Addr p_paddr; /* Segment physical address */
- Elf64_Xword p_filesz; /* Segment size in file */
- Elf64_Xword p_memsz; /* Segment size in memory */
- Elf64_Xword p_align; /* Segment alignment, file & memory */
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment, file & memory */
} Elf64_Phdr;
/* sh_type */
-#define SHT_NULL 0
-#define SHT_PROGBITS 1
-#define SHT_SYMTAB 2
-#define SHT_STRTAB 3
-#define SHT_RELA 4
-#define SHT_HASH 5
-#define SHT_DYNAMIC 6
-#define SHT_NOTE 7
-#define SHT_NOBITS 8
-#define SHT_REL 9
-#define SHT_SHLIB 10
-#define SHT_DYNSYM 11
-#define SHT_NUM 12
-#define SHT_LOPROC 0x70000000
-#define SHT_HIPROC 0x7fffffff
-#define SHT_LOUSER 0x80000000
-#define SHT_HIUSER 0xffffffff
-#define SHT_MIPS_LIST 0x70000000
-#define SHT_MIPS_CONFLICT 0x70000002
-#define SHT_MIPS_GPTAB 0x70000003
-#define SHT_MIPS_UCODE 0x70000004
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+#define SHT_NUM 12
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+#define SHT_MIPS_LIST 0x70000000
+#define SHT_MIPS_CONFLICT 0x70000002
+#define SHT_MIPS_GPTAB 0x70000003
+#define SHT_MIPS_UCODE 0x70000004
/* sh_flags */
-#define SHF_WRITE 0x1
-#define SHF_ALLOC 0x2
-#define SHF_EXECINSTR 0x4
-#define SHF_MASKPROC 0xf0000000
-#define SHF_MIPS_GPREL 0x10000000
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_MASKPROC 0xf0000000
+#define SHF_MIPS_GPREL 0x10000000
/* special section indexes */
-#define SHN_UNDEF 0
-#define SHN_LORESERVE 0xff00
-#define SHN_LOPROC 0xff00
-#define SHN_HIPROC 0xff1f
-#define SHN_ABS 0xfff1
-#define SHN_COMMON 0xfff2
-#define SHN_HIRESERVE 0xffff
-#define SHN_MIPS_ACCOMON 0xff00
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+#define SHN_MIPS_ACCOMON 0xff00
typedef struct elf32_shdr {
- Elf32_Word sh_name;
- Elf32_Word sh_type;
- Elf32_Word sh_flags;
- Elf32_Addr sh_addr;
- Elf32_Off sh_offset;
- Elf32_Word sh_size;
- Elf32_Word sh_link;
- Elf32_Word sh_info;
- Elf32_Word sh_addralign;
- Elf32_Word sh_entsize;
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
} Elf32_Shdr;
typedef struct elf64_shdr {
- Elf64_Word sh_name; /* Section name, index in string tbl */
- Elf64_Word sh_type; /* Type of section */
- Elf64_Xword sh_flags; /* Miscellaneous section attributes */
- Elf64_Addr sh_addr; /* Section virtual addr at execution */
- Elf64_Off sh_offset; /* Section file offset */
- Elf64_Xword sh_size; /* Size of section in bytes */
- Elf64_Word sh_link; /* Index of another section */
- Elf64_Word sh_info; /* Additional section information */
- Elf64_Xword sh_addralign; /* Section alignment */
- Elf64_Xword sh_entsize; /* Entry size if section holds table */
+ Elf64_Word sh_name; /* Section name, index in string tbl */
+ Elf64_Word sh_type; /* Type of section */
+ Elf64_Xword sh_flags; /* Miscellaneous section attributes */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Size of section in bytes */
+ Elf64_Word sh_link; /* Index of another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
} Elf64_Shdr;
-#define EI_MAG0 0 /* e_ident[] indexes */
-#define EI_MAG1 1
-#define EI_MAG2 2
-#define EI_MAG3 3
-#define EI_CLASS 4
-#define EI_DATA 5
-#define EI_VERSION 6
-#define EI_OSABI 7
-#define EI_PAD 8
+#define EI_MAG0 0 /* e_ident[] indexes */
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_OSABI 7
+#define EI_PAD 8
#define ELFOSABI_NONE 0 /* UNIX System V ABI */
#define ELFOSABI_SYSV 0 /* Alias. */
@@ -1592,67 +1647,99 @@ typedef struct elf64_shdr {
#define ELFOSABI_MODESTO 11 /* Novell Modesto. */
#define ELFOSABI_OPENBSD 12 /* OpenBSD. */
#define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC */
+#define ELFOSABI_XTENSA_FDPIC 65 /* Xtensa FDPIC */
#define ELFOSABI_ARM 97 /* ARM */
#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
-#define ELFMAG0 0x7f /* EI_MAG */
-#define ELFMAG1 'E'
-#define ELFMAG2 'L'
-#define ELFMAG3 'F'
-#define ELFMAG "\177ELF"
-#define SELFMAG 4
+#define ELFMAG0 0x7f /* EI_MAG */
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF"
+#define SELFMAG 4
-#define ELFCLASSNONE 0 /* EI_CLASS */
-#define ELFCLASS32 1
-#define ELFCLASS64 2
-#define ELFCLASSNUM 3
+#define ELFCLASSNONE 0 /* EI_CLASS */
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+#define ELFCLASSNUM 3
-#define ELFDATANONE 0 /* e_ident[EI_DATA] */
-#define ELFDATA2LSB 1
-#define ELFDATA2MSB 2
+#define ELFDATANONE 0 /* e_ident[EI_DATA] */
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
-#define EV_NONE 0 /* e_version, EI_VERSION */
-#define EV_CURRENT 1
-#define EV_NUM 2
+#define EV_NONE 0 /* e_version, EI_VERSION */
+#define EV_CURRENT 1
+#define EV_NUM 2
/* Notes used in ET_CORE */
-#define NT_PRSTATUS 1
-#define NT_FPREGSET 2
-#define NT_PRFPREG 2
-#define NT_PRPSINFO 3
-#define NT_TASKSTRUCT 4
-#define NT_AUXV 6
-#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
-#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
-#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
-#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */
-#define NT_S390_PREFIX 0x305 /* s390 prefix register */
-#define NT_S390_CTRS 0x304 /* s390 control registers */
-#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
-#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
-#define NT_S390_TIMER 0x301 /* s390 timer register */
-#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
-#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
-#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
-#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
-#define NT_ARM_TLS 0x401 /* ARM TLS register */
-#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
-#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
-#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
+#define NT_PRSTATUS 1
+#define NT_FPREGSET 2
+#define NT_PRFPREG 2
+#define NT_PRPSINFO 3
+#define NT_TASKSTRUCT 4
+#define NT_AUXV 6
+#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
+#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
+#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
+#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
+#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */
+#define NT_S390_PREFIX 0x305 /* s390 prefix register */
+#define NT_S390_CTRS 0x304 /* s390 control registers */
+#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
+#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
+#define NT_S390_TIMER 0x301 /* s390 timer register */
+#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
+#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
+#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
+#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
+#define NT_ARM_TLS 0x401 /* ARM TLS register */
+#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
+#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
+#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
+#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */
+
+/* Defined note types for GNU systems. */
+
+#define NT_GNU_PROPERTY_TYPE_0 5 /* Program property */
+
+/* Values used in GNU .note.gnu.property notes (NT_GNU_PROPERTY_TYPE_0). */
+
+#define GNU_PROPERTY_STACK_SIZE 1
+#define GNU_PROPERTY_NO_COPY_ON_PROTECTED 2
+
+#define GNU_PROPERTY_LOPROC 0xc0000000
+#define GNU_PROPERTY_HIPROC 0xdfffffff
+#define GNU_PROPERTY_LOUSER 0xe0000000
+#define GNU_PROPERTY_HIUSER 0xffffffff
+
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1u << 0)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1u << 1)
+/*
+ * Physical entry point into the kernel.
+ *
+ * 32bit entry point into the kernel. When requested to launch the
+ * guest kernel, use this entry point to launch the guest in 32-bit
+ * protected mode with paging disabled.
+ *
+ * [ Corresponding definition in Linux kernel: include/xen/interface/elfnote.h ]
+ */
+#define XEN_ELFNOTE_PHYS32_ENTRY 18 /* 0x12 */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
- Elf32_Word n_namesz; /* Name size */
- Elf32_Word n_descsz; /* Content size */
- Elf32_Word n_type; /* Content type */
+ Elf32_Word n_namesz; /* Name size */
+ Elf32_Word n_descsz; /* Content size */
+ Elf32_Word n_type; /* Content type */
} Elf32_Nhdr;
/* Note header in a PT_NOTE section */
typedef struct elf64_note {
- Elf64_Word n_namesz; /* Name size */
- Elf64_Word n_descsz; /* Content size */
- Elf64_Word n_type; /* Content type */
+ Elf64_Word n_namesz; /* Name size */
+ Elf64_Word n_descsz; /* Content size */
+ Elf64_Word n_type; /* Content type */
} Elf64_Nhdr;
@@ -1677,13 +1764,13 @@ struct elf32_fdpic_loadmap {
#ifdef ELF_CLASS
#if ELF_CLASS == ELFCLASS32
-#define elfhdr elf32_hdr
-#define elf_phdr elf32_phdr
-#define elf_note elf32_note
-#define elf_shdr elf32_shdr
-#define elf_sym elf32_sym
-#define elf_addr_t Elf32_Off
-#define elf_rela elf32_rela
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_note elf32_note
+#define elf_shdr elf32_shdr
+#define elf_sym elf32_sym
+#define elf_addr_t Elf32_Off
+#define elf_rela elf32_rela
#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf32_Rela
@@ -1693,13 +1780,13 @@ struct elf32_fdpic_loadmap {
#else
-#define elfhdr elf64_hdr
-#define elf_phdr elf64_phdr
-#define elf_note elf64_note
-#define elf_shdr elf64_shdr
-#define elf_sym elf64_sym
-#define elf_addr_t Elf64_Off
-#define elf_rela elf64_rela
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_note elf64_note
+#define elf_shdr elf64_shdr
+#define elf_sym elf64_sym
+#define elf_addr_t Elf64_Off
+#define elf_rela elf64_rela
#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf64_Rela
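The ELF identification constants above (ELFMAG/SELFMAG, the EI_* indexes and the ELFCLASS*/ELFDATA* values) are enough for a minimal header sanity check. A hedged sketch, assuming this elf.h is included; the helper name is illustrative and not part of the patch:

    #include <stdbool.h>
    #include <string.h>

    /* Sketch: validate the e_ident[] of an ELF header already read into memory. */
    static bool elf_ident_ok(const unsigned char *e_ident, bool want_64bit)
    {
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
            return false;                               /* not "\177ELF" */
        }
        if (e_ident[EI_CLASS] != (want_64bit ? ELFCLASS64 : ELFCLASS32)) {
            return false;                               /* wrong word size */
        }
        /* A valid object must declare one of the two byte orders. */
        return e_ident[EI_DATA] == ELFDATA2LSB || e_ident[EI_DATA] == ELFDATA2MSB;
    }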
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
index db8bfa9a92..0d0aa61d68 100644
--- a/include/exec/address-spaces.h
+++ b/include/exec/address-spaces.h
@@ -19,8 +19,6 @@
* you're one of them.
*/
-#include "exec/memory.h"
-
#ifndef CONFIG_USER_ONLY
/* Get the root memory region. This interface should only be used temporarily
diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h
new file mode 100644
index 0000000000..e5b188cffb
--- /dev/null
+++ b/include/exec/confidential-guest-support.h
@@ -0,0 +1,94 @@
+/*
+ * QEMU Confidential Guest support
+ * This interface describes the common pieces between various
+ * schemes for protecting guest memory or other state against a
+ * compromised hypervisor. This includes memory encryption (AMD's
+ * SEV and Intel's MKTME) or special protection modes (PEF on POWER,
+ * or PV on s390x).
+ *
+ * Copyright Red Hat.
+ *
+ * Authors:
+ * David Gibson <david@gibson.dropbear.id.au>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+#define QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include "qom/object.h"
+
+#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support"
+OBJECT_DECLARE_TYPE(ConfidentialGuestSupport,
+ ConfidentialGuestSupportClass,
+ CONFIDENTIAL_GUEST_SUPPORT)
+
+
+struct ConfidentialGuestSupport {
+ Object parent;
+
+ /*
+ * ready: flag set by CGS initialization code once it's ready to
+ * start executing instructions in a potentially-secure
+ * guest
+ *
+ * The definition here is a bit fuzzy, because this is essentially
+ * part of a self-sanity-check, rather than a strict mechanism.
+ *
+ * It's not feasible to have a single point in the common machine
+ * init path to configure confidential guest support, because
+ * different mechanisms have different interdependencies requiring
+ * initialization in different places, often in arch or machine
+ * type specific code. It's also usually not possible to check
+ * for invalid configurations until that initialization code has run.
+ * That means it would be very easy to have a bug allowing CGS
+ * init to be bypassed entirely in certain configurations.
+ *
+ * Silently ignoring a requested security feature would be bad, so
+ * to avoid that we check late in init that this 'ready' flag is
+ * set if CGS was requested. If the CGS init hasn't happened, and
+ * so 'ready' is not set, we'll abort.
+ */
+ bool ready;
+};
+
+typedef struct ConfidentialGuestSupportClass {
+ ObjectClass parent;
+
+ int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
+ int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp);
+} ConfidentialGuestSupportClass;
+
+static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_init) {
+ return klass->kvm_init(cgs, errp);
+ }
+
+ return 0;
+}
+
+static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_reset) {
+ return klass->kvm_reset(cgs, errp);
+ }
+
+ return 0;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */
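Editor's sketch, not part of the patch: how an accelerator setup path might drive the hook declared above. Only ConfidentialGuestSupport, the 'ready' flag and confidential_guest_kvm_init() come from this header; the wrapper function and its error handling are illustrative.

    static int example_cgs_setup(ConfidentialGuestSupport *cgs, Error **errp)
    {
        if (!cgs) {
            return 0;                        /* no confidential guest requested */
        }
        if (confidential_guest_kvm_init(cgs, errp) < 0) {
            return -1;                       /* mechanism-specific init failed */
        }
        /*
         * Per the comment above, the mechanism-specific init code is expected
         * to set cgs->ready; machine init aborts later if it is still false.
         */
        return cgs->ready ? 0 : -1;
    }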
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 117d2fbbca..1a6510fd3b 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -19,95 +19,25 @@
#ifndef CPU_ALL_H
#define CPU_ALL_H
-#include "qemu-common.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
+#include "exec/tswap.h"
#include "qemu/thread.h"
-#include "qom/cpu.h"
+#include "hw/core/cpu.h"
#include "qemu/rcu.h"
-#define EXCP_INTERRUPT 0x10000 /* async interruption */
-#define EXCP_HLT 0x10001 /* hlt instruction reached */
-#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
-#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
-#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
-
/* some important defines:
*
- * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * HOST_BIG_ENDIAN : whether the host cpu is big endian and
* otherwise little endian.
*
- * TARGET_WORDS_BIGENDIAN : same for target cpu
+ * TARGET_BIG_ENDIAN : same for the target cpu
*/
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif
-#ifdef BSWAP_NEEDED
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return bswap16(s);
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return bswap32(s);
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return bswap64(s);
-}
-
-static inline void tswap16s(uint16_t *s)
-{
- *s = bswap16(*s);
-}
-
-static inline void tswap32s(uint32_t *s)
-{
- *s = bswap32(*s);
-}
-
-static inline void tswap64s(uint64_t *s)
-{
- *s = bswap64(*s);
-}
-
-#else
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return s;
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return s;
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return s;
-}
-
-static inline void tswap16s(uint16_t *s)
-{
-}
-
-static inline void tswap32s(uint32_t *s)
-{
-}
-
-static inline void tswap64s(uint64_t *s)
-{
-}
-
-#endif
-
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
@@ -121,18 +51,14 @@ static inline void tswap64s(uint64_t *s)
/* Target-endianness CPU memory access functions. These fit into the
* {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
*/
-#if defined(TARGET_WORDS_BIGENDIAN)
+#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
-#define ldfl_p(p) ldfl_be_p(p)
-#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
-#define stfl_p(p, v) stfl_be_p(p, v)
-#define stfq_p(p, v) stfq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
@@ -140,13 +66,9 @@ static inline void tswap64s(uint64_t *s)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
-#define ldfl_p(p) ldfl_le_p(p)
-#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
-#define stfl_p(p, v) stfl_le_p(p, v)
-#define stfq_p(p, v) stfq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
@@ -155,20 +77,39 @@ static inline void tswap64s(uint64_t *s)
#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"
+#include "exec/user/guest-base.h"
+
+extern bool have_guest_base;
-/* On some host systems the guest address space is reserved on the host.
- * This allows the guest address space to be offset to a convenient location.
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default. The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
*/
-extern unsigned long guest_base;
-extern int have_guest_base;
extern unsigned long reserved_va;
-#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
-#define GUEST_ADDR_MAX (~0ul)
-#else
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : \
- (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
-#endif
+/*
+ * Limit the guest addresses as best we can.
+ *
+ * When not using -R reserved_va, we cannot really limit the guest
+ * to less address space than the host. For 32-bit guests, this
+ * acts as a sanity check that we're not giving the guest an address
+ * that it cannot even represent. For 64-bit guests... the address
+ * might not be what the real kernel would give, but it is at least
+ * representable in the guest.
+ *
+ * TODO: Improve address allocation to avoid this problem, and to
+ * avoid setting bits at the top of guest addresses that might need
+ * to be used for tags.
+ */
+#define GUEST_ADDR_MAX_ \
+ ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
+ UINT32_MAX : ~0ul)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
+
#else
#include "exec/hwaddr.h"
@@ -177,13 +118,13 @@ extern unsigned long reserved_va;
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
@@ -195,60 +136,50 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
/* Inline fast path for direct RAM access. */
#define ENDIANNESS
-#include "exec/memory_ldst_cached.inc.h"
+#include "exec/memory_ldst_cached.h.inc"
#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
#endif
/* page related stuff */
#ifdef TARGET_PAGE_BITS_VARY
-extern bool target_page_bits_decided;
-extern int target_page_bits;
-#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
- target_page_bits; })
+# include "exec/page-vary.h"
+extern const TargetPageBits target_page;
+#ifdef CONFIG_DEBUG_TCG
+#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
+ (target_long)target_page.mask; })
+#else
+#define TARGET_PAGE_BITS target_page.bits
+#define TARGET_PAGE_MASK ((target_long)target_page.mask)
+#endif
+#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#endif
-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
-#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
-/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
- * when intptr_t is 32-bit and we are aligning a long long.
- */
-extern uintptr_t qemu_host_page_size;
-extern intptr_t qemu_host_page_mask;
-
-#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
-#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
- qemu_real_host_page_mask)
-
-/* same as PROT_xxx */
-#define PAGE_READ 0x0001
-#define PAGE_WRITE 0x0002
-#define PAGE_EXEC 0x0004
-#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
-#define PAGE_VALID 0x0008
-/* original state of the write flag (used when tracking self-modifying
- code */
-#define PAGE_WRITE_ORG 0x0010
-/* Invalidate the TLB entry immediately, helpful for s390x
- * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
-#define PAGE_WRITE_INV 0x0040
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
-#define PAGE_RESERVED 0x0020
+#define PAGE_RESERVED 0x0100
#endif
+/*
+ * For linux-user, indicates that the page is mapped with the same semantics
+ * in both guest and host.
+ */
+#define PAGE_PASSTHROUGH 0x0800
#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);
@@ -258,8 +189,61 @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
int walk_memory_regions(void *, walk_memory_regions_fn);
int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
-int page_check_range(target_ulong start, target_ulong len, int flags);
+void page_set_flags(target_ulong start, target_ulong last, int flags);
+void page_reset_target_data(target_ulong start, target_ulong last);
+
+/**
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
+ *
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped. Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ * Context: holding mmap lock
+ *
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held so that the caller can ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(target_ulong start, target_ulong last);
+
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x. Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
+ */
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+ target_ulong len, target_ulong align);
+
+/**
+ * page_get_target_data(address)
+ * @address: guest virtual address
+ *
+ * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * with the guest page at @address, allocating it if necessary. The
+ * caller should already have verified that the address is valid.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
+ */
+void *page_get_target_data(target_ulong address)
+ __attribute__((returns_nonnull));
#endif
CPUArchState *cpu_copy(CPUArchState *env);
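Editor's sketch, not part of the patch: how a linux-user mmap-style path might combine the page-tracking calls documented above. The wrapper and the PAGE_* protection flags (defined elsewhere in QEMU) are illustrative; page_find_range_empty(), page_check_range_empty(), page_set_flags() and GUEST_ADDR_MAX are the ones declared in this header, and the mmap lock is assumed to be held as their documentation requires.

    static target_ulong example_reserve_guest_range(target_ulong len,
                                                    target_ulong align)
    {
        /* Find an aligned hole in the guest address space. */
        target_ulong start = page_find_range_empty(0, GUEST_ADDR_MAX, len, align);
        if (start == (target_ulong)-1) {
            return start;                            /* no hole large enough */
        }
        /* Confirm the range is still unmapped (mmap lock held by the caller). */
        if (!page_check_range_empty(start, start + len - 1)) {
            return (target_ulong)-1;
        }
        /* Record the new mapping's protection; 'last' is inclusive. */
        page_set_flags(start, start + len - 1, PAGE_VALID | PAGE_READ | PAGE_WRITE);
        return start;
    }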
@@ -316,28 +300,70 @@ CPUArchState *cpu_copy(CPUArchState *env);
| CPU_INTERRUPT_TGT_EXT_3 \
| CPU_INTERRUPT_TGT_EXT_4)
-#if !defined(CONFIG_USER_ONLY)
+#ifdef CONFIG_USER_ONLY
+
+/*
+ * Allow some level of source compatibility with softmmu. We do not
+ * support any of the more exotic features, so only invalid pages may
+ * be signaled by probe_access_flags().
+ */
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
+#define TLB_WATCHPOINT 0
+
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return MMU_USER_IDX;
+}
+#else
-/* Flags stored in the low bits of the TLB virtual address. These are
- * defined so that fast path ram access is all zeros.
+/*
+ * Flags stored in the low bits of the TLB virtual address.
+ * These are defined so that fast path ram access is all zeros.
* The flags all must be between TARGET_PAGE_BITS and
* maximum address alignment bit.
+ *
+ * Use TARGET_PAGE_BITS_MIN so that these bits are constant
+ * when TARGET_PAGE_BITS_VARY is in effect.
+ *
+ * The count, if not the placement, of these bits is known
+ * to tcg/tcg-op-ldst.c, check_max_alignment().
*/
/* Zero if TLB entry is valid. */
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page. The iotlb entry will
contain the page physical address. */
-#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
+#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
-#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
-/* Set if TLB entry must have MMU lookup repeated for every access */
-#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4))
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
+/* Set if TLB entry writes ignored. */
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
-#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_RECHECK)
+#define TLB_FLAGS_MASK \
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
+ | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED (1 << 2)
+
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
@@ -346,7 +372,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
* @addr: virtual address to test (must be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
-static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}
@@ -357,18 +383,15 @@ static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
* @addr: virtual address to test (need not be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
-static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
-void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
-void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
#endif /* !CONFIG_USER_ONLY */
-int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- uint8_t *buf, int len, int is_write);
-
-int cpu_exec(CPUState *cpu);
+/* Validate correct placement of CPUArchState. */
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
#endif /* CPU_ALL_H */
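Editor's sketch, not part of the patch: the conceptual shape of a softmmu fast-path check built from tlb_hit() and TLB_FLAGS_MASK above. 'tlb_addr' would come from a CPUTLBEntry comparator (addr_read/addr_write/addr_code); the helper name is illustrative.

    static inline bool example_fast_path_ok(uint64_t tlb_addr, vaddr addr)
    {
        if (!tlb_hit(tlb_addr, addr)) {
            return false;                /* TLB miss: a refill is required */
        }
        /* Any flag bit forces the slow path (MMIO, notdirty, forced-slow, ...). */
        return (tlb_addr & TLB_FLAGS_MASK) == 0;
    }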
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 2ad2d6d86b..6d5318895a 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -3,32 +3,34 @@
/* CPU interfaces that are target independent. */
+#include "exec/vaddr.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
+#include "hw/core/cpu.h"
+#include "tcg/debug-assert.h"
-#include "qemu/bswap.h"
-#include "qemu/queue.h"
-#include "qemu/fprintf-fn.h"
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
-/**
- * CPUListState:
- * @cpu_fprintf: Print function.
- * @file: File to print to using @cpu_fprint.
- *
- * State commonly used for iterating over CPU models.
- */
-typedef struct CPUListState {
- fprintf_function cpu_fprintf;
- FILE *file;
-} CPUListState;
+void cpu_exec_init_all(void);
+void cpu_exec_step_atomic(CPUState *cpu);
+
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())
/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
+extern QemuMutex qemu_cpu_list_lock;
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
+unsigned int cpu_list_generation_id_get(void);
-void tcg_flush_softmmu_tlb(CPUState *cs);
+void tcg_iommu_init_notifier_list(CPUState *cpu);
+void tcg_iommu_free_notifier_list(CPUState *cpu);
#if !defined(CONFIG_USER_ONLY)
@@ -38,7 +40,7 @@ enum device_endian {
DEVICE_LITTLE_ENDIAN,
};
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
@@ -55,50 +57,90 @@ typedef uintptr_t ram_addr_t;
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
-extern ram_addr_t ram_size;
-
/* memory API */
-typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
-typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
-
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
+
+/*
+ * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
+ *
+ * @ptr: The host pointer to translate.
+ * @round_offset: Whether to round the result offset down to a target page
+ * @offset: Will be set to the offset within the returned RAMBlock.
+ *
+ * Returns: RAMBlock (or NULL if not found)
+ *
+ * By the time this function returns, the returned pointer is not protected
+ * by RCU anymore. If the caller is not within an RCU critical section and
+ * does not hold the BQL, it must have other means of protecting the
+ * pointer, such as a reference to the memory region that owns the RAMBlock.
+ */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
+void *qemu_ram_get_host_addr(RAMBlock *rb);
+ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
+ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
+ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
+bool qemu_ram_is_noreserve(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);
+bool qemu_ram_is_named_file(RAMBlock *rb);
+int qemu_ram_get_fd(RAMBlock *rb);
size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
-void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
- int len, int is_write);
+/**
+ * cpu_address_space_init:
+ * @cpu: CPU to add this address space to
+ * @asidx: integer index of this address space
+ * @prefix: prefix to be used as name of address space
+ * @mr: the root memory region of address space
+ *
+ * Add the specified address space to the CPU's cpu_ases list.
+ * The address space added with @asidx 0 is the one used for the
+ * convenience pointer cpu->as.
+ * The target-specific code which registers ASes is responsible
+ * for defining what semantics address space 0, 1, 2, etc have.
+ *
+ * Before the first call to this function, the caller must set
+ * cpu->num_ases to the total number of address spaces it needs
+ * to support.
+ *
+ * Note that with KVM only one address space is supported.
+ */
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr);
+
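A minimal sketch of the registration pattern described above, assuming a target with a normal and a "secure" address space; the function name, region names and asidx numbering are illustrative, not part of this patch.

static void example_register_ases(CPUState *cs, MemoryRegion *sysmem,
                                  MemoryRegion *secure_mem)
{
    cs->num_ases = 2;   /* must be set before the first cpu_address_space_init() */
    cpu_address_space_init(cs, 0, "cpu-memory", sysmem);           /* becomes cpu->as */
    cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mem);
}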
+void cpu_physical_memory_rw(hwaddr addr, void *buf,
+ hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
- void *buf, int len)
+ void *buf, hwaddr len)
{
- cpu_physical_memory_rw(addr, buf, len, 0);
+ cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
- const void *buf, int len)
+ const void *buf, hwaddr len)
{
- cpu_physical_memory_rw(addr, (void *)buf, len, 1);
+ cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
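For illustration only, a guest-physical register read built on the wrappers above; the little-endian conversion (le32_to_cpu from "qemu/bswap.h") is an assumption about the device, not something the API implies.

static uint32_t example_readl_phys(hwaddr addr)
{
    uint32_t val;

    cpu_physical_memory_read(addr, &val, sizeof(val));  /* rw(..., false) */
    return le32_to_cpu(val);    /* device register assumed little-endian */
}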
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
- int is_write);
+ bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
- int is_write, hwaddr access_len);
+ bool is_write, hwaddr access_len);
void cpu_register_map_client(QEMUBH *bh);
void cpu_unregister_map_client(QEMUBH *bh);
@@ -111,18 +153,129 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr);
*/
void qemu_flush_coalesced_mmio_buffer(void);
-void cpu_flush_icache_range(hwaddr start, int len);
-
-extern struct MemoryRegion io_mem_rom;
-extern struct MemoryRegion io_mem_notdirty;
+void cpu_flush_icache_range(hwaddr start, hwaddr len);
-typedef int (RAMBlockIterFunc)(const char *block_name, void *host_addr,
- ram_addr_t offset, ram_addr_t length, void *opaque);
+typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
-int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
+int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
+ size_t length);
#endif
+/* Returns: 0 on success, -1 on error */
+int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ void *ptr, size_t len, bool is_write);
+
+/* vl.c */
+void list_cpus(void);
+
+#ifdef CONFIG_TCG
+/**
+ * cpu_unwind_state_data:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @data: output data
+ *
+ * Attempt to load the unwind state for a host pc occurring in
+ * translated code. If @host_pc is not in translated code, the
+ * function returns false; otherwise @data is loaded.
+ * This is the same unwind info as given to restore_state_to_opc.
+ */
+bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
+
+/**
+ * cpu_restore_state:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @return: true if state was restored, false otherwise
+ *
+ * Attempt to restore the state for a fault occurring in translated
+ * code. If @host_pc is not in translated code no state is
+ * restored and the function returns false.
+ */
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
+
+G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+#endif /* CONFIG_TCG */
+G_NORETURN void cpu_loop_exit(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
+
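A hedged sketch of how a target helper typically pairs these calls when raising an exception from inside a TB; the helper name is hypothetical, and cpu_loop_exit_restore() performs the same two steps in one call.

static G_NORETURN void example_raise_exception(CPUState *cs, int excp,
                                               uintptr_t retaddr)
{
    cs->exception_index = excp;
    /* Sync guest PC and flags from the TB, if retaddr points into one. */
    cpu_restore_state(cs, retaddr);
    cpu_loop_exit(cs);
}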
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+/*
+ * Original state of the write flag (used when tracking self-modifying code)
+ */
+#define PAGE_WRITE_ORG 0x0010
+/*
+ * Invalidate the TLB entry immediately, helpful for s390x
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
+ */
+#define PAGE_WRITE_INV 0x0020
+/* For use with page_set_flags: page is being replaced; target_data cleared. */
+#define PAGE_RESET 0x0040
+/* For linux-user, indicates that the page is MAP_ANON. */
+#define PAGE_ANON 0x0080
+
+/* Target-specific bits that will be used via page_get_flags(). */
+#define PAGE_TARGET_1 0x0200
+#define PAGE_TARGET_2 0x0400
+
+/*
+ * For linux-user, indicates that the page is mapped with the same semantics
+ * in both guest and host.
+ */
+#define PAGE_PASSTHROUGH 0x0800
+
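As a sketch for linux-user code (CONFIG_USER_ONLY assumed, page_get_flags() taken from cpu-all.h), the bits above compose like ordinary PROT_* masks; the helper name is illustrative.

static bool example_page_is_rw(abi_ulong addr)
{
    int flags = page_get_flags(addr);

    return (flags & PAGE_VALID) &&
           (flags & (PAGE_READ | PAGE_WRITE)) == (PAGE_READ | PAGE_WRITE);
}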
+/* accel/tcg/cpu-exec.c */
+int cpu_exec(CPUState *cpu);
+
+/**
+ * env_archcpu(env)
+ * @env: The architecture environment
+ *
+ * Return the ArchCPU associated with the environment.
+ */
+static inline ArchCPU *env_archcpu(CPUArchState *env)
+{
+ return (void *)env - sizeof(CPUState);
+}
+
+/**
+ * env_cpu(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline CPUState *env_cpu(CPUArchState *env)
+{
+ return (void *)env - sizeof(CPUState);
+}
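The pointer arithmetic in both helpers relies on a layout convention rather than container_of(); a hedged illustration of that assumed layout, with the kind of build-time check a target could add (ExampleArchCPU is hypothetical):

struct ExampleArchCPU {            /* hypothetical target CPU type */
    CPUState parent_obj;           /* common part at offset 0 */
    CPUArchState env;              /* assumed to sit right after CPUState */
    /* ... other target members ... */
};
QEMU_BUILD_BUG_ON(offsetof(struct ExampleArchCPU, env) != sizeof(CPUState));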
+
+#ifndef CONFIG_USER_ONLY
+/**
+ * cpu_mmu_index:
+ * @env: The cpu environment
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ *
+ * The user-only version of this function is inline in cpu-all.h,
+ * where it always returns MMU_USER_IDX.
+ */
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ int ret = cs->cc->mmu_index(cs, ifetch);
+ tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
+ return ret;
+}
+#endif /* !CONFIG_USER_ONLY */
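A brief hedged example of how a softmmu helper might consume this: pick the data-access index and feed it to one of the cpu_ld*_mmuidx_ra accessors declared in exec/cpu_ldst.h (the helper name is illustrative).

static uint32_t example_load_for_data(CPUArchState *env, vaddr addr, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env_cpu(env), false);   /* false: data access */

    return cpu_ldl_mmuidx_ra(env, addr, mmu_idx, ra);
}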
+
#endif /* CPU_COMMON_H */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 6a60f94a41..3915438b83 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,179 +25,59 @@
#include "qemu/host-utils.h"
#include "qemu/thread.h"
-#include "qemu/queue.h"
-#ifdef CONFIG_TCG
-#include "tcg-target.h"
-#endif
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
+#include "hw/core/cpu.h"
+
+#include "cpu-param.h"
#ifndef TARGET_LONG_BITS
-#error TARGET_LONG_BITS must be defined before including this header
+# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
-
-#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
-
-/* target_ulong is the type of a virtual address */
-#if TARGET_LONG_SIZE == 4
-typedef int32_t target_long;
-typedef uint32_t target_ulong;
-#define TARGET_FMT_lx "%08x"
-#define TARGET_FMT_ld "%d"
-#define TARGET_FMT_lu "%u"
-#elif TARGET_LONG_SIZE == 8
-typedef int64_t target_long;
-typedef uint64_t target_ulong;
-#define TARGET_FMT_lx "%016" PRIx64
-#define TARGET_FMT_ld "%" PRId64
-#define TARGET_FMT_lu "%" PRIu64
-#else
-#error TARGET_LONG_SIZE undefined
+#ifndef TARGET_PHYS_ADDR_SPACE_BITS
+# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
-
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
-/* use a fully associative victim tlb of 8 entries */
-#define CPU_VTLB_SIZE 8
-
-#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
-#define CPU_TLB_ENTRY_BITS 4
-#else
-#define CPU_TLB_ENTRY_BITS 5
+#ifndef TARGET_VIRT_ADDR_SPACE_BITS
+# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
+#endif
+#ifndef TARGET_PAGE_BITS
+# ifdef TARGET_PAGE_BITS_VARY
+# ifndef TARGET_PAGE_BITS_MIN
+# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
+# endif
+# else
+# error TARGET_PAGE_BITS must be defined in cpu-param.h
+# endif
#endif
-/* TCG_TARGET_TLB_DISPLACEMENT_BITS is used in CPU_TLB_BITS to ensure that
- * the TLB is not unnecessarily small, but still small enough for the
- * TLB lookup instruction sequence used by the TCG target.
- *
- * TCG will have to generate an operand as large as the distance between
- * env and the tlb_table[NB_MMU_MODES - 1][0].addend. For simplicity,
- * the TCG targets just round everything up to the next power of two, and
- * count bits. This works because: 1) the size of each TLB is a largish
- * power of two, 2) and because the limit of the displacement is really close
- * to a power of two, 3) the offset of tlb_table[0][0] inside env is smaller
- * than the size of a TLB.
- *
- * For example, the maximum displacement 0xFFF0 on PPC and MIPS, but TCG
- * just says "the displacement is 16 bits". TCG_TARGET_TLB_DISPLACEMENT_BITS
- * then ensures that tlb_table at least 0x8000 bytes large ("not unnecessarily
- * small": 2^15). The operand then will come up smaller than 0xFFF0 without
- * any particular care, because the TLB for a single MMU mode is larger than
- * 0x10000-0xFFF0=16 bytes. In the end, the maximum value of the operand
- * could be something like 0xC000 (the offset of the last TLB table) plus
- * 0x18 (the offset of the addend field in each TLB entry) plus the offset
- * of tlb_table inside env (which is non-trivial but not huge).
- */
-#define CPU_TLB_BITS \
- MIN(8, \
- TCG_TARGET_TLB_DISPLACEMENT_BITS - CPU_TLB_ENTRY_BITS - \
- (NB_MMU_MODES <= 1 ? 0 : \
- NB_MMU_MODES <= 2 ? 1 : \
- NB_MMU_MODES <= 4 ? 2 : \
- NB_MMU_MODES <= 8 ? 3 : 4))
-
-#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
-
-typedef struct CPUTLBEntry {
- /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
- bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
- go directly to ram.
- bit 3 : indicates that the entry is invalid
- bit 2..0 : zero
- */
- union {
- struct {
- target_ulong addr_read;
- target_ulong addr_write;
- target_ulong addr_code;
- /* Addend to virtual address to get host address. IO accesses
- use the corresponding iotlb value. */
- uintptr_t addend;
- };
- /* padding to get a power of two size */
- uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
- };
-} CPUTLBEntry;
-
-QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
-
-/* The IOTLB is not accessed directly inline by generated TCG code,
- * so the CPUIOTLBEntry layout is not as critical as that of the
- * CPUTLBEntry. (This is also why we don't want to combine the two
- * structs into one.)
- */
-typedef struct CPUIOTLBEntry {
- /*
- * @addr contains:
- * - in the lower TARGET_PAGE_BITS, a physical section number
- * - with the lower TARGET_PAGE_BITS masked off, an offset which
- * must be added to the virtual address to obtain:
- * + the ram_addr_t of the target RAM (if the physical section
- * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
- * + the offset within the target MemoryRegion (otherwise)
- */
- hwaddr addr;
- MemTxAttrs attrs;
-} CPUIOTLBEntry;
-
-typedef struct CPUTLBDesc {
- /*
- * Describe a region covering all of the large pages allocated
- * into the tlb. When any page within this region is flushed,
- * we must flush the entire tlb. The region is matched if
- * (addr & large_page_mask) == large_page_addr.
- */
- target_ulong large_page_addr;
- target_ulong large_page_mask;
- /* The next index to use in the tlb victim table. */
- size_t vindex;
-} CPUTLBDesc;
+#include "exec/target_long.h"
-/*
- * Data elements that are shared between all MMU modes.
- */
-typedef struct CPUTLBCommon {
- /* Serialize updates to tlb_table and tlb_v_table, and others as noted. */
- QemuSpin lock;
- /*
- * Within dirty, for each bit N, modifications have been made to
- * mmu_idx N since the last time that mmu_idx was flushed.
- * Protected by tlb_c.lock.
- */
- uint16_t dirty;
- /*
- * Statistics. These are not lock protected, but are read and
- * written atomically. This allows the monitor to print a snapshot
- * of the stats without interfering with the cpu.
- */
- size_t full_flush_count;
- size_t part_flush_count;
- size_t elide_flush_count;
-} CPUTLBCommon;
+#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
+#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_DEFAULT_BITS 8
+# if HOST_LONG_BITS == 32
+/* Make sure we do not require a double-word shift for the TLB load */
+# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
+# else /* HOST_LONG_BITS == 64 */
/*
- * The meaning of each of the MMU modes is defined in the target code.
- * Note that NB_MMU_MODES is not yet defined; we can only reference it
- * within preprocessor defines that will be expanded later.
+ * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
+ * 2**34 == 16G of address space. This is roughly what one would expect a
+ * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
+ * Skylake's Level-2 STLB has 16 1G entries.
+ * Also, make sure we do not size the TLB past the guest's address space.
*/
-#define CPU_COMMON_TLB \
- CPUTLBCommon tlb_c; \
- CPUTLBDesc tlb_d[NB_MMU_MODES]; \
- CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
- CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
- CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
- CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];
-
-#else
-
-#define CPU_COMMON_TLB
-
-#endif
-
-
-#define CPU_COMMON \
- /* soft mmu support */ \
- CPU_COMMON_TLB \
+# ifdef TARGET_PAGE_BITS_VARY
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# else
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# endif
+# endif
+
+#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
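Worked numbers for the bound above, as a hedged illustration: with TARGET_PAGE_BITS == 12 and TARGET_VIRT_ADDR_SPACE_BITS == 48, CPU_TLB_DYN_MAX_BITS = MIN(22, 48 - 12) = 22, i.e. at most 1 << 22 entries covering 2^(22+12) = 16 GiB; for a 32-bit guest the guest address space is the tighter limit, MIN(22, 32 - 12) = 20 bits.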
#endif
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 959068495a..eb8f3f0595 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -4,7 +4,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,9 +25,15 @@
*
* The syntax for the accessors is:
*
- * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
+ * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
+ * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
+ * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
*
- * store: cpu_st{sign}{size}_{mmusuffix}(env, ptr, val)
+ * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
+ * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
+ * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
*
* sign is:
* (empty): for 32 and 64 bit sizes
@@ -40,13 +46,26 @@
* l: 32 bits
* q: 64 bits
*
- * mmusuffix is one of the generic suffixes "data" or "code", or
- * (for softmmu configs) a target-specific MMU mode suffix as defined
- * in target cpu.h.
+ * end is:
+ * (empty): for target native endian, or for 8 bit access
+ * _be: for forced big endian
+ * _le: for forced little endian
+ *
+ * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
+ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
+ * the index to use; the "data" and "code" suffixes take the index from
+ * cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements. The alignment will be enforced.
*/
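To make the naming grid concrete, a hedged sketch using a few of the accessors declared below (softmmu assumed; the helper name and mmu_idx value 1 are purely illustrative):

static void example_accessor_names(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    uint32_t u16v = cpu_lduw_le_data(env, addr);         /* unsigned 16-bit, LE, "data" index */
    int      s16v = cpu_ldsw_be_data_ra(env, addr, ra);  /* signed 16-bit, BE, with retaddr */

    cpu_stl_le_data(env, addr + 4, u16v + s16v);         /* 32-bit LE store, "data" index */
    cpu_stq_le_mmuidx_ra(env, addr + 8, 0, 1, ra);       /* 64-bit LE store, explicit mmu_idx */
}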
#ifndef CPU_LDST_H
#define CPU_LDST_H
+#include "exec/memopidx.h"
+#include "qemu/int128.h"
+#include "cpu.h"
+
#if defined(CONFIG_USER_ONLY)
/* sparc32plus has 64bit long but 32bit space address
* this can make bad result with g2h() and h2g()
@@ -59,19 +78,40 @@ typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif
+#ifndef TARGET_TAGGED_ADDRESSES
+static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
+{
+ return x;
+}
+#endif
+
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-#define g2h(x) ((void *)((unsigned long)(abi_ptr)(x) + guest_base))
+static inline void *g2h_untagged(abi_ptr x)
+{
+ return (void *)((uintptr_t)(x) + guest_base);
+}
-#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
-#define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base)
+static inline void *g2h(CPUState *cs, abi_ptr x)
+{
+ return g2h_untagged(cpu_untagged_addr(cs, x));
+}
-static inline int guest_range_valid(unsigned long start, unsigned long len)
+static inline bool guest_addr_valid_untagged(abi_ulong x)
+{
+ return x <= GUEST_ADDR_MAX;
+}
+
+static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}
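A hedged user-mode sketch combining the two helpers above: validate the guest range first, then convert to a host pointer for a plain memcpy (string.h assumed; the helper name is hypothetical).

static bool example_copy_from_guest(CPUState *cs, void *dst,
                                    abi_ulong guest_addr, abi_ulong len)
{
    if (!guest_range_valid_untagged(guest_addr, len)) {
        return false;                       /* would wrap or exceed GUEST_ADDR_MAX */
    }
    memcpy(dst, g2h(cs, guest_addr), len);  /* tag bits stripped by g2h() */
    return true;
}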
+#define h2g_valid(x) \
+ (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
+ (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
+
#define h2g_nocheck(x) ({ \
- unsigned long __ret = (unsigned long)(x) - guest_base; \
+ uintptr_t __ret = (uintptr_t)(x) - guest_base; \
(abi_ptr)__ret; \
})
@@ -81,342 +121,332 @@ static inline int guest_range_valid(unsigned long start, unsigned long len)
h2g_nocheck(x); \
})
#else
-typedef target_ulong abi_ptr;
-#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
+typedef vaddr abi_ptr;
+#define TARGET_ABI_FMT_ptr VADDR_PRIx
#endif
-#if defined(CONFIG_USER_ONLY)
-
-extern __thread uintptr_t helper_retaddr;
-
-/* In user-only mode we provide only the _code and _data accessors. */
-
-#define MEMSUFFIX _data
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_useronly_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_useronly_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_useronly_template.h"
+uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
+int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
+int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
+uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
+int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
+uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
+
+uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+
+void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
+void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
+
+void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint64_t val, uintptr_t ra);
+void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint64_t val, uintptr_t ra);
+
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra);
+
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
+ (CPUArchState *env, abi_ptr addr, TYPE val, \
+ MemOpIdx oi, uintptr_t retaddr);
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_useronly_template.h"
-#undef MEMSUFFIX
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
-#define MEMSUFFIX _code
-#define CODE_ACCESS
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_useronly_template.h"
+#if defined(CONFIG_USER_ONLY)
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_useronly_template.h"
+extern __thread uintptr_t helper_retaddr;
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_useronly_template.h"
+static inline void set_helper_retaddr(uintptr_t ra)
+{
+ helper_retaddr = ra;
+ /*
+ * Ensure that this write is visible to the SIGSEGV handler that
+ * may be invoked due to a subsequent invalid memory operation.
+ */
+ signal_barrier();
+}
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_useronly_template.h"
-#undef MEMSUFFIX
-#undef CODE_ACCESS
+static inline void clear_helper_retaddr(void)
+{
+ /*
+ * Ensure that previous memory operations have succeeded before
+ * removing the data visible to the signal handler.
+ */
+ signal_barrier();
+ helper_retaddr = 0;
+}
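A sketch of the intended bracketing, assuming a linux-user helper that touches guest memory directly through g2h(); any SIGSEGV raised by the memcpy is then unwound using the recorded return address. The helper name is illustrative.

static void example_direct_copy(CPUState *cs, void *dst,
                                abi_ptr guest_addr, size_t len, uintptr_t ra)
{
    set_helper_retaddr(ra);
    memcpy(dst, g2h(cs, guest_addr), len);
    clear_helper_retaddr();
}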
#else
-/* The memory helpers for tcg-generated code need tcg_target_long etc. */
-#include "tcg.h"
+#include "tcg/oversized-guest.h"
-static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
+static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
+ MMUAccessType access_type)
{
-#if TCG_OVERSIZED_GUEST
- return entry->addr_write;
+ /* Do not rearrange the CPUTLBEntry structure members. */
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
+ MMU_DATA_LOAD * sizeof(uint64_t));
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
+ MMU_DATA_STORE * sizeof(uint64_t));
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
+ MMU_INST_FETCH * sizeof(uint64_t));
+
+#if TARGET_LONG_BITS == 32
+ /* Use qatomic_read, in case of addr_write; only care about low bits. */
+ const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
+ ptr += HOST_BIG_ENDIAN;
+ return qatomic_read(ptr);
#else
- return atomic_read(&entry->addr_write);
+ const uint64_t *ptr = &entry->addr_idx[access_type];
+# if TCG_OVERSIZED_GUEST
+ return *ptr;
+# else
+ /* ofs might correspond to .addr_write, so use qatomic_read */
+ return qatomic_read(ptr);
+# endif
#endif
}
+static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
+{
+ return tlb_read_idx(entry, MMU_DATA_STORE);
+}
+
/* Find the TLB index corresponding to the mmu_idx + address pair. */
-static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
+static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
+ vaddr addr)
{
- return (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+
+ return (addr >> TARGET_PAGE_BITS) & size_mask;
}
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
-static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
+static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
+ vaddr addr)
{
- return &env->tlb_table[mmu_idx][tlb_index(env, mmu_idx, addr)];
+ return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
}
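A hedged sketch of a by-hand fast-path probe built from the helpers above; tlb_hit() is assumed from exec/cpu-all.h, the helper name is hypothetical, and the real lookup lives in accel/tcg/cputlb.c.

static bool example_tlb_would_hit(CPUState *cpu, int mmu_idx, vaddr addr)
{
    CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
    uint64_t cmp = tlb_read_idx(entry, MMU_DATA_LOAD);

    return tlb_hit(cmp, addr);   /* same page-aligned comparator check as the fast path */
}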
-#ifdef MMU_MODE0_SUFFIX
-#define CPU_MMU_INDEX 0
-#define MEMSUFFIX MMU_MODE0_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif
-
-#if (NB_MMU_MODES >= 2) && defined(MMU_MODE1_SUFFIX)
-#define CPU_MMU_INDEX 1
-#define MEMSUFFIX MMU_MODE1_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
+#endif /* defined(CONFIG_USER_ONLY) */
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
+#if TARGET_BIG_ENDIAN
+# define cpu_lduw_data cpu_lduw_be_data
+# define cpu_ldsw_data cpu_ldsw_be_data
+# define cpu_ldl_data cpu_ldl_be_data
+# define cpu_ldq_data cpu_ldq_be_data
+# define cpu_lduw_data_ra cpu_lduw_be_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_be_data_ra
+# define cpu_ldl_data_ra cpu_ldl_be_data_ra
+# define cpu_ldq_data_ra cpu_ldq_be_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_be_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
+# define cpu_stw_data cpu_stw_be_data
+# define cpu_stl_data cpu_stl_be_data
+# define cpu_stq_data cpu_stq_be_data
+# define cpu_stw_data_ra cpu_stw_be_data_ra
+# define cpu_stl_data_ra cpu_stl_be_data_ra
+# define cpu_stq_data_ra cpu_stq_be_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
+#else
+# define cpu_lduw_data cpu_lduw_le_data
+# define cpu_ldsw_data cpu_ldsw_le_data
+# define cpu_ldl_data cpu_ldl_le_data
+# define cpu_ldq_data cpu_ldq_le_data
+# define cpu_lduw_data_ra cpu_lduw_le_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_le_data_ra
+# define cpu_ldl_data_ra cpu_ldl_le_data_ra
+# define cpu_ldq_data_ra cpu_ldq_le_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_le_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
+# define cpu_stw_data cpu_stw_le_data
+# define cpu_stl_data cpu_stl_le_data
+# define cpu_stq_data cpu_stq_le_data
+# define cpu_stw_data_ra cpu_stw_le_data_ra
+# define cpu_stl_data_ra cpu_stl_le_data_ra
+# define cpu_stq_data_ra cpu_stq_le_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
#endif
-#if (NB_MMU_MODES >= 3) && defined(MMU_MODE2_SUFFIX)
-
-#define CPU_MMU_INDEX 2
-#define MEMSUFFIX MMU_MODE2_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 3) */
-
-#if (NB_MMU_MODES >= 4) && defined(MMU_MODE3_SUFFIX)
-
-#define CPU_MMU_INDEX 3
-#define MEMSUFFIX MMU_MODE3_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 4) */
-
-#if (NB_MMU_MODES >= 5) && defined(MMU_MODE4_SUFFIX)
-
-#define CPU_MMU_INDEX 4
-#define MEMSUFFIX MMU_MODE4_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 5) */
-
-#if (NB_MMU_MODES >= 6) && defined(MMU_MODE5_SUFFIX)
-
-#define CPU_MMU_INDEX 5
-#define MEMSUFFIX MMU_MODE5_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 6) */
-
-#if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
-
-#define CPU_MMU_INDEX 6
-#define MEMSUFFIX MMU_MODE6_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 7) */
-
-#if (NB_MMU_MODES >= 8) && defined(MMU_MODE7_SUFFIX)
-
-#define CPU_MMU_INDEX 7
-#define MEMSUFFIX MMU_MODE7_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 8) */
-
-#if (NB_MMU_MODES >= 9) && defined(MMU_MODE8_SUFFIX)
-
-#define CPU_MMU_INDEX 8
-#define MEMSUFFIX MMU_MODE8_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 9) */
-
-#if (NB_MMU_MODES >= 10) && defined(MMU_MODE9_SUFFIX)
-
-#define CPU_MMU_INDEX 9
-#define MEMSUFFIX MMU_MODE9_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 10) */
-
-#if (NB_MMU_MODES >= 11) && defined(MMU_MODE10_SUFFIX)
-
-#define CPU_MMU_INDEX 10
-#define MEMSUFFIX MMU_MODE10_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 11) */
-
-#if (NB_MMU_MODES >= 12) && defined(MMU_MODE11_SUFFIX)
-
-#define CPU_MMU_INDEX 11
-#define MEMSUFFIX MMU_MODE11_SUFFIX
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 12) */
-
-#if (NB_MMU_MODES > 12)
-#error "NB_MMU_MODES > 12 is not supported for now"
-#endif /* (NB_MMU_MODES > 12) */
-
-/* these access are slower, they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
-#define MEMSUFFIX _data
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-
-#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
-#define MEMSUFFIX _code
-#define SOFTMMU_CODE_ACCESS
-
-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#undef SOFTMMU_CODE_ACCESS
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
+
+static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
+{
+ return (int8_t)cpu_ldub_code(env, addr);
+}
-#endif /* defined(CONFIG_USER_ONLY) */
+static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
+{
+ return (int16_t)cpu_lduw_code(env, addr);
+}
/**
* tlb_vaddr_to_host:
@@ -426,50 +456,20 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
- * If the TLB contains a host virtual address suitable for direct RAM
- * access, then return it. Otherwise (TLB miss, TLB entry is for an
- * I/O access, etc) return NULL.
- *
- * This is the equivalent of the initial fast-path code used by
- * TCG backends for guest load and store accesses.
+ * If we can translate a host virtual address suitable for direct RAM
+ * access, without causing a guest exception, then return it.
+ * Otherwise (TLB entry is for an I/O access, guest software
+ * TLB fill required, etc) return NULL.
*/
+#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
- int access_type, int mmu_idx)
+ MMUAccessType access_type, int mmu_idx)
{
-#if defined(CONFIG_USER_ONLY)
- return g2h(addr);
-#else
- CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr);
- abi_ptr tlb_addr;
- uintptr_t haddr;
-
- switch (access_type) {
- case 0:
- tlb_addr = tlbentry->addr_read;
- break;
- case 1:
- tlb_addr = tlb_addr_write(tlbentry);
- break;
- case 2:
- tlb_addr = tlbentry->addr_code;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (!tlb_hit(tlb_addr, addr)) {
- /* TLB entry is for a different page */
- return NULL;
- }
-
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- return NULL;
- }
-
- haddr = addr + tlbentry->addend;
- return (void *)haddr;
-#endif /* defined(CONFIG_USER_ONLY) */
+ return g2h(env_cpu(env), addr);
}
+#else
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx);
+#endif
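A hedged sketch of the typical fast/slow split this enables: use the direct host pointer when one exists, otherwise fall back to the full accessor. ldl_le_p() is assumed from "qemu/bswap.h" and the helper name is illustrative.

static uint32_t example_ldl_fast(CPUArchState *env, abi_ptr addr,
                                 int mmu_idx, uintptr_t ra)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host) {
        return ldl_le_p(host);                            /* direct RAM access */
    }
    return cpu_ldl_le_mmuidx_ra(env, addr, mmu_idx, ra);  /* I/O, TLB fill, watchpoints */
}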
#endif /* CPU_LDST_H */
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
deleted file mode 100644
index 0f061d47ef..0000000000
--- a/include/exec/cpu_ldst_template.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Software MMU support
- *
- * Generate inline load/store functions for one MMU mode and data
- * size.
- *
- * Generate a store function as well as signed and unsigned loads.
- *
- * Not used directly but included from cpu_ldst.h.
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#if !defined(SOFTMMU_CODE_ACCESS)
-#include "trace-root.h"
-#endif
-
-#include "trace/mem.h"
-
-#if DATA_SIZE == 8
-#define SUFFIX q
-#define USUFFIX q
-#define DATA_TYPE uint64_t
-#define SHIFT 3
-#elif DATA_SIZE == 4
-#define SUFFIX l
-#define USUFFIX l
-#define DATA_TYPE uint32_t
-#define SHIFT 2
-#elif DATA_SIZE == 2
-#define SUFFIX w
-#define USUFFIX uw
-#define DATA_TYPE uint16_t
-#define DATA_STYPE int16_t
-#define SHIFT 1
-#elif DATA_SIZE == 1
-#define SUFFIX b
-#define USUFFIX ub
-#define DATA_TYPE uint8_t
-#define DATA_STYPE int8_t
-#define SHIFT 0
-#else
-#error unsupported data size
-#endif
-
-#if DATA_SIZE == 8
-#define RES_TYPE uint64_t
-#else
-#define RES_TYPE uint32_t
-#endif
-
-#ifdef SOFTMMU_CODE_ACCESS
-#define ADDR_READ addr_code
-#define MMUSUFFIX _cmmu
-#define URETSUFFIX SUFFIX
-#define SRETSUFFIX SUFFIX
-#else
-#define ADDR_READ addr_read
-#define MMUSUFFIX _mmu
-#define URETSUFFIX USUFFIX
-#define SRETSUFFIX glue(s, SUFFIX)
-#endif
-
-/* generic load/store macros */
-
-static inline RES_TYPE
-glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
- target_ulong ptr,
- uintptr_t retaddr)
-{
- CPUTLBEntry *entry;
- RES_TYPE res;
- target_ulong addr;
- int mmu_idx;
- TCGMemOpIdx oi;
-
-#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- ENV_GET_CPU(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, false));
-#endif
-
- addr = ptr;
- mmu_idx = CPU_MMU_INDEX;
- entry = tlb_entry(env, mmu_idx, addr);
- if (unlikely(entry->ADDR_READ !=
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- oi = make_memop_idx(SHIFT, mmu_idx);
- res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
- oi, retaddr);
- } else {
- uintptr_t hostaddr = addr + entry->addend;
- res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
- }
- return res;
-}
-
-static inline RES_TYPE
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
-{
- return glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
-}
-
-#if DATA_SIZE <= 2
-static inline int
-glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
- target_ulong ptr,
- uintptr_t retaddr)
-{
- CPUTLBEntry *entry;
- int res;
- target_ulong addr;
- int mmu_idx;
- TCGMemOpIdx oi;
-
-#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- ENV_GET_CPU(env), ptr,
- trace_mem_build_info(SHIFT, true, MO_TE, false));
-#endif
-
- addr = ptr;
- mmu_idx = CPU_MMU_INDEX;
- entry = tlb_entry(env, mmu_idx, addr);
- if (unlikely(entry->ADDR_READ !=
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- oi = make_memop_idx(SHIFT, mmu_idx);
- res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
- MMUSUFFIX)(env, addr, oi, retaddr);
- } else {
- uintptr_t hostaddr = addr + entry->addend;
- res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
- }
- return res;
-}
-
-static inline int
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
-{
- return glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
-}
-#endif
-
-#ifndef SOFTMMU_CODE_ACCESS
-
-/* generic store macro */
-
-static inline void
-glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
- target_ulong ptr,
- RES_TYPE v, uintptr_t retaddr)
-{
- CPUTLBEntry *entry;
- target_ulong addr;
- int mmu_idx;
- TCGMemOpIdx oi;
-
-#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- ENV_GET_CPU(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, true));
-#endif
-
- addr = ptr;
- mmu_idx = CPU_MMU_INDEX;
- entry = tlb_entry(env, mmu_idx, addr);
- if (unlikely(tlb_addr_write(entry) !=
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- oi = make_memop_idx(SHIFT, mmu_idx);
- glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
- retaddr);
- } else {
- uintptr_t hostaddr = addr + entry->addend;
- glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
- }
-}
-
-static inline void
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
- RES_TYPE v)
-{
- glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(env, ptr, v, 0);
-}
-
-#endif /* !SOFTMMU_CODE_ACCESS */
-
-#undef RES_TYPE
-#undef DATA_TYPE
-#undef DATA_STYPE
-#undef SUFFIX
-#undef USUFFIX
-#undef DATA_SIZE
-#undef MMUSUFFIX
-#undef ADDR_READ
-#undef URETSUFFIX
-#undef SRETSUFFIX
-#undef SHIFT
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
deleted file mode 100644
index 0fd6019af0..0000000000
--- a/include/exec/cpu_ldst_useronly_template.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * User-only accessor function support
- *
- * Generate inline load/store functions for one data size.
- *
- * Generate a store function as well as signed and unsigned loads.
- *
- * Not used directly but included from cpu_ldst.h.
- *
- * Copyright (c) 2015 Linaro Limited
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#if !defined(CODE_ACCESS)
-#include "trace-root.h"
-#endif
-
-#include "trace/mem.h"
-
-#if DATA_SIZE == 8
-#define SUFFIX q
-#define USUFFIX q
-#define DATA_TYPE uint64_t
-#define SHIFT 3
-#elif DATA_SIZE == 4
-#define SUFFIX l
-#define USUFFIX l
-#define DATA_TYPE uint32_t
-#define SHIFT 2
-#elif DATA_SIZE == 2
-#define SUFFIX w
-#define USUFFIX uw
-#define DATA_TYPE uint16_t
-#define DATA_STYPE int16_t
-#define SHIFT 1
-#elif DATA_SIZE == 1
-#define SUFFIX b
-#define USUFFIX ub
-#define DATA_TYPE uint8_t
-#define DATA_STYPE int8_t
-#define SHIFT 0
-#else
-#error unsupported data size
-#endif
-
-#if DATA_SIZE == 8
-#define RES_TYPE uint64_t
-#else
-#define RES_TYPE uint32_t
-#endif
-
-static inline RES_TYPE
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
-{
-#if !defined(CODE_ACCESS)
- trace_guest_mem_before_exec(
- ENV_GET_CPU(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, false));
-#endif
- return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
-}
-
-static inline RES_TYPE
-glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
- abi_ptr ptr,
- uintptr_t retaddr)
-{
- RES_TYPE ret;
- helper_retaddr = retaddr;
- ret = glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
- helper_retaddr = 0;
- return ret;
-}
-
-#if DATA_SIZE <= 2
-static inline int
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
-{
-#if !defined(CODE_ACCESS)
- trace_guest_mem_before_exec(
- ENV_GET_CPU(env), ptr,
- trace_mem_build_info(SHIFT, true, MO_TE, false));
-#endif
- return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
-}
-
-static inline int
-glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
- abi_ptr ptr,
- uintptr_t retaddr)
-{
- int ret;
- helper_retaddr = retaddr;
- ret = glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
- helper_retaddr = 0;
- return ret;
-}
-#endif
-
-#ifndef CODE_ACCESS
-static inline void
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
- RES_TYPE v)
-{
-#if !defined(CODE_ACCESS)
- trace_guest_mem_before_exec(
- ENV_GET_CPU(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, true));
-#endif
- glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
-}
-
-static inline void
-glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
- abi_ptr ptr,
- RES_TYPE v,
- uintptr_t retaddr)
-{
- helper_retaddr = retaddr;
- glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
- helper_retaddr = 0;
-}
-#endif
-
-#undef RES_TYPE
-#undef DATA_TYPE
-#undef DATA_STYPE
-#undef SUFFIX
-#undef USUFFIX
-#undef DATA_SIZE
-#undef SHIFT
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index 5373188be3..6da1462c4f 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,13 +16,15 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef CPUTLB_H
#define CPUTLB_H
+#include "exec/cpu-common.h"
+
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
-void tlb_flush_counts(size_t *full, size_t *part, size_t *elide);
#endif
#endif
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 815e5b1e83..3e53501691 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,82 +20,29 @@
#ifndef EXEC_ALL_H
#define EXEC_ALL_H
-#include "qemu-common.h"
-#include "exec/tb-context.h"
-#include "sysemu/cpus.h"
-
-/* allow to see translation results - the slowdown should be negligible, so we leave it */
-#define DEBUG_DISAS
-
-/* Page tracking code uses ram addresses in system mode, and virtual
- addresses in userspace mode. Define tb_page_addr_t to be an appropriate
- type. */
+#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
-typedef abi_ulong tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
-#else
-typedef ram_addr_t tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#include "exec/cpu_ldst.h"
#endif
+#include "exec/translation-block.h"
+#include "qemu/clang-tsa.h"
-#include "qemu/log.h"
-
-void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
-void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
- target_ulong *data);
-
-void cpu_gen_init(void);
-
-/**
- * cpu_restore_state:
- * @cpu: the vCPU state is to be restore to
- * @searched_pc: the host PC the fault occurred at
- * @will_exit: true if the TB executed will be interrupted after some
- cpu adjustments. Required for maintaining the correct
- icount valus
- * @return: true if state was restored, false otherwise
- *
- * Attempt to restore the state for a fault occurring in translated
- * code. If the searched_pc is not in translated code no state is
- * restored and the function returns false.
- */
-bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
-
-void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
-void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
-TranslationBlock *tb_gen_code(CPUState *cpu,
- target_ulong pc, target_ulong cs_base,
- uint32_t flags,
- int cflags);
-
-void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
-void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
-void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
-
-#if !defined(CONFIG_USER_ONLY)
-void cpu_reloading_memory_map(void);
/**
- * cpu_address_space_init:
- * @cpu: CPU to add this address space to
- * @asidx: integer index of this address space
- * @prefix: prefix to be used as name of address space
- * @mr: the root memory region of address space
- *
- * Add the specified address space to the CPU's cpu_ases list.
- * The address space added with @asidx 0 is the one used for the
- * convenience pointer cpu->as.
- * The target-specific code which registers ASes is responsible
- * for defining what semantics address space 0, 1, 2, etc have.
- *
- * Before the first call to this function, the caller must set
- * cpu->num_ases to the total number of address spaces it needs
- * to support.
- *
- * Note that with KVM only one address space is supported.
- */
-void cpu_address_space_init(CPUState *cpu, int asidx,
- const char *prefix, MemoryRegion *mr);
-#endif
+ * cpu_loop_exit_requested:
+ * @cpu: The CPU state to be tested
+ *
+ * Indicate if somebody asked for a return of the CPU to the main loop
+ * (e.g., via cpu_exit() or cpu_interrupt()).
+ *
+ * This is helpful for architectures that support interruptible
+ * instructions. After writing back all state to registers/memory, this
+ * call can be used to check if it makes sense to return to the main loop
+ * or to continue executing the interruptible instruction.
+ */
+static inline bool cpu_loop_exit_requested(CPUState *cpu)
+{
+ return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
+}
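A hedged sketch of the interruptible-instruction pattern described above, modelled loosely on the s390x memory helpers; a real target would also write the remaining length back to its registers before exiting, and the helper name is hypothetical.

static void example_interruptible_copy(CPUArchState *env, vaddr dst, vaddr src,
                                       uint64_t len, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    while (len--) {
        cpu_stb_data_ra(env, dst++, cpu_ldub_data_ra(env, src++, ra), ra);
        if (len && cpu_loop_exit_requested(cs)) {
            /* Architectural state is consistent; resume the instruction later. */
            cpu_loop_exit_restore(cs, ra);
        }
    }
}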
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
@@ -105,6 +52,11 @@ void cpu_address_space_init(CPUState *cpu, int asidx,
*/
void tlb_init(CPUState *cpu);
/**
+ * tlb_destroy - destroy a CPU's TLB
+ * @cpu: CPU whose TLB should be destroyed
+ */
+void tlb_destroy(CPUState *cpu);
+/**
* tlb_flush_page:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
@@ -112,7 +64,7 @@ void tlb_init(CPUState *cpu);
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
* tlb_flush_page_all_cpus:
* @cpu: src CPU of the flush
@@ -121,7 +73,7 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
* tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush
@@ -133,7 +85,7 @@ void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
* the source vCPUs safe work is complete. This will depend on when
* the guests translation ends the TB.
*/
-void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
@@ -168,7 +120,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus:
@@ -179,7 +131,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
* Flush one page from the TLB of all CPUs, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
@@ -193,7 +145,7 @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
* complete once the source vCPUs safe work is complete. This will
* depend on when the guests translation ends the TB.
*/
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_by_mmuidx:
@@ -226,10 +178,76 @@ void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
* depend on when the guests translation ends the TB.
*/
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_page_bits_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * Similar to tlb_flush_page_by_mmuidx, but comparing only the low @bits
+ * worth of the virtual address.
+ */
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits);
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
+ (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
+
+/**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits);
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits);
+
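As a sketch of the intended use (illustrative values only), an architecture-specific "invalidate by range" operation can be forwarded to the TLB like this, flushing a 64 KiB window in two MMU indexes while honouring a 48-bit significant-address width:

    /* Sketch: flush [va, va + 64K) for mmu indexes 0 and 1, 48 significant bits. */
    tlb_flush_range_by_mmuidx(cs, va & TARGET_PAGE_MASK, 64 * 1024,
                              (1 << 0) | (1 << 1), 48);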
+/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @addr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
+ CPUTLBEntryFull *full);
+
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
- * @vaddr: virtual address of page to add entry for
+ * @addr: virtual address of page to add entry for
* @paddr: physical address of the page
* @attrs: memory transaction attributes
* @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
@@ -237,7 +255,7 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
* @size: size of the page in bytes
*
* Add an entry to this CPU's TLB (a mapping from virtual address
- * @vaddr to physical address @paddr) with the specified memory
+ * @addr to physical address @paddr) with the specified memory
* transaction attributes. This is generally called by the target CPU
* specific code after it has been called through the tlb_fill()
* entry point and performed a successful page table walk to find
@@ -248,32 +266,32 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
* single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
* used by tlb_flush_page.
*/
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs,
- int prot, int mmu_idx, target_ulong size);
+ int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
*
* This function is equivalent to calling tlb_set_page_with_attrs()
* with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
* as a convenience for CPUs which don't use memory transaction attributes.
*/
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, target_ulong size);
-void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
- uintptr_t retaddr);
+ int mmu_idx, vaddr size);
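For orientation (a sketch, not code from this patch): after a successful page-table walk, a target's tlb_fill-style handler typically installs the translation with a single call. The walk結构 below is hypothetical; only the tlb_set_page_with_attrs() signature comes from this header.

    /* Sketch: install a one-page translation found by a hypothetical walk. */
    tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                            walk.phys & TARGET_PAGE_MASK,  /* physical page      */
                            walk.attrs,                    /* MemTxAttrs         */
                            walk.prot,                     /* PAGE_READ|WRITE|... */
                            mmu_idx, TARGET_PAGE_SIZE);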
#else
static inline void tlb_init(CPUState *cpu)
{
}
-static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
+static inline void tlb_destroy(CPUState *cpu)
+{
+}
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
- target_ulong addr)
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
@@ -286,7 +304,7 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
- target_ulong addr, uint16_t idxmap)
+ vaddr addr, uint16_t idxmap)
{
}
@@ -294,12 +312,12 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
@@ -311,131 +329,196 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap)
{
}
+static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
#endif
+/**
+ * probe_access:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @size: size of the access
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @retaddr: return address for unwinding
+ *
+ * Look up the guest virtual address @addr. Raise an exception if the
+ * page does not satisfy @access_type. Raise an exception if the
+ * access (@addr, @size) hits a watchpoint. For writes, mark a clean
+ * page as dirty.
+ *
+ * Finally, return the host address for a page that is backed by RAM,
+ * or NULL if the page requires I/O.
+ */
+void *probe_access(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
+
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
+}
+
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+}
+
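A short sketch of how these wrappers are typically used (everything except the probe call and GETPC() is an assumption): probing the full width of a store up front means any fault is raised before the helper commits partial state.

    /* Sketch: hypothetical helper that must not fault half-way through. */
    void helper_store16(CPUArchState *env, uint64_t addr, uint64_t lo, uint64_t hi)
    {
        uintptr_t ra = GETPC();
        int mmu_idx = cpu_mmu_index(env, false);   /* assumed target hook */
        void *host = probe_write(env, addr, 16, mmu_idx, ra);

        if (host) {
            /* RAM-backed and writable: both words can now be stored
             * (byte-swapping and the actual stores elided here). */
        } else {
            /* NULL means the page requires I/O: take a slow path instead. */
        }
    }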
+/**
+ * probe_access_flags:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @size: size of the access
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @nonfault: suppress the fault
+ * @phost: return value for host address
+ * @retaddr: return address for unwinding
+ *
+ * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
+ * the page, and storing the host address for RAM in @phost.
+ *
+ * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
+ * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
+ * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
+ * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
+ */
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr);
+
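Sketch of the non-faulting pattern (illustrative only), as used for first-fault / no-fault style loads: with @nonfault set, a missing translation is reported through the flags instead of raising an exception.

    /* Sketch: try a no-fault load; fall back silently if it would fault. */
    void *host;
    int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD, mmu_idx,
                                   true, &host, retaddr);
    if (flags & TLB_INVALID_MASK) {
        /* No valid translation: suppress the access and record "no data". */
    } else if (host && !(flags & (TLB_MMIO | TLB_WATCHPOINT))) {
        /* Plain RAM: safe to read directly through host. */
    } else {
        /* Device memory or watchpoint: take the slow path. */
    }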
+#ifndef CONFIG_USER_ONLY
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ */
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost,
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
-#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
+/**
+ * probe_access_full_mmu() - Like probe_access_full, except it cannot fault
+ * and does not trigger instrumentation.
+ *
+ * @env: CPUArchState
+ * @addr: virtual address to probe
+ * @size: size of the probe
+ * @access_type: read, write or execute permission
+ * @mmu_idx: softmmu index
+ * @phost: ptr to return value host address or NULL
+ * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ *
+ * Returns: TLB flags as per probe_access_flags()
+ */
+int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ void **phost, CPUTLBEntryFull **pfull);
+
+#endif
-/* Estimated block size for TB allocation. */
-/* ??? The following is based on a 2015 survey of x86_64 host output.
- Better would seem to be some sort of dynamically sized TB array,
- adapting to the block sizes actually being produced. */
-#if defined(CONFIG_SOFTMMU)
-#define CODE_GEN_AVG_BLOCK_SIZE 400
+static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ return tb->itree.start;
#else
-#define CODE_GEN_AVG_BLOCK_SIZE 150
+ return tb->page_addr[0];
#endif
+}
-/*
- * Translation Cache-related fields of a TB.
- * This struct exists just for convenience; we keep track of TB's in a binary
- * search tree, and the only fields needed to compare TB's in the tree are
- * @ptr and @size.
- * Note: the address of search data can be obtained by adding @size to @ptr.
- */
-struct tb_tc {
- void *ptr; /* pointer to the translated code */
- size_t size;
-};
-
-struct TranslationBlock {
- target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
- target_ulong cs_base; /* CS base for this block */
- uint32_t flags; /* flags defining in which context the code was generated */
- uint16_t size; /* size of target code for this block (1 <=
- size <= TARGET_PAGE_SIZE) */
- uint16_t icount;
- uint32_t cflags; /* compile flags */
-#define CF_COUNT_MASK 0x00007fff
-#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
-#define CF_NOCACHE 0x00010000 /* To be freed after execution */
-#define CF_USE_ICOUNT 0x00020000
-#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
-/* cflags' mask for hashing/comparison */
-#define CF_HASH_MASK \
- (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)
-
- /* Per-vCPU dynamic tracing state used to generate this TB */
- uint32_t trace_vcpu_dstate;
-
- struct tb_tc tc;
-
- /* original tb when cflags has CF_NOCACHE */
- struct TranslationBlock *orig_tb;
- /* first and second physical page containing code. The lower bit
- of the pointer tells the index in page_next[].
- The list is protected by the TB's page('s) lock(s) */
- uintptr_t page_next[2];
- tb_page_addr_t page_addr[2];
-
- /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
- QemuSpin jmp_lock;
-
- /* The following data are used to directly call another TB from
- * the code of this one. This can be done either by emitting direct or
- * indirect native jump instructions. These jumps are reset so that the TB
- * just continues its execution. The TB can be linked to another one by
- * setting one of the jump targets (or patching the jump instruction). Only
- * two of such jumps are supported.
- */
- uint16_t jmp_reset_offset[2]; /* offset of original jump target */
-#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
- uintptr_t jmp_target_arg[2]; /* target address or offset */
+static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
+ return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
+#else
+ return tb->page_addr[1];
+#endif
+}
+static inline void tb_set_page_addr0(TranslationBlock *tb,
+ tb_page_addr_t addr)
+{
+#ifdef CONFIG_USER_ONLY
+ tb->itree.start = addr;
/*
- * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
- * Each TB can have two outgoing jumps, and therefore can participate
- * in two lists. The list entries are kept in jmp_list_next[2]. The least
- * significant bit (LSB) of the pointers in these lists is used to encode
- * which of the two list entries is to be used in the pointed TB.
- *
- * List traversals are protected by jmp_lock. The destination TB of each
- * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
- * can be acquired from any origin TB.
- *
- * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
- * being invalidated, so that no further outgoing jumps from it can be set.
- *
- * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
- * to a destination TB that has CF_INVALID set.
+ * To begin, we record an interval of one byte. When the translation
+ * loop encounters a second page, the interval will be extended to
+ * include the first byte of the second page, which is sufficient to
+ * allow tb_page_addr1() above to work properly. The final corrected
+ * interval will be set by tb_page_add() from tb->size before the
+ * node is added to the interval tree.
*/
- uintptr_t jmp_list_head;
- uintptr_t jmp_list_next[2];
- uintptr_t jmp_dest[2];
-};
-
-extern bool parallel_cpus;
+ tb->itree.last = addr;
+#else
+ tb->page_addr[0] = addr;
+#endif
+}
-/* Hide the atomic_read to make code a little easier on the eyes */
-static inline uint32_t tb_cflags(const TranslationBlock *tb)
+static inline void tb_set_page_addr1(TranslationBlock *tb,
+ tb_page_addr_t addr)
{
- return atomic_read(&tb->cflags);
+#ifdef CONFIG_USER_ONLY
+ /* Extend the interval to the first byte of the second page. See above. */
+ tb->itree.last = addr;
+#else
+ tb->page_addr[1] = addr;
+#endif
}
/* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(void)
-{
- return (parallel_cpus ? CF_PARALLEL : 0)
- | (use_icount ? CF_USE_ICOUNT : 0);
-}
+uint32_t curr_cflags(CPUState *cpu);
/* TranslationBlock invalidate API */
-#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(target_ulong addr);
-void tb_invalidate_phys_range(target_ulong start, target_ulong end);
-#else
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
-#endif
-void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags,
- uint32_t cf_mask);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
-extern uintptr_t tci_tb_ptr;
+extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
@@ -451,14 +534,6 @@ extern uintptr_t tci_tb_ptr;
smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void)
-{
-}
-#endif
-
#if !defined(CONFIG_USER_ONLY)
/**
@@ -472,47 +547,122 @@ static inline void assert_no_pages_locked(void)
*/
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
+#endif
-void tlb_fill(CPUState *cpu, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
+/**
+ * get_page_addr_code_hostp()
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ * @hostp: if non-NULL, set to the host address backing @addr (see below)
+ *
+ * See get_page_addr_code() (full-system version) for documentation on the
+ * return value.
+ *
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
+ * to the host address where @addr's content is kept.
+ *
+ * Note: this function can trigger an exception.
+ */
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
+ void **hostp);
-#endif
+/**
+ * get_page_addr_code()
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * If we cannot translate and execute from the entire RAM page, or if
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
+ * ram_addr_t corresponding to the guest code at @addr.
+ *
+ * Note: this function can trigger an exception.
+ */
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
+ vaddr addr)
+{
+ return get_page_addr_code_hostp(env, addr, NULL);
+}
#if defined(CONFIG_USER_ONLY)
-void mmap_lock(void);
-void mmap_unlock(void);
+void TSA_NO_TSA mmap_lock(void);
+void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);
-static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
+static inline void mmap_unlock_guard(void *unused)
{
- return addr;
+ mmap_unlock();
}
+
+#define WITH_MMAP_LOCK_GUARD() \
+ for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
+ = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
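Usage sketch (user-mode only; the range check and update below are invented): the guard holds the lock for the lexical scope and releases it on every exit path, including early returns, via the cleanup attribute.

    /* Sketch: scoped mmap lock around a page-table update. */
    WITH_MMAP_LOCK_GUARD() {
        if (!range_is_valid(start, len)) {      /* illustrative check  */
            return -EINVAL;                     /* lock released automatically */
        }
        update_page_flags(start, len, flags);   /* illustrative update */
    }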
+
+/**
+ * adjust_signal_pc:
+ * @pc: raw pc from the host signal ucontext_t.
+ * @is_write: host memory operation was write, or read-modify-write.
+ *
+ * Alter @pc as required for unwinding. Return the type of the
+ * guest memory access -- host reads may be for guest execution.
+ */
+MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
+
+/**
+ * handle_sigsegv_accerr_write:
+ * @cpu: the cpu context
+ * @old_set: the sigset_t from the signal ucontext_t
+ * @host_pc: the host pc, adjusted for the signal
+ * @guest_addr: the guest address of the fault
+ *
+ * Return true if the write fault has been handled, and should be re-tried.
+ */
+bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
+ uintptr_t host_pc, abi_ptr guest_addr);
+
+/**
+ * cpu_loop_exit_sigsegv:
+ * @cpu: the cpu context
+ * @addr: the guest address of the fault
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGSEGV, and jump to the main cpu loop.
+ */
+G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+
+/**
+ * cpu_loop_exit_sigbus:
+ * @cpu: the cpu context
+ * @addr: the guest address of the alignment fault
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGBUS, and jump to the main cpu loop.
+ */
+G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ uintptr_t ra);
+
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
-
-/* cputlb.c */
-tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
+#define WITH_MMAP_LOCK_GUARD()
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
-
-/* exec.c */
-void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
+void tlb_set_dirty(CPUState *cpu, vaddr addr);
+void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
- MemoryRegionSection *section,
- target_ulong vaddr,
- hwaddr paddr, hwaddr xlat,
- int prot,
- target_ulong *address);
+ MemoryRegionSection *section);
#endif
-/* vl.c */
-extern int singlestep;
-
#endif
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
index 08363969c1..eb14b91139 100644
--- a/include/exec/gdbstub.h
+++ b/include/exec/gdbstub.h
@@ -10,125 +10,138 @@
#define GDB_WATCHPOINT_READ 3
#define GDB_WATCHPOINT_ACCESS 4
-#ifdef NEED_CPU_H
-#include "cpu.h"
+typedef struct GDBFeature {
+ const char *xmlname;
+ const char *xml;
+ const char *name;
+ const char * const *regs;
+ int num_regs;
+} GDBFeature;
+
+typedef struct GDBFeatureBuilder {
+ GDBFeature *feature;
+ GPtrArray *xml;
+ GPtrArray *regs;
+ int base_reg;
+} GDBFeatureBuilder;
-typedef void (*gdb_syscall_complete_cb)(CPUState *cpu,
- target_ulong ret, target_ulong err);
+
+/* Get or set a register. Returns the size of the register. */
+typedef int (*gdb_get_reg_cb)(CPUState *cpu, GByteArray *buf, int reg);
+typedef int (*gdb_set_reg_cb)(CPUState *cpu, uint8_t *buf, int reg);
/**
- * gdb_do_syscall:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * ...: list of arguments to interpolate into @fmt
- *
- * Send a GDB syscall request. This function will return immediately;
- * the callback function will be called later when the remote system
- * call has completed.
- *
- * @fmt should be in the 'call-id,parameter,parameter...' format documented
- * for the F request packet in the GDB remote protocol. A limited set of
- * printf-style format specifiers is supported:
- * %x - target_ulong argument printed in hex
- * %lx - 64-bit argument printed in hex
- * %s - string pointer (target_ulong) and length (int) pair
+ * gdb_init_cpu(): Initialize the CPU for gdbstub.
+ * @cpu: The CPU to be initialized.
*/
-void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+void gdb_init_cpu(CPUState *cpu);
+
/**
- * gdb_do_syscallv:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * @va: arguments to interpolate into @fmt
- *
- * As gdb_do_syscall, but taking a va_list rather than a variable
- * argument list.
+ * gdb_register_coprocessor() - register a supplemental set of registers
+ * @cpu - the CPU associated with registers
+ * @get_reg - get function (gdb reading)
+ * @set_reg - set function (gdb modifying)
+ * @feature - the GDBFeature describing the register set
+ * @g_pos - non-zero to append to the "general" register set at @g_pos
*/
-void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va);
-int use_gdb_syscalls(void);
-void gdb_set_stop_cpu(CPUState *cpu);
-void gdb_exit(CPUArchState *, int);
-#ifdef CONFIG_USER_ONLY
+void gdb_register_coprocessor(CPUState *cpu,
+ gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
+ const GDBFeature *feature, int g_pos);
+
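Illustrative registration from a target's CPU init path (the XML name and the callbacks are hypothetical); the feature itself can come from gdb_find_static_feature(), declared further below:

    /* Sketch: expose an extra register set to gdb. */
    gdb_register_coprocessor(cs, my_coproc_gdb_get_reg, my_coproc_gdb_set_reg,
                             gdb_find_static_feature("example-coproc.xml"), 0);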
/**
- * gdb_handlesig: yield control to gdb
- * @cpu: CPU
- * @sig: if non-zero, the signal number which caused us to stop
+ * gdbserver_start: start the gdb server
+ * @port_or_device: connection spec for gdb
*
- * This function yields control to gdb, when a user-mode-only target
- * needs to stop execution. If @sig is non-zero, then we will send a
- * stop packet to tell gdb that we have stopped because of this signal.
- *
- * This function will block (handling protocol requests from gdb)
- * until gdb tells us to continue target execution. When it does
- * return, the return value is a signal to deliver to the target,
- * or 0 if no signal should be delivered, ie the signal that caused
- * us to stop should be ignored.
+ * For CONFIG_USER this is either a TCP port or a path to a FIFO. For
+ * system emulation you can use a full chardev spec for your gdbserver
+ * port.
*/
-int gdb_handlesig(CPUState *, int);
-void gdb_signalled(CPUArchState *, int);
-void gdbserver_fork(CPUState *);
-#endif
-/* Get or set a register. Returns the size of the register. */
-typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
-void gdb_register_coprocessor(CPUState *cpu,
- gdb_reg_cb get_reg, gdb_reg_cb set_reg,
- int num_regs, const char *xml, int g_pos);
+int gdbserver_start(const char *port_or_device);
-/* The GDB remote protocol transfers values in target byte order. This means
- * we can use the raw memory access routines to access the value buffer.
- * Conveniently, these also handle the case where the buffer is mis-aligned.
+/**
+ * gdb_feature_builder_init() - Initialize GDBFeatureBuilder.
+ * @builder: The builder to be initialized.
+ * @feature: The feature to be filled.
+ * @name: The name of the feature.
+ * @xmlname: The name of the XML.
+ * @base_reg: The base number of the register ID.
*/
+void gdb_feature_builder_init(GDBFeatureBuilder *builder, GDBFeature *feature,
+ const char *name, const char *xmlname,
+ int base_reg);
-static inline int gdb_get_reg8(uint8_t *mem_buf, uint8_t val)
-{
- stb_p(mem_buf, val);
- return 1;
-}
-
-static inline int gdb_get_reg16(uint8_t *mem_buf, uint16_t val)
-{
- stw_p(mem_buf, val);
- return 2;
-}
-
-static inline int gdb_get_reg32(uint8_t *mem_buf, uint32_t val)
-{
- stl_p(mem_buf, val);
- return 4;
-}
-
-static inline int gdb_get_reg64(uint8_t *mem_buf, uint64_t val)
-{
- stq_p(mem_buf, val);
- return 8;
-}
-
-#if TARGET_LONG_BITS == 64
-#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
-#define ldtul_p(addr) ldq_p(addr)
-#else
-#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
-#define ldtul_p(addr) ldl_p(addr)
-#endif
+/**
+ * gdb_feature_builder_append_tag() - Append a tag.
+ * @builder: The builder.
+ * @format: The format of the tag.
+ * @...: The values to be formatted.
+ */
+void G_GNUC_PRINTF(2, 3)
+gdb_feature_builder_append_tag(const GDBFeatureBuilder *builder,
+ const char *format, ...);
-#endif
+/**
+ * gdb_feature_builder_append_reg() - Append a register.
+ * @builder: The builder.
+ * @name: The register's name; it must be unique within a CPU.
+ * @bitsize: The register's size, in bits.
+ * @regnum: The offset of the register's number in the feature.
+ * @type: The type of the register.
+ * @group: The register group to which this register belongs; it can be NULL.
+ */
+void gdb_feature_builder_append_reg(const GDBFeatureBuilder *builder,
+ const char *name,
+ int bitsize,
+ int regnum,
+ const char *type,
+ const char *group);
-#ifdef CONFIG_USER_ONLY
-int gdbserver_start(int);
-#else
-int gdbserver_start(const char *port);
-#endif
+/**
+ * gdb_feature_builder_end() - End building GDBFeature.
+ * @builder: The builder.
+ */
+void gdb_feature_builder_end(const GDBFeatureBuilder *builder);
+
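Putting the builder API together (a sketch with made-up names): registers are appended in order starting from @base_reg, and the finished GDBFeature can then be handed to gdb_register_coprocessor().

    /* Sketch: build a two-register dynamic feature. */
    static GDBFeature feature;
    GDBFeatureBuilder builder;

    gdb_feature_builder_init(&builder, &feature,
                             "org.example.coproc", "example-coproc.xml",
                             base_reg);
    gdb_feature_builder_append_reg(&builder, "ctrl",   32, 0, "int", NULL);
    gdb_feature_builder_append_reg(&builder, "status", 32, 1, "int", NULL);
    gdb_feature_builder_end(&builder);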
+/**
+ * gdb_find_static_feature() - Find a static feature.
+ * @xmlname: The name of the XML.
+ *
+ * Return: The static feature.
+ */
+const GDBFeature *gdb_find_static_feature(const char *xmlname);
-void gdbserver_cleanup(void);
+/**
+ * gdb_read_register() - Read a register associated with a CPU.
+ * @cpu: The CPU associated with the register.
+ * @buf: The buffer that the read register will be appended to.
+ * @reg: The register's number returned by gdb_find_feature_register().
+ *
+ * Return: The number of read bytes.
+ */
+int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
/**
- * gdb_has_xml:
- * This is an ugly hack to cope with both new and old gdb.
- * If gdb sends qXfer:features:read then assume we're talking to a newish
- * gdb that understands target descriptions.
+ * typedef GDBRegDesc - a register description from gdbstub
*/
-extern bool gdb_has_xml;
+typedef struct {
+ int gdb_reg;
+ const char *name;
+ const char *feature_name;
+} GDBRegDesc;
+
+/**
+ * gdb_get_register_list() - Return list of all registers for CPU
+ * @cpu: The CPU being searched
+ *
+ * Returns a GArray of GDBRegDesc; the caller frees the array but not the
+ * const strings.
+ */
+GArray *gdb_get_register_list(CPUState *cpu);
+
+void gdb_set_stop_cpu(CPUState *cpu);
-/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
-extern const char *const xml_builtin[][2];
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.py */
+extern const GDBFeature gdb_static_features[];
#endif
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
deleted file mode 100644
index 24f7991781..0000000000
--- a/include/exec/gen-icount.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef GEN_ICOUNT_H
-#define GEN_ICOUNT_H
-
-#include "qemu/timer.h"
-
-/* Helpers for instruction counting code generation. */
-
-static TCGOp *icount_start_insn;
-
-static inline void gen_tb_start(TranslationBlock *tb)
-{
- TCGv_i32 count, imm;
-
- tcg_ctx->exitreq_label = gen_new_label();
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- count = tcg_temp_local_new_i32();
- } else {
- count = tcg_temp_new_i32();
- }
-
- tcg_gen_ld_i32(count, cpu_env,
- -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
-
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- imm = tcg_temp_new_i32();
- /* We emit a movi with a dummy immediate argument. Keep the insn index
- * of the movi so that we later (when we know the actual insn count)
- * can update the immediate argument with the actual insn count. */
- tcg_gen_movi_i32(imm, 0xdeadbeef);
- icount_start_insn = tcg_last_op();
-
- tcg_gen_sub_i32(count, count, imm);
- tcg_temp_free_i32(imm);
- }
-
- tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
-
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- tcg_gen_st16_i32(count, cpu_env,
- -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
- }
-
- tcg_temp_free_i32(count);
-}
-
-static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
-{
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- /* Update the num_insn immediate parameter now that we know
- * the actual insn count. */
- tcg_set_insn_param(icount_start_insn, 1, num_insns);
- }
-
- gen_set_label(tcg_ctx->exitreq_label);
- tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
-}
-
-static inline void gen_io_start(void)
-{
- TCGv_i32 tmp = tcg_const_i32(1);
- tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
- tcg_temp_free_i32(tmp);
-}
-
-static inline void gen_io_end(void)
-{
- TCGv_i32 tmp = tcg_const_i32(0);
- tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
- tcg_temp_free_i32(tmp);
-}
-
-#endif
diff --git a/include/exec/helper-gen-common.h b/include/exec/helper-gen-common.h
new file mode 100644
index 0000000000..5d6d78a625
--- /dev/null
+++ b/include/exec/helper-gen-common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ */
+
+#ifndef HELPER_GEN_COMMON_H
+#define HELPER_GEN_COMMON_H
+
+#define HELPER_H "accel/tcg/tcg-runtime.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
+
+#define HELPER_H "accel/tcg/plugin-helpers.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
+
+#endif /* HELPER_GEN_COMMON_H */
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
index 22381a1708..f7ec155699 100644
--- a/include/exec/helper-gen.h
+++ b/include/exec/helper-gen.h
@@ -1,83 +1,16 @@
-/* Helper file for declaring TCG helper functions.
- This one expands generation functions for tcg opcodes. */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ */
#ifndef HELPER_GEN_H
#define HELPER_GEN_H
-#include "exec/helper-head.h"
+#include "exec/helper-gen-common.h"
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
-{ \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 0, NULL); \
-}
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1)) \
-{ \
- TCGTemp *args[1] = { dh_arg(t1, 1) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 1, args); \
-}
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
-{ \
- TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 2, args); \
-}
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
-{ \
- TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 3, args); \
-}
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
- dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
-{ \
- TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
- dh_arg(t3, 3), dh_arg(t4, 4) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 4, args); \
-}
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
-{ \
- TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 5, args); \
-}
-
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
-{ \
- TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 6, args); \
-}
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "trace/generated-helpers-wrappers.h"
-#include "tcg-runtime.h"
-
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-#undef GEN_HELPER
+#define HELPER_H "helper.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
#endif /* HELPER_GEN_H */
diff --git a/include/exec/helper-gen.h.inc b/include/exec/helper-gen.h.inc
new file mode 100644
index 0000000000..c009641517
--- /dev/null
+++ b/include/exec/helper-gen.h.inc
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ * Define HELPER_H for the header file to be expanded,
+ * and static inline to change from global file scope.
+ */
+
+#include "tcg/tcg.h"
+#include "tcg/helper-info.h"
+#include "exec/helper-head.h"
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+ tcg_gen_call0(&glue(helper_info_, name), dh_retvar(ret)); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1)) \
+{ \
+ tcg_gen_call1(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1)); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
+{ \
+ tcg_gen_call2(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2)); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+ tcg_gen_call3(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3)); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
+ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+ tcg_gen_call4(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), \
+ dh_arg(t3, 3), dh_arg(t4, 4)); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+ tcg_gen_call5(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5)); \
+}
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
+{ \
+ tcg_gen_call6(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6)); \
+}
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
+ dh_arg_decl(t7, 7)) \
+{ \
+ tcg_gen_call7(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
+ dh_arg(t7, 7)); \
+}
+
+#include HELPER_H
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
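To see the pieces side by side (a sketch; the helper name and operands are invented): a single DEF_HELPER line in the target's helper.h yields the C prototype, the TCGHelperInfo descriptor, and the gen_helper_* emitter used from the translator.

    /* target/xyz/helper.h (hypothetical): */
    DEF_HELPER_2(xyz_add, i32, i32, i32)

    /* target/xyz/op_helper.c: the run-time implementation. */
    uint32_t HELPER(xyz_add)(uint32_t a, uint32_t b)
    {
        return a + b;
    }

    /* target/xyz/translate.c: emits a call through helper_info_xyz_add. */
    gen_helper_xyz_add(dst, src1, src2);   /* dst, src1, src2 are TCGv_i32 */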
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
index ab4f8b6623..28ceab0a46 100644
--- a/include/exec/helper-head.h
+++ b/include/exec/helper-head.h
@@ -1,23 +1,13 @@
-/* Helper file for declaring TCG helper functions.
- Used by other helper files.
-
- Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
- functions. Names should be specified without the helper_ prefix, and
- the return and argument types specified. 3 basic types are understood
- (i32, i64 and ptr). Additional aliases are provided for convenience and
- to match the types used by the C helper implementation.
-
- The target helper.h should be included in all files that use/define
- helper functions. THis will ensure that function prototypes are
- consistent. In addition it should be included an extra two times for
- helper.c, defining:
- GEN_HELPER 1 to produce op generation functions (gen_helper_*)
- GEN_HELPER 2 to do runtime registration helper functions.
+/*
+ * Helper file for declaring TCG helper functions.
+ * Used by other helper files.
*/
#ifndef EXEC_HELPER_HEAD_H
#define EXEC_HELPER_HEAD_H
+#include "fpu/softfloat-types.h"
+
#define HELPER(name) glue(helper_, name)
/* Some types that make sense in C, but not for TCG. */
@@ -26,10 +16,13 @@
#define dh_alias_int i32
#define dh_alias_i64 i64
#define dh_alias_s64 i64
+#define dh_alias_i128 i128
#define dh_alias_f16 i32
#define dh_alias_f32 i32
#define dh_alias_f64 i64
#define dh_alias_ptr ptr
+#define dh_alias_cptr ptr
+#define dh_alias_env ptr
#define dh_alias_void void
#define dh_alias_noreturn noreturn
#define dh_alias(t) glue(dh_alias_, t)
@@ -39,25 +32,28 @@
#define dh_ctype_int int
#define dh_ctype_i64 uint64_t
#define dh_ctype_s64 int64_t
+#define dh_ctype_i128 Int128
#define dh_ctype_f16 uint32_t
#define dh_ctype_f32 float32
#define dh_ctype_f64 float64
#define dh_ctype_ptr void *
+#define dh_ctype_cptr const void *
+#define dh_ctype_env CPUArchState *
#define dh_ctype_void void
-#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype_noreturn G_NORETURN void
#define dh_ctype(t) dh_ctype_##t
#ifdef NEED_CPU_H
# ifdef TARGET_LONG_BITS
# if TARGET_LONG_BITS == 32
# define dh_alias_tl i32
+# define dh_typecode_tl dh_typecode_i32
# else
# define dh_alias_tl i64
+# define dh_typecode_tl dh_typecode_i64
# endif
# endif
-# define dh_alias_env ptr
# define dh_ctype_tl target_ulong
-# define dh_ctype_env CPUArchState *
#endif
/* We can't use glue() here because it falls foul of C preprocessor
@@ -66,6 +62,7 @@
#define dh_retvar_decl0_noreturn void
#define dh_retvar_decl0_i32 TCGv_i32 retval
#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_i128 TCGv_i128 retval
#define dh_retvar_decl0_ptr TCGv_ptr retval
#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
@@ -73,6 +70,7 @@
#define dh_retvar_decl_noreturn
#define dh_retvar_decl_i32 TCGv_i32 retval,
#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_i128 TCGv_i128 retval,
#define dh_retvar_decl_ptr TCGv_ptr retval,
#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
@@ -80,49 +78,35 @@
#define dh_retvar_noreturn NULL
#define dh_retvar_i32 tcgv_i32_temp(retval)
#define dh_retvar_i64 tcgv_i64_temp(retval)
+#define dh_retvar_i128 tcgv_i128_temp(retval)
#define dh_retvar_ptr tcgv_ptr_temp(retval)
#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
-#define dh_is_64bit_void 0
-#define dh_is_64bit_noreturn 0
-#define dh_is_64bit_i32 0
-#define dh_is_64bit_i64 1
-#define dh_is_64bit_ptr (sizeof(void *) == 8)
-#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
-
-#define dh_is_signed_void 0
-#define dh_is_signed_noreturn 0
-#define dh_is_signed_i32 0
-#define dh_is_signed_s32 1
-#define dh_is_signed_i64 0
-#define dh_is_signed_s64 1
-#define dh_is_signed_f16 0
-#define dh_is_signed_f32 0
-#define dh_is_signed_f64 0
-#define dh_is_signed_tl 0
-#define dh_is_signed_int 1
-/* ??? This is highly specific to the host cpu. There are even special
- extension instructions that may be required, e.g. ia64's addp4. But
- for now we don't support any 64-bit targets with 32-bit pointers. */
-#define dh_is_signed_ptr 0
-#define dh_is_signed_env dh_is_signed_ptr
-#define dh_is_signed(t) dh_is_signed_##t
+#define dh_typecode_void 0
+#define dh_typecode_noreturn 0
+#define dh_typecode_i32 2
+#define dh_typecode_s32 3
+#define dh_typecode_i64 4
+#define dh_typecode_s64 5
+#define dh_typecode_ptr 6
+#define dh_typecode_i128 7
+#define dh_typecode_int dh_typecode_s32
+#define dh_typecode_f16 dh_typecode_i32
+#define dh_typecode_f32 dh_typecode_i32
+#define dh_typecode_f64 dh_typecode_i64
+#define dh_typecode_cptr dh_typecode_ptr
+#define dh_typecode_env dh_typecode_ptr
+#define dh_typecode(t) dh_typecode_##t
#define dh_callflag_i32 0
-#define dh_callflag_s32 0
-#define dh_callflag_int 0
#define dh_callflag_i64 0
-#define dh_callflag_s64 0
-#define dh_callflag_f16 0
-#define dh_callflag_f32 0
-#define dh_callflag_f64 0
+#define dh_callflag_i128 0
#define dh_callflag_ptr 0
#define dh_callflag_void 0
#define dh_callflag_noreturn TCG_CALL_NO_RETURN
#define dh_callflag(t) glue(dh_callflag_, dh_alias(t))
-#define dh_sizemask(t, n) \
- ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1)))
+#define dh_typemask(t, n) (dh_typecode(t) << (n * 3))
#define dh_arg(t, n) \
glue(glue(tcgv_, dh_alias(t)), _temp)(glue(arg, n))
@@ -143,7 +127,9 @@
DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
#define DEF_HELPER_6(name, ret, t1, t2, t3, t4, t5, t6) \
DEF_HELPER_FLAGS_6(name, 0, ret, t1, t2, t3, t4, t5, t6)
+#define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \
+ DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7)
-/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+/* MAX_CALL_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
#endif /* EXEC_HELPER_HEAD_H */
diff --git a/include/exec/helper-info.c.inc b/include/exec/helper-info.c.inc
new file mode 100644
index 0000000000..530d2e6d35
--- /dev/null
+++ b/include/exec/helper-info.c.inc
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands info structures for tcg helpers.
+ * Define HELPER_H for the header file to be expanded.
+ */
+
+#include "tcg/tcg.h"
+#include "tcg/helper-info.h"
+#include "exec/helper-head.h"
+
+/*
+ * Need one more level of indirection before stringification
+ * to get all the macros expanded first.
+ */
+#define str(s) #s
+
+#define DEF_HELPER_FLAGS_0(NAME, FLAGS, RET) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) \
+ };
+
+#define DEF_HELPER_FLAGS_1(NAME, FLAGS, RET, T1) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ };
+
+#define DEF_HELPER_FLAGS_2(NAME, FLAGS, RET, T1, T2) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) \
+ };
+
+#define DEF_HELPER_FLAGS_3(NAME, FLAGS, RET, T1, T2, T3) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ };
+
+#define DEF_HELPER_FLAGS_4(NAME, FLAGS, RET, T1, T2, T3, T4) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) \
+ };
+
+#define DEF_HELPER_FLAGS_5(NAME, FLAGS, RET, T1, T2, T3, T4, T5) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ };
+
+#define DEF_HELPER_FLAGS_6(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ | dh_typemask(T6, 6) \
+ };
+
+#define DEF_HELPER_FLAGS_7(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6, T7) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ | dh_typemask(T6, 6) | dh_typemask(T7, 7) \
+ };
+
+#include HELPER_H
+
+#undef str
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
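For reference (a sketch; the target name is invented), each target expands its helper info table exactly once in a C file, mirroring what helper-gen-common.h does for the common runtime helpers:

    /* target/xyz/translate.c (or another single translation unit): */
    #define HELPER_H "helper.h"
    #include "exec/helper-info.c.inc"
    #undef  HELPER_H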
diff --git a/include/exec/helper-proto-common.h b/include/exec/helper-proto-common.h
new file mode 100644
index 0000000000..8b67170a22
--- /dev/null
+++ b/include/exec/helper-proto-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ */
+
+#ifndef HELPER_PROTO_COMMON_H
+#define HELPER_PROTO_COMMON_H
+
+#include "qemu/atomic128.h" /* for HAVE_CMPXCHG128 */
+
+#define HELPER_H "accel/tcg/tcg-runtime.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
+
+#define HELPER_H "accel/tcg/plugin-helpers.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
+
+#endif /* HELPER_PROTO_COMMON_H */
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
index 74943edb13..6935cb4f16 100644
--- a/include/exec/helper-proto.h
+++ b/include/exec/helper-proto.h
@@ -1,45 +1,16 @@
-/* Helper file for declaring TCG helper functions.
- This one expands prototypes for the helper functions. */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ */
#ifndef HELPER_PROTO_H
#define HELPER_PROTO_H
-#include "exec/helper-head.h"
+#include "exec/helper-proto-common.h"
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-dh_ctype(ret) HELPER(name) (void);
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1));
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4));
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5));
-
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "tcg-runtime.h"
-
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
+#define HELPER_H "helper.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
#endif /* HELPER_PROTO_H */
diff --git a/include/exec/helper-proto.h.inc b/include/exec/helper-proto.h.inc
new file mode 100644
index 0000000000..c3aa666929
--- /dev/null
+++ b/include/exec/helper-proto.h.inc
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ * Define HELPER_H for the header file to be expanded.
+ */
+
+#include "exec/helper-head.h"
+
+/*
+ * Work around an issue with --enable-lto, in which GCC's ipa-split pass
+ * decides to split out the noreturn code paths that raise an exception,
+ * taking the __builtin_return_address() along into the new function,
+ * where it no longer computes a value that returns to TCG generated code.
+ * Despite the name, the noinline attribute affects the splitter, so this
+ * prevents the optimization in question. Given that helpers should not
+ * otherwise be called directly, this should not have any other visible effect.
+ *
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1454
+ */
+#define DEF_HELPER_ATTR __attribute__((noinline))
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), \
+ dh_ctype(t3)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), \
+ dh_ctype(t6)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
+ dh_ctype(t7)) DEF_HELPER_ATTR;
+
+#define IN_HELPER_PROTO
+
+#include HELPER_H
+
+#undef IN_HELPER_PROTO
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
+#undef DEF_HELPER_ATTR
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
deleted file mode 100644
index 268e0f804b..0000000000
--- a/include/exec/helper-tcg.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Helper file for declaring TCG helper functions.
- This one defines data structures private to tcg.c. */
-
-#ifndef HELPER_TCG_H
-#define HELPER_TCG_H
-
-#include "exec/helper-head.h"
-
-/* Need one more level of indirection before stringification
- to get all the macros expanded first. */
-#define str(s) #s
-
-#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) },
-
-#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) },
-
-#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) },
-
-#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) },
-
-#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) },
-
-#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
- | dh_sizemask(t5, 5) },
-
-#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
- | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) },
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "tcg-runtime.h"
-
-#undef str
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-
-#endif /* HELPER_TCG_H */
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
index a71c93cc81..50fbb2d96c 100644
--- a/include/exec/hwaddr.h
+++ b/include/exec/hwaddr.h
@@ -10,7 +10,7 @@
typedef uint64_t hwaddr;
#define HWADDR_MAX UINT64_MAX
-#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_FMT_plx "%016" PRIx64
#define HWADDR_PRId PRId64
#define HWADDR_PRIi PRIi64
#define HWADDR_PRIo PRIo64
@@ -18,4 +18,9 @@ typedef uint64_t hwaddr;
#define HWADDR_PRIx PRIx64
#define HWADDR_PRIX PRIX64
+typedef struct MemMapEntry {
+ hwaddr base;
+ hwaddr size;
+} MemMapEntry;
+
#endif
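A typical use of the new MemMapEntry type is a board memory map (a sketch; the regions and addresses below are invented):

    /* Sketch: base/size pairs describing an invented board layout. */
    static const MemMapEntry example_memmap[] = {
        { 0x00000000, 0x08000000 },   /* flash */
        { 0x09000000, 0x00001000 },   /* uart  */
        { 0x40000000, 0x80000000 },   /* ram   */
    };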
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
index a298b89ce1..4397f12f93 100644
--- a/include/exec/ioport.h
+++ b/include/exec/ioport.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,6 +24,8 @@
#ifndef IOPORT_H
#define IOPORT_H
+#include "exec/memory.h"
+
#define MAX_IOPORTS (64 * 1024)
#define IOPORTS_MASK (MAX_IOPORTS - 1)
@@ -33,7 +35,6 @@ typedef struct MemoryRegionPortio {
unsigned size;
uint32_t (*read)(void *opaque, uint32_t address);
void (*write)(void *opaque, uint32_t address, uint32_t data);
- uint32_t base; /* private field */
} MemoryRegionPortio;
#define PORTIO_END_OF_LIST() { }
@@ -53,6 +54,7 @@ typedef struct PortioList {
const struct MemoryRegionPortio *ports;
Object *owner;
struct MemoryRegion *address_space;
+ uint32_t addr;
unsigned nr;
struct MemoryRegion **regions;
void *opaque;
@@ -69,5 +71,7 @@ void portio_list_add(PortioList *piolist,
struct MemoryRegion *address_space,
uint32_t addr);
void portio_list_del(PortioList *piolist);
+void portio_list_set_enabled(PortioList *piolist, bool enabled);
+void portio_list_set_address(PortioList *piolist, uint32_t addr);
#endif /* IOPORT_H */
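
The two new PortioList helpers above let a device move or gate its legacy I/O ports at runtime, for example when the guest reprograms a base address. A sketch under the assumption that the PortioList was already set up with portio_list_init()/portio_list_add(); whether to disable the ports around the move is the caller's choice, not something the header mandates:

    /* Hypothetical handler for a guest-visible I/O base register write. */
    static void example_update_io_base(PortioList *piolist,
                                       uint32_t new_base, bool enable)
    {
        /* Quiesce the ports while they are being moved. */
        portio_list_set_enabled(piolist, false);
        /* Re-register the same MemoryRegionPortio entries at the new base. */
        portio_list_set_address(piolist, new_base);
        if (enable) {
            portio_list_set_enabled(piolist, true);
        }
    }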
diff --git a/include/exec/log.h b/include/exec/log.h
index c249307911..4a7375a45f 100644
--- a/include/exec/log.h
+++ b/include/exec/log.h
@@ -2,7 +2,7 @@
#define QEMU_EXEC_LOG_H
#include "qemu/log.h"
-#include "qom/cpu.h"
+#include "hw/core/cpu.h"
#include "disas/disas.h"
/* cpu_dump_state() logging functions: */
@@ -15,8 +15,10 @@
*/
static inline void log_cpu_state(CPUState *cpu, int flags)
{
- if (qemu_log_enabled()) {
- cpu_dump_state(cpu, qemu_logfile, fprintf, flags);
+ FILE *f = qemu_log_trylock();
+ if (f) {
+ cpu_dump_state(cpu, f, flags);
+ qemu_log_unlock(f);
}
}
@@ -35,26 +37,4 @@ static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
}
}
-#ifdef NEED_CPU_H
-/* disas() and target_disas() to qemu_logfile: */
-static inline void log_target_disas(CPUState *cpu, target_ulong start,
- target_ulong len)
-{
- target_disas(qemu_logfile, cpu, start, len);
-}
-
-static inline void log_disas(void *code, unsigned long size)
-{
- disas(qemu_logfile, code, size);
-}
-
-#if defined(CONFIG_USER_ONLY)
-/* page_dump() output to the log file: */
-static inline void log_page_dump(void)
-{
- page_dump(qemu_logfile);
-}
-#endif
-#endif
-
#endif
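
The rewritten log_cpu_state() above illustrates the current logging pattern: instead of writing to the removed qemu_logfile global, callers take the log file with qemu_log_trylock(), write to the returned FILE *, and release it with qemu_log_unlock(). A minimal sketch of the same pattern for an arbitrary message (the event itself is made up):

    #include "qemu/log.h"

    /* Emit a message atomically with respect to other log writers. */
    static void example_log_event(int value)
    {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "example event: value=%d\n", value);
            qemu_log_unlock(f);
        }
    }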
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index d4a1642098..14cdd8d582 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -29,12 +29,27 @@ typedef struct MemTxAttrs {
* "didn't specify" if necessary.
*/
unsigned int unspecified:1;
- /* ARM/AMBA: TrustZone Secure access
+ /*
+ * ARM/AMBA: TrustZone Secure access
* x86: System Management Mode access
*/
unsigned int secure:1;
+ /*
+ * ARM: ArmSecuritySpace. This partially overlaps secure, but it is
+ * easier to have both fields to assist code that does not understand
+ * ARMv9 RME, or has no specific knowledge of ARM at all (e.g. pflash).
+ */
+ unsigned int space:2;
/* Memory access is usermode (unprivileged) */
unsigned int user:1;
+ /*
+ * Bus interconnect and peripherals can access anything (memories,
+ * devices) by default. By setting the 'memory' bit, bus transactions
+ * are restricted to "normal" memories (per the AMBA documentation)
+ * versus devices. Access to devices will be logged and rejected
+ * (see MEMTX_ACCESS_ERROR).
+ */
+ unsigned int memory:1;
/* Requester ID (for MSI for example) */
unsigned int requester_id:16;
} MemTxAttrs;
@@ -54,6 +69,7 @@ typedef struct MemTxAttrs {
#define MEMTX_OK 0
#define MEMTX_ERROR (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
+#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */
typedef uint32_t MemTxResult;
#endif
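
The new 'memory' attribute pairs with the new MEMTX_ACCESS_ERROR result: an initiator sets the bit to restrict its transactions to normal memory and then checks the returned MemTxResult. A sketch, assuming QEMU's generic address_space_read() helper; the wrapper itself is hypothetical:

    /* Hypothetical bus master that must not reach device MMIO. */
    static bool example_dma_read(AddressSpace *as, hwaddr addr,
                                 void *buf, hwaddr len)
    {
        MemTxAttrs attrs = { .memory = 1 };  /* normal memory only */
        MemTxResult res = address_space_read(as, addr, attrs, buf, len);

        if (res & MEMTX_ACCESS_ERROR) {
            /* The target was a device: the access was logged and rejected. */
            return false;
        }
        return res == MEMTX_OK;
    }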
diff --git a/include/exec/memop.h b/include/exec/memop.h
new file mode 100644
index 0000000000..a86dc6743a
--- /dev/null
+++ b/include/exec/memop.h
@@ -0,0 +1,173 @@
+/*
+ * Constants for memory operations
+ *
+ * Authors:
+ * Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMOP_H
+#define MEMOP_H
+
+#include "qemu/host-utils.h"
+
+typedef enum MemOp {
+ MO_8 = 0,
+ MO_16 = 1,
+ MO_32 = 2,
+ MO_64 = 3,
+ MO_128 = 4,
+ MO_256 = 5,
+ MO_512 = 6,
+ MO_1024 = 7,
+ MO_SIZE = 0x07, /* Mask for the above. */
+
+ MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */
+
+ MO_BSWAP = 0x10, /* Host reverse endian. */
+#if HOST_BIG_ENDIAN
+ MO_LE = MO_BSWAP,
+ MO_BE = 0,
+#else
+ MO_LE = 0,
+ MO_BE = MO_BSWAP,
+#endif
+#ifdef NEED_CPU_H
+#if TARGET_BIG_ENDIAN
+ MO_TE = MO_BE,
+#else
+ MO_TE = MO_LE,
+#endif
+#endif
+
+ /*
+ * MO_UNALN accesses are never checked for alignment.
+ * MO_ALIGN accesses will result in a call to the CPU's
+ * do_unaligned_access hook if the guest address is not aligned.
+ *
+ * Some architectures (e.g. ARMv8) need an address that is aligned
+ * to a size greater than the size of the memory access.
+ * Some architectures (e.g. SPARCv9) need an address which is aligned,
+ * but less strictly than the natural alignment.
+ *
+ * MO_ALIGN supposes the alignment size is the size of a memory access.
+ *
+ * There are three options:
+ * - unaligned access permitted (MO_UNALN);
+ * - an alignment to the size of an access (MO_ALIGN);
+ * - an alignment to a specified size, which may be more or less than
+ * the access size (MO_ALIGN_x where 'x' is a size in bytes).
+ */
+ MO_ASHIFT = 5,
+ MO_AMASK = 0x7 << MO_ASHIFT,
+ MO_UNALN = 0,
+ MO_ALIGN_2 = 1 << MO_ASHIFT,
+ MO_ALIGN_4 = 2 << MO_ASHIFT,
+ MO_ALIGN_8 = 3 << MO_ASHIFT,
+ MO_ALIGN_16 = 4 << MO_ASHIFT,
+ MO_ALIGN_32 = 5 << MO_ASHIFT,
+ MO_ALIGN_64 = 6 << MO_ASHIFT,
+ MO_ALIGN = MO_AMASK,
+
+ /*
+ * MO_ATOM_* describes the atomicity requirements of the operation:
+ * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
+ * is aligned; if unaligned there is no atomicity.
+ * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to
+ * be a pair of half-sized operations which are packed together
+ * for convenience, with single-copy atomicity on each half if
+ * the half is aligned.
+ * This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP.
+ * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it
+ * is unaligned, so long as it does not cross a 16-byte boundary;
+ * if it crosses a 16-byte boundary there is no atomicity.
+ * This is the atomicity e.g. of Arm FEAT_LSE2 LDR.
+ * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic,
+ * if it happens to be within a 16-byte boundary, otherwise it
+ * devolves to a pair of half-sized MO_ATOM_WITHIN16 operations.
+ * Depending on alignment, one or both will be single-copy atomic.
+ * This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
+ * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts
+ * by the alignment. E.g. if the address is 0 mod 4, then each
+ * 4-byte subobject is single-copy atomic.
+ * This is the atomicity e.g. of IBM Power.
+ * MO_ATOM_NONE: the operation has no atomicity requirements.
+ *
+ * Note the default (i.e. 0) value is single-copy atomic to the
+ * size of the operation, if aligned. This retains the behaviour
+ * from before this field was introduced.
+ */
+ MO_ATOM_SHIFT = 8,
+ MO_ATOM_IFALIGN = 0 << MO_ATOM_SHIFT,
+ MO_ATOM_IFALIGN_PAIR = 1 << MO_ATOM_SHIFT,
+ MO_ATOM_WITHIN16 = 2 << MO_ATOM_SHIFT,
+ MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT,
+ MO_ATOM_SUBALIGN = 4 << MO_ATOM_SHIFT,
+ MO_ATOM_NONE = 5 << MO_ATOM_SHIFT,
+ MO_ATOM_MASK = 7 << MO_ATOM_SHIFT,
+
+ /* Combinations of the above, for ease of use. */
+ MO_UB = MO_8,
+ MO_UW = MO_16,
+ MO_UL = MO_32,
+ MO_UQ = MO_64,
+ MO_UO = MO_128,
+ MO_SB = MO_SIGN | MO_8,
+ MO_SW = MO_SIGN | MO_16,
+ MO_SL = MO_SIGN | MO_32,
+ MO_SQ = MO_SIGN | MO_64,
+ MO_SO = MO_SIGN | MO_128,
+
+ MO_LEUW = MO_LE | MO_UW,
+ MO_LEUL = MO_LE | MO_UL,
+ MO_LEUQ = MO_LE | MO_UQ,
+ MO_LESW = MO_LE | MO_SW,
+ MO_LESL = MO_LE | MO_SL,
+ MO_LESQ = MO_LE | MO_SQ,
+
+ MO_BEUW = MO_BE | MO_UW,
+ MO_BEUL = MO_BE | MO_UL,
+ MO_BEUQ = MO_BE | MO_UQ,
+ MO_BESW = MO_BE | MO_SW,
+ MO_BESL = MO_BE | MO_SL,
+ MO_BESQ = MO_BE | MO_SQ,
+
+#ifdef NEED_CPU_H
+ MO_TEUW = MO_TE | MO_UW,
+ MO_TEUL = MO_TE | MO_UL,
+ MO_TEUQ = MO_TE | MO_UQ,
+ MO_TEUO = MO_TE | MO_UO,
+ MO_TESW = MO_TE | MO_SW,
+ MO_TESL = MO_TE | MO_SL,
+ MO_TESQ = MO_TE | MO_SQ,
+#endif
+
+ MO_SSIZE = MO_SIZE | MO_SIGN,
+} MemOp;
+
+/* MemOp to size in bytes. */
+static inline unsigned memop_size(MemOp op)
+{
+ return 1 << (op & MO_SIZE);
+}
+
+/* Size in bytes to MemOp. */
+static inline MemOp size_memop(unsigned size)
+{
+#ifdef CONFIG_DEBUG_TCG
+ /* Power of 2 up to 8. */
+ assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
+#endif
+ return ctz32(size);
+}
+
+/* Big endianness from MemOp. */
+static inline bool memop_big_endian(MemOp op)
+{
+ return (op & MO_BSWAP) == MO_BE;
+}
+
+#endif
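
A MemOp is built by OR-ing a size with optional sign, endianness, alignment and atomicity bits, and the helpers at the end of the header decode it again. A small sketch using only definitions from this file:

    #include "exec/memop.h"

    /* A 4-byte little-endian load, naturally aligned, single-copy atomic
     * when aligned (MO_ATOM_IFALIGN is the default, spelled out here). */
    static void example_memop(void)
    {
        MemOp op = MO_LEUL | MO_ALIGN | MO_ATOM_IFALIGN;

        unsigned bytes = memop_size(op);      /* 4 */
        bool big = memop_big_endian(op);      /* false */
        MemOp align = op & MO_AMASK;          /* MO_ALIGN */
        MemOp atom  = op & MO_ATOM_MASK;      /* MO_ATOM_IFALIGN */

        (void)bytes; (void)big; (void)align; (void)atom;
    }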
diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h
new file mode 100644
index 0000000000..eb7f1591a3
--- /dev/null
+++ b/include/exec/memopidx.h
@@ -0,0 +1,55 @@
+/*
+ * Combine the MemOp and mmu_idx parameters into a single value.
+ *
+ * Authors:
+ * Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_MEMOPIDX_H
+#define EXEC_MEMOPIDX_H
+
+#include "exec/memop.h"
+
+typedef uint32_t MemOpIdx;
+
+/**
+ * make_memop_idx
+ * @op: memory operation
+ * @idx: mmu index
+ *
+ * Encode these values into a single parameter.
+ */
+static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
+{
+#ifdef CONFIG_DEBUG_TCG
+ assert(idx <= 15);
+#endif
+ return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline MemOp get_memop(MemOpIdx oi)
+{
+ return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(MemOpIdx oi)
+{
+ return oi & 15;
+}
+
+#endif
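
The encoding packs the MemOp above a 4-bit mmu index, so a value round-trips through the accessors. A sketch using only definitions from this file:

    #include "exec/memopidx.h"

    /* Encode a big-endian 16-bit load for mmu index 2, then decode it. */
    static void example_memopidx(void)
    {
        MemOpIdx oi = make_memop_idx(MO_BEUW, 2);

        MemOp op = get_memop(oi);        /* MO_BEUW */
        unsigned idx = get_mmuidx(oi);   /* 2 */

        (void)op; (void)idx;
    }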
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index bb08fa4d2f..100c1237ac 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -20,6 +20,8 @@
#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H
+#include "cpu.h"
+
#ifndef CONFIG_USER_ONLY
static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
@@ -36,82 +38,12 @@ void flatview_unref(FlatView *view);
extern const MemoryRegionOps unassigned_mem_ops;
-bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs);
-
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
void address_space_dispatch_compact(AddressSpaceDispatch *d);
void address_space_dispatch_free(AddressSpaceDispatch *d);
-void mtree_print_dispatch(fprintf_function mon, void *f,
- struct AddressSpaceDispatch *d,
+void mtree_print_dispatch(struct AddressSpaceDispatch *d,
MemoryRegion *root);
-
-struct page_collection;
-
-/* Opaque struct for passing info from memory_notdirty_write_prepare()
- * to memory_notdirty_write_complete(). Callers should treat all fields
- * as private, with the exception of @active.
- *
- * @active is a field which is not touched by either the prepare or
- * complete functions, but which the caller can use if it wishes to
- * track whether it has called prepare for this struct and so needs
- * to later call the complete function.
- */
-typedef struct {
- CPUState *cpu;
- struct page_collection *pages;
- ram_addr_t ram_addr;
- vaddr mem_vaddr;
- unsigned size;
- bool active;
-} NotDirtyInfo;
-
-/**
- * memory_notdirty_write_prepare: call before writing to non-dirty memory
- * @ndi: pointer to opaque NotDirtyInfo struct
- * @cpu: CPU doing the write
- * @mem_vaddr: virtual address of write
- * @ram_addr: the ram address of the write
- * @size: size of write in bytes
- *
- * Any code which writes to the host memory corresponding to
- * guest RAM which has been marked as NOTDIRTY must wrap those
- * writes in calls to memory_notdirty_write_prepare() and
- * memory_notdirty_write_complete():
- *
- * NotDirtyInfo ndi;
- * memory_notdirty_write_prepare(&ndi, ....);
- * ... perform write here ...
- * memory_notdirty_write_complete(&ndi);
- *
- * These calls will ensure that we flush any TCG translated code for
- * the memory being written, update the dirty bits and (if possible)
- * remove the slowpath callback for writing to the memory.
- *
- * This must only be called if we are using TCG; it will assert otherwise.
- *
- * We may take locks in the prepare call, so callers must ensure that
- * they don't exit (via longjump or otherwise) without calling complete.
- *
- * This call must only be made inside an RCU critical section.
- * (Note that while we're executing a TCG TB we're always in an
- * RCU critical section, which is likely to be the case for callers
- * of these functions.)
- */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
- CPUState *cpu,
- vaddr mem_vaddr,
- ram_addr_t ram_addr,
- unsigned size);
-/**
- * memory_notdirty_write_complete: finish write to non-dirty memory
- * @ndi: pointer to the opaque NotDirtyInfo struct which was initialized
- * by memory_not_dirty_write_prepare().
- */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi);
-
#endif
#endif
diff --git a/include/exec/memory.h b/include/exec/memory.h
index cd2f209b64..dbb1bad72f 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -19,39 +19,93 @@
#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
+#include "exec/memop.h"
#include "exec/ramlist.h"
+#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
+#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
-#include "hw/qdev-core.h"
#define RAM_ADDR_INVALID (~(ram_addr_t)0)
#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
-#define TYPE_MEMORY_REGION "qemu:memory-region"
-#define MEMORY_REGION(obj) \
- OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
+#define TYPE_MEMORY_REGION "memory-region"
+DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
+ TYPE_MEMORY_REGION)
+
+#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
+typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
+DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
+ IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
+
+#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
+typedef struct RamDiscardManagerClass RamDiscardManagerClass;
+typedef struct RamDiscardManager RamDiscardManager;
+DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
+ RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
+
+#ifdef CONFIG_FUZZ
+void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr);
+#else
+static inline void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr)
+{
+ /* Do Nothing */
+}
+#endif
+
+/* Possible bits for global_dirty_log_{start|stop} */
+
+/* Dirty tracking enabled because migration is running */
+#define GLOBAL_DIRTY_MIGRATION (1U << 0)
+
+/* Dirty tracking enabled because measuring dirty rate */
+#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
+
+/* Dirty tracking enabled because dirty limit */
+#define GLOBAL_DIRTY_LIMIT (1U << 2)
+
+#define GLOBAL_DIRTY_MASK (0x7)
-#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
-#define IOMMU_MEMORY_REGION(obj) \
- OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
-#define IOMMU_MEMORY_REGION_CLASS(klass) \
- OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
- TYPE_IOMMU_MEMORY_REGION)
-#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
- TYPE_IOMMU_MEMORY_REGION)
+extern unsigned int global_dirty_tracking;
typedef struct MemoryRegionOps MemoryRegionOps;
-typedef struct MemoryRegionMmio MemoryRegionMmio;
-struct MemoryRegionMmio {
- CPUReadMemoryFunc *read[3];
- CPUWriteMemoryFunc *write[3];
+struct ReservedRegion {
+ Range range;
+ unsigned type;
+};
+
+/**
+ * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @fv: the flat view of the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ * relative to the region's address space
+ * @readonly: writes to this section are ignored
+ * @nonvolatile: this section is non-volatile
+ * @unmergeable: this section should not get merged with adjacent sections
+ */
+struct MemoryRegionSection {
+ Int128 size;
+ MemoryRegion *mr;
+ FlatView *fv;
+ hwaddr offset_within_region;
+ hwaddr offset_within_address_space;
+ bool readonly;
+ bool nonvolatile;
+ bool unmergeable;
};
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
@@ -77,6 +131,32 @@ struct IOMMUTLBEntry {
/*
* Bitmap for different IOMMUNotifier capabilities. Each notifier can
* register with one or multiple IOMMU Notifier capability bit(s).
+ *
+ * Normally there are two use cases for the notifiers:
+ *
+ * (1) When the device needs accurate synchronization of the vIOMMU page
+ * tables, it needs to register for both MAP and UNMAP notifications
+ * (which are defined together as IOMMU_NOTIFIER_IOTLB_EVENTS below).
+ *
+ * Regarding accurate synchronization, it's when the notified
+ * device maintains a shadow page table and must be notified on each
+ * guest MAP (page table entry creation) and UNMAP (invalidation)
+ * events (e.g. VFIO). Both notifications must be accurate so that
+ * the shadow page table is fully in sync with the guest view.
+ *
+ * (2) When the device doesn't need accurate synchronization of the
+ * vIOMMU page tables, it needs to register only for UNMAP or
+ * DEVIOTLB_UNMAP notifications.
+ *
+ * It's when the device maintains a cache of IOMMU translations
+ * (IOTLB) and is able to fill that cache by requesting translations
+ * from the vIOMMU through a protocol similar to ATS (Address
+ * Translation Service).
+ *
+ * Note that in this mode the vIOMMU will not maintain a shadowed
+ * page table for the address space, and the UNMAP messages can cover
+ * more than the pages that used to get mapped. The IOMMU notifiee
+ * should be able to take care of over-sized invalidations.
*/
typedef enum {
IOMMU_NOTIFIER_NONE = 0,
@@ -84,9 +164,14 @@ typedef enum {
IOMMU_NOTIFIER_UNMAP = 0x1,
/* Notify entry changes (newly created entries) */
IOMMU_NOTIFIER_MAP = 0x2,
+ /* Notify changes on device IOTLB entries */
+ IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;
-#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
+#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
+ IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
@@ -103,6 +188,11 @@ struct IOMMUNotifier {
};
typedef struct IOMMUNotifier IOMMUNotifier;
+typedef struct IOMMUTLBEvent {
+ IOMMUNotifierFlag type;
+ IOMMUTLBEntry entry;
+} IOMMUTLBEvent;
+
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)
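
For use case (1) described above, a device that shadows the vIOMMU page tables registers a notifier for both MAP and UNMAP events over the IOVA window it cares about. A sketch; the handler body is left empty, and the trailing iommu_idx argument of iommu_notifier_init() plus memory_region_register_iommu_notifier() are assumed from QEMU's memory API rather than shown in this hunk:

    /* Hypothetical shadow-page-table maintainer (e.g. a VFIO-like backend). */
    static void example_iotlb_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
    {
        /* Update the shadow mapping from entry->iova/translated_addr/perm. */
    }

    static bool example_register_shadow(IOMMUMemoryRegion *iommu_mr,
                                        IOMMUNotifier *n, Error **errp)
    {
        iommu_notifier_init(n, example_iotlb_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            0, HWADDR_MAX, 0 /* iommu_idx */);
        return memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
                                                     n, errp) == 0;
    }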
@@ -110,7 +200,7 @@ typedef struct IOMMUNotifier IOMMUNotifier;
#define RAM_SHARED (1 << 1)
/* Only a portion of RAM (used_length) is actually used, and migrated.
- * This used_length size can change across reboots.
+ * Resizing RAM while migrating can result in the migration being canceled.
*/
#define RAM_RESIZEABLE (1 << 2)
@@ -126,6 +216,36 @@ typedef struct IOMMUNotifier IOMMUNotifier;
/* RAM is a persistent kind memory */
#define RAM_PMEM (1 << 5)
+
+/*
+ * UFFDIO_WRITEPROTECT is used on this RAMBlock to
+ * support 'write-tracking' migration type.
+ * Implies ram_state->ram_wt_enabled.
+ */
+#define RAM_UF_WRITEPROTECT (1 << 6)
+
+/*
+ * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
+ * pages if applicable) is skipped: will bail out if not supported. When not
+ * set, the OS will do the reservation, if supported for the memory type.
+ */
+#define RAM_NORESERVE (1 << 7)
+
+/* RAM that isn't accessible through normal means. */
+#define RAM_PROTECTED (1 << 8)
+
+/* RAM is an mmap-ed named file */
+#define RAM_NAMED_FILE (1 << 9)
+
+/* RAM is mmap-ed read-only */
+#define RAM_READONLY (1 << 10)
+
+/* RAM FD is opened read-only */
+#define RAM_READONLY_FD (1 << 11)
+
+/* RAM can be private that has kvm guest memfd backend */
+#define RAM_GUEST_MEMFD (1 << 12)
+
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
IOMMUNotifierFlag flags,
hwaddr start, hwaddr end,
@@ -203,11 +323,17 @@ struct MemoryRegionOps {
} impl;
};
+typedef struct MemoryRegionClass {
+ /* private */
+ ObjectClass parent_class;
+} MemoryRegionClass;
+
+
enum IOMMUMemoryRegionAttr {
IOMMU_ATTR_SPAPR_TCE_FD
};
-/**
+/*
* IOMMUMemoryRegionClass:
*
* All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
@@ -217,15 +343,18 @@ enum IOMMUMemoryRegionAttr {
* The IOMMU implementation must use the IOMMU notifier infrastructure
* to report whenever mappings are changed, by calling
* memory_region_notify_iommu() (or, if necessary, by calling
- * memory_region_notify_one() for each registered notifier).
+ * memory_region_notify_iommu_one() for each registered notifier).
*
* Conceptually an IOMMU provides a mapping from input address
* to an output TLB entry. If the IOMMU is aware of memory transaction
* attributes and the output TLB entry depends on the transaction
* attributes, we represent this using IOMMU indexes. Each index
* selects a particular translation table that the IOMMU has:
+ *
* @attrs_to_index returns the IOMMU index for a set of transaction attributes
+ *
* @translate takes an input address and an IOMMU index
+ *
* and the mapping returned can only depend on the input address and the
* IOMMU index.
*
@@ -233,11 +362,14 @@ enum IOMMUMemoryRegionAttr {
* only a single IOMMU index. A more complex IOMMU might have one index
* for secure transactions and one for non-secure transactions.
*/
-typedef struct IOMMUMemoryRegionClass {
- /* private */
- struct DeviceClass parent_class;
+struct IOMMUMemoryRegionClass {
+ /* private: */
+ MemoryRegionClass parent_class;
- /*
+ /* public: */
+ /**
+ * @translate:
+ *
* Return a TLB entry that contains a given address.
*
* The IOMMUAccessFlags indicated via @flag are optional and may
@@ -258,32 +390,53 @@ typedef struct IOMMUMemoryRegionClass {
* information when the IOMMU mapping changes.
*
* @iommu: the IOMMUMemoryRegion
+ *
* @hwaddr: address to be translated within the memory region
- * @flag: requested access permissions
+ *
+ * @flag: requested access permission
+ *
* @iommu_idx: IOMMU index for the translation
*/
IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
IOMMUAccessFlags flag, int iommu_idx);
- /* Returns minimum supported page size in bytes.
+ /**
+ * @get_min_page_size:
+ *
+ * Returns minimum supported page size in bytes.
+ *
* If this method is not provided then the minimum is assumed to
* be TARGET_PAGE_SIZE.
*
* @iommu: the IOMMUMemoryRegion
*/
uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
- /* Called when IOMMU Notifier flag changes (ie when the set of
+ /**
+ * @notify_flag_changed:
+ *
+ * Called when IOMMU Notifier flag changes (ie when the set of
* events which IOMMU users are requesting notification for changes).
* Optional method -- need not be provided if the IOMMU does not
* need to know exactly which events must be notified.
*
* @iommu: the IOMMUMemoryRegion
+ *
* @old_flags: events which previously needed to be notified
+ *
* @new_flags: events which now need to be notified
+ *
+ * Returns 0 on success, or a negative errno; in particular
+ * returns -EINVAL if the new flag bitmap is not supported by the
+ * IOMMU memory region. In case of failure, the error object
+ * must be created.
*/
- void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
- IOMMUNotifierFlag old_flags,
- IOMMUNotifierFlag new_flags);
- /* Called to handle memory_region_iommu_replay().
+ int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
+ IOMMUNotifierFlag old_flags,
+ IOMMUNotifierFlag new_flags,
+ Error **errp);
+ /**
+ * @replay:
+ *
+ * Called to handle memory_region_iommu_replay().
*
* The default implementation of memory_region_iommu_replay() is to
* call the IOMMU translate method for every page in the address space
@@ -300,7 +453,10 @@ typedef struct IOMMUMemoryRegionClass {
*/
void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
- /* Get IOMMU misc attributes. This is an optional method that
+ /**
+ * @get_attr:
+ *
+ * Get IOMMU misc attributes. This is an optional method that
* can be used to allow users of the IOMMU to get implementation-specific
* information. The IOMMU implements this method to handle calls
* by IOMMU users to memory_region_iommu_get_attr() by filling in
@@ -309,7 +465,9 @@ typedef struct IOMMUMemoryRegionClass {
* memory_region_iommu_get_attr() will always return -EINVAL.
*
* @iommu: the IOMMUMemoryRegion
+ *
* @attr: attribute being queried
+ *
* @data: memory to fill in with the attribute data
*
* Returns 0 on success, or a negative errno; in particular
@@ -318,7 +476,10 @@ typedef struct IOMMUMemoryRegionClass {
int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
void *data);
- /* Return the IOMMU index to use for a given set of transaction attributes.
+ /**
+ * @attrs_to_index:
+ *
+ * Return the IOMMU index to use for a given set of transaction attributes.
*
* Optional method: if an IOMMU only supports a single IOMMU index then
* the default implementation of memory_region_iommu_attrs_to_index()
@@ -331,7 +492,10 @@ typedef struct IOMMUMemoryRegionClass {
*/
int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
- /* Return the number of IOMMU indexes this IOMMU supports.
+ /**
+ * @num_indexes:
+ *
+ * Return the number of IOMMU indexes this IOMMU supports.
*
* Optional method: if this method is not provided, then
* memory_region_iommu_num_indexes() will return 1, indicating that
@@ -340,15 +504,291 @@ typedef struct IOMMUMemoryRegionClass {
* @iommu: the IOMMUMemoryRegion
*/
int (*num_indexes)(IOMMUMemoryRegion *iommu);
-} IOMMUMemoryRegionClass;
+
+ /**
+ * @iommu_set_page_size_mask:
+ *
+ * Restrict the page size mask that can be supported with a given IOMMU
+ * memory region. Used for example to propagate host physical IOMMU page
+ * size mask limitations to the virtual IOMMU.
+ *
+ * Optional method: if this method is not provided, then the default global
+ * page mask is used.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @page_size_mask: a bitmask of supported page sizes. At least one bit,
+ * representing the smallest page size, must be set. Additional set bits
+ * represent supported block sizes. For example a host physical IOMMU that
+ * uses page tables with a page size of 4kB, and supports 2MB and 1GB
+ * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
+ * block sizes is specified with mask 0xfffffffffffff000.
+ *
+ * Returns 0 on success, or a negative error. In case of failure, the error
+ * object must be created.
+ */
+ int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
+ uint64_t page_size_mask,
+ Error **errp);
+ /**
+ * @iommu_set_iova_ranges:
+ *
+ * Propagate information about the usable IOVA ranges for a given IOMMU
+ * memory region. Used for example to propagate host physical device
+ * reserved memory region constraints to the virtual IOMMU.
+ *
+ * Optional method: if this method is not provided, then the default IOVA
+ * aperture is used.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @iova_ranges: list of ordered IOVA ranges (at least one range)
+ *
+ * Returns 0 on success, or a negative error. In case of failure, the error
+ * object must be created.
+ */
+ int (*iommu_set_iova_ranges)(IOMMUMemoryRegion *iommu,
+ GList *iova_ranges,
+ Error **errp);
+};
+
+typedef struct RamDiscardListener RamDiscardListener;
+typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+struct RamDiscardListener {
+ /*
+ * @notify_populate:
+ *
+ * Notification that previously discarded memory is about to get populated.
+ * Listeners are able to object. If any listener objects, already
+ * successfully notified listeners are notified about a discard again.
+ *
+ * @rdl: the #RamDiscardListener getting notified
+ * @section: the #MemoryRegionSection to get populated. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ *
+ * Returns 0 on success. If the notification is rejected by the listener,
+ * an error is returned.
+ */
+ NotifyRamPopulate notify_populate;
+
+ /*
+ * @notify_discard:
+ *
+ * Notification that previously populated memory was discarded successfully
+ * and listeners should drop all references to such memory and prevent
+ * new population (e.g., unmap).
+ *
+ * @rdl: the #RamDiscardListener getting notified
+ * @section: the #MemoryRegionSection that was discarded. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ */
+ NotifyRamDiscard notify_discard;
+
+ /*
+ * @double_discard_supported:
+ *
+ * The listener supports getting @notify_discard notifications that span
+ * already discarded parts.
+ */
+ bool double_discard_supported;
+
+ MemoryRegionSection *section;
+ QLIST_ENTRY(RamDiscardListener) next;
+};
+
+static inline void ram_discard_listener_init(RamDiscardListener *rdl,
+ NotifyRamPopulate populate_fn,
+ NotifyRamDiscard discard_fn,
+ bool double_discard_supported)
+{
+ rdl->notify_populate = populate_fn;
+ rdl->notify_discard = discard_fn;
+ rdl->double_discard_supported = double_discard_supported;
+}
+
+typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
+typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
+
+/*
+ * RamDiscardManagerClass:
+ *
+ * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
+ * regions are currently populated to be used/accessed by the VM, notifying
+ * after parts were discarded (freeing up memory) and before parts will be
+ * populated (consuming memory), to be used/accessed by the VM.
+ *
+ * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
+ * #MemoryRegion isn't mapped into an address space yet (either directly
+ * or via an alias); it cannot change while the #MemoryRegion is
+ * mapped into an address space.
+ *
+ * The #RamDiscardManager is intended to be used by technologies that are
+ * incompatible with discarding of RAM (e.g., VFIO, which may pin all
+ * memory inside a #MemoryRegion), and require proper coordination to only
+ * map the currently populated parts, to hinder parts that are expected to
+ * remain discarded from silently getting populated and consuming memory.
+ * Technologies that support discarding of RAM don't have to bother and can
+ * simply map the whole #MemoryRegion.
+ *
+ * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
+ * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
+ * Logically unplugging memory consists of discarding RAM. The VM agreed to not
+ * access unplugged (discarded) memory - especially via DMA. virtio-mem will
+ * properly coordinate with listeners before memory is plugged (populated),
+ * and after memory is unplugged (discarded).
+ *
+ * Listeners are called in multiples of the minimum granularity (unless it
+ * would exceed the registered range) and changes are aligned to the minimum
+ * granularity within the #MemoryRegion. Listeners have to prepare for memory
+ * becoming discarded in a different granularity than it was populated and the
+ * other way around.
+ */
+struct RamDiscardManagerClass {
+ /* private */
+ InterfaceClass parent_class;
+
+ /* public */
+
+ /**
+ * @get_min_granularity:
+ *
+ * Get the minimum granularity in which listeners will get notified
+ * about changes within the #MemoryRegion via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @mr: the #MemoryRegion
+ *
+ * Returns the minimum granularity.
+ */
+ uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+ /**
+ * @is_populated:
+ *
+ * Check whether the given #MemoryRegionSection is completely populated
+ * (i.e., no parts are currently discarded) via the #RamDiscardManager.
+ * There are no alignment requirements.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ *
+ * Returns whether the given range is completely populated.
+ */
+ bool (*is_populated)(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+ /**
+ * @replay_populated:
+ *
+ * Call the #ReplayRamPopulate callback for all populated parts within the
+ * #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any call fails, no further calls are made.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamPopulate callback
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if any notification failed.
+ */
+ int (*replay_populated)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamPopulate replay_fn, void *opaque);
+
+ /**
+ * @replay_discarded:
+ *
+ * Call the #ReplayRamDiscard callback for all discarded parts within the
+ * #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscard callback
+ * @opaque: pointer to forward to the callback
+ */
+ void (*replay_discarded)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn, void *opaque);
+
+ /**
+ * @register_listener:
+ *
+ * Register a #RamDiscardListener for the given #MemoryRegionSection and
+ * immediately notify the #RamDiscardListener about all populated parts
+ * within the #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any notification fails, no further notifications are triggered
+ * and an error is logged.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ * @section: the #MemoryRegionSection
+ */
+ void (*register_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+ /**
+ * @unregister_listener:
+ *
+ * Unregister a previously registered #RamDiscardListener via the
+ * #RamDiscardManager after notifying the #RamDiscardListener about all
+ * populated parts becoming unpopulated within the registered
+ * #MemoryRegionSection.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ */
+ void (*unregister_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
+};
+
+uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamPopulate replay_fn,
+ void *opaque);
+
+void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn,
+ void *opaque);
+
+void ram_discard_manager_register_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
+
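
A consumer that cannot cope with discarded RAM (the VFIO-like case described above) hooks into the manager by filling a RamDiscardListener and registering it for the section it maps. A sketch; memory_region_get_ram_discard_manager() is assumed from elsewhere in this header, and the callbacks are placeholders:

    /* Hypothetical backend that pins populated parts of a RAM section. */
    static int example_populate(RamDiscardListener *rdl,
                                MemoryRegionSection *section)
    {
        /* Map/pin the newly populated range; return -errno to object. */
        return 0;
    }

    static void example_discard(RamDiscardListener *rdl,
                                MemoryRegionSection *section)
    {
        /* Unmap/unpin the discarded range. */
    }

    static void example_attach(RamDiscardListener *rdl,
                               MemoryRegionSection *section)
    {
        RamDiscardManager *rdm =
            memory_region_get_ram_discard_manager(section->mr);

        ram_discard_listener_init(rdl, example_populate, example_discard,
                                  false /* double_discard_supported */);
        ram_discard_manager_register_listener(rdm, rdl, section);
    }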
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+ ram_addr_t *ram_addr, bool *read_only,
+ bool *mr_has_discard_manager);
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+/** MemoryRegion:
+ *
+ * A struct representing a memory region.
+ */
struct MemoryRegion {
Object parent_obj;
- /* All fields are private - violators will be prosecuted */
+ /* private: */
/* The following fields should fit in a cache line */
bool romd_mode;
@@ -358,15 +798,18 @@ struct MemoryRegion {
bool nonvolatile;
bool rom_device;
bool flush_coalesced_mmio;
- bool global_locking;
+ bool unmergeable;
uint8_t dirty_log_mask;
bool is_iommu;
RAMBlock *ram_block;
Object *owner;
+ /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
+ DeviceState *dev;
const MemoryRegionOps *ops;
void *opaque;
MemoryRegion *container;
+ int mapped_via_alias; /* Mapped via an alias, container might be NULL */
Int128 size;
hwaddr addr;
void (*destructor)(MemoryRegion *mr);
@@ -385,6 +828,10 @@ struct MemoryRegion {
const char *name;
unsigned ioeventfd_nb;
MemoryRegionIoeventfd *ioeventfds;
+ RamDiscardManager *rdm; /* Only for RAM */
+
+ /* For devices designed to perform re-entrant IO into their own IO MRs */
+ bool disable_reentrancy_guard;
};
struct IOMMUMemoryRegion {
@@ -397,45 +844,279 @@ struct IOMMUMemoryRegion {
#define IOMMU_NOTIFIER_FOREACH(n, mr) \
QLIST_FOREACH((n), &(mr)->iommu_notify, node)
+#define MEMORY_LISTENER_PRIORITY_MIN 0
+#define MEMORY_LISTENER_PRIORITY_ACCEL 10
+#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10
+
/**
- * MemoryListener: callbacks structure for updates to the physical memory map
+ * struct MemoryListener: callbacks structure for updates to the physical memory map
*
* Allows a component to adjust to changes in the guest-visible memory map.
* Use with memory_listener_register() and memory_listener_unregister().
*/
struct MemoryListener {
+ /**
+ * @begin:
+ *
+ * Called at the beginning of an address space update transaction.
+ * Followed by calls to #MemoryListener.region_add(),
+ * #MemoryListener.region_del(), #MemoryListener.region_nop(),
+ * #MemoryListener.log_start() and #MemoryListener.log_stop() in
+ * increasing address order.
+ *
+ * @listener: The #MemoryListener.
+ */
void (*begin)(MemoryListener *listener);
+
+ /**
+ * @commit:
+ *
+ * Called at the end of an address space update transaction,
+ * after the last call to #MemoryListener.region_add(),
+ * #MemoryListener.region_del() or #MemoryListener.region_nop(),
+ * #MemoryListener.log_start() and #MemoryListener.log_stop().
+ *
+ * @listener: The #MemoryListener.
+ */
void (*commit)(MemoryListener *listener);
+
+ /**
+ * @region_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that is new in this address space
+ * since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ */
void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @region_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has disappeared in the address
+ * space since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The old #MemoryRegionSection.
+ */
void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @region_nop:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that is in the same place in the address
+ * space as in the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_start:
+ *
+ * Called during an address space update transaction, after
+ * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
+ * #MemoryListener.region_nop(), if dirty memory logging clients have
+ * become active since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ * @old: A bitmap of dirty memory logging clients that were active in
+ * the previous transaction.
+ * @new: A bitmap of dirty memory logging clients that are active in
+ * the current transaction.
+ */
void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
int old, int new);
+
+ /**
+ * @log_stop:
+ *
+ * Called during an address space update transaction, after
+ * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
+ * #MemoryListener.region_nop() and possibly after
+ * #MemoryListener.log_start(), if dirty memory logging clients have
+ * become inactive since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ * @old: A bitmap of dirty memory logging clients that were active in
+ * the previous transaction.
+ * @new: A bitmap of dirty memory logging clients that are active in
+ * the current transaction.
+ */
void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
int old, int new);
+
+ /**
+ * @log_sync:
+ *
+ * Called by memory_region_snapshot_and_clear_dirty() and
+ * memory_global_dirty_log_sync(), before accessing QEMU's "official"
+ * copy of the dirty memory bitmap for a #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_global_start)(MemoryListener *listener);
+
+ /**
+ * @log_sync_global:
+ *
+ * This is the global version of @log_sync when the listener does
+ * not have a way to synchronize the log with finer granularity.
+ * When the listener registers with @log_sync_global defined, its
+ * @log_sync must be NULL, and vice versa.
+ *
+ * @listener: The #MemoryListener.
+ * @last_stage: The last stage to synchronize the log during migration.
+ * The caller should guarantee that the synchronization with @last_stage
+ * set to true is triggered only once, after all VCPUs have been stopped.
+ */
+ void (*log_sync_global)(MemoryListener *listener, bool last_stage);
+
+ /**
+ * @log_clear:
+ *
+ * Called before reading the dirty memory bitmap for a
+ * #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
+ void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_global_start:
+ *
+ * Called by memory_global_dirty_log_start(), which
+ * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
+ * the address space. #MemoryListener.log_global_start() is also
+ * called when a #MemoryListener is added, if global dirty logging is
+ * active at that time.
+ *
+ * @listener: The #MemoryListener.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+ bool (*log_global_start)(MemoryListener *listener, Error **errp);
+
+ /**
+ * @log_global_stop:
+ *
+ * Called by memory_global_dirty_log_stop(), which
+ * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
+ * the address space.
+ *
+ * @listener: The #MemoryListener.
+ */
void (*log_global_stop)(MemoryListener *listener);
+
+ /**
+ * @log_global_after_sync:
+ *
+ * Called after reading the dirty memory bitmap
+ * for any #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*log_global_after_sync)(MemoryListener *listener);
+
+ /**
+ * @eventfd_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has had a new ioeventfd
+ * registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @match_data: The @match_data parameter for the new ioeventfd.
+ * @data: The @data parameter for the new ioeventfd.
+ * @e: The #EventNotifier parameter for the new ioeventfd.
+ */
void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
bool match_data, uint64_t data, EventNotifier *e);
+
+ /**
+ * @eventfd_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has dropped an ioeventfd
+ * registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @match_data: The @match_data parameter for the dropped ioeventfd.
+ * @data: The @data parameter for the dropped ioeventfd.
+ * @e: The #EventNotifier parameter for the dropped ioeventfd.
+ */
void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
bool match_data, uint64_t data, EventNotifier *e);
+
+ /**
+ * @coalesced_io_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has had a new coalesced
+ * MMIO range registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @addr: The starting address for the coalesced MMIO range.
+ * @len: The length of the coalesced MMIO range.
+ */
void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
hwaddr addr, hwaddr len);
+
+ /**
+ * @coalesced_io_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has dropped a coalesced
+ * MMIO range since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @addr: The starting address for the coalesced MMIO range.
+ * @len: The length of the coalesced MMIO range.
+ */
void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
hwaddr addr, hwaddr len);
- /* Lower = earlier (during add), later (during del) */
+ /**
+ * @priority:
+ *
+ * Govern the order in which memory listeners are invoked. Lower priorities
+ * are invoked earlier for "add" or "start" callbacks, and later for "delete"
+ * or "stop" callbacks.
+ */
unsigned priority;
+
+ /**
+ * @name:
+ *
+ * Name of the listener. It can be used in contexts where we'd like to
+ * distinguish one memory listener from the others.
+ */
+ const char *name;
+
+ /* private: */
AddressSpace *address_space;
QTAILQ_ENTRY(MemoryListener) link;
QTAILQ_ENTRY(MemoryListener) link_as;
};
/**
- * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
*/
struct AddressSpace {
- /* All fields are private. */
+ /* private: */
struct rcu_head rcu;
char *name;
MemoryRegion *root;
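
A component usually implements only the MemoryListener callbacks it needs; unset members are simply never invoked. The sketch below registers a minimal add/del listener against an address space; memory_listener_register()/memory_listener_unregister() are QEMU's usual entry points, assumed here rather than shown in this hunk:

    /* Hypothetical tracker of the sections mapped in an address space. */
    static void example_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
    {
        /* Record the new section, e.g. to build a device's DMA map. */
    }

    static void example_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
    {
        /* Drop the state built in example_region_add(). */
    }

    static MemoryListener example_listener = {
        .name = "example",
        .region_add = example_region_add,
        .region_del = example_region_del,
        .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
    };

    static void example_attach_listener(AddressSpace *as)
    {
        /* Registration replays region_add for all existing sections. */
        memory_listener_register(&example_listener, as);
    }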
@@ -444,6 +1125,7 @@ struct AddressSpace {
struct FlatView *current_map;
int ioeventfd_nb;
+ int ioeventfd_notifiers;
struct MemoryRegionIoeventfd *ioeventfds;
QTAILQ_HEAD(, MemoryListener) listeners;
QTAILQ_ENTRY(AddressSpace) address_spaces_link;
@@ -467,31 +1149,70 @@ struct FlatView {
static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
- return atomic_rcu_read(&as->current_map);
+ return qatomic_rcu_read(&as->current_map);
}
+/**
+ * typedef flatview_cb: callback for flatview_for_each_range()
+ *
+ * @start: start address of the range within the FlatView
+ * @len: length of the range in bytes
+ * @mr: MemoryRegion covering this range
+ * @offset_in_region: offset of the first byte of the range within @mr
+ * @opaque: data pointer passed to flatview_for_each_range()
+ *
+ * Returns: true to stop the iteration, false to keep going.
+ */
+typedef bool (*flatview_cb)(Int128 start,
+ Int128 len,
+ const MemoryRegion *mr,
+ hwaddr offset_in_region,
+ void *opaque);
/**
- * MemoryRegionSection: describes a fragment of a #MemoryRegion
+ * flatview_for_each_range: Iterate through a FlatView
+ * @fv: the FlatView to iterate through
+ * @cb: function to call for each range
+ * @opaque: opaque data pointer to pass to @cb
*
- * @mr: the region, or %NULL if empty
- * @fv: the flat view of the address space the region is mapped in
- * @offset_within_region: the beginning of the section, relative to @mr's start
- * @size: the size of the section; will not exceed @mr's boundaries
- * @offset_within_address_space: the address of the first byte of the section
- * relative to the region's address space
- * @readonly: writes to this section are ignored
- * @nonvolatile: this section is non-volatile
+ * A FlatView is made up of a list of non-overlapping ranges, each of
+ * which is a slice of a MemoryRegion. This function iterates through
+ * each range in @fv, calling @cb. The callback function can terminate
+ * iteration early by returning 'true'.
*/
-struct MemoryRegionSection {
- MemoryRegion *mr;
- FlatView *fv;
- hwaddr offset_within_region;
- Int128 size;
- hwaddr offset_within_address_space;
- bool readonly;
- bool nonvolatile;
-};
+void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
+
+static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
+ MemoryRegionSection *b)
+{
+ return a->mr == b->mr &&
+ a->fv == b->fv &&
+ a->offset_within_region == b->offset_within_region &&
+ a->offset_within_address_space == b->offset_within_address_space &&
+ int128_eq(a->size, b->size) &&
+ a->readonly == b->readonly &&
+ a->nonvolatile == b->nonvolatile;
+}
+
+/**
+ * memory_region_section_new_copy: Copy a memory region section
+ *
+ * Allocate memory for a new copy, copy the memory region section, and
+ * properly take a reference on all relevant members.
+ *
+ * @s: the #MemoryRegionSection to copy
+ */
+MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
+
+/**
+ * memory_region_section_free_copy: Free a copied memory region section
+ *
+ * Free a copy of a memory section created via memory_region_section_new_copy(),
+ * properly dropping references on all relevant members.
+ *
+ * @s: the #MemoryRegionSection to free
+ */
+void memory_region_section_free_copy(MemoryRegionSection *s);
/**
* memory_region_init: Initialize a memory region
@@ -505,7 +1226,7 @@ struct MemoryRegionSection {
* @size: size of the region; any subregions beyond this size will be clipped
*/
void memory_region_init(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size);
@@ -553,7 +1274,7 @@ void memory_region_unref(MemoryRegion *mr);
* @size: size of the region.
*/
void memory_region_init_io(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -573,42 +1294,49 @@ void memory_region_init_io(MemoryRegion *mr,
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_nomigrate(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
/**
- * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region.
- * Accesses into the region will
- * modify memory directly.
+ * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
+ * Accesses into the region will
+ * modify memory directly.
*
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
* @name: Region name, becomes part of RAMBlock name used in migration stream
* must be unique within any device
* @size: size of the region.
- * @share: allow remapping RAM to different addresses
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
+ * RAM_GUEST_MEMFD.
* @errp: pointer to Error*, to store an error if it happens.
*
- * Note that this function is similar to memory_region_init_ram_nomigrate.
- * The only difference is part of the RAM region can be remapped.
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
- struct Object *owner,
- const char *name,
- uint64_t size,
- bool share,
- Error **errp);
+bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint32_t ram_flags,
+ Error **errp);
/**
- * memory_region_init_resizeable_ram: Initialize memory region with resizeable
+ * memory_region_init_resizeable_ram: Initialize memory region with resizable
* RAM. Accesses into the region will
* modify memory directly. Only an initial
* portion of this RAM is actually used.
- * The used size can change across reboots.
+ * Changing the size while migrating
+ * can result in the migration being
+ * canceled.
*
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
@@ -621,9 +1349,11 @@ void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_resizeable_ram(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_resizeable_ram(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
uint64_t max_size,
@@ -644,23 +1374,26 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
* @size: size of the region.
* @align: alignment of the region base address; if 0, the default alignment
* (getpagesize()) will be used.
- * @ram_flags: Memory region features:
- * - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
- * - RAM_PMEM: the memory is persistent memory
- * Other bits are ignored now.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @path: the path in which to allocate the RAM.
+ * @offset: offset within the file referenced by path
* @errp: pointer to Error*, to store an error if it happens.
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_from_file(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram_from_file(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
uint64_t align,
uint32_t ram_flags,
const char *path,
+ ram_addr_t offset,
Error **errp);
/**
@@ -671,19 +1404,25 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
* @owner: the object that tracks the region's reference count
* @name: the name of the region.
* @size: size of the region.
- * @share: %true if memory must be mmaped with the MAP_SHARED flag
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @fd: the fd to mmap.
+ * @offset: offset within the file referenced by fd
* @errp: pointer to Error*, to store an error if it happens.
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_from_fd(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram_from_fd(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
- bool share,
+ uint32_t ram_flags,
int fd,
+ ram_addr_t offset,
Error **errp);
#endif
@@ -703,7 +1442,7 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr,
* RAM memory region to be migrated; that is the responsibility of the caller.
*/
void memory_region_init_ram_ptr(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
void *ptr);
@@ -731,7 +1470,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
* (For RAM device memory regions, migrating the contents rarely makes sense.)
*/
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
void *ptr);
@@ -749,7 +1488,7 @@ void memory_region_init_ram_device_ptr(MemoryRegion *mr,
* @size: size of the region.
*/
void memory_region_init_alias(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
MemoryRegion *orig,
hwaddr offset,
@@ -772,9 +1511,11 @@ void memory_region_init_alias(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_nomigrate(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
@@ -795,9 +1536,11 @@ void memory_region_init_rom_nomigrate(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -854,9 +1597,11 @@ void memory_region_init_iommu(void *_iommu_mr,
* give the RAM block a unique name for migration purposes.
* We should lift this restriction and allow arbitrary Objects.
* If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
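Since the initializer now reports failure, the natural pattern is to propagate @errp straight out of a device's realize method; a sketch under assumed names (ExampleDeviceState, EXAMPLE_DEVICE and the 16 MiB size are hypothetical):

static void example_device_realize(DeviceState *dev, Error **errp)
{
    ExampleDeviceState *s = EXAMPLE_DEVICE(dev);   /* hypothetical device type */

    if (!memory_region_init_ram(&s->ram, OBJECT(dev), "example.ram",
                                16 * MiB, errp)) {
        return;
    }
    /* ... add s->ram as a subregion or expose it as a sysbus MMIO region ... */
}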
@@ -881,9 +1626,11 @@ void memory_region_init_ram(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
@@ -907,13 +1654,16 @@ void memory_region_init_rom(MemoryRegion *mr,
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
* @ops: callbacks for write access handling (must not be NULL).
+ * @opaque: passed to the read and write callbacks of the @ops structure.
* @name: Region name, becomes part of RAMBlock name used in migration stream
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_device(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom_device(MemoryRegion *mr,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -926,7 +1676,7 @@ void memory_region_init_rom_device(MemoryRegion *mr,
*
* @mr: the memory region being queried.
*/
-struct Object *memory_region_owner(MemoryRegion *mr);
+Object *memory_region_owner(MemoryRegion *mr);
/**
* memory_region_size: get a memory region's size.
@@ -970,6 +1720,26 @@ static inline bool memory_region_is_romd(MemoryRegion *mr)
}
/**
+ * memory_region_is_protected: check whether a memory region is protected
+ *
+ * Returns %true if a memory region is protected RAM and cannot be accessed
+ * via standard mechanisms, e.g. DMA.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_protected(MemoryRegion *mr);
+
+/**
+ * memory_region_has_guest_memfd: check whether a memory region has a
+ * guest_memfd associated
+ *
+ * Returns %true if a memory region's ram_block has a valid guest_memfd assigned.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_has_guest_memfd(MemoryRegion *mr);
+
+/**
* memory_region_get_iommu: check whether a memory region is an iommu
*
* Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
@@ -995,7 +1765,7 @@ static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
* Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
* otherwise NULL. This is fast path avoiding QOM checking, use with caution.
*
- * @mr: the memory region being queried
+ * @iommu_mr: the memory region being queried
*/
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
IOMMUMemoryRegion *iommu_mr)
@@ -1018,51 +1788,61 @@ uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
/**
* memory_region_notify_iommu: notify a change in an IOMMU translation entry.
*
- * The notification type will be decided by entry.perm bits:
- *
- * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
- * - For MAP (newly added entry) notifies: set entry.perm to the
- * permission of the page (which is definitely !IOMMU_NONE).
- *
* Note: for any IOMMU implementation, an in-place mapping change
* should be notified with an UNMAP followed by a MAP.
*
* @iommu_mr: the memory region that was changed
* @iommu_idx: the IOMMU index for the translation table which has changed
- * @entry: the new entry in the IOMMU translation table. The entry
- * replaces all old entries for the same virtual I/O address range.
- * Deleted entries have .@perm == 0.
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
*/
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
int iommu_idx,
- IOMMUTLBEntry entry);
+ IOMMUTLBEvent event);
/**
- * memory_region_notify_one: notify a change in an IOMMU translation
+ * memory_region_notify_iommu_one: notify a change in an IOMMU translation
* entry to a single notifier
*
* This works just like memory_region_notify_iommu(), but it only
* notifies a specific notifier, not all of them.
*
* @notifier: the notifier to be notified
- * @entry: the new entry in the IOMMU translation table. The entry
- * replaces all old entries for the same virtual I/O address range.
- * Deleted entries have .@perm == 0.
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
+ */
+void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
+ IOMMUTLBEvent *event);
+
+/**
+ * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
+ *                                           translation that covers the
+ *                                           range of a notifier
+ *
+ * @notifier: the notifier to be notified
*/
-void memory_region_notify_one(IOMMUNotifier *notifier,
- IOMMUTLBEntry *entry);
+void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
+
/**
* memory_region_register_iommu_notifier: register a notifier for changes to
* IOMMU translation entries.
*
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that at least one of the attributes of the notifier
+ * is not supported (flag/range) by the IOMMU memory region. On failure,
+ * the error is also reported via @errp.
+ *
* @mr: the memory region to observe
* @n: the IOMMUNotifier to be added; the notify callback receives a
* pointer to an #IOMMUTLBEntry as the opaque value; the pointer
* ceases to be valid on exit from the notifier.
+ * @errp: pointer to Error*, to store an error if it happens.
*/
-void memory_region_register_iommu_notifier(MemoryRegion *mr,
- IOMMUNotifier *n);
+int memory_region_register_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n, Error **errp);
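With the int return and @errp, callers can surface unsupported notifier flags or ranges at registration time. A sketch assuming the iommu_notifier_init() helper with its usual (fn, flags, start, end, iommu_idx) arguments and the IOMMU_NOTIFIER_ALL flag set; the callback and wrapper names are hypothetical:

static void example_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* react to the mapping change described by @entry */
}

static int example_register_notifier(IOMMUMemoryRegion *iommu_mr,
                                     IOMMUNotifier *n, Error **errp)
{
    iommu_notifier_init(n, example_iommu_notify, IOMMU_NOTIFIER_ALL,
                        0, HWADDR_MAX, 0 /* iommu_idx */);
    return memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
                                                 n, errp);
}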
/**
* memory_region_iommu_replay: replay existing IOMMU translations to
@@ -1077,16 +1857,6 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr,
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
/**
- * memory_region_iommu_replay_all: replay existing IOMMU translations
- * to all the notifiers registered.
- *
- * Note: this is not related to record-and-replay functionality.
- *
- * @iommu_mr: the memory region to observe
- */
-void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
-
-/**
* memory_region_unregister_iommu_notifier: unregister a notifier for
* changes to IOMMU translation entries.
*
@@ -1132,6 +1902,30 @@ int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
/**
+ * memory_region_iommu_set_page_size_mask: set the supported page
+ * sizes for a given IOMMU memory region
+ *
+ * @iommu_mr: IOMMU memory region
+ * @page_size_mask: supported page size mask
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
+ uint64_t page_size_mask,
+ Error **errp);
+
+/**
+ * memory_region_iommu_set_iova_ranges: set the usable IOVA ranges
+ * for a given IOMMU memory region
+ *
+ * @iommu: IOMMU memory region
+ * @iova_ranges: list of ordered IOVA ranges (at least one range)
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu,
+ GList *iova_ranges,
+ Error **errp);
+
+/**
* memory_region_name: get a memory region's name
*
* Returns the string that was used to initialize the memory region.
@@ -1205,7 +1999,7 @@ int memory_region_get_fd(MemoryRegion *mr);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1222,7 +2016,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1232,8 +2026,8 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr);
/* memory_region_ram_resize: Resize a RAM region.
*
- * Only legal before guest might have detected the memory size: e.g. on
- * incoming migration, or right after reset.
+ * Resizing RAM while migrating can result in the migration being canceled.
+ * Care has to be taken if the guest might have already detected the memory.
*
* @mr: a memory region created with @memory_region_init_resizeable_ram.
* @newsize: the new size the region
@@ -1243,6 +2037,26 @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
Error **errp);
/**
+ * memory_region_msync: Synchronize selected address range of
+ * a memory mapped region
+ *
+ * @mr: the memory region to be synced
+ * @addr: the initial address of the range to be synced
+ * @size: the size of the range to be synced
+ */
+void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
+ * memory_region_writeback: Trigger cache writeback for
+ * selected address range
+ *
+ * @mr: the memory region to be updated
+ * @addr: the initial address of the range to be written back
+ * @size: the size of the range to be written back
+ */
+void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
* memory_region_set_log: Turn dirty logging on or off for a region.
*
* Turns dirty logging on or off for a specified client (display, migration).
@@ -1255,23 +2069,6 @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
/**
- * memory_region_get_dirty: Check whether a range of bytes is dirty
- * for a specified client.
- *
- * Checks whether a range of bytes has been written to since the last
- * call to memory_region_reset_dirty() with the same @client. Dirty logging
- * must be enabled.
- *
- * @mr: the memory region being queried.
- * @addr: the address (relative to the start of the region) being queried.
- * @size: the size of the range being queried.
- * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
- * %DIRTY_MEMORY_VGA.
- */
-bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client);
-
-/**
* memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
*
* Marks a range of bytes as dirty, after it has been dirtied outside
@@ -1285,6 +2082,22 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size);
/**
+ * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
+ *
+ * This function is called when the caller wants to clear the remote
+ * dirty bitmap of a memory range within the memory region. This can
+ * be used by e.g. KVM to manually clear the dirty log when
+ * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared supported by the host
+ * kernel.
+ *
+ * @mr: the memory region to clear the dirty log upon
+ * @start: start address offset within the memory region
+ * @len: length of the range whose dirty bitmap should be cleared
+ */
+void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
+ hwaddr len);
+
+/**
* memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
* bitmap and clear it.
*
@@ -1294,7 +2107,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
* querying the same page multiple times, which is especially useful for
* display updates where the scanlines often are not page aligned.
*
- * The dirty bitmap region which gets copyed into the snapshot (and
+ * The dirty bitmap region which gets copied into the snapshot (and
* cleared afterwards) can be larger than requested. The boundaries
* are rounded up/down so complete bitmap longs (covering 64 pages on
* 64bit hosts) can be copied over into the bitmap snapshot. Which
@@ -1345,6 +2158,24 @@ void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size, unsigned client);
/**
+ * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
+ * TBs (for self-modifying code).
+ *
+ * The MemoryRegionOps->write() callback of a ROM device must use this function
+ * to mark byte ranges that have been modified internally, such as by directly
+ * accessing the memory returned by memory_region_get_ram_ptr().
+ *
+ * This function marks the range dirty and invalidates TBs so that TCG can
+ * detect self-modifying code.
+ *
+ * @mr: the region being flushed.
+ * @addr: the start, relative to the start of the region, of the range being
+ * flushed.
+ * @size: the size, in bytes, of the range being flushed.
+ */
+void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
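A sketch of how a ROM device's write handler might use this, assuming the region is handed in as the opaque pointer and the stn_he_p() store helper from qemu/bswap.h is available (both choices are illustrative, not mandated):

static void example_flash_write(void *opaque, hwaddr addr,
                                uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;                 /* assumed: mr passed as opaque */
    uint8_t *rom = memory_region_get_ram_ptr(mr);

    stn_he_p(rom + addr, size, data);          /* patch the backing RAM directly */
    memory_region_flush_rom_device(mr, addr, size);  /* mark dirty + flush TBs */
}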
+
+/**
* memory_region_set_readonly: Turn a memory region read-only (or read-write)
*
* Allows a memory region to be marked as read-only (turning it into a ROM).
@@ -1443,19 +2274,6 @@ void memory_region_set_flush_coalesced(MemoryRegion *mr);
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
/**
- * memory_region_clear_global_locking: Declares that access processing does
- * not depend on the QEMU global lock.
- *
- * By clearing this property, accesses to the memory region will be processed
- * outside of QEMU's global lock (unless the lock is held on when issuing the
- * access request). In this case, the device model implementing the access
- * handlers is responsible for synchronization of concurrency.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_clear_global_locking(MemoryRegion *mr);
-
-/**
* memory_region_add_eventfd: Request an eventfd to be triggered when a word
* is written to a location.
*
@@ -1540,6 +2358,8 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
/**
* memory_region_get_ram_addr: Get the ram address associated with a memory
* region
+ *
+ * @mr: the region to be queried
*/
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
@@ -1603,6 +2423,25 @@ void memory_region_set_size(MemoryRegion *mr, uint64_t size);
void memory_region_set_alias_offset(MemoryRegion *mr,
hwaddr offset);
+/*
+ * memory_region_set_unmergeable: Set a memory region unmergeable
+ *
+ * Mark a memory region unmergeable, resulting in the memory region (or
+ * everything contained in a memory region container) not getting merged when
+ * simplifying the address space and notifying memory listeners. Consequently,
+ * memory listeners will never get notified about ranges that are larger than
+ * the original memory regions.
+ *
+ * This is primarily useful when multiple aliases to a RAM memory region are
+ * mapped into a memory region container, and updates (e.g., enable/disable or
+ * map/unmap) of individual memory region aliases are not supposed to affect
+ * other memory regions in the same container.
+ *
+ * @mr: the #MemoryRegion to be updated
+ * @unmergeable: whether to mark the #MemoryRegion unmergeable
+ */
+void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
+
/**
* memory_region_present: checks if an address relative to a @container
* translates into #MemoryRegion within @container
@@ -1617,13 +2456,49 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr);
/**
* memory_region_is_mapped: returns true if #MemoryRegion is mapped
- * into any address space.
+ * into another memory region, which does not necessarily imply that it is
+ * mapped into an address space.
*
* @mr: a #MemoryRegion which should be checked if it's mapped
*/
bool memory_region_is_mapped(MemoryRegion *mr);
/**
+ * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * The #RamDiscardManager cannot change while a memory region is mapped.
+ *
+ * @mr: the #MemoryRegion
+ */
+RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
+
+/**
+ * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
+ * #RamDiscardManager assigned
+ *
+ * @mr: the #MemoryRegion
+ */
+static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
+{
+ return !!memory_region_get_ram_discard_manager(mr);
+}
+
+/**
+ * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
+ * that does not cover RAM, or a #MemoryRegion that already has a
+ * #RamDiscardManager assigned.
+ *
+ * @mr: the #MemoryRegion
+ * @rdm: #RamDiscardManager to set
+ */
+void memory_region_set_ram_discard_manager(MemoryRegion *mr,
+ RamDiscardManager *rdm);
+
+/**
* memory_region_find: translate an address/size relative to a
* MemoryRegion into a #MemoryRegionSection.
*
@@ -1632,8 +2507,8 @@ bool memory_region_is_mapped(MemoryRegion *mr);
*
* Returns a #MemoryRegionSection that describes a contiguous overlap.
* It will have the following characteristics:
- * .@size = 0 iff no overlap was found
- * .@mr is non-%NULL iff an overlap was found
+ * - @size = 0 iff no overlap was found
+ * - @mr is non-%NULL iff an overlap was found
*
* Remember that in the return value the @offset_within_region is
* relative to the returned region (in the .@mr field), not to the
@@ -1644,8 +2519,8 @@ bool memory_region_is_mapped(MemoryRegion *mr);
* returned one. However, in the special case where the @mr argument
* has no container (and thus is the root of the address space), the
* following will hold:
- * .@offset_within_address_space >= @addr
- * .@offset_within_address_space + .@size <= @addr + @size
+ * - @offset_within_address_space >= @addr
+ * - @offset_within_address_space + .@size <= @addr + @size
*
* @mr: a MemoryRegion within which @addr is a relative address
* @addr: start of the area within @as to be searched
@@ -1658,8 +2533,21 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
*
* Synchronizes the dirty page log for all address spaces.
+ *
+ * @last_stage: whether this is the last stage of live migration
+ */
+void memory_global_dirty_log_sync(bool last_stage);
+
+/**
+ * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
+ *
+ * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
+ * This function must be called after the dirty log bitmap is cleared, and
+ * before dirty guest memory pages are read. If you are using
+ * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
+ * care of doing this.
*/
-void memory_global_dirty_log_sync(void);
+void memory_global_after_dirty_log_sync(void);
/**
* memory_region_transaction_begin: Start a transaction.
@@ -1694,16 +2582,26 @@ void memory_listener_unregister(MemoryListener *listener);
/**
* memory_global_dirty_log_start: begin dirty logging for all regions
+ *
+ * @flags: purpose of starting dirty log, migration or dirty rate
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_global_dirty_log_start(void);
+bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
/**
* memory_global_dirty_log_stop: end dirty logging for all regions
+ *
+ * @flags: purpose of stopping dirty log, migration or dirty rate
*/
-void memory_global_dirty_log_stop(void);
+void memory_global_dirty_log_stop(unsigned int flags);
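A sketch of the start/stop pairing with the flag-based API, assuming GLOBAL_DIRTY_MIGRATION is one of the defined flag bits:

static bool example_start_dirty_tracking(Error **errp)
{
    if (!memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, errp)) {
        return false;
    }
    /* ... sync and consume the dirty log ... */
    memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
    return true;
}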
-void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
- bool dispatch_tree, bool owner);
+void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
+
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
/**
* memory_region_dispatch_read: perform a read directly to the specified
@@ -1712,13 +2610,13 @@ void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
* @mr: #MemoryRegion to access
* @addr: address within that region
* @pval: pointer to uint64_t which the data is written to
- * @size: size of the access in bytes
+ * @op: size, sign, and endianness of the memory operation
* @attrs: memory transaction attributes to use for the access
*/
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
hwaddr addr,
uint64_t *pval,
- unsigned size,
+ MemOp op,
MemTxAttrs attrs);
/**
* memory_region_dispatch_write: perform a write directly to the specified
@@ -1727,13 +2625,13 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
* @mr: #MemoryRegion to access
* @addr: address within that region
* @data: data to write
- * @size: size of the access in bytes
+ * @op: size, sign, and endianness of the memory operation
* @attrs: memory transaction attributes to use for the access
*/
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
hwaddr addr,
uint64_t data,
- unsigned size,
+ MemOp op,
MemTxAttrs attrs);
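Since the access width is now carried in a MemOp, size and endianness are encoded together; a sketch of a 4-byte little-endian read (the result check is omitted for brevity, and the helper name is hypothetical):

static uint32_t example_mmio_peek(MemoryRegion *mr, hwaddr addr)
{
    uint64_t val = 0;

    memory_region_dispatch_read(mr, addr, &val, MO_32 | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    return val;
}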
/**
@@ -1758,6 +2656,16 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
void address_space_destroy(AddressSpace *as);
/**
+ * address_space_remove_listeners: unregister all listeners of an address space
+ *
+ * Removes all callbacks previously registered with memory_listener_register()
+ * for @as.
+ *
+ * @as: an initialized #AddressSpace
+ */
+void address_space_remove_listeners(AddressSpace *as);
+
+/**
* address_space_rw: read from or write to an address space.
*
* Return a MemTxResult indicating whether the operation succeeded
@@ -1772,8 +2680,8 @@ void address_space_destroy(AddressSpace *as);
* @is_write: indicates the transfer direction
*/
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
- int len, bool is_write);
+ MemTxAttrs attrs, void *buf,
+ hwaddr len, bool is_write);
/**
* address_space_write: write to address space.
@@ -1790,7 +2698,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
*/
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf, int len);
+ const void *buf, hwaddr len);
/**
* address_space_write_rom: write to address space, including ROM.
@@ -1816,7 +2724,7 @@ MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
*/
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf, int len);
+ const void *buf, hwaddr len);
/* address_space_ld*: load from an address space
* address_space_st*: store to an address space
@@ -1842,12 +2750,12 @@ MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
struct MemoryRegionCache {
void *ptr;
@@ -1858,9 +2766,6 @@ struct MemoryRegionCache {
bool is_write;
};
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
-
-
/* address_space_ld*_cached: load from a cached #MemoryRegion
* address_space_st*_cached: store into a cached #MemoryRegion
*
@@ -1888,7 +2793,7 @@ struct MemoryRegionCache {
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
/* Inline fast path for direct RAM access. */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
@@ -1903,7 +2808,7 @@ static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
}
static inline void address_space_stb_cached(MemoryRegionCache *cache,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len);
if (likely(cache->ptr)) {
@@ -1914,15 +2819,15 @@ static inline void address_space_stb_cached(MemoryRegionCache *cache,
}
#define ENDIANNESS _le
-#include "exec/memory_ldst_cached.inc.h"
+#include "exec/memory_ldst_cached.h.inc"
#define ENDIANNESS _be
-#include "exec/memory_ldst_cached.inc.h"
+#include "exec/memory_ldst_cached.h.inc"
#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
/* address_space_cache_init: prepare for repeated access to a physical
* memory region
@@ -1950,6 +2855,21 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
bool is_write);
/**
+ * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache to operate on.
+ *
+ * Initializes a #MemoryRegionCache structure without a memory region attached.
+ * A cache initialized this way can only be safely destroyed; it cannot be used.
+ */
+static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
+{
+ cache->mrs.mr = NULL;
+ /* There is no real need to initialize fv, but it makes Coverity happy. */
+ cache->fv = NULL;
+}
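A sketch of the intended lifecycle: initialize the cache empty up front so the eventual address_space_cache_destroy() call is unconditional (names and the 4096-byte length are illustrative):

static void example_cache_lifecycle(AddressSpace *as, hwaddr addr, bool use_it)
{
    MemoryRegionCache cache;

    address_space_cache_init_empty(&cache);
    if (use_it) {
        address_space_cache_init(&cache, as, addr, 4096, false /* read-only */);
    }
    /* ... possibly use the cache here ... */
    address_space_cache_destroy(&cache);  /* safe whether or not it was populated */
}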
+
+/**
* address_space_cache_invalidate: complete a write to a #MemoryRegionCache
*
* @cache: The #MemoryRegionCache to operate on.
@@ -2017,13 +2937,14 @@ static inline MemoryRegion *address_space_translate(AddressSpace *as,
* @is_write: indicates the transfer direction
* @attrs: memory attributes
*/
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
bool is_write, MemTxAttrs attrs);
/* address_space_map: map a physical memory region into a host virtual address
*
* May map a subset of the requested range, given by and returned in @plen.
- * May return %NULL if resources needed to perform the mapping are exhausted.
+ * May return %NULL and set *@plen to zero(0), if resources needed to perform
+ * the mapping are exhausted.
* Use only for reads OR writes - not for read-modify-write operations.
* Use cpu_register_map_client() to know when retrying the map operation is
* likely to succeed.
@@ -2049,30 +2970,34 @@ void *address_space_map(AddressSpace *as, hwaddr addr,
* @is_write: indicates the transfer direction
*/
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
- int is_write, hwaddr access_len);
+ bool is_write, hwaddr access_len);
/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, int len);
+ MemTxAttrs attrs, void *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
- int len, hwaddr addr1, hwaddr l,
+ MemTxAttrs attrs, void *buf,
+ hwaddr len, hwaddr addr1, hwaddr l,
MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
/* Internal functions, part of the implementation of address_space_read_cached
* and address_space_write_cached. */
-void address_space_read_cached_slow(MemoryRegionCache *cache,
- hwaddr addr, void *buf, int len);
-void address_space_write_cached_slow(MemoryRegionCache *cache,
- hwaddr addr, const void *buf, int len);
+MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, void *buf, hwaddr len);
+MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, const void *buf,
+ hwaddr len);
+
+int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
+bool prepare_mmio_access(MemoryRegion *mr);
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
if (is_write) {
- return memory_region_is_ram(mr) &&
- !mr->readonly && !memory_region_is_ram_device(mr);
+ return memory_region_is_ram(mr) && !mr->readonly &&
+ !mr->rom_device && !memory_region_is_ram_device(mr);
} else {
return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
memory_region_is_romd(mr);
@@ -2090,11 +3015,12 @@ static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
* @addr: address within that address space
* @attrs: memory transaction attributes
* @buf: buffer with the data transferred
+ * @len: length of the data transferred
*/
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
- int len)
+ MemTxAttrs attrs, void *buf,
+ hwaddr len)
{
MemTxResult result = MEMTX_OK;
hwaddr l, addr1;
@@ -2104,7 +3030,7 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
if (__builtin_constant_p(len)) {
if (len) {
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
l = len;
mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
@@ -2115,7 +3041,6 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
result = flatview_read_continue(fv, addr, attrs, buf, len,
addr1, l, mr);
}
- rcu_read_unlock();
}
} else {
result = address_space_read_full(as, addr, attrs, buf, len);
@@ -2131,15 +3056,17 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
* @buf: buffer with the data transferred
* @len: length of the data transferred
*/
-static inline void
+static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
- void *buf, int len)
+ void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
if (likely(cache->ptr)) {
memcpy(buf, cache->ptr + addr, len);
+ return MEMTX_OK;
} else {
- address_space_read_cached_slow(cache, addr, buf, len);
+ return address_space_read_cached_slow(cache, addr, buf, len);
}
}
@@ -2151,18 +3078,110 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
* @buf: buffer with the data transferred
* @len: length of the data transferred
*/
-static inline void
+static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
- void *buf, int len)
+ const void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
if (likely(cache->ptr)) {
memcpy(cache->ptr + addr, buf, len);
+ return MEMTX_OK;
} else {
- address_space_write_cached_slow(cache, addr, buf, len);
+ return address_space_write_cached_slow(cache, addr, buf, len);
}
}
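With the cached accessors now returning a MemTxResult, callers can detect faults taken on the slow path; a minimal sketch:

static bool example_read_field(MemoryRegionCache *cache, hwaddr off,
                               uint16_t *val)
{
    return address_space_read_cached(cache, off, val, sizeof(*val)) == MEMTX_OK;
}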
+/**
+ * address_space_set: Fill address space with a constant byte.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
+ uint8_t c, hwaddr len, MemTxAttrs attrs);
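For example, zeroing a guest-physical range reduces to a single call (transaction attributes left unspecified for brevity):

static bool example_zero_range(AddressSpace *as, hwaddr addr, hwaddr len)
{
    return address_space_set(as, addr, 0, len,
                             MEMTXATTRS_UNSPECIFIED) == MEMTX_OK;
}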
+
+#ifdef NEED_CPU_H
+/* enum device_endian to MemOp. */
+static inline MemOp devend_memop(enum device_endian end)
+{
+ QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
+ DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
+
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
+ /* Swap if non-host endianness or native (target) endianness */
+ return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
+#else
+ const int non_host_endianness =
+ DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
+
+ /* In this case, native (target) endianness needs no swap. */
+ return (end == non_host_endianness) ? MO_BSWAP : 0;
+#endif
+}
+#endif
+
+/*
+ * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
+ * to manage the actual amount of memory consumed by the VM (then, the memory
+ * provided by RAM blocks might be bigger than the desired memory consumption).
+ * This *must* be set if:
+ * - Discarding parts of a RAM block does not result in the change being
+ *   reflected in the VM and the pages getting freed.
+ * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
+ *   discards blindly.
+ * - Discarding parts of a RAM block will result in integrity issues (e.g.,
+ * encrypted VMs).
+ * Technologies that only temporarily pin the current working set of a
+ * driver are fine, because we don't expect such pages to be discarded
+ * (esp. based on guest action like balloon inflation).
+ *
+ * This is *not* to be used to protect from concurrent discards (esp.,
+ * postcopy).
+ *
+ * Returns 0 if successful. Returns -EBUSY if a technology that relies on
+ * discards to work reliably is active.
+ */
+int ram_block_discard_disable(bool state);
+
+/*
+ * See ram_block_discard_disable(): only disable uncoordinated discards,
+ * keeping coordinated discards (via the RamDiscardManager) enabled.
+ */
+int ram_block_uncoordinated_discard_disable(bool state);
+
+/*
+ * Inhibit technologies that disable discarding of pages in RAM blocks.
+ *
+ * Returns 0 if successful. Returns -EBUSY if discarding of pages has already
+ * been disabled (i.e., discards are broken).
+ */
+int ram_block_discard_require(bool state);
+
+/*
+ * See ram_block_discard_require(): only inhibit technologies that disable
+ * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
+ * technologies that only inhibit uncoordinated discards (via the
+ * RamDiscardManager).
+ */
+int ram_block_coordinated_discard_require(bool state);
+
+/*
+ * Test if any discarding of memory in ram blocks is disabled.
+ */
+bool ram_block_discard_is_disabled(void);
+
+/*
+ * Test if any discarding of memory in ram blocks is required to work reliably.
+ */
+bool ram_block_discard_is_required(void);
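A sketch of the expected usage for a device that pins all guest RAM (for example a passthrough-style device); the realize/unrealize helper names are hypothetical:

static void example_pinned_device_realize(Error **errp)
{
    if (ram_block_discard_disable(true)) {
        error_setg(errp, "cannot pin guest RAM: a discard-based feature "
                   "(e.g. memory ballooning) is active");
        return;
    }
    /* ... pin/map all guest memory ... */
}

static void example_pinned_device_unrealize(void)
{
    ram_block_discard_disable(false);   /* re-allow discards on teardown */
}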
+
#endif
#endif
diff --git a/include/exec/memory_ldst.inc.h b/include/exec/memory_ldst.h.inc
index 272c20f02e..92ad74e956 100644
--- a/include/exec/memory_ldst.inc.h
+++ b/include/exec/memory_ldst.h.inc
@@ -8,7 +8,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,48 +20,48 @@
*/
#ifdef TARGET_ENDIANNESS
-extern uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw, SUFFIX)(ARG1_DECL,
+void glue(address_space_stw, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stl, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#else
-extern uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
+uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stb, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
+void glue(address_space_stb, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#endif
diff --git a/include/exec/memory_ldst_cached.inc.h b/include/exec/memory_ldst_cached.h.inc
index fd4bbb40e7..d7834f852c 100644
--- a/include/exec/memory_ldst_cached.inc.h
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,10 +24,23 @@
#define LD_P(size) \
glue(glue(ld, size), glue(ENDIANNESS, _p))
+static inline uint16_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 2 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ return LD_P(uw)(cache->ptr + addr);
+ } else {
+ return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
+ }
+}
+
static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 4 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 4, cache->mrs.mr);
if (likely(cache->ptr)) {
return LD_P(l)(cache->ptr + addr);
} else {
@@ -39,6 +52,7 @@ static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 8 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 8, cache->mrs.mr);
if (likely(cache->ptr)) {
return LD_P(q)(cache->ptr + addr);
} else {
@@ -46,17 +60,6 @@ static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
}
}
-static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
- hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
-{
- assert(addr < cache->len && 2 <= cache->len - addr);
- if (likely(cache->ptr)) {
- return LD_P(uw)(cache->ptr + addr);
- } else {
- return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
- }
-}
-
#undef ADDRESS_SPACE_LD_CACHED
#undef ADDRESS_SPACE_LD_CACHED_SLOW
#undef LD_P
@@ -68,25 +71,25 @@ static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
#define ST_P(size) \
glue(glue(st, size), glue(ENDIANNESS, _p))
-static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
- assert(addr < cache->len && 4 <= cache->len - addr);
+ assert(addr < cache->len && 2 <= cache->len - addr);
if (likely(cache->ptr)) {
- ST_P(l)(cache->ptr + addr, val);
+ ST_P(w)(cache->ptr + addr, val);
} else {
- ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
+ ADDRESS_SPACE_ST_CACHED_SLOW(w)(cache, addr, val, attrs, result);
}
}
-static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
+static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
- assert(addr < cache->len && 2 <= cache->len - addr);
+ assert(addr < cache->len && 4 <= cache->len - addr);
if (likely(cache->ptr)) {
- ST_P(w)(cache->ptr + addr, val);
+ ST_P(l)(cache->ptr + addr, val);
} else {
- ADDRESS_SPACE_ST_CACHED_SLOW(w)(cache, addr, val, attrs, result);
+ ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
}
}
diff --git a/include/exec/memory_ldst_phys.inc.h b/include/exec/memory_ldst_phys.h.inc
index 91f72973cb..ecd678610d 100644
--- a/include/exec/memory_ldst_phys.inc.h
+++ b/include/exec/memory_ldst_phys.h.inc
@@ -8,7 +8,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,6 +20,12 @@
*/
#ifdef TARGET_ENDIANNESS
+static inline uint16_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
static inline uint32_t glue(ldl_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl, SUFFIX)(ARG1, addr,
@@ -32,10 +38,10 @@ static inline uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
{
- return glue(address_space_lduw, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stw, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
@@ -44,18 +50,30 @@ static inline void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stw, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
static inline void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq, SUFFIX)(ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
#else
+static inline uint8_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldub, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint16_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint16_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
static inline uint32_t glue(ldl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl_le, SUFFIX)(ARG1, addr,
@@ -80,22 +98,22 @@ static inline uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint8_t val)
{
- return glue(address_space_ldub, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stb, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
{
- return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
{
- return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
@@ -110,24 +128,6 @@ static inline void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t va
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stb, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
static inline void glue(stq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq_le, SUFFIX)(ARG1, addr, val,
diff --git a/include/exec/page-vary.h b/include/exec/page-vary.h
new file mode 100644
index 0000000000..54ddde308a
--- /dev/null
+++ b/include/exec/page-vary.h
@@ -0,0 +1,52 @@
+/*
+ * Definitions for cpus with variable page sizes.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef EXEC_PAGE_VARY_H
+#define EXEC_PAGE_VARY_H
+
+typedef struct {
+ bool decided;
+ int bits;
+ uint64_t mask;
+} TargetPageBits;
+
+#ifdef IN_PAGE_VARY
+bool set_preferred_target_page_bits_common(int bits);
+void finalize_target_page_bits_common(int min);
+#endif
+
+/**
+ * set_preferred_target_page_bits:
+ * @bits: number of bits needed to represent an address within the page
+ *
+ * Set the preferred target page size (the actual target page
+ * size may be smaller than any given CPU's preference).
+ * Returns true on success, false on failure (which can only happen
+ * if this is called after the system has already finalized its
+ * choice of page size and the requested page size is smaller than that).
+ */
+bool set_preferred_target_page_bits(int bits);
+
+/**
+ * finalize_target_page_bits:
+ * Commit the final value set by set_preferred_target_page_bits.
+ */
+void finalize_target_page_bits(void);
+
+#endif /* EXEC_PAGE_VARY_H */
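A sketch of the intended call order for the interface above, under hypothetical function names: a CPU model states its preference while CPUs are being realized, and board/accelerator setup commits the choice once no further CPU can change it; 14 bits corresponds to 16 KiB pages.

static void example_cpu_realize(void)
{
    if (!set_preferred_target_page_bits(14)) {
        /* too late: a smaller page size has already been finalized */
    }
}

static void example_machine_init_done(void)
{
    finalize_target_page_bits();
}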
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
new file mode 100644
index 0000000000..c4552b5061
--- /dev/null
+++ b/include/exec/plugin-gen.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * plugin-gen.h - TCG-dependent definitions for generating plugin code
+ *
+ * This header should be included only from plugin.c and C files that emit
+ * TCG code.
+ */
+#ifndef QEMU_PLUGIN_GEN_H
+#define QEMU_PLUGIN_GEN_H
+
+#include "tcg/tcg.h"
+
+struct DisasContextBase;
+
+#ifdef CONFIG_PLUGIN
+
+bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db,
+ bool suppress);
+void plugin_gen_tb_end(CPUState *cpu, size_t num_insns);
+void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db);
+void plugin_gen_insn_end(void);
+
+void plugin_gen_disable_mem_helpers(void);
+void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info);
+
+#else /* !CONFIG_PLUGIN */
+
+static inline bool
+plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup)
+{
+ return false;
+}
+
+static inline
+void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db)
+{ }
+
+static inline void plugin_gen_insn_end(void)
+{ }
+
+static inline void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
+{ }
+
+static inline void plugin_gen_disable_mem_helpers(void)
+{ }
+
+static inline void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
+{ }
+
+#endif /* CONFIG_PLUGIN */
+
+#endif /* QEMU_PLUGIN_GEN_H */
+
diff --git a/include/exec/poison.h b/include/exec/poison.h
index ecdc83c147..792a83f493 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -3,7 +3,8 @@
#ifndef HW_POISON_H
#define HW_POISON_H
-#ifdef __GNUC__
+
+#include "config-poison.h"
#pragma GCC poison TARGET_I386
#pragma GCC poison TARGET_X86_64
@@ -11,8 +12,9 @@
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_HEXAGON
#pragma GCC poison TARGET_HPPA
-#pragma GCC poison TARGET_LM32
+#pragma GCC poison TARGET_LOONGARCH64
#pragma GCC poison TARGET_M68K
#pragma GCC poison TARGET_MICROBLAZE
#pragma GCC poison TARGET_MIPS
@@ -20,38 +22,34 @@
#pragma GCC poison TARGET_ABI_MIPSO32
#pragma GCC poison TARGET_MIPS64
#pragma GCC poison TARGET_ABI_MIPSN64
-#pragma GCC poison TARGET_MOXIE
-#pragma GCC poison TARGET_NIOS2
#pragma GCC poison TARGET_OPENRISC
#pragma GCC poison TARGET_PPC
#pragma GCC poison TARGET_PPC64
#pragma GCC poison TARGET_ABI32
+#pragma GCC poison TARGET_RX
#pragma GCC poison TARGET_S390X
#pragma GCC poison TARGET_SH4
#pragma GCC poison TARGET_SPARC
#pragma GCC poison TARGET_SPARC64
-#pragma GCC poison TARGET_TILEGX
#pragma GCC poison TARGET_TRICORE
-#pragma GCC poison TARGET_UNICORE32
#pragma GCC poison TARGET_XTENSA
#pragma GCC poison TARGET_HAS_BFLT
#pragma GCC poison TARGET_NAME
#pragma GCC poison TARGET_SUPPORTS_MTTCG
-#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison TARGET_BIG_ENDIAN
#pragma GCC poison BSWAP_NEEDED
#pragma GCC poison TARGET_LONG_BITS
#pragma GCC poison TARGET_FMT_lx
#pragma GCC poison TARGET_FMT_ld
+#pragma GCC poison TARGET_FMT_lu
#pragma GCC poison TARGET_PAGE_SIZE
#pragma GCC poison TARGET_PAGE_MASK
#pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN
-#pragma GCC poison CPUArchState
-
#pragma GCC poison CPU_INTERRUPT_HARD
#pragma GCC poison CPU_INTERRUPT_EXITTB
#pragma GCC poison CPU_INTERRUPT_HALT
@@ -66,18 +64,14 @@
#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
#pragma GCC poison CONFIG_ALPHA_DIS
-#pragma GCC poison CONFIG_ARM_A64_DIS
-#pragma GCC poison CONFIG_ARM_DIS
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
-#pragma GCC poison CONFIG_LM32_DIS
+#pragma GCC poison CONFIG_HEXAGON_DIS
+#pragma GCC poison CONFIG_LOONGARCH_DIS
#pragma GCC poison CONFIG_M68K_DIS
#pragma GCC poison CONFIG_MICROBLAZE_DIS
#pragma GCC poison CONFIG_MIPS_DIS
-#pragma GCC poison CONFIG_NANOMIPS_DIS
-#pragma GCC poison CONFIG_MOXIE_DIS
-#pragma GCC poison CONFIG_NIOS2_DIS
#pragma GCC poison CONFIG_PPC_DIS
#pragma GCC poison CONFIG_RISCV_DIS
#pragma GCC poison CONFIG_S390_DIS
@@ -85,10 +79,10 @@
#pragma GCC poison CONFIG_SPARC_DIS
#pragma GCC poison CONFIG_XTENSA_DIS
+#pragma GCC poison CONFIG_HVF
#pragma GCC poison CONFIG_LINUX_USER
-#pragma GCC poison CONFIG_VHOST_NET
#pragma GCC poison CONFIG_KVM
-#pragma GCC poison CONFIG_SOFTMMU
+#pragma GCC poison CONFIG_WHPX
+#pragma GCC poison CONFIG_XEN
#endif
-#endif
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 9ecd911c3e..07c8f86375 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -20,37 +20,61 @@
#define RAM_ADDR_H
#ifndef CONFIG_USER_ONLY
-#include "hw/xen/xen.h"
+#include "cpu.h"
+#include "sysemu/xen.h"
+#include "sysemu/tcg.h"
#include "exec/ramlist.h"
+#include "exec/ramblock.h"
+#include "exec/exec-all.h"
-struct RAMBlock {
- struct rcu_head rcu;
- struct MemoryRegion *mr;
- uint8_t *host;
- uint8_t *colo_cache; /* For colo, VM's ram cache */
- ram_addr_t offset;
- ram_addr_t used_length;
- ram_addr_t max_length;
- void (*resized)(const char*, uint64_t length, void *host);
- uint32_t flags;
- /* Protected by iothread lock. */
- char idstr[256];
- /* RCU-enabled, writes protected by the ramlist lock */
- QLIST_ENTRY(RAMBlock) next;
- QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
- int fd;
- size_t page_size;
- /* dirty bitmap used during migration */
- unsigned long *bmap;
- /* bitmap of pages that haven't been sent even once
- * only maintained and used in postcopy at the moment
- * where it's used to send the dirtymap at the start
- * of the postcopy phase
- */
- unsigned long *unsentmap;
- /* bitmap of already received pages in postcopy */
- unsigned long *receivedmap;
-};
+extern uint64_t total_dirty_pages;
+
+/**
+ * clear_bmap_size: calculate clear bitmap size
+ *
+ * @pages: number of guest pages
+ * @shift: guest page number shift
+ *
+ * Returns: number of bits for the clear bitmap
+ */
+static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
+{
+ return DIV_ROUND_UP(pages, 1UL << shift);
+}
+
+/**
+ * clear_bmap_set: set the clear bitmap for the page range. Must be called
+ * with bitmap_mutex held.
+ *
+ * @rb: the ramblock to operate on
+ * @start: the start page number
+ * @npages: number of pages to set in the bitmap
+ *
+ * Returns: None
+ */
+static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
+ uint64_t npages)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
+}
+
+/**
+ * clear_bmap_test_and_clear: test the clear bitmap for the page, clear if set.
+ * Must be called with bitmap_mutex held.
+ *
+ * @rb: the ramblock to operate on
+ * @page: the page number to check
+ *
+ * Returns: true if the bit was set, false otherwise
+ */
+static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
+}
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
@@ -73,7 +97,8 @@ static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
bool ramblock_is_pmem(RAMBlock *rb);
-long qemu_getrampagesize(void);
+long qemu_minrampagesize(void);
+long qemu_maxrampagesize(void);
/**
* qemu_ram_alloc_from_file,
@@ -83,12 +108,11 @@ long qemu_getrampagesize(void);
* Parameters:
* @size: the size in bytes of the ram block
* @mr: the memory region where the ram block is
- * @ram_flags: specify the properties of the ram block, which can be one
- * or bit-or of following values
- * - RAM_SHARED: mmap the backing file or device with MAP_SHARED
- * - RAM_PMEM: the backend @mem_path or @fd is persistent memory
- * Other bits are ignored.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @mem_path or @fd: specify the backing file or device
+ * @offset: Offset into target file
* @errp: pointer to Error*, to store an error if it happens
*
* Return:
@@ -97,14 +121,14 @@ long qemu_getrampagesize(void);
*/
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
uint32_t ram_flags, const char *mem_path,
- Error **errp);
+ off_t offset, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
- uint32_t ram_flags, int fd,
+ uint32_t ram_flags, int fd, off_t offset,
Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp);
-RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
+RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
void (*resized)(const char*,
@@ -115,11 +139,17 @@ void qemu_ram_free(RAMBlock *block);
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
+
+/* Write back the whole block of memory to its backing store */
+static inline void qemu_ram_block_writeback(RAMBlock *block)
+{
+ qemu_ram_msync(block, 0, block->used_length);
+}
+
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
-
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
@@ -134,30 +164,29 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
-
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_bit(blocks->blocks[idx],
+ num, offset);
+ if (found < num) {
+ dirty = true;
+ break;
+ }
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
- unsigned long num = next - base;
- unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
- if (found < num) {
- dirty = true;
- break;
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
}
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
}
- rcu_read_unlock();
-
return dirty;
}
@@ -175,9 +204,9 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
@@ -197,8 +226,6 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
base += DIRTY_MEMORY_BLOCK_SIZE;
}
- rcu_read_unlock();
-
return dirty;
}
@@ -250,13 +277,11 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
set_bit_atomic(offset, blocks->blocks[idx]);
-
- rcu_read_unlock();
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -275,53 +300,60 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
+ }
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
- }
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+ offset, next - page);
+ }
- if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
- offset, next - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
- offset, next - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
- offset, next - page);
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
}
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
}
- rcu_read_unlock();
-
xen_hvm_modified_memory(start, length);
}
#if !defined(_WIN32)
-static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
- ram_addr_t start,
- ram_addr_t pages)
+
+/*
+ * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
+ * the number of dirty pages in @bitmap passed as argument. On the other hand,
+ * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
+ * weren't set in the global migration bitmap.
+ */
+static inline
+uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+ ram_addr_t start,
+ ram_addr_t pages)
{
unsigned long i, j;
- unsigned long page_number, c;
+ unsigned long page_number, c, nbits;
hwaddr addr;
ram_addr_t ram_addr;
+ uint64_t num_dirty = 0;
unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
- unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
+ unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
@@ -337,34 +369,52 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
DIRTY_MEMORY_BLOCK_SIZE);
- rcu_read_lock();
-
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
- }
-
- for (k = 0; k < nr; k++) {
- if (bitmap[k]) {
- unsigned long temp = leul_to_cpu(bitmap[k]);
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] =
+ qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ }
- atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
- atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
- if (tcg_enabled()) {
- atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
+ for (k = 0; k < nr; k++) {
+ if (bitmap[k]) {
+ unsigned long temp = leul_to_cpu(bitmap[k]);
+
+ nbits = ctpopl(temp);
+ qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+
+ if (global_dirty_tracking) {
+ qatomic_or(
+ &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+ temp);
+ if (unlikely(
+ global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
+ }
+
+ num_dirty += nbits;
+
+ if (tcg_enabled()) {
+ qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+ temp);
+ }
}
- }
- if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
- offset = 0;
- idx++;
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
}
- rcu_read_unlock();
-
xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+
+ if (!global_dirty_tracking) {
+ clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
+ }
+
/*
* bitmap-traveling is faster than memory-traveling (for addr...)
* especially when most of the memory is not dirty.
@@ -372,6 +422,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
for (i = 0; i < len; i++) {
if (bitmap[i] != 0) {
c = leul_to_cpu(bitmap[i]);
+ nbits = ctpopl(c);
+ if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
+ num_dirty += nbits;
do {
j = ctzl(c);
c &= ~(1ul << j);
@@ -384,15 +439,25 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
}
}
}
+
+ return num_dirty;
}
#endif /* not _WIN32 */
+static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
+ ram_addr_t length)
+{
+ if (tcg_enabled()) {
+ tlb_reset_dirty_range_all(start, length);
+ }
+}
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
- (ram_addr_t start, ram_addr_t length, unsigned client);
+ (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
@@ -407,11 +472,11 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
}
+/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
ram_addr_t start,
- ram_addr_t length,
- uint64_t *real_dirty_pages)
+ ram_addr_t length)
{
ram_addr_t addr;
unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
@@ -430,16 +495,13 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
DIRTY_MEMORY_BLOCK_SIZE);
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
- rcu_read_lock();
-
- src = atomic_rcu_read(
+ src = qatomic_rcu_read(
&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
if (src[idx][offset]) {
- unsigned long bits = atomic_xchg(&src[idx][offset], 0);
+ unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
- *real_dirty_pages += ctpopl(bits);
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
@@ -451,8 +513,22 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
idx++;
}
}
+ if (num_dirty) {
+ cpu_physical_memory_dirty_bits_cleared(start, length);
+ }
- rcu_read_unlock();
+ if (rb->clear_bmap) {
+ /*
+ * Postpone the dirty bitmap clear to the point before we
+ * really send the pages, also we will split the clear
+ * dirty procedure into smaller chunks.
+ */
+ clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
+ length >> TARGET_PAGE_BITS);
+ } else {
+ /* Slow path - still do that in a huge chunk */
+ memory_region_clear_dirty_bitmap(rb->mr, start, length);
+ }
} else {
ram_addr_t offset = rb->offset;
@@ -461,7 +537,6 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
- *real_dirty_pages += 1;
long k = (start + addr) >> TARGET_PAGE_BITS;
if (!test_and_set_bit(k, dest)) {
num_dirty++;
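
The clear_bmap helpers above let one bit stand for 2^clear_bmap_shift guest pages, so a later log-clear can be issued per chunk rather than per page. Below is a standalone sketch of the same arithmetic; DIV_ROUND_UP is redefined locally and the shift value of 18 is an assumption for illustration, not something taken from this patch.

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* One clear_bmap bit covers (1 << shift) guest pages. */
    static long clear_bmap_size(uint64_t pages, uint8_t shift)
    {
        return DIV_ROUND_UP(pages, 1UL << shift);
    }

    int main(void)
    {
        uint64_t pages = (4ULL << 30) >> 12;   /* 4 GiB guest RAM, 4 KiB pages */
        uint8_t shift = 18;                    /* assumed value for illustration */

        /* 1M pages at 256K pages per bit -> 4 bits in the clear bitmap */
        printf("clear bitmap needs %ld bits\n", clear_bmap_size(pages, shift));
        return 0;
    }
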
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
new file mode 100644
index 0000000000..0babd105c0
--- /dev/null
+++ b/include/exec/ramblock.h
@@ -0,0 +1,94 @@
+/*
+ * Declarations for cpu physical memory functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef QEMU_EXEC_RAMBLOCK_H
+#define QEMU_EXEC_RAMBLOCK_H
+
+#ifndef CONFIG_USER_ONLY
+#include "cpu-common.h"
+#include "qemu/rcu.h"
+#include "exec/ramlist.h"
+
+struct RAMBlock {
+ struct rcu_head rcu;
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by the BQL. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ QLIST_ENTRY(RAMBlock) next;
+ QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
+ int fd;
+ uint64_t fd_offset;
+ int guest_memfd;
+ size_t page_size;
+ /* dirty bitmap used during migration */
+ unsigned long *bmap;
+
+ /*
+ * Below fields are only used by mapped-ram migration
+ */
+ /* bitmap of pages present in the migration file */
+ unsigned long *file_bmap;
+ /*
+     * offset in the file at which pages belonging to this ramblock are
+     * saved; used only during migration to a file.
+ */
+ off_t bitmap_offset;
+ uint64_t pages_offset;
+
+ /* Bitmap of already received pages. Only used on destination side. */
+ unsigned long *receivedmap;
+
+ /*
+ * bitmap to track already cleared dirty bitmap. When the bit is
+ * set, it means the corresponding memory chunk needs a log-clear.
+     * Set this field to non-NULL to enable the capability to postpone
+ * and split clearing of dirty bitmap on the remote node (e.g.,
+ * KVM). The bitmap will be set only when doing global sync.
+ *
+     * It is only used on the source side of ram migration, and it is
+ * protected by the global ram_state.bitmap_mutex.
+ *
+     * NOTE: this bitmap is different from the other bitmaps
+ * in that one bit can represent multiple guest pages (which is
+ * decided by the `clear_bmap_shift' variable below). On
+ * destination side, this should always be NULL, and the variable
+ * `clear_bmap_shift' is meaningless.
+ */
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
+
+ /*
+ * RAM block length that corresponds to the used_length on the migration
+     * source (after RAM block sizes were synchronized). In particular, after
+ * starting to run the guest, used_length and postcopy_length can differ.
+ * Used to register/unregister uffd handlers and as the size of the received
+ * bitmap. Receiving any page beyond this length will bail out, as it
+ * could not have been valid on the source.
+ */
+ ram_addr_t postcopy_length;
+};
+#endif
+#endif
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
index bc4faa1b00..2ad2a81acc 100644
--- a/include/exec/ramlist.h
+++ b/include/exec/ramlist.h
@@ -19,7 +19,7 @@ typedef struct RAMBlockNotifier RAMBlockNotifier;
* rcu_read_lock();
*
* DirtyMemoryBlocks *blocks =
- * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ * qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
*
* ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
* unsigned long *block = blocks.blocks[idx];
@@ -65,16 +65,21 @@ void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);
struct RAMBlockNotifier {
- void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size);
- void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size);
+ void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size);
+ void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size);
+ void (*ram_block_resized)(RAMBlockNotifier *n, void *host, size_t old_size,
+ size_t new_size);
QLIST_ENTRY(RAMBlockNotifier) next;
};
void ram_block_notifier_add(RAMBlockNotifier *n);
void ram_block_notifier_remove(RAMBlockNotifier *n);
-void ram_block_notify_add(void *host, size_t size);
-void ram_block_notify_remove(void *host, size_t size);
+void ram_block_notify_add(void *host, size_t size, size_t max_size);
+void ram_block_notify_remove(void *host, size_t size, size_t max_size);
+void ram_block_notify_resize(void *host, size_t old_size, size_t new_size);
-void ram_block_dump(Monitor *mon);
+GString *ram_block_format(void);
#endif /* RAMLIST_H */
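
With the widened RAMBlockNotifier above, a backend that tracks host mappings now also learns the reserved maximum size and is told about resizes. A hedged sketch of registering such a notifier from inside QEMU follows; the callback names, the log format and the init hook are illustrative only.

    #include "qemu/osdep.h"
    #include "exec/ramlist.h"

    static void my_ram_added(RAMBlockNotifier *n, void *host,
                             size_t size, size_t max_size)
    {
        /* size is the currently used length, max_size the reserved maximum */
        fprintf(stderr, "ram block added:   host=%p used=%zu max=%zu\n",
                host, size, max_size);
    }

    static void my_ram_removed(RAMBlockNotifier *n, void *host,
                               size_t size, size_t max_size)
    {
        fprintf(stderr, "ram block removed: host=%p used=%zu max=%zu\n",
                host, size, max_size);
    }

    static void my_ram_resized(RAMBlockNotifier *n, void *host,
                               size_t old_size, size_t new_size)
    {
        fprintf(stderr, "ram block resized: host=%p %zu -> %zu\n",
                host, old_size, new_size);
    }

    static RAMBlockNotifier my_ram_notifier = {
        .ram_block_added   = my_ram_added,
        .ram_block_removed = my_ram_removed,
        .ram_block_resized = my_ram_resized,
    };

    static void my_backend_init(void)
    {
        ram_block_notifier_add(&my_ram_notifier);
    }
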
diff --git a/include/exec/replay-core.h b/include/exec/replay-core.h
new file mode 100644
index 0000000000..244c77acce
--- /dev/null
+++ b/include/exec/replay-core.h
@@ -0,0 +1,80 @@
+/*
+ * QEMU replay core API
+ *
+ * Copyright (c) 2010-2015 Institute for System Programming
+ * of the Russian Academy of Sciences.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_REPLAY_H
+#define EXEC_REPLAY_H
+
+#include "qapi/qapi-types-replay.h"
+
+extern ReplayMode replay_mode;
+
+/* Replay process control functions */
+
+/* Enables recording or saving event log with specified parameters */
+void replay_configure(struct QemuOpts *opts);
+/* Initializes timers used for snapshotting and enables events recording */
+void replay_start(void);
+/* Closes replay log file and frees other resources. */
+void replay_finish(void);
+/* Adds replay blocker with the specified error description */
+void replay_add_blocker(const char *feature);
+/* Returns name of the replay log file */
+const char *replay_get_filename(void);
+
+/*
+ * Start making one step in backward direction.
+ * Used by gdbstub for backwards debugging.
+ * Returns true on success.
+ */
+bool replay_reverse_step(void);
+/*
+ * Start searching the last breakpoint/watchpoint.
+ * Used by gdbstub for backwards debugging.
+ * Returns true if the process successfully started.
+ */
+bool replay_reverse_continue(void);
+/*
+ * Returns true if replay module is processing
+ * reverse_continue or reverse_step request
+ */
+bool replay_running_debug(void);
+/* Called in reverse debugging mode to collect breakpoint information */
+void replay_breakpoint(void);
+/* Called when gdb is attached to gdbstub */
+void replay_gdb_attached(void);
+
+/* Interrupts and exceptions */
+
+/* Called by exception handler to write or read exception processing events */
+bool replay_exception(void);
+/*
+ * Used to determine that exception is pending.
+ * Does not proceed to the next event in the log.
+ */
+bool replay_has_exception(void);
+/*
+ * Called by interrupt handlers to write or read interrupt processing events.
+ * Returns true if interrupt should be processed.
+ */
+bool replay_interrupt(void);
+/*
+ * Tries to read interrupt event from the file.
+ * Returns true, when interrupt request is pending.
+ */
+bool replay_has_interrupt(void);
+
+/* Processing data from random generators */
+
+/* Saves the values from the random number generator */
+void replay_save_random(int ret, void *buf, size_t len);
+/* Loads the saved values for the random number generator */
+int replay_read_random(void *buf, size_t len);
+
+#endif
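
A rough sketch of how the interrupt hooks above are meant to be used by a caller inside QEMU: delivery is gated on replay_interrupt(), which writes the event in record mode and reads it back in replay mode. deliver_irq_to_cpu() is a hypothetical stand-in for the target-specific delivery code.

    #include "qemu/osdep.h"
    #include "exec/replay-core.h"

    /* Stand-in for the target-specific interrupt delivery code. */
    static void deliver_irq_to_cpu(void)
    {
    }

    static void maybe_deliver_irq(void)
    {
        /*
         * replay_interrupt() writes (record) or reads (replay) the interrupt
         * event and returns true if the interrupt should be processed now.
         */
        if (replay_interrupt()) {
            deliver_irq_to_cpu();
        }
    }
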
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
deleted file mode 100644
index 7eefad8f39..0000000000
--- a/include/exec/softmmu-semi.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Helper routines to provide target memory access for semihosting
- * syscalls in system emulation mode.
- *
- * Copyright (c) 2007 CodeSourcery.
- *
- * This code is licensed under the GPL
- */
-
-#ifndef SOFTMMU_SEMI_H
-#define SOFTMMU_SEMI_H
-
-static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
-{
- uint64_t val;
-
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 0);
- return tswap64(val);
-}
-
-static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
-{
- uint32_t val;
-
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 0);
- return tswap32(val);
-}
-
-static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
-{
- uint8_t val;
-
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &val, 1, 0);
- return val;
-}
-
-#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; })
-#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
-#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
-#define get_user_ual(arg, p) get_user_u32(arg, p)
-
-static inline void softmmu_tput64(CPUArchState *env,
- target_ulong addr, uint64_t val)
-{
- val = tswap64(val);
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 1);
-}
-
-static inline void softmmu_tput32(CPUArchState *env,
- target_ulong addr, uint32_t val)
-{
- val = tswap32(val);
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 1);
-}
-#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; })
-#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
-#define put_user_ual(arg, p) put_user_u32(arg, p)
-
-static void *softmmu_lock_user(CPUArchState *env,
- target_ulong addr, target_ulong len, int copy)
-{
- uint8_t *p;
- /* TODO: Make this something that isn't fixed size. */
- p = malloc(len);
- if (p && copy) {
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 0);
- }
- return p;
-}
-#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
-static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
-{
- char *p;
- char *s;
- uint8_t c;
- /* TODO: Make this something that isn't fixed size. */
- s = p = malloc(1024);
- if (!s) {
- return NULL;
- }
- do {
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &c, 1, 0);
- addr++;
- *(p++) = c;
- } while (c);
- return s;
-}
-#define lock_user_string(p) softmmu_lock_user_string(env, p)
-static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
- target_ulong len)
-{
- if (len) {
- cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 1);
- }
- free(p);
-}
-#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
-
-#endif
diff --git a/include/exec/target_long.h b/include/exec/target_long.h
new file mode 100644
index 0000000000..3cd8e26a23
--- /dev/null
+++ b/include/exec/target_long.h
@@ -0,0 +1,44 @@
+/*
+ * Target Long Definitions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _TARGET_LONG_H_
+#define _TARGET_LONG_H_
+
+/*
+ * Usually this should only be included via cpu-defs.h; however, in
+ * certain cases where we want to build only two versions of a binary
+ * object we can include it directly. In that case the build system
+ * must ensure TARGET_LONG_BITS is defined directly.
+ */
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS not defined
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#define MO_TL MO_32
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#define MO_TL MO_64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#endif /* _TARGET_LONG_H_ */
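
Because target_ulong changes width with TARGET_LONG_BITS, guest addresses should be printed with the TARGET_FMT_* macros rather than a fixed conversion. A standalone sketch that mirrors the 64-bit branch of the header locally so it compiles outside QEMU:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Illustration only: mimic target_long.h for a 64-bit target. */
    #define TARGET_LONG_BITS 64
    #if TARGET_LONG_BITS == 64
    typedef uint64_t target_ulong;
    #define TARGET_FMT_lx "%016" PRIx64
    #else
    typedef uint32_t target_ulong;
    #define TARGET_FMT_lx "%08x"
    #endif

    int main(void)
    {
        target_ulong pc = 0xffffffc010203040ULL;

        /* Prints a zero-padded address whose width matches the target. */
        printf("pc=" TARGET_FMT_lx "\n", pc);
        return 0;
    }
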
diff --git a/include/exec/target_page.h b/include/exec/target_page.h
index 96726c36a4..98ffbb5c23 100644
--- a/include/exec/target_page.h
+++ b/include/exec/target_page.h
@@ -15,7 +15,9 @@
#define EXEC_TARGET_PAGE_H
size_t qemu_target_page_size(void);
+int qemu_target_page_mask(void);
int qemu_target_page_bits(void);
int qemu_target_page_bits_min(void);
+size_t qemu_target_pages_to_MiB(size_t pages);
#endif
diff --git a/include/exec/tb-flush.h b/include/exec/tb-flush.h
new file mode 100644
index 0000000000..142c240d94
--- /dev/null
+++ b/include/exec/tb-flush.h
@@ -0,0 +1,28 @@
+/*
+ * tb-flush prototype for use by the rest of the system.
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef _TB_FLUSH_H_
+#define _TB_FLUSH_H_
+
+/**
+ * tb_flush() - flush all translation blocks
+ * @cs: CPUState (must be valid, but treated as anonymous pointer)
+ *
+ * Used to flush all the translation blocks in the system. Sometimes
+ * it is simpler to flush everything than work out which individual
+ * translations are now invalid and ensure they are not called
+ * anymore.
+ *
+ * tb_flush() takes care of running the flush in an exclusive context
+ * if it is not already running in one. This means no guest code will
+ * run until the flush completes.
+ */
+void tb_flush(CPUState *cs);
+
+void tcg_flush_jmp_cache(CPUState *cs);
+
+#endif /* _TB_FLUSH_H_ */
diff --git a/include/exec/tb-hash.h b/include/exec/tb-hash.h
deleted file mode 100644
index 4f3a37d927..0000000000
--- a/include/exec/tb-hash.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * internal execution defines for qemu
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef EXEC_TB_HASH_H
-#define EXEC_TB_HASH_H
-
-#include "qemu/xxhash.h"
-
-#ifdef CONFIG_SOFTMMU
-
-/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
- addresses on the same page. The top bits are the same. This allows
- TLB invalidation to quickly clear a subset of the hash table. */
-#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
-#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
-#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
-#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
-
-static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
-{
- target_ulong tmp;
- tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
-}
-
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
-{
- target_ulong tmp;
- tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
- | (tmp & TB_JMP_ADDR_MASK));
-}
-
-#else
-
-/* In user-mode we can get better hashing because we do not have a TLB */
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
-{
- return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
-}
-
-#endif /* CONFIG_SOFTMMU */
-
-static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
- uint32_t cf_mask, uint32_t trace_vcpu_dstate)
-{
- return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
-}
-
-#endif
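
The removed tb_jmp_cache_hash_func() was built so that all translations on one guest page share the upper index bits, letting TLB invalidation clear a contiguous slice of the jump cache. A standalone re-derivation of that property; TARGET_PAGE_BITS and TB_JMP_CACHE_BITS are fixed to assumed example values (12 and 12), not taken from any particular target.

    #include <stdio.h>
    #include <stdint.h>

    /* Example values only; the real ones come from the target/build config. */
    #define TARGET_PAGE_BITS  12
    #define TB_JMP_CACHE_BITS 12
    #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

    #define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
    #define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
    #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
    #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

    static unsigned int tb_jmp_cache_hash_func(uint64_t pc)
    {
        uint64_t tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
        return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
                | (tmp & TB_JMP_ADDR_MASK));
    }

    int main(void)
    {
        /* Two PCs on the same 4 KiB page, one on a different page. */
        unsigned int a = tb_jmp_cache_hash_func(0x400000);
        unsigned int b = tb_jmp_cache_hash_func(0x400040);
        unsigned int c = tb_jmp_cache_hash_func(0x401000);

        /* a and b share the upper TB_JMP_PAGE_BITS; c generally does not. */
        printf("same page:  %#x %#x\n", a & TB_JMP_PAGE_MASK, b & TB_JMP_PAGE_MASK);
        printf("other page: %#x\n", c & TB_JMP_PAGE_MASK);
        return 0;
    }
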
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
deleted file mode 100644
index 492cb68289..0000000000
--- a/include/exec/tb-lookup.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
- *
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#ifndef EXEC_TB_LOOKUP_H
-#define EXEC_TB_LOOKUP_H
-
-#ifdef NEED_CPU_H
-#include "cpu.h"
-#else
-#include "exec/poison.h"
-#endif
-
-#include "exec/exec-all.h"
-#include "exec/tb-hash.h"
-
-/* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *
-tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
- uint32_t *flags, uint32_t cf_mask)
-{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb;
- uint32_t hash;
-
- cpu_get_tb_cpu_state(env, pc, cs_base, flags);
- hash = tb_jmp_cache_hash_func(*pc);
- tb = atomic_rcu_read(&cpu->tb_jmp_cache[hash]);
- if (likely(tb &&
- tb->pc == *pc &&
- tb->cs_base == *cs_base &&
- tb->flags == *flags &&
- tb->trace_vcpu_dstate == *cpu->trace_dstate &&
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
- return tb;
- }
- tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
- if (tb == NULL) {
- return NULL;
- }
- atomic_set(&cpu->tb_jmp_cache[hash], tb);
- return tb;
-}
-
-#endif /* EXEC_TB_LOOKUP_H */
diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
new file mode 100644
index 0000000000..dc5a5faa0b
--- /dev/null
+++ b/include/exec/tlb-common.h
@@ -0,0 +1,56 @@
+/*
+ * Common definitions for the softmmu tlb
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef EXEC_TLB_COMMON_H
+#define EXEC_TLB_COMMON_H 1
+
+#define CPU_TLB_ENTRY_BITS 5
+
+/* Minimalized TLB entry for use by TCG fast path. */
+typedef union CPUTLBEntry {
+ struct {
+ uint64_t addr_read;
+ uint64_t addr_write;
+ uint64_t addr_code;
+ /*
+ * Addend to virtual address to get host address. IO accesses
+ * use the corresponding iotlb value.
+ */
+ uintptr_t addend;
+ };
+ /*
+ * Padding to get a power of two size, as well as index
+ * access to addr_{read,write,code}.
+ */
+ uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
+} CPUTLBEntry;
+
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
+
+/*
+ * Data elements that are per MMU mode, accessed by the fast path.
+ * The structure is aligned to aid loading the pair with one insn.
+ */
+typedef struct CPUTLBDescFast {
+ /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
+ uintptr_t mask;
+ /* The array of tlb entries itself. */
+ CPUTLBEntry *table;
+} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
+
+#endif /* EXEC_TLB_COMMON_H */
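
A rough illustration of why CPUTLBDescFast.mask stores (n_entries - 1) << CPU_TLB_ENTRY_BITS: masking an index that has already been scaled by the entry size yields a byte offset which can be added straight to the table pointer. The index computation below is a simplified stand-in for the real TCG fast-path expression.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define CPU_TLB_ENTRY_BITS 5                 /* log2 of the entry size */
    #define TLB_ENTRY_SIZE (1 << CPU_TLB_ENTRY_BITS)

    int main(void)
    {
        size_t n_entries = 256;                  /* must be a power of two */
        uintptr_t mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        uint8_t *table = calloc(n_entries, TLB_ENTRY_SIZE);

        /* Simplified: pretend the hash of a guest address is its page number. */
        uint64_t addr = 0x12345678;
        uintptr_t hash = addr >> 12;

        /*
         * Scale the hash to a byte offset and mask it; the result indexes
         * the entry without a separate "index * sizeof(entry)" multiply.
         */
        uint8_t *entry = table + ((hash << CPU_TLB_ENTRY_BITS) & mask);

        printf("entry %zu of %zu\n",
               (size_t)(entry - table) >> CPU_TLB_ENTRY_BITS, n_entries);
        free(table);
        return 0;
    }
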
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
new file mode 100644
index 0000000000..85c9460c7c
--- /dev/null
+++ b/include/exec/translate-all.h
@@ -0,0 +1,33 @@
+/*
+ * Translated block handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TRANSLATE_ALL_H
+#define TRANSLATE_ALL_H
+
+#include "exec/exec-all.h"
+
+
+/* translate-all.c */
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
+
+#ifdef CONFIG_USER_ONLY
+void page_protect(tb_page_addr_t page_addr);
+int page_unprotect(target_ulong address, uintptr_t pc);
+#endif
+
+#endif /* TRANSLATE_ALL_H */
diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h
new file mode 100644
index 0000000000..48211c890a
--- /dev/null
+++ b/include/exec/translation-block.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Definition of TranslationBlock.
+ * Copyright (c) 2003 Fabrice Bellard
+ */
+
+#ifndef EXEC_TRANSLATION_BLOCK_H
+#define EXEC_TRANSLATION_BLOCK_H
+
+#include "qemu/thread.h"
+#include "exec/cpu-common.h"
+#ifdef CONFIG_USER_ONLY
+#include "qemu/interval-tree.h"
+#endif
+
+/*
+ * Page tracking code uses ram addresses in system mode, and virtual
+ * addresses in userspace mode. Define tb_page_addr_t to be an
+ * appropriate type.
+ */
+#if defined(CONFIG_USER_ONLY)
+typedef vaddr tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
+#else
+typedef ram_addr_t tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#endif
+
+/*
+ * Translation Cache-related fields of a TB.
+ * This struct exists just for convenience; we keep track of TB's in a binary
+ * search tree, and the only fields needed to compare TB's in the tree are
+ * @ptr and @size.
+ * Note: the address of search data can be obtained by adding @size to @ptr.
+ */
+struct tb_tc {
+ const void *ptr; /* pointer to the translated code */
+ size_t size;
+};
+
+struct TranslationBlock {
+ /*
+ * Guest PC corresponding to this block. This must be the true
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
+ * privilege, must store those bits elsewhere.
+ *
+ * If CF_PCREL, the opcodes for the TranslationBlock are written
+ * such that the TB is associated only with the physical page and
+ * may be run in any virtual address context. In this case, PC
+ * must always be taken from ENV in a target-specific manner.
+ * Unwind information is taken as offsets from the page, to be
+ * deposited into the "current" PC.
+ */
+ vaddr pc;
+
+ /*
+ * Target-specific data associated with the TranslationBlock, e.g.:
+ * x86: the original user, the Code Segment virtual base,
+ * arm: an extension of tb->flags,
+ * s390x: instruction data for EXECUTE,
+ * sparc: the next pc of the instruction queue (for delay slots).
+ */
+ uint64_t cs_base;
+
+ uint32_t flags; /* flags defining in which context the code was generated */
+ uint32_t cflags; /* compile flags */
+
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
+#define CF_COUNT_MASK 0x000001ff
+#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
+#define CF_MEMI_ONLY 0x00001000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT 0x00002000
+#define CF_INVALID 0x00004000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL 0x00008000 /* Generate code for a parallel context */
+#define CF_NOIRQ 0x00010000 /* Generate an uninterruptible TB */
+#define CF_PCREL 0x00020000 /* Opcodes in TB are PC-relative */
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+#define CF_CLUSTER_SHIFT 24
+
+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
+ struct tb_tc tc;
+
+ /*
+ * Track tb_page_addr_t intervals that intersect this TB.
+ * For user-only, the virtual addresses are always contiguous,
+ * and we use a unified interval tree. For system, we use a
+ * linked list headed in each PageDesc. Within the list, the lsb
+ * of the previous pointer tells the index of page_next[], and the
+ * list is protected by the PageDesc lock(s).
+ */
+#ifdef CONFIG_USER_ONLY
+ IntervalTreeNode itree;
+#else
+ uintptr_t page_next[2];
+ tb_page_addr_t page_addr[2];
+#endif
+
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+ QemuSpin jmp_lock;
+
+ /* The following data are used to directly call another TB from
+ * the code of this one. This can be done either by emitting direct or
+ * indirect native jump instructions. These jumps are reset so that the TB
+ * just continues its execution. The TB can be linked to another one by
+ * setting one of the jump targets (or patching the jump instruction). Only
+ * two of such jumps are supported.
+ */
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
+ uint16_t jmp_reset_offset[2]; /* offset of original jump target */
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
+ uintptr_t jmp_target_addr[2]; /* target address */
+
+ /*
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+ * Each TB can have two outgoing jumps, and therefore can participate
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
+ * significant bit (LSB) of the pointers in these lists is used to encode
+ * which of the two list entries is to be used in the pointed TB.
+ *
+ * List traversals are protected by jmp_lock. The destination TB of each
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+ * can be acquired from any origin TB.
+ *
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+ * being invalidated, so that no further outgoing jumps from it can be set.
+ *
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+ * to a destination TB that has CF_INVALID set.
+ */
+ uintptr_t jmp_list_head;
+ uintptr_t jmp_list_next[2];
+ uintptr_t jmp_dest[2];
+};
+
+/* The alignment given to TranslationBlock during allocation. */
+#define CODE_GEN_ALIGN 16
+
+/* Hide the qatomic_read to make code a little easier on the eyes */
+static inline uint32_t tb_cflags(const TranslationBlock *tb)
+{
+ return qatomic_read(&tb->cflags);
+}
+
+#endif /* EXEC_TRANSLATION_BLOCK_H */
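
The cflags word packs several independent fields behind the CF_* masks above. A standalone sketch of decoding it; the mask values are copied from the header so the snippet compiles on its own.

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Values copied from the CF_* definitions in translation-block.h. */
    #define CF_COUNT_MASK    0x000001ff
    #define CF_PCREL         0x00020000
    #define CF_CLUSTER_MASK  0xff000000
    #define CF_CLUSTER_SHIFT 24

    int main(void)
    {
        uint32_t cflags = (3u << CF_CLUSTER_SHIFT) | CF_PCREL | 17;

        printf("insn count : %" PRIu32 "\n", cflags & CF_COUNT_MASK);
        printf("cluster id : %" PRIu32 "\n",
               (cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT);
        printf("pc-relative: %s\n", (cflags & CF_PCREL) ? "yes" : "no");
        return 0;
    }
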
diff --git a/include/exec/translator.h b/include/exec/translator.h
index 71e7b2c347..2c4fb818e7 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -18,10 +18,22 @@
* member in your target-specific DisasContext.
*/
+#include "qemu/bswap.h"
+#include "exec/cpu_ldst.h" /* for abi_ptr */
-#include "exec/exec-all.h"
-#include "tcg/tcg.h"
-
+/**
+ * gen_intermediate_code
+ * @cpu: cpu context
+ * @tb: translation block
+ * @max_insns: max number of instructions to translate
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ *
+ * This function must be provided by the target, which should create
+ * the target-specific DisasContext, and then invoke translator_loop.
+ */
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
+ vaddr pc, void *host_pc);
/**
* DisasJumpType:
@@ -60,17 +72,24 @@ typedef enum DisasJumpType {
* @num_insns: Number of translated instructions (including current).
* @max_insns: Maximum number of instructions to be translated in this TB.
* @singlestep_enabled: "Hardware" single stepping enabled.
+ * @saved_can_do_io: Known value of cpu->neg.can_do_io, or -1 for unknown.
+ * @plugin_enabled: TCG plugin enabled in this TB.
+ * @insn_start: The last op emitted by the insn_start hook,
+ * which is expected to be INDEX_op_insn_start.
*
* Architecture-agnostic disassembly context.
*/
typedef struct DisasContextBase {
TranslationBlock *tb;
- target_ulong pc_first;
- target_ulong pc_next;
+ vaddr pc_first;
+ vaddr pc_next;
DisasJumpType is_jmp;
int num_insns;
int max_insns;
bool singlestep_enabled;
+ bool plugin_enabled;
+ struct TCGOp *insn_start;
+ void *host_addr[2];
} DisasContextBase;
/**
@@ -86,15 +105,6 @@ typedef struct DisasContextBase {
* @insn_start:
* Emit the tcg_gen_insn_start opcode.
*
- * @breakpoint_check:
- * When called, the breakpoint has already been checked to match the PC,
- * but the target may decide the breakpoint missed the address
- * (e.g., due to conditions encoded in their flags). Return true to
- * indicate that the breakpoint did hit, in which case no more breakpoints
- * are checked. If the breakpoint did hit, emit any code required to
- * signal the exception, and set db->is_jmp as necessary to terminate
- * the main loop.
- *
* @translate_insn:
* Disassemble one instruction and set db->pc_next for the start
* of the following instruction. Set db->is_jmp as necessary to
@@ -110,19 +120,20 @@ typedef struct TranslatorOps {
void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
void (*tb_start)(DisasContextBase *db, CPUState *cpu);
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
- bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
- const CPUBreakpoint *bp);
void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
- void (*disas_log)(const DisasContextBase *db, CPUState *cpu);
+ void (*disas_log)(const DisasContextBase *db, CPUState *cpu, FILE *f);
} TranslatorOps;
/**
* translator_loop:
- * @ops: Target-specific operations.
- * @db: Disassembly context.
* @cpu: Target vCPU.
* @tb: Translation block.
+ * @max_insns: Maximum number of insns to translate.
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ * @ops: Target-specific operations.
+ * @db: Disassembly context.
*
* Generic translator loop.
*
@@ -136,9 +147,99 @@ typedef struct TranslatorOps {
* - When single-stepping is enabled (system-wide or on the current vCPU).
* - When too many instructions have been translated.
*/
-void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
- CPUState *cpu, TranslationBlock *tb);
+void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
+ vaddr pc, void *host_pc, const TranslatorOps *ops,
+ DisasContextBase *db);
+
+/**
+ * translator_use_goto_tb
+ * @db: Disassembly context
+ * @dest: target pc of the goto
+ *
+ * Return true if goto_tb is allowed between the current TB
+ * and the destination PC.
+ */
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest);
-void translator_loop_temp_check(DisasContextBase *db);
+/**
+ * translator_io_start
+ * @db: Disassembly context
+ *
+ * If icount is enabled, set cpu->can_do_io, adjust db->is_jmp to
+ * DISAS_TOO_MANY if it is still DISAS_NEXT, and return true.
+ * Otherwise return false.
+ */
+bool translator_io_start(DisasContextBase *db);
+
+/*
+ * Translator Load Functions
+ *
+ * These are intended to replace the direct usage of the cpu_ld*_code
+ * functions and are mandatory for front-ends that have been migrated
+ * to the common translator_loop. These functions are only intended
+ * to be called from the translation stage and should not be called
+ * from helper functions. Those functions should be converted to encode
+ * the relevant information at translation time.
+ */
+
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+
+static inline uint16_t
+translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint16_t ret = translator_lduw(env, db, pc);
+ if (do_swap) {
+ ret = bswap16(ret);
+ }
+ return ret;
+}
+
+static inline uint32_t
+translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint32_t ret = translator_ldl(env, db, pc);
+ if (do_swap) {
+ ret = bswap32(ret);
+ }
+ return ret;
+}
+
+static inline uint64_t
+translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint64_t ret = translator_ldq(env, db, pc);
+ if (do_swap) {
+ ret = bswap64(ret);
+ }
+ return ret;
+}
+
+/**
+ * translator_fake_ldb - fake instruction load
+ * @insn8: byte of instruction
+ * @pc: program counter of instruction
+ *
+ * This is a special case helper used where the instruction we are
+ * about to translate comes from somewhere else (e.g. being
+ * re-synthesised for s390x "ex"). It ensures we update other areas of
+ * the translator with details of the executed instruction.
+ */
+void translator_fake_ldb(uint8_t insn8, abi_ptr pc);
+
+/*
+ * Return whether addr is on the same page as where disassembly started.
+ * Translators can use this to enforce the rule that only single-insn
+ * translation blocks are allowed to cross page boundaries.
+ */
+static inline bool is_same_page(const DisasContextBase *db, vaddr addr)
+{
+ return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
+}
-#endif /* EXEC__TRANSLATOR_H */
+#endif /* EXEC__TRANSLATOR_H */
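
A hedged sketch of how a converted front end combines the translator load functions with is_same_page(): fetch the first halfword, and only fetch a second one when it stays on the same guest page, otherwise end the TB. The fictional 16/32-bit encoding and the helper name are made up for illustration; only translator_lduw(), is_same_page() and DISAS_TOO_MANY come from the header.

    #include "qemu/osdep.h"
    #include "exec/translator.h"

    /*
     * Hypothetical helper for a fictional variable-length target: fetch one
     * instruction, honouring the single-page rule enforced by is_same_page().
     * Returns the number of bytes consumed, or 0 if the TB must end here.
     */
    static int fetch_one_insn(CPUArchState *env, DisasContextBase *db,
                              uint32_t *insn)
    {
        uint16_t first = translator_lduw(env, db, db->pc_next);

        if ((first & 0x3) != 0x3) {
            *insn = first;                       /* 16-bit encoding */
            return 2;
        }
        if (!is_same_page(db, db->pc_next + 2)) {
            db->is_jmp = DISAS_TOO_MANY;         /* stop at the page boundary */
            return 0;
        }
        *insn = ((uint32_t)translator_lduw(env, db, db->pc_next + 2) << 16) | first;
        return 4;
    }
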
diff --git a/include/exec/tswap.h b/include/exec/tswap.h
new file mode 100644
index 0000000000..68944a880b
--- /dev/null
+++ b/include/exec/tswap.h
@@ -0,0 +1,72 @@
+/*
+ * Macros for swapping a value if the endianness is different
+ * between the target and the host.
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef TSWAP_H
+#define TSWAP_H
+
+#include "hw/core/cpu.h"
+#include "qemu/bswap.h"
+
+/*
+ * If we're in target-specific code, we can hard-code the swapping
+ * condition, otherwise we have to do (slower) run-time checks.
+ */
+#ifdef NEED_CPU_H
+#define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN)
+#else
+#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN)
+#endif
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap16(s);
+ } else {
+ return s;
+ }
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap32(s);
+ } else {
+ return s;
+ }
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap64(s);
+ } else {
+ return s;
+ }
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap16(*s);
+ }
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap32(*s);
+ }
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap64(*s);
+ }
+}
+
+#endif /* TSWAP_H */
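
A small sketch of the intended use of the tswap helpers above: converting 32-bit values between guest and host byte order without caring which layout the target uses. It assumes compilation inside QEMU target code where exec/tswap.h is available; the buffer is merely a stand-in for guest memory.

    #include "qemu/osdep.h"
    #include "exec/tswap.h"

    /* Read a 32-bit field that the guest wrote in its own byte order;
     * tswap32() is a no-op when guest and host endianness match. */
    static uint32_t read_guest_u32(const void *guest_mem, size_t offset)
    {
        uint32_t raw;

        memcpy(&raw, (const uint8_t *)guest_mem + offset, sizeof(raw));
        return tswap32(raw);
    }

    static void write_guest_u32(void *guest_mem, size_t offset, uint32_t val)
    {
        uint32_t raw = tswap32(val);             /* back to guest byte order */

        memcpy((uint8_t *)guest_mem + offset, &raw, sizeof(raw));
    }
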
diff --git a/include/exec/user/abitypes.h b/include/exec/user/abitypes.h
index 743b8bb9ea..db4a670328 100644
--- a/include/exec/user/abitypes.h
+++ b/include/exec/user/abitypes.h
@@ -15,7 +15,17 @@
#define ABI_LLONG_ALIGNMENT 2
#endif
-#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) || defined(TARGET_SH4)
+#ifdef TARGET_CRIS
+#define ABI_SHORT_ALIGNMENT 1
+#define ABI_INT_ALIGNMENT 1
+#define ABI_LONG_ALIGNMENT 1
+#define ABI_LLONG_ALIGNMENT 1
+#endif
+
+#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) \
+ || defined(TARGET_SH4) \
+ || defined(TARGET_OPENRISC) \
+ || defined(TARGET_MICROBLAZE)
#define ABI_LLONG_ALIGNMENT 4
#endif
diff --git a/include/exec/user/guest-base.h b/include/exec/user/guest-base.h
new file mode 100644
index 0000000000..afe2ab7fbb
--- /dev/null
+++ b/include/exec/user/guest-base.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Declaration of guest_base.
+ * Copyright (c) 2003 Fabrice Bellard
+ */
+
+#ifndef EXEC_USER_GUEST_BASE_H
+#define EXEC_USER_GUEST_BASE_H
+
+extern uintptr_t guest_base;
+
+#endif
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
index 8d3af5a3be..2ebfecf58e 100644
--- a/include/exec/user/thunk.h
+++ b/include/exec/user/thunk.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,10 +16,12 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef THUNK_H
#define THUNK_H
#include "cpu.h"
+#include "exec/user/abitypes.h"
/* types enums definitions */
@@ -40,7 +42,7 @@ typedef enum argtype {
} argtype;
#define MK_PTR(type) TYPE_PTR, type
-#define MK_ARRAY(type, size) TYPE_ARRAY, size, type
+#define MK_ARRAY(type, size) TYPE_ARRAY, (int)(size), type
#define MK_STRUCT(id) TYPE_STRUCT, id
#define THUNK_TARGET 0
@@ -53,6 +55,7 @@ typedef struct {
int *field_offsets[2];
/* special handling */
void (*convert[2])(void *dst, const void *src);
+ void (*print)(void *arg);
int size[2];
int align[2];
const char *name;
@@ -71,6 +74,7 @@ void thunk_register_struct_direct(int id, const char *name,
const StructEntry *se1);
const argtype *thunk_convert(void *dst, const void *src,
const argtype *type_ptr, int to_host);
+const argtype *thunk_print(void *arg, const argtype *type_ptr);
extern StructEntry *struct_entries;
@@ -107,8 +111,7 @@ static inline int thunk_type_size(const argtype *type_ptr, int is_host)
if (is_host) {
#if defined(HOST_X86_64)
return 8;
-#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
- defined(HOST_PARISC) || defined(HOST_SPARC64)
+#elif defined(HOST_MIPS) || defined(HOST_SPARC64)
return 4;
#elif defined(HOST_PPC)
return sizeof(void *);
@@ -189,10 +192,17 @@ static inline int thunk_type_align(const argtype *type_ptr, int is_host)
}
}
-unsigned int target_to_host_bitmask(unsigned int target_mask,
- const bitmask_transtbl * trans_tbl);
-unsigned int host_to_target_bitmask(unsigned int host_mask,
- const bitmask_transtbl * trans_tbl);
+unsigned int target_to_host_bitmask_len(unsigned int target_mask,
+ const bitmask_transtbl *trans_tbl,
+ size_t trans_len);
+unsigned int host_to_target_bitmask_len(unsigned int host_mask,
+ const bitmask_transtbl * trans_tbl,
+ size_t trans_len);
+
+#define target_to_host_bitmask(M, T) \
+ target_to_host_bitmask_len(M, T, ARRAY_SIZE(T))
+#define host_to_target_bitmask(M, T) \
+ host_to_target_bitmask_len(M, T, ARRAY_SIZE(T))
void thunk_init(unsigned int max_structs);
diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h
new file mode 100644
index 0000000000..b9844afc77
--- /dev/null
+++ b/include/exec/vaddr.h
@@ -0,0 +1,18 @@
+/* Define vaddr. */
+
+#ifndef VADDR_H
+#define VADDR_H
+
+/**
+ * vaddr:
+ * Type wide enough to contain any #target_ulong virtual address.
+ */
+typedef uint64_t vaddr;
+#define VADDR_PRId PRId64
+#define VADDR_PRIu PRIu64
+#define VADDR_PRIo PRIo64
+#define VADDR_PRIx PRIx64
+#define VADDR_PRIX PRIX64
+#define VADDR_MAX UINT64_MAX
+
+#endif
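
A minimal sketch of printing a vaddr with the format macros above; the typedef and macro are repeated locally so the snippet stands alone outside QEMU.

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Mirrors include/exec/vaddr.h for illustration. */
    typedef uint64_t vaddr;
    #define VADDR_PRIx PRIx64

    int main(void)
    {
        vaddr watchpoint = 0x7fffdeadbeefULL;

        /* The macro expands to the right length modifier for the 64-bit type. */
        printf("watchpoint hit at 0x%" VADDR_PRIx "\n", watchpoint);
        return 0;
    }
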
diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
new file mode 100644
index 0000000000..94cbe073ec
--- /dev/null
+++ b/include/fpu/softfloat-helpers.h
@@ -0,0 +1,144 @@
+/*
+ * QEMU float support - standalone helpers
+ *
+ * This is provided for files that don't need access to the full
+ * set of softfloat functions. Typically this is cpu initialisation
+ * code which wants to set default rounding and exception modes.
+ *
+ * The code in this source file is derived from release 2a of the SoftFloat
+ * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and
+ * some later contributions) are provided under that license, as detailed below.
+ * It has subsequently been modified by contributors to the QEMU Project,
+ * so some portions are provided under:
+ * the SoftFloat-2a license
+ * the BSD license
+ * GPL-v2-or-later
+ *
+ * Any future contributions to this file after December 1st 2014 will be
+ * taken to be licensed under the Softfloat-2a license unless specifically
+ * indicated otherwise.
+ */
+
+/*
+===============================================================================
+This C header file is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2a.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
+has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
+TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
+PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
+AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) they include prominent notice that the work is derivative, and (2) they
+include prominent notice akin to these four paragraphs for those parts of
+this code that are retained.
+
+===============================================================================
+*/
+
+#ifndef SOFTFLOAT_HELPERS_H
+#define SOFTFLOAT_HELPERS_H
+
+#include "fpu/softfloat-types.h"
+
+static inline void set_float_detect_tininess(bool val, float_status *status)
+{
+ status->tininess_before_rounding = val;
+}
+
+static inline void set_float_rounding_mode(FloatRoundMode val,
+ float_status *status)
+{
+ status->float_rounding_mode = val;
+}
+
+static inline void set_float_exception_flags(int val, float_status *status)
+{
+ status->float_exception_flags = val;
+}
+
+static inline void set_floatx80_rounding_precision(FloatX80RoundPrec val,
+ float_status *status)
+{
+ status->floatx80_rounding_precision = val;
+}
+
+static inline void set_flush_to_zero(bool val, float_status *status)
+{
+ status->flush_to_zero = val;
+}
+
+static inline void set_flush_inputs_to_zero(bool val, float_status *status)
+{
+ status->flush_inputs_to_zero = val;
+}
+
+static inline void set_default_nan_mode(bool val, float_status *status)
+{
+ status->default_nan_mode = val;
+}
+
+static inline void set_snan_bit_is_one(bool val, float_status *status)
+{
+ status->snan_bit_is_one = val;
+}
+
+static inline void set_use_first_nan(bool val, float_status *status)
+{
+ status->use_first_nan = val;
+}
+
+static inline void set_no_signaling_nans(bool val, float_status *status)
+{
+ status->no_signaling_nans = val;
+}
+
+static inline bool get_float_detect_tininess(float_status *status)
+{
+ return status->tininess_before_rounding;
+}
+
+static inline FloatRoundMode get_float_rounding_mode(float_status *status)
+{
+ return status->float_rounding_mode;
+}
+
+static inline int get_float_exception_flags(float_status *status)
+{
+ return status->float_exception_flags;
+}
+
+static inline FloatX80RoundPrec
+get_floatx80_rounding_precision(float_status *status)
+{
+ return status->floatx80_rounding_precision;
+}
+
+static inline bool get_flush_to_zero(float_status *status)
+{
+ return status->flush_to_zero;
+}
+
+static inline bool get_flush_inputs_to_zero(float_status *status)
+{
+ return status->flush_inputs_to_zero;
+}
+
+static inline bool get_default_nan_mode(float_status *status)
+{
+ return status->default_nan_mode;
+}
+
+#endif /* SOFTFLOAT_HELPERS_H */
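
A sketch of the typical use named in the header comment: cpu reset code initialising a float_status. It assumes QEMU's softfloat headers are available; float_round_nearest_even is the usual FloatRoundMode default but the exact enumerators live in softfloat-types.h, and the per-CPU state struct here is hypothetical.

    #include "qemu/osdep.h"
    #include "fpu/softfloat-helpers.h"

    /* Hypothetical per-CPU FP state, for illustration only. */
    typedef struct MyCPUFPState {
        float_status fp_status;
    } MyCPUFPState;

    static void my_cpu_reset_fpu(MyCPUFPState *s)
    {
        /* Round to nearest-even with no exception flags pending. */
        set_float_rounding_mode(float_round_nearest_even, &s->fp_status);
        set_float_exception_flags(0, &s->fp_status);
        /* false: detect tininess after rounding (see the setter above). */
        set_float_detect_tininess(false, &s->fp_status);
        set_flush_to_zero(false, &s->fp_status);
        set_flush_inputs_to_zero(false, &s->fp_status);
        set_default_nan_mode(false, &s->fp_status);
    }
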
diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index b1d772e6d4..f35cdbfa63 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -8,7 +8,6 @@
* so some portions are provided under:
* the SoftFloat-2a license
* the BSD license
- * GPL-v2-or-later
*
* Any future contributions to this file after December 1st 2014 will be
* taken to be licensed under the Softfloat-2a license unless specifically
@@ -75,9 +74,47 @@ this code that are retained.
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* Portions of this work are licensed under the terms of the GNU GPL,
- * version 2 or later. See the COPYING file in the top-level directory.
+#ifndef FPU_SOFTFLOAT_MACROS_H
+#define FPU_SOFTFLOAT_MACROS_H
+
+#include "fpu/softfloat-types.h"
+#include "qemu/host-utils.h"
+
+/**
+ * shl_double: double-word merging left shift
+ * @l: left or most-significant word
+ * @r: right or least-significant word
+ * @c: shift count
+ *
+ * Shift @l left by @c bits, shifting in bits from @r.
+ */
+static inline uint64_t shl_double(uint64_t l, uint64_t r, int c)
+{
+#if defined(__x86_64__)
+ asm("shld %b2, %1, %0" : "+r"(l) : "r"(r), "ci"(c));
+ return l;
+#else
+ return c ? (l << c) | (r >> (64 - c)) : l;
+#endif
+}
+
+/**
+ * shr_double: double-word merging right shift
+ * @l: left or most-significant word
+ * @r: right or least-significant word
+ * @c: shift count
+ *
+ * Shift @r right by @c bits, shifting in bits from @l.
*/
+static inline uint64_t shr_double(uint64_t l, uint64_t r, int c)
+{
+#if defined(__x86_64__)
+ asm("shrd %b2, %1, %0" : "+r"(r) : "r"(l), "ci"(c));
+ return r;
+#else
+ return c ? (r >> c) | (l << (64 - c)) : r;
+#endif
+}
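A quick numerical check of the two merging shifts, a sketch assuming only this header and <assert.h>:

#include <assert.h>
#include "fpu/softfloat-macros.h"

static void merging_shift_demo(void)
{
    uint64_t l = 0x00000000000000FFull;
    uint64_t r = 0x8000000000000000ull;

    /* The top 8 bits of r flow into the low end of l. */
    assert(shl_double(l, r, 8) == 0x000000000000FF80ull);
    /* The low 8 bits of l flow into the high end of r. */
    assert(shr_double(l, r, 8) == 0xFF80000000000000ull);
    /* c == 0 is special-cased so the fallback never shifts by 64. */
    assert(shl_double(l, r, 0) == l);
}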
/*----------------------------------------------------------------------------
| Shifts `a' right by the number of bits given in `count'. If any nonzero
@@ -398,16 +435,12 @@ static inline void
| are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- add128(
- uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr )
+static inline void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
+ uint64_t *z0Ptr, uint64_t *z1Ptr)
{
- uint64_t z1;
-
- z1 = a1 + b1;
- *z1Ptr = z1;
- *z0Ptr = a0 + b0 + ( z1 < a1 );
-
+ bool c = 0;
+ *z1Ptr = uadd64_carry(a1, b1, &c);
+ *z0Ptr = uadd64_carry(a0, b0, &c);
}
/*----------------------------------------------------------------------------
@@ -418,34 +451,14 @@ static inline void
| `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- add192(
- uint64_t a0,
- uint64_t a1,
- uint64_t a2,
- uint64_t b0,
- uint64_t b1,
- uint64_t b2,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr
- )
+static inline void add192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2,
+ uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr)
{
- uint64_t z0, z1, z2;
- int8_t carry0, carry1;
-
- z2 = a2 + b2;
- carry1 = ( z2 < a2 );
- z1 = a1 + b1;
- carry0 = ( z1 < a1 );
- z0 = a0 + b0;
- z1 += carry1;
- z0 += ( z1 < carry1 );
- z0 += carry0;
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
-
+ bool c = 0;
+ *z2Ptr = uadd64_carry(a2, b2, &c);
+ *z1Ptr = uadd64_carry(a1, b1, &c);
+ *z0Ptr = uadd64_carry(a0, b0, &c);
}
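A small sketch of the carry chain that add128()/add192() now rely on; uadd64_carry() comes from "qemu/host-utils.h", included above.

#include <assert.h>
#include "fpu/softfloat-macros.h"

static void add128_demo(void)
{
    uint64_t z0, z1;

    /* 0x1_FFFFFFFFFFFFFFFF + 1: the low-limb addition overflows and
     * uadd64_carry() propagates the carry into the high limb. */
    add128(0x1, 0xFFFFFFFFFFFFFFFFull, 0x0, 0x1, &z0, &z1);
    assert(z0 == 0x2 && z1 == 0x0);
}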
/*----------------------------------------------------------------------------
@@ -456,14 +469,12 @@ static inline void
| `z1Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- sub128(
- uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr )
+static inline void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
+ uint64_t *z0Ptr, uint64_t *z1Ptr)
{
-
- *z1Ptr = a1 - b1;
- *z0Ptr = a0 - b0 - ( a1 < b1 );
-
+ bool c = 0;
+ *z1Ptr = usub64_borrow(a1, b1, &c);
+ *z0Ptr = usub64_borrow(a0, b0, &c);
}
/*----------------------------------------------------------------------------
@@ -474,34 +485,14 @@ static inline void
| pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- sub192(
- uint64_t a0,
- uint64_t a1,
- uint64_t a2,
- uint64_t b0,
- uint64_t b1,
- uint64_t b2,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr
- )
+static inline void sub192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2,
+ uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr)
{
- uint64_t z0, z1, z2;
- int8_t borrow0, borrow1;
-
- z2 = a2 - b2;
- borrow1 = ( a2 < b2 );
- z1 = a1 - b1;
- borrow0 = ( a1 < b1 );
- z0 = a0 - b0;
- z0 -= ( z1 < borrow1 );
- z1 -= borrow1;
- z0 -= borrow0;
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
-
+ bool c = 0;
+ *z2Ptr = usub64_borrow(a2, b2, &c);
+ *z1Ptr = usub64_borrow(a1, b1, &c);
+ *z0Ptr = usub64_borrow(a0, b0, &c);
}
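The subtraction helpers mirror the addition ones, threading a borrow instead of a carry. Another illustrative check:

#include <assert.h>
#include "fpu/softfloat-macros.h"

static void sub128_demo(void)
{
    uint64_t z0, z1;

    /* 0x1_0000000000000000 - 1: the low limb borrows and
     * usub64_borrow() deducts the borrow from the high limb. */
    sub128(0x1, 0x0, 0x0, 0x1, &z0, &z1);
    assert(z0 == 0x0 && z1 == 0xFFFFFFFFFFFFFFFFull);
}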
/*----------------------------------------------------------------------------
@@ -510,27 +501,10 @@ static inline void
| `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
-static inline void mul64To128( uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t *z1Ptr )
+static inline void
+mul64To128(uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t *z1Ptr)
{
- uint32_t aHigh, aLow, bHigh, bLow;
- uint64_t z0, zMiddleA, zMiddleB, z1;
-
- aLow = a;
- aHigh = a>>32;
- bLow = b;
- bHigh = b>>32;
- z1 = ( (uint64_t) aLow ) * bLow;
- zMiddleA = ( (uint64_t) aLow ) * bHigh;
- zMiddleB = ( (uint64_t) aHigh ) * bLow;
- z0 = ( (uint64_t) aHigh ) * bHigh;
- zMiddleA += zMiddleB;
- z0 += ( ( (uint64_t) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 );
- zMiddleA <<= 32;
- z1 += zMiddleA;
- z0 += ( z1 < zMiddleA );
- *z1Ptr = z1;
- *z0Ptr = z0;
-
+ mulu64(z1Ptr, z0Ptr, a, b);
}
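mul64To128() now simply forwards to mulu64() from "qemu/host-utils.h", with z1Ptr receiving the low half of the product and z0Ptr the high half. A tiny check, illustrative only:

#include <assert.h>
#include "fpu/softfloat-macros.h"

static void mul64To128_demo(void)
{
    uint64_t z0, z1;

    /* 2^32 * 2^32 == 2^64: high word 1, low word 0. */
    mul64To128(1ull << 32, 1ull << 32, &z0, &z1);
    assert(z0 == 1 && z1 == 0);
}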
/*----------------------------------------------------------------------------
@@ -541,24 +515,14 @@ static inline void mul64To128( uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t
*----------------------------------------------------------------------------*/
static inline void
- mul128By64To192(
- uint64_t a0,
- uint64_t a1,
- uint64_t b,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr
- )
+mul128By64To192(uint64_t a0, uint64_t a1, uint64_t b,
+ uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr)
{
- uint64_t z0, z1, z2, more1;
-
- mul64To128( a1, b, &z1, &z2 );
- mul64To128( a0, b, &z0, &more1 );
- add128( z0, more1, 0, z1, &z0, &z1 );
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
+ uint64_t z0, z1, m1;
+ mul64To128(a1, b, &m1, z2Ptr);
+ mul64To128(a0, b, &z0, &z1);
+ add128(z0, z1, 0, m1, z0Ptr, z1Ptr);
}
/*----------------------------------------------------------------------------
@@ -568,34 +532,21 @@ static inline void
| the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- mul128To256(
- uint64_t a0,
- uint64_t a1,
- uint64_t b0,
- uint64_t b1,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr,
- uint64_t *z3Ptr
- )
+static inline void mul128To256(uint64_t a0, uint64_t a1,
+ uint64_t b0, uint64_t b1,
+ uint64_t *z0Ptr, uint64_t *z1Ptr,
+ uint64_t *z2Ptr, uint64_t *z3Ptr)
{
- uint64_t z0, z1, z2, z3;
- uint64_t more1, more2;
-
- mul64To128( a1, b1, &z2, &z3 );
- mul64To128( a1, b0, &z1, &more2 );
- add128( z1, more2, 0, z2, &z1, &z2 );
- mul64To128( a0, b0, &z0, &more1 );
- add128( z0, more1, 0, z1, &z0, &z1 );
- mul64To128( a0, b1, &more1, &more2 );
- add128( more1, more2, 0, z2, &more1, &z2 );
- add128( z0, z1, 0, more1, &z0, &z1 );
- *z3Ptr = z3;
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
+ uint64_t z0, z1, z2;
+ uint64_t m0, m1, m2, n1, n2;
+
+ mul64To128(a1, b0, &m1, &m2);
+ mul64To128(a0, b1, &n1, &n2);
+ mul64To128(a1, b1, &z2, z3Ptr);
+ mul64To128(a0, b0, &z0, &z1);
+ add192( 0, m1, m2, 0, n1, n2, &m0, &m1, &m2);
+ add192(m0, m1, m2, z0, z1, z2, z0Ptr, z1Ptr, z2Ptr);
}
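The rewrite uses the schoolbook decomposition (a0*2^64 + a1) * (b0*2^64 + b1) = a0*b0*2^128 + (a0*b1 + a1*b0)*2^64 + a1*b1, collecting the two cross products with a single add192() instead of a chain of add128() calls. A small numerical check as a sketch:

#include <assert.h>
#include "fpu/softfloat-macros.h"

static void mul128To256_demo(void)
{
    uint64_t z0, z1, z2, z3;

    /* (2^64 + 1)^2 == 2^128 + 2*2^64 + 1 */
    mul128To256(0x1, 0x1, 0x1, 0x1, &z0, &z1, &z2, &z3);
    assert(z0 == 0 && z1 == 1 && z2 == 2 && z3 == 1);
}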
/*----------------------------------------------------------------------------
@@ -613,13 +564,13 @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
uint64_t rem0, rem1, term0, term1;
uint64_t z;
- if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF );
+ if ( b <= a0 ) return UINT64_C(0xFFFFFFFFFFFFFFFF);
b0 = b>>32;
- z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32;
+ z = ( b0<<32 <= a0 ) ? UINT64_C(0xFFFFFFFF00000000) : ( a0 / b0 )<<32;
mul64To128( b, z, &term0, &term1 );
sub128( a0, a1, term0, term1, &rem0, &rem1 );
while ( ( (int64_t) rem0 ) < 0 ) {
- z -= LIT64( 0x100000000 );
+ z -= UINT64_C(0x100000000);
b1 = b<<32;
add128( rem0, rem1, b0, b1, &rem0, &rem1 );
}
@@ -629,83 +580,6 @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
}
-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
- *
- * Licensed under the GPLv2/LGPLv3
- */
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
- uint64_t n0, uint64_t d)
-{
-#if defined(__x86_64__)
- uint64_t q;
- asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
- return q;
-#elif defined(__s390x__)
- /* Need to use a TImode type to get an even register pair for DLGR. */
- unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
- asm("dlgr %0, %1" : "+r"(n) : "r"(d));
- *r = n >> 64;
- return n;
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
- /* From Power ISA 2.06, programming note for divdeu. */
- uint64_t q1, q2, Q, r1, r2, R;
- asm("divdeu %0,%2,%4; divdu %1,%3,%4"
- : "=&r"(q1), "=r"(q2)
- : "r"(n1), "r"(n0), "r"(d));
- r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
- r2 = n0 - (q2 * d);
- Q = q1 + q2;
- R = r1 + r2;
- if (R >= d || R < r2) { /* overflow implies R > d */
- Q += 1;
- R -= d;
- }
- *r = R;
- return Q;
-#else
- uint64_t d0, d1, q0, q1, r1, r0, m;
-
- d0 = (uint32_t)d;
- d1 = d >> 32;
-
- r1 = n1 % d1;
- q1 = n1 / d1;
- m = q1 * d0;
- r1 = (r1 << 32) | (n0 >> 32);
- if (r1 < m) {
- q1 -= 1;
- r1 += d;
- if (r1 >= d) {
- if (r1 < m) {
- q1 -= 1;
- r1 += d;
- }
- }
- }
- r1 -= m;
-
- r0 = r1 % d1;
- q0 = r1 / d1;
- m = q0 * d0;
- r0 = (r0 << 32) | (uint32_t)n0;
- if (r0 < m) {
- q0 -= 1;
- r0 += d;
- if (r0 >= d) {
- if (r0 < m) {
- q0 -= 1;
- r0 += d;
- }
- }
- }
- r0 -= m;
-
- *r = r0;
- return (q1 << 32) | q0;
-#endif
-}
-
/*----------------------------------------------------------------------------
| Returns an approximation to the square root of the 32-bit significand given
| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
@@ -751,11 +625,9 @@ static inline uint32_t estimateSqrt32(int aExp, uint32_t a)
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/
-static inline flag eq128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
+static inline bool eq128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
{
-
- return ( a0 == b0 ) && ( a1 == b1 );
-
+ return a0 == b0 && a1 == b1;
}
/*----------------------------------------------------------------------------
@@ -764,11 +636,9 @@ static inline flag eq128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/
-static inline flag le128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
+static inline bool le128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
{
-
- return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 <= b1 ) );
-
+ return a0 < b0 || (a0 == b0 && a1 <= b1);
}
/*----------------------------------------------------------------------------
@@ -777,11 +647,9 @@ static inline flag le128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
| returns 0.
*----------------------------------------------------------------------------*/
-static inline flag lt128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
+static inline bool lt128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
{
-
- return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 < b1 ) );
-
+ return a0 < b0 || (a0 == b0 && a1 < b1);
}
/*----------------------------------------------------------------------------
@@ -790,9 +658,43 @@ static inline flag lt128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/
-static inline flag ne128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
+static inline bool ne128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
+{
+ return a0 != b0 || a1 != b1;
+}
+
+/*
+ * Similarly, comparisons of 192-bit values.
+ */
+
+static inline bool eq192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2)
{
+ return ((a0 ^ b0) | (a1 ^ b1) | (a2 ^ b2)) == 0;
+}
- return ( a0 != b0 ) || ( a1 != b1 );
+static inline bool le192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2)
+{
+ if (a0 != b0) {
+ return a0 < b0;
+ }
+ if (a1 != b1) {
+ return a1 < b1;
+ }
+ return a2 <= b2;
+}
+static inline bool lt192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2)
+{
+ if (a0 != b0) {
+ return a0 < b0;
+ }
+ if (a1 != b1) {
+ return a1 < b1;
+ }
+ return a2 < b2;
}
+
+#endif
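The multi-word comparisons treat {a0, a1[, a2]} as the most-significant-first limbs of a 128- or 192-bit unsigned integer. A couple of illustrative checks, assuming only this header:

#include <assert.h>
#include "fpu/softfloat-macros.h"

static void wide_compare_demo(void)
{
    /* 2^64 - 1 (high 0, low ~0) is less than 2^64 (high 1, low 0). */
    assert(lt128(0x0, 0xFFFFFFFFFFFFFFFFull, 0x1, 0x0));
    assert(le128(0x1, 0x0, 0x1, 0x0));

    /* The 192-bit variants compare limb by limb from the top down. */
    assert(!eq192(0, 0, 1, 0, 0, 2));
    assert(lt192(0, 0, 1, 0, 1, 0));
}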
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index 2aae6a89b1..0884ec4ef7 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -80,12 +80,6 @@ this code that are retained.
#ifndef SOFTFLOAT_TYPES_H
#define SOFTFLOAT_TYPES_H
-/* This 'flag' type must be able to hold at least 0 and 1. It should
- * probably be replaced with 'bool' but the uses would need to be audited
- * to check that they weren't accidentally relying on it being a larger type.
- */
-typedef uint8_t flag;
-
/*
* Software IEC/IEEE floating-point types.
*/
@@ -109,7 +103,7 @@ typedef struct {
#define make_floatx80(exp, mant) ((floatx80) { mant, exp })
#define make_floatx80_init(exp, mant) { .low = mant, .high = exp }
typedef struct {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint64_t high, low;
#else
uint64_t low, high;
@@ -119,42 +113,62 @@ typedef struct {
#define make_float128_init(high_, low_) { .high = high_, .low = low_ }
/*
+ * Software neural-network floating-point types.
+ */
+typedef uint16_t bfloat16;
+
+/*
* Software IEC/IEEE floating-point underflow tininess-detection mode.
*/
-enum {
- float_tininess_after_rounding = 0,
- float_tininess_before_rounding = 1
-};
+#define float_tininess_after_rounding false
+#define float_tininess_before_rounding true
/*
*Software IEC/IEEE floating-point rounding mode.
*/
-enum {
+typedef enum __attribute__((__packed__)) {
float_round_nearest_even = 0,
float_round_down = 1,
float_round_up = 2,
float_round_to_zero = 3,
float_round_ties_away = 4,
- /* Not an IEEE rounding mode: round to the closest odd mantissa value */
+ /* Not an IEEE rounding mode: round to closest odd, overflow to max */
float_round_to_odd = 5,
-};
+ /* Not an IEEE rounding mode: round to closest odd, overflow to inf */
+ float_round_to_odd_inf = 6,
+} FloatRoundMode;
/*
* Software IEC/IEEE floating-point exception flags.
*/
enum {
- float_flag_invalid = 1,
- float_flag_divbyzero = 4,
- float_flag_overflow = 8,
- float_flag_underflow = 16,
- float_flag_inexact = 32,
- float_flag_input_denormal = 64,
- float_flag_output_denormal = 128
+ float_flag_invalid = 0x0001,
+ float_flag_divbyzero = 0x0002,
+ float_flag_overflow = 0x0004,
+ float_flag_underflow = 0x0008,
+ float_flag_inexact = 0x0010,
+ float_flag_input_denormal = 0x0020,
+ float_flag_output_denormal = 0x0040,
+ float_flag_invalid_isi = 0x0080, /* inf - inf */
+ float_flag_invalid_imz = 0x0100, /* inf * 0 */
+ float_flag_invalid_idi = 0x0200, /* inf / inf */
+ float_flag_invalid_zdz = 0x0400, /* 0 / 0 */
+ float_flag_invalid_sqrt = 0x0800, /* sqrt(-x) */
+ float_flag_invalid_cvti = 0x1000, /* non-nan to integer */
+ float_flag_invalid_snan = 0x2000, /* any operand was snan */
};
+/*
+ * Rounding precision for floatx80.
+ */
+typedef enum __attribute__((__packed__)) {
+ floatx80_precision_x,
+ floatx80_precision_d,
+ floatx80_precision_s,
+} FloatX80RoundPrec;
/*
* Floating Point Status. Individual architectures may maintain
@@ -164,17 +178,27 @@ enum {
*/
typedef struct float_status {
- signed char float_detect_tininess;
- signed char float_rounding_mode;
- uint8_t float_exception_flags;
- signed char floatx80_rounding_precision;
+ uint16_t float_exception_flags;
+ FloatRoundMode float_rounding_mode;
+ FloatX80RoundPrec floatx80_rounding_precision;
+ bool tininess_before_rounding;
/* should denormalised results go to zero and set the inexact flag? */
- flag flush_to_zero;
+ bool flush_to_zero;
/* should denormalised inputs go to zero and set the input_denormal flag? */
- flag flush_inputs_to_zero;
- flag default_nan_mode;
- /* not always used -- see snan_bit_is_one() in softfloat-specialize.h */
- flag snan_bit_is_one;
+ bool flush_inputs_to_zero;
+ bool default_nan_mode;
+ /*
+ * The flags below are not used on all specializations and may
+ * constant fold away (see snan_bit_is_one()/no_signalling_nans() in
+ * softfloat-specialize.inc.c)
+ */
+ bool snan_bit_is_one;
+ bool use_first_nan;
+ bool no_signaling_nans;
+ /* should overflowed results subtract re_bias from their exponent? */
+ bool rebias_overflow;
+ /* should underflowed results add re_bias to their exponent? */
+ bool rebias_underflow;
} float_status;
#endif /* SOFTFLOAT_TYPES_H */
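With the wider exception word, callers can tell why an operation was invalid: the specific cause bits are defined to be raised together with float_flag_invalid. A sketch under that assumption, with float64_div() and float64_zero taken from fpu/softfloat.h:

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

static bool was_zero_over_zero(void)
{
    float_status st = { 0 };
    int flags;

    /* 0.0 / 0.0 raises invalid with the zdz cause bit alongside it. */
    float64_div(float64_zero, float64_zero, &st);

    flags = get_float_exception_flags(&st);
    return (flags & float_flag_invalid) &&
           (flags & float_flag_invalid_zdz);
}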
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index 38a5e99cf3..eb64075b9c 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -82,87 +82,29 @@ this code that are retained.
#ifndef SOFTFLOAT_H
#define SOFTFLOAT_H
-#define LIT64( a ) a##LL
-
/*----------------------------------------------------------------------------
| Software IEC/IEEE floating-point ordering relations
*----------------------------------------------------------------------------*/
-enum {
+
+typedef enum {
float_relation_less = -1,
float_relation_equal = 0,
float_relation_greater = 1,
float_relation_unordered = 2
-};
+} FloatRelation;
#include "fpu/softfloat-types.h"
-
-static inline void set_float_detect_tininess(int val, float_status *status)
-{
- status->float_detect_tininess = val;
-}
-static inline void set_float_rounding_mode(int val, float_status *status)
-{
- status->float_rounding_mode = val;
-}
-static inline void set_float_exception_flags(int val, float_status *status)
-{
- status->float_exception_flags = val;
-}
-static inline void set_floatx80_rounding_precision(int val,
- float_status *status)
-{
- status->floatx80_rounding_precision = val;
-}
-static inline void set_flush_to_zero(flag val, float_status *status)
-{
- status->flush_to_zero = val;
-}
-static inline void set_flush_inputs_to_zero(flag val, float_status *status)
-{
- status->flush_inputs_to_zero = val;
-}
-static inline void set_default_nan_mode(flag val, float_status *status)
-{
- status->default_nan_mode = val;
-}
-static inline void set_snan_bit_is_one(flag val, float_status *status)
-{
- status->snan_bit_is_one = val;
-}
-static inline int get_float_detect_tininess(float_status *status)
-{
- return status->float_detect_tininess;
-}
-static inline int get_float_rounding_mode(float_status *status)
-{
- return status->float_rounding_mode;
-}
-static inline int get_float_exception_flags(float_status *status)
-{
- return status->float_exception_flags;
-}
-static inline int get_floatx80_rounding_precision(float_status *status)
-{
- return status->floatx80_rounding_precision;
-}
-static inline flag get_flush_to_zero(float_status *status)
-{
- return status->flush_to_zero;
-}
-static inline flag get_flush_inputs_to_zero(float_status *status)
-{
- return status->flush_inputs_to_zero;
-}
-static inline flag get_default_nan_mode(float_status *status)
-{
- return status->default_nan_mode;
-}
+#include "fpu/softfloat-helpers.h"
+#include "qemu/int128.h"
/*----------------------------------------------------------------------------
| Routine to raise any or all of the software IEC/IEEE floating-point
| exception flags.
*----------------------------------------------------------------------------*/
-void float_raise(uint8_t flags, float_status *status);
+static inline void float_raise(uint16_t flags, float_status *status)
+{
+ status->float_exception_flags |= flags;
+}
/*----------------------------------------------------------------------------
| If `a' is denormal and we are in flush-to-zero mode then set the
@@ -171,6 +113,7 @@ void float_raise(uint8_t flags, float_status *status);
float16 float16_squash_input_denormal(float16 a, float_status *status);
float32 float32_squash_input_denormal(float32 a, float_status *status);
float64 float64_squash_input_denormal(float64 a, float_status *status);
+bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
/*----------------------------------------------------------------------------
| Options to indicate which negations to perform in float*_muladd()
@@ -198,9 +141,11 @@ float16 uint16_to_float16_scalbn(uint16_t a, int, float_status *status);
float16 uint32_to_float16_scalbn(uint32_t a, int, float_status *status);
float16 uint64_to_float16_scalbn(uint64_t a, int, float_status *status);
+float16 int8_to_float16(int8_t a, float_status *status);
float16 int16_to_float16(int16_t a, float_status *status);
float16 int32_to_float16(int32_t a, float_status *status);
float16 int64_to_float16(int64_t a, float_status *status);
+float16 uint8_to_float16(uint8_t a, float_status *status);
float16 uint16_to_float16(uint16_t a, float_status *status);
float16 uint32_to_float16(uint32_t a, float_status *status);
float16 uint64_to_float16(uint64_t a, float_status *status);
@@ -238,7 +183,9 @@ floatx80 int64_to_floatx80(int64_t, float_status *status);
float128 int32_to_float128(int32_t, float_status *status);
float128 int64_to_float128(int64_t, float_status *status);
+float128 int128_to_float128(Int128, float_status *status);
float128 uint64_to_float128(uint64_t, float_status *status);
+float128 uint128_to_float128(Int128, float_status *status);
/*----------------------------------------------------------------------------
| Software half-precision conversion routines.
@@ -249,10 +196,13 @@ float32 float16_to_float32(float16, bool ieee, float_status *status);
float16 float64_to_float16(float64 a, bool ieee, float_status *status);
float64 float16_to_float64(float16 a, bool ieee, float_status *status);
-int16_t float16_to_int16_scalbn(float16, int, int, float_status *status);
-int32_t float16_to_int32_scalbn(float16, int, int, float_status *status);
-int64_t float16_to_int64_scalbn(float16, int, int, float_status *status);
+int8_t float16_to_int8_scalbn(float16, FloatRoundMode, int,
+ float_status *status);
+int16_t float16_to_int16_scalbn(float16, FloatRoundMode, int, float_status *);
+int32_t float16_to_int32_scalbn(float16, FloatRoundMode, int, float_status *);
+int64_t float16_to_int64_scalbn(float16, FloatRoundMode, int, float_status *);
+int8_t float16_to_int8(float16, float_status *status);
int16_t float16_to_int16(float16, float_status *status);
int32_t float16_to_int32(float16, float_status *status);
int64_t float16_to_int64(float16, float_status *status);
@@ -261,10 +211,16 @@ int16_t float16_to_int16_round_to_zero(float16, float_status *status);
int32_t float16_to_int32_round_to_zero(float16, float_status *status);
int64_t float16_to_int64_round_to_zero(float16, float_status *status);
-uint16_t float16_to_uint16_scalbn(float16 a, int, int, float_status *status);
-uint32_t float16_to_uint32_scalbn(float16 a, int, int, float_status *status);
-uint64_t float16_to_uint64_scalbn(float16 a, int, int, float_status *status);
+uint8_t float16_to_uint8_scalbn(float16 a, FloatRoundMode,
+ int, float_status *status);
+uint16_t float16_to_uint16_scalbn(float16 a, FloatRoundMode,
+ int, float_status *status);
+uint32_t float16_to_uint32_scalbn(float16 a, FloatRoundMode,
+ int, float_status *status);
+uint64_t float16_to_uint64_scalbn(float16 a, FloatRoundMode,
+ int, float_status *status);
+uint8_t float16_to_uint8(float16 a, float_status *status);
uint16_t float16_to_uint16(float16 a, float_status *status);
uint32_t float16_to_uint32(float16 a, float_status *status);
uint64_t float16_to_uint64(float16 a, float_status *status);
@@ -290,39 +246,46 @@ float16 float16_minnum(float16, float16, float_status *status);
float16 float16_maxnum(float16, float16, float_status *status);
float16 float16_minnummag(float16, float16, float_status *status);
float16 float16_maxnummag(float16, float16, float_status *status);
+float16 float16_minimum_number(float16, float16, float_status *status);
+float16 float16_maximum_number(float16, float16, float_status *status);
float16 float16_sqrt(float16, float_status *status);
-int float16_compare(float16, float16, float_status *status);
-int float16_compare_quiet(float16, float16, float_status *status);
+FloatRelation float16_compare(float16, float16, float_status *status);
+FloatRelation float16_compare_quiet(float16, float16, float_status *status);
-int float16_is_quiet_nan(float16, float_status *status);
-int float16_is_signaling_nan(float16, float_status *status);
+bool float16_is_quiet_nan(float16, float_status *status);
+bool float16_is_signaling_nan(float16, float_status *status);
float16 float16_silence_nan(float16, float_status *status);
-static inline int float16_is_any_nan(float16 a)
+static inline bool float16_is_any_nan(float16 a)
{
return ((float16_val(a) & ~0x8000) > 0x7c00);
}
-static inline int float16_is_neg(float16 a)
+static inline bool float16_is_neg(float16 a)
{
return float16_val(a) >> 15;
}
-static inline int float16_is_infinity(float16 a)
+static inline bool float16_is_infinity(float16 a)
{
return (float16_val(a) & 0x7fff) == 0x7c00;
}
-static inline int float16_is_zero(float16 a)
+static inline bool float16_is_zero(float16 a)
{
return (float16_val(a) & 0x7fff) == 0;
}
-static inline int float16_is_zero_or_denormal(float16 a)
+static inline bool float16_is_zero_or_denormal(float16 a)
{
return (float16_val(a) & 0x7c00) == 0;
}
+static inline bool float16_is_normal(float16 a)
+{
+ return (((float16_val(a) >> 10) + 1) & 0x1f) >= 2;
+}
+
static inline float16 float16_abs(float16 a)
{
/* Note that abs does *not* handle NaN specially, nor does
@@ -344,6 +307,47 @@ static inline float16 float16_set_sign(float16 a, int sign)
return make_float16((float16_val(a) & 0x7fff) | (sign << 15));
}
+static inline bool float16_eq(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool float16_le(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float16_lt(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool float16_unordered(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool float16_eq_quiet(float16 a, float16 b, float_status *s)
+{
+ return float16_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool float16_le_quiet(float16 a, float16 b, float_status *s)
+{
+ return float16_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float16_lt_quiet(float16 a, float16 b, float_status *s)
+{
+ return float16_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool float16_unordered_quiet(float16 a, float16 b,
+ float_status *s)
+{
+ return float16_compare_quiet(a, b, s) == float_relation_unordered;
+}
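These wrappers work because FloatRelation orders less (-1), equal (0), greater (+1) and unordered (+2): "a <= b" is exactly "compare(a, b) <= float_relation_equal", and an unordered result (either operand a NaN) falls out as false for lt/le. A short illustrative check, assuming float16_one and float16_default_nan() from later in this header:

#include <assert.h>
#include "fpu/softfloat.h"

static void relation_demo(float_status *s)
{
    float16 one = float16_one;
    float16 nan = float16_default_nan(s);

    assert(float16_le(one, one, s));   /* equal == 0, so 0 <= 0 */
    assert(!float16_lt(nan, one, s));  /* unordered == 2, never < 0 */
}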
+
#define float16_zero make_float16(0)
#define float16_half make_float16(0x3800)
#define float16_one make_float16(0x3c00)
@@ -353,6 +357,200 @@ static inline float16 float16_set_sign(float16 a, int sign)
#define float16_infinity make_float16(0x7c00)
/*----------------------------------------------------------------------------
+| Software bfloat16 conversion routines.
+*----------------------------------------------------------------------------*/
+
+bfloat16 bfloat16_round_to_int(bfloat16, float_status *status);
+bfloat16 float32_to_bfloat16(float32, float_status *status);
+float32 bfloat16_to_float32(bfloat16, float_status *status);
+bfloat16 float64_to_bfloat16(float64 a, float_status *status);
+float64 bfloat16_to_float64(bfloat16 a, float_status *status);
+
+int8_t bfloat16_to_int8_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+int16_t bfloat16_to_int16_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+int64_t bfloat16_to_int64_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+
+int8_t bfloat16_to_int8(bfloat16, float_status *status);
+int16_t bfloat16_to_int16(bfloat16, float_status *status);
+int32_t bfloat16_to_int32(bfloat16, float_status *status);
+int64_t bfloat16_to_int64(bfloat16, float_status *status);
+
+int8_t bfloat16_to_int8_round_to_zero(bfloat16, float_status *status);
+int16_t bfloat16_to_int16_round_to_zero(bfloat16, float_status *status);
+int32_t bfloat16_to_int32_round_to_zero(bfloat16, float_status *status);
+int64_t bfloat16_to_int64_round_to_zero(bfloat16, float_status *status);
+
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+uint64_t bfloat16_to_uint64_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *status);
+uint16_t bfloat16_to_uint16(bfloat16 a, float_status *status);
+uint32_t bfloat16_to_uint32(bfloat16 a, float_status *status);
+uint64_t bfloat16_to_uint64(bfloat16 a, float_status *status);
+
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *status);
+uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *status);
+uint32_t bfloat16_to_uint32_round_to_zero(bfloat16 a, float_status *status);
+uint64_t bfloat16_to_uint64_round_to_zero(bfloat16 a, float_status *status);
+
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int, float_status *status);
+bfloat16 int16_to_bfloat16_scalbn(int16_t a, int, float_status *status);
+bfloat16 int32_to_bfloat16_scalbn(int32_t a, int, float_status *status);
+bfloat16 int64_to_bfloat16_scalbn(int64_t a, int, float_status *status);
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int, float_status *status);
+bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int, float_status *status);
+bfloat16 uint32_to_bfloat16_scalbn(uint32_t a, int, float_status *status);
+bfloat16 uint64_to_bfloat16_scalbn(uint64_t a, int, float_status *status);
+
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status);
+bfloat16 int16_to_bfloat16(int16_t a, float_status *status);
+bfloat16 int32_to_bfloat16(int32_t a, float_status *status);
+bfloat16 int64_to_bfloat16(int64_t a, float_status *status);
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status);
+bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status);
+bfloat16 uint32_to_bfloat16(uint32_t a, float_status *status);
+bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status);
+
+/*----------------------------------------------------------------------------
+| Software bfloat16 operations.
+*----------------------------------------------------------------------------*/
+
+bfloat16 bfloat16_add(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_sub(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_mul(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_div(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_muladd(bfloat16, bfloat16, bfloat16, int,
+ float_status *status);
+bfloat16 bfloat16_scalbn(bfloat16, int, float_status *status);
+bfloat16 bfloat16_min(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_max(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_minnum(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_maxnum(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_minnummag(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_maxnummag(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_minimum_number(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_maximum_number(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_sqrt(bfloat16, float_status *status);
+FloatRelation bfloat16_compare(bfloat16, bfloat16, float_status *status);
+FloatRelation bfloat16_compare_quiet(bfloat16, bfloat16, float_status *status);
+
+bool bfloat16_is_quiet_nan(bfloat16, float_status *status);
+bool bfloat16_is_signaling_nan(bfloat16, float_status *status);
+bfloat16 bfloat16_silence_nan(bfloat16, float_status *status);
+bfloat16 bfloat16_default_nan(float_status *status);
+
+static inline bool bfloat16_is_any_nan(bfloat16 a)
+{
+ return ((a & ~0x8000) > 0x7F80);
+}
+
+static inline bool bfloat16_is_neg(bfloat16 a)
+{
+ return a >> 15;
+}
+
+static inline bool bfloat16_is_infinity(bfloat16 a)
+{
+ return (a & 0x7fff) == 0x7F80;
+}
+
+static inline bool bfloat16_is_zero(bfloat16 a)
+{
+ return (a & 0x7fff) == 0;
+}
+
+static inline bool bfloat16_is_zero_or_denormal(bfloat16 a)
+{
+ return (a & 0x7F80) == 0;
+}
+
+static inline bool bfloat16_is_normal(bfloat16 a)
+{
+ return (((a >> 7) + 1) & 0xff) >= 2;
+}
+
+static inline bfloat16 bfloat16_abs(bfloat16 a)
+{
+ /* Note that abs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return a & 0x7fff;
+}
+
+static inline bfloat16 bfloat16_chs(bfloat16 a)
+{
+ /* Note that chs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return a ^ 0x8000;
+}
+
+static inline bfloat16 bfloat16_set_sign(bfloat16 a, int sign)
+{
+ return (a & 0x7fff) | (sign << 15);
+}
+
+static inline bool bfloat16_eq(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool bfloat16_le(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool bfloat16_lt(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool bfloat16_unordered(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool bfloat16_eq_quiet(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool bfloat16_le_quiet(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool bfloat16_lt_quiet(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool bfloat16_unordered_quiet(bfloat16 a, bfloat16 b,
+ float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) == float_relation_unordered;
+}
+
+#define bfloat16_zero 0
+#define bfloat16_half 0x3f00
+#define bfloat16_one 0x3f80
+#define bfloat16_one_point_five 0x3fc0
+#define bfloat16_two 0x4000
+#define bfloat16_three 0x4040
+#define bfloat16_infinity 0x7f80
+
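bfloat16 is the top half of a float32 (1 sign bit, 8 exponent bits, 7 fraction bits), which is why the constants above are float32 bit patterns truncated to 16 bits. A small illustrative check, assuming float32_one and float32_to_bfloat16() from elsewhere in this header:

#include <assert.h>
#include "fpu/softfloat.h"

static void bfloat16_demo(float_status *s)
{
    /* 1.0f is 0x3f800000; its top 16 bits are bfloat16_one (0x3f80). */
    assert(float32_to_bfloat16(float32_one, s) == bfloat16_one);
    assert(bfloat16_is_normal(bfloat16_one));
    assert(bfloat16_is_infinity(bfloat16_infinity));
}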
+/*----------------------------------------------------------------------------
| The pattern for a default generated half-precision NaN.
*----------------------------------------------------------------------------*/
float16 float16_default_nan(float_status *status);
@@ -361,9 +559,9 @@ float16 float16_default_nan(float_status *status);
| Software IEC/IEEE single-precision conversion routines.
*----------------------------------------------------------------------------*/
-int16_t float32_to_int16_scalbn(float32, int, int, float_status *status);
-int32_t float32_to_int32_scalbn(float32, int, int, float_status *status);
-int64_t float32_to_int64_scalbn(float32, int, int, float_status *status);
+int16_t float32_to_int16_scalbn(float32, FloatRoundMode, int, float_status *);
+int32_t float32_to_int32_scalbn(float32, FloatRoundMode, int, float_status *);
+int64_t float32_to_int64_scalbn(float32, FloatRoundMode, int, float_status *);
int16_t float32_to_int16(float32, float_status *status);
int32_t float32_to_int32(float32, float_status *status);
@@ -373,9 +571,9 @@ int16_t float32_to_int16_round_to_zero(float32, float_status *status);
int32_t float32_to_int32_round_to_zero(float32, float_status *status);
int64_t float32_to_int64_round_to_zero(float32, float_status *status);
-uint16_t float32_to_uint16_scalbn(float32, int, int, float_status *status);
-uint32_t float32_to_uint32_scalbn(float32, int, int, float_status *status);
-uint64_t float32_to_uint64_scalbn(float32, int, int, float_status *status);
+uint16_t float32_to_uint16_scalbn(float32, FloatRoundMode, int, float_status *);
+uint32_t float32_to_uint32_scalbn(float32, FloatRoundMode, int, float_status *);
+uint64_t float32_to_uint64_scalbn(float32, FloatRoundMode, int, float_status *);
uint16_t float32_to_uint16(float32, float_status *status);
uint32_t float32_to_uint32(float32, float_status *status);
@@ -402,24 +600,18 @@ float32 float32_muladd(float32, float32, float32, int, float_status *status);
float32 float32_sqrt(float32, float_status *status);
float32 float32_exp2(float32, float_status *status);
float32 float32_log2(float32, float_status *status);
-int float32_eq(float32, float32, float_status *status);
-int float32_le(float32, float32, float_status *status);
-int float32_lt(float32, float32, float_status *status);
-int float32_unordered(float32, float32, float_status *status);
-int float32_eq_quiet(float32, float32, float_status *status);
-int float32_le_quiet(float32, float32, float_status *status);
-int float32_lt_quiet(float32, float32, float_status *status);
-int float32_unordered_quiet(float32, float32, float_status *status);
-int float32_compare(float32, float32, float_status *status);
-int float32_compare_quiet(float32, float32, float_status *status);
+FloatRelation float32_compare(float32, float32, float_status *status);
+FloatRelation float32_compare_quiet(float32, float32, float_status *status);
float32 float32_min(float32, float32, float_status *status);
float32 float32_max(float32, float32, float_status *status);
float32 float32_minnum(float32, float32, float_status *status);
float32 float32_maxnum(float32, float32, float_status *status);
float32 float32_minnummag(float32, float32, float_status *status);
float32 float32_maxnummag(float32, float32, float_status *status);
-int float32_is_quiet_nan(float32, float_status *status);
-int float32_is_signaling_nan(float32, float_status *status);
+float32 float32_minimum_number(float32, float32, float_status *status);
+float32 float32_maximum_number(float32, float32, float_status *status);
+bool float32_is_quiet_nan(float32, float_status *status);
+bool float32_is_signaling_nan(float32, float_status *status);
float32 float32_silence_nan(float32, float_status *status);
float32 float32_scalbn(float32, int, float_status *status);
@@ -439,34 +631,34 @@ static inline float32 float32_chs(float32 a)
return make_float32(float32_val(a) ^ 0x80000000);
}
-static inline int float32_is_infinity(float32 a)
+static inline bool float32_is_infinity(float32 a)
{
return (float32_val(a) & 0x7fffffff) == 0x7f800000;
}
-static inline int float32_is_neg(float32 a)
+static inline bool float32_is_neg(float32 a)
{
return float32_val(a) >> 31;
}
-static inline int float32_is_zero(float32 a)
+static inline bool float32_is_zero(float32 a)
{
return (float32_val(a) & 0x7fffffff) == 0;
}
-static inline int float32_is_any_nan(float32 a)
+static inline bool float32_is_any_nan(float32 a)
{
return ((float32_val(a) & ~(1 << 31)) > 0x7f800000UL);
}
-static inline int float32_is_zero_or_denormal(float32 a)
+static inline bool float32_is_zero_or_denormal(float32 a)
{
return (float32_val(a) & 0x7f800000) == 0;
}
static inline bool float32_is_normal(float32 a)
{
- return ((float32_val(a) + 0x00800000) & 0x7fffffff) >= 0x01000000;
+ return (((float32_val(a) >> 23) + 1) & 0xff) >= 2;
}
static inline bool float32_is_denormal(float32 a)
@@ -484,6 +676,47 @@ static inline float32 float32_set_sign(float32 a, int sign)
return make_float32((float32_val(a) & 0x7fffffff) | (sign << 31));
}
+static inline bool float32_eq(float32 a, float32 b, float_status *s)
+{
+ return float32_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool float32_le(float32 a, float32 b, float_status *s)
+{
+ return float32_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float32_lt(float32 a, float32 b, float_status *s)
+{
+ return float32_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool float32_unordered(float32 a, float32 b, float_status *s)
+{
+ return float32_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool float32_eq_quiet(float32 a, float32 b, float_status *s)
+{
+ return float32_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool float32_le_quiet(float32 a, float32 b, float_status *s)
+{
+ return float32_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float32_lt_quiet(float32 a, float32 b, float_status *s)
+{
+ return float32_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool float32_unordered_quiet(float32 a, float32 b,
+ float_status *s)
+{
+ return float32_compare_quiet(a, b, s) == float_relation_unordered;
+}
+
#define float32_zero make_float32(0)
#define float32_half make_float32(0x3f000000)
#define float32_one make_float32(0x3f800000)
@@ -503,7 +736,7 @@ static inline float32 float32_set_sign(float32 a, int sign)
| significand.
*----------------------------------------------------------------------------*/
-static inline float32 packFloat32(flag zSign, int zExp, uint32_t zSig)
+static inline float32 packFloat32(bool zSign, int zExp, uint32_t zSig)
{
return make_float32(
(((uint32_t)zSign) << 31) + (((uint32_t)zExp) << 23) + zSig);
@@ -518,9 +751,9 @@ float32 float32_default_nan(float_status *status);
| Software IEC/IEEE double-precision conversion routines.
*----------------------------------------------------------------------------*/
-int16_t float64_to_int16_scalbn(float64, int, int, float_status *status);
-int32_t float64_to_int32_scalbn(float64, int, int, float_status *status);
-int64_t float64_to_int64_scalbn(float64, int, int, float_status *status);
+int16_t float64_to_int16_scalbn(float64, FloatRoundMode, int, float_status *);
+int32_t float64_to_int32_scalbn(float64, FloatRoundMode, int, float_status *);
+int64_t float64_to_int64_scalbn(float64, FloatRoundMode, int, float_status *);
int16_t float64_to_int16(float64, float_status *status);
int32_t float64_to_int32(float64, float_status *status);
@@ -530,9 +763,12 @@ int16_t float64_to_int16_round_to_zero(float64, float_status *status);
int32_t float64_to_int32_round_to_zero(float64, float_status *status);
int64_t float64_to_int64_round_to_zero(float64, float_status *status);
-uint16_t float64_to_uint16_scalbn(float64, int, int, float_status *status);
-uint32_t float64_to_uint32_scalbn(float64, int, int, float_status *status);
-uint64_t float64_to_uint64_scalbn(float64, int, int, float_status *status);
+int32_t float64_to_int32_modulo(float64, FloatRoundMode, float_status *status);
+int64_t float64_to_int64_modulo(float64, FloatRoundMode, float_status *status);
+
+uint16_t float64_to_uint16_scalbn(float64, FloatRoundMode, int, float_status *);
+uint32_t float64_to_uint32_scalbn(float64, FloatRoundMode, int, float_status *);
+uint64_t float64_to_uint64_scalbn(float64, FloatRoundMode, int, float_status *);
uint16_t float64_to_uint16(float64, float_status *status);
uint32_t float64_to_uint32(float64, float_status *status);
@@ -558,24 +794,18 @@ float64 float64_rem(float64, float64, float_status *status);
float64 float64_muladd(float64, float64, float64, int, float_status *status);
float64 float64_sqrt(float64, float_status *status);
float64 float64_log2(float64, float_status *status);
-int float64_eq(float64, float64, float_status *status);
-int float64_le(float64, float64, float_status *status);
-int float64_lt(float64, float64, float_status *status);
-int float64_unordered(float64, float64, float_status *status);
-int float64_eq_quiet(float64, float64, float_status *status);
-int float64_le_quiet(float64, float64, float_status *status);
-int float64_lt_quiet(float64, float64, float_status *status);
-int float64_unordered_quiet(float64, float64, float_status *status);
-int float64_compare(float64, float64, float_status *status);
-int float64_compare_quiet(float64, float64, float_status *status);
+FloatRelation float64_compare(float64, float64, float_status *status);
+FloatRelation float64_compare_quiet(float64, float64, float_status *status);
float64 float64_min(float64, float64, float_status *status);
float64 float64_max(float64, float64, float_status *status);
float64 float64_minnum(float64, float64, float_status *status);
float64 float64_maxnum(float64, float64, float_status *status);
float64 float64_minnummag(float64, float64, float_status *status);
float64 float64_maxnummag(float64, float64, float_status *status);
-int float64_is_quiet_nan(float64 a, float_status *status);
-int float64_is_signaling_nan(float64, float_status *status);
+float64 float64_minimum_number(float64, float64, float_status *status);
+float64 float64_maximum_number(float64, float64, float_status *status);
+bool float64_is_quiet_nan(float64 a, float_status *status);
+bool float64_is_signaling_nan(float64, float_status *status);
float64 float64_silence_nan(float64, float_status *status);
float64 float64_scalbn(float64, int, float_status *status);
@@ -595,34 +825,34 @@ static inline float64 float64_chs(float64 a)
return make_float64(float64_val(a) ^ 0x8000000000000000LL);
}
-static inline int float64_is_infinity(float64 a)
+static inline bool float64_is_infinity(float64 a)
{
return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL;
}
-static inline int float64_is_neg(float64 a)
+static inline bool float64_is_neg(float64 a)
{
return float64_val(a) >> 63;
}
-static inline int float64_is_zero(float64 a)
+static inline bool float64_is_zero(float64 a)
{
return (float64_val(a) & 0x7fffffffffffffffLL) == 0;
}
-static inline int float64_is_any_nan(float64 a)
+static inline bool float64_is_any_nan(float64 a)
{
return ((float64_val(a) & ~(1ULL << 63)) > 0x7ff0000000000000ULL);
}
-static inline int float64_is_zero_or_denormal(float64 a)
+static inline bool float64_is_zero_or_denormal(float64 a)
{
return (float64_val(a) & 0x7ff0000000000000LL) == 0;
}
static inline bool float64_is_normal(float64 a)
{
- return ((float64_val(a) + (1ULL << 52)) & -1ULL >> 1) >= 1ULL << 53;
+ return (((float64_val(a) >> 52) + 1) & 0x7ff) >= 2;
}
static inline bool float64_is_denormal(float64 a)
@@ -641,6 +871,47 @@ static inline float64 float64_set_sign(float64 a, int sign)
| ((int64_t)sign << 63));
}
+static inline bool float64_eq(float64 a, float64 b, float_status *s)
+{
+ return float64_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool float64_le(float64 a, float64 b, float_status *s)
+{
+ return float64_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float64_lt(float64 a, float64 b, float_status *s)
+{
+ return float64_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool float64_unordered(float64 a, float64 b, float_status *s)
+{
+ return float64_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool float64_eq_quiet(float64 a, float64 b, float_status *s)
+{
+ return float64_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool float64_le_quiet(float64 a, float64 b, float_status *s)
+{
+ return float64_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float64_lt_quiet(float64 a, float64 b, float_status *s)
+{
+ return float64_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool float64_unordered_quiet(float64 a, float64 b,
+ float_status *s)
+{
+ return float64_compare_quiet(a, b, s) == float_relation_unordered;
+}
+
#define float64_zero make_float64(0)
#define float64_half make_float64(0x3fe0000000000000LL)
#define float64_one make_float64(0x3ff0000000000000LL)
@@ -656,6 +927,18 @@ static inline float64 float64_set_sign(float64 a, int sign)
float64 float64_default_nan(float_status *status);
/*----------------------------------------------------------------------------
+| Software IEC/IEEE double-precision operations, rounding to single precision,
+| returning a result in double precision, with only one rounding step.
+*----------------------------------------------------------------------------*/
+
+float64 float64r32_add(float64, float64, float_status *status);
+float64 float64r32_sub(float64, float64, float_status *status);
+float64 float64r32_mul(float64, float64, float_status *status);
+float64 float64r32_div(float64, float64, float_status *status);
+float64 float64r32_muladd(float64, float64, float64, int, float_status *status);
+float64 float64r32_sqrt(float64, float_status *status);
+
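These float64r32_* routines are aimed at targets whose single-precision instructions operate on double-precision registers (PowerPC, for example): computing in double and then converting would round twice, whereas these round once, directly to single precision, and return the value widened back to float64. A minimal sketch, assuming round-to-nearest-even:

#include "qemu/osdep.h"
#include <assert.h>
#include "fpu/softfloat.h"

static void float64r32_demo(void)
{
    float_status st = { 0 };
    set_float_rounding_mode(float_round_nearest_even, &st);

    /* 1.0 + (1.0 + 2^-52): the exact sum 2 + 2^-52 does not fit in a
     * 24-bit significand, so the single rounding step yields exactly 2.0. */
    float64 r = float64r32_add(float64_one,
                               make_float64(0x3ff0000000000001ull), &st);
    assert(float64_eq(r, make_float64(0x4000000000000000ull), &st));
}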
+/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision conversion routines.
*----------------------------------------------------------------------------*/
int32_t floatx80_to_int32(floatx80, float_status *status);
@@ -680,18 +963,13 @@ floatx80 floatx80_add(floatx80, floatx80, float_status *status);
floatx80 floatx80_sub(floatx80, floatx80, float_status *status);
floatx80 floatx80_mul(floatx80, floatx80, float_status *status);
floatx80 floatx80_div(floatx80, floatx80, float_status *status);
+floatx80 floatx80_modrem(floatx80, floatx80, bool, uint64_t *,
+ float_status *status);
+floatx80 floatx80_mod(floatx80, floatx80, float_status *status);
floatx80 floatx80_rem(floatx80, floatx80, float_status *status);
floatx80 floatx80_sqrt(floatx80, float_status *status);
-int floatx80_eq(floatx80, floatx80, float_status *status);
-int floatx80_le(floatx80, floatx80, float_status *status);
-int floatx80_lt(floatx80, floatx80, float_status *status);
-int floatx80_unordered(floatx80, floatx80, float_status *status);
-int floatx80_eq_quiet(floatx80, floatx80, float_status *status);
-int floatx80_le_quiet(floatx80, floatx80, float_status *status);
-int floatx80_lt_quiet(floatx80, floatx80, float_status *status);
-int floatx80_unordered_quiet(floatx80, floatx80, float_status *status);
-int floatx80_compare(floatx80, floatx80, float_status *status);
-int floatx80_compare_quiet(floatx80, floatx80, float_status *status);
+FloatRelation floatx80_compare(floatx80, floatx80, float_status *status);
+FloatRelation floatx80_compare_quiet(floatx80, floatx80, float_status *status);
int floatx80_is_quiet_nan(floatx80, float_status *status);
int floatx80_is_signaling_nan(floatx80, float_status *status);
floatx80 floatx80_silence_nan(floatx80, float_status *status);
@@ -709,7 +987,7 @@ static inline floatx80 floatx80_chs(floatx80 a)
return a;
}
-static inline int floatx80_is_infinity(floatx80 a)
+static inline bool floatx80_is_infinity(floatx80 a)
{
#if defined(TARGET_M68K)
return (a.high & 0x7fff) == floatx80_infinity.high && !(a.low << 1);
@@ -719,26 +997,67 @@ static inline int floatx80_is_infinity(floatx80 a)
#endif
}
-static inline int floatx80_is_neg(floatx80 a)
+static inline bool floatx80_is_neg(floatx80 a)
{
return a.high >> 15;
}
-static inline int floatx80_is_zero(floatx80 a)
+static inline bool floatx80_is_zero(floatx80 a)
{
return (a.high & 0x7fff) == 0 && a.low == 0;
}
-static inline int floatx80_is_zero_or_denormal(floatx80 a)
+static inline bool floatx80_is_zero_or_denormal(floatx80 a)
{
return (a.high & 0x7fff) == 0;
}
-static inline int floatx80_is_any_nan(floatx80 a)
+static inline bool floatx80_is_any_nan(floatx80 a)
{
return ((a.high & 0x7fff) == 0x7fff) && (a.low<<1);
}
+static inline bool floatx80_eq(floatx80 a, floatx80 b, float_status *s)
+{
+ return floatx80_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool floatx80_le(floatx80 a, floatx80 b, float_status *s)
+{
+ return floatx80_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool floatx80_lt(floatx80 a, floatx80 b, float_status *s)
+{
+ return floatx80_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool floatx80_unordered(floatx80 a, floatx80 b, float_status *s)
+{
+ return floatx80_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool floatx80_eq_quiet(floatx80 a, floatx80 b, float_status *s)
+{
+ return floatx80_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool floatx80_le_quiet(floatx80 a, floatx80 b, float_status *s)
+{
+ return floatx80_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool floatx80_lt_quiet(floatx80 a, floatx80 b, float_status *s)
+{
+ return floatx80_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool floatx80_unordered_quiet(floatx80 a, floatx80 b,
+ float_status *s)
+{
+ return floatx80_compare_quiet(a, b, s) == float_relation_unordered;
+}
+
/*----------------------------------------------------------------------------
| Return whether the given value is an invalid floatx80 encoding.
| Invalid floatx80 encodings arise when the integer bit is not set, but
@@ -751,10 +1070,35 @@ static inline int floatx80_is_any_nan(floatx80 a)
*----------------------------------------------------------------------------*/
static inline bool floatx80_invalid_encoding(floatx80 a)
{
+#if defined(TARGET_M68K)
+ /*-------------------------------------------------------------------------
+ | With m68k, the explicit integer bit can be zero in the case of:
+ | - zeros (exp == 0, mantissa == 0)
+ | - denormalized numbers (exp == 0, mantissa != 0)
+ | - unnormalized numbers (exp != 0, exp < 0x7FFF)
+ | - infinities (exp == 0x7FFF, mantissa == 0)
+ | - not-a-numbers (exp == 0x7FFF, mantissa != 0)
+ |
+ | For infinities and NaNs, the explicit integer bit can be either one or
+ | zero.
+ |
+ | The IEEE 754 standard does not define a zero integer bit. Such a number
+ | is an unnormalized number. Hardware does not directly support
+ | denormalized and unnormalized numbers, but implicitly supports them by
+ | trapping them as unimplemented data types, allowing efficient conversion
+ | in software.
+ |
+ | See "M68000 FAMILY PROGRAMMER’S REFERENCE MANUAL",
+ | "1.6 FLOATING-POINT DATA TYPES"
+ *------------------------------------------------------------------------*/
+ return false;
+#else
return (a.low & (1ULL << 63)) == 0 && (a.high & 0x7FFF) != 0;
+#endif
}
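An illustrative example of what the check rejects on targets other than m68k: a value whose exponent is nonzero but whose explicit integer bit (bit 63 of the significand) is clear is an "unnormal" with no valid IEEE meaning. floatx80_one is assumed from the defines just below.

#include <assert.h>
#include "fpu/softfloat.h"

static void floatx80_encoding_demo(void)
{
#if !defined(TARGET_M68K)
    /* Exponent 0x3fff but integer bit clear: invalid encoding. */
    floatx80 bad = make_floatx80(0x3fff, 0x4000000000000000ull);
    assert(floatx80_invalid_encoding(bad));
#endif
    /* A properly formed 1.0 is always accepted. */
    assert(!floatx80_invalid_encoding(floatx80_one));
}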
#define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL)
+#define floatx80_zero_init make_floatx80_init(0x0000, 0x0000000000000000LL)
#define floatx80_one make_floatx80(0x3fff, 0x8000000000000000LL)
#define floatx80_ln2 make_floatx80(0x3ffe, 0xb17217f7d1cf79acLL)
#define floatx80_pi make_floatx80(0x4000, 0xc90fdaa22168c235LL)
@@ -785,7 +1129,7 @@ static inline int32_t extractFloatx80Exp(floatx80 a)
| `a'.
*----------------------------------------------------------------------------*/
-static inline flag extractFloatx80Sign(floatx80 a)
+static inline bool extractFloatx80Sign(floatx80 a)
{
return a.high >> 15;
}
@@ -795,7 +1139,7 @@ static inline flag extractFloatx80Sign(floatx80 a)
| extended double-precision floating-point value, returning the result.
*----------------------------------------------------------------------------*/
-static inline floatx80 packFloatx80(flag zSign, int32_t zExp, uint64_t zSig)
+static inline floatx80 packFloatx80(bool zSign, int32_t zExp, uint64_t zSig)
{
floatx80 z;
@@ -846,7 +1190,7 @@ floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status);
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/
-floatx80 roundAndPackFloatx80(int8_t roundingPrecision, flag zSign,
+floatx80 roundAndPackFloatx80(FloatX80RoundPrec roundingPrecision, bool zSign,
int32_t zExp, uint64_t zSig0, uint64_t zSig1,
float_status *status);
@@ -859,8 +1203,8 @@ floatx80 roundAndPackFloatx80(int8_t roundingPrecision, flag zSign,
| normalized.
*----------------------------------------------------------------------------*/
-floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision,
- flag zSign, int32_t zExp,
+floatx80 normalizeRoundAndPackFloatx80(FloatX80RoundPrec roundingPrecision,
+ bool zSign, int32_t zExp,
uint64_t zSig0, uint64_t zSig1,
float_status *status);
@@ -875,9 +1219,14 @@ floatx80 floatx80_default_nan(float_status *status);
int32_t float128_to_int32(float128, float_status *status);
int32_t float128_to_int32_round_to_zero(float128, float_status *status);
int64_t float128_to_int64(float128, float_status *status);
+Int128 float128_to_int128(float128, float_status *status);
int64_t float128_to_int64_round_to_zero(float128, float_status *status);
+Int128 float128_to_int128_round_to_zero(float128, float_status *status);
uint64_t float128_to_uint64(float128, float_status *status);
+Int128 float128_to_uint128(float128, float_status *status);
uint64_t float128_to_uint64_round_to_zero(float128, float_status *status);
+Int128 float128_to_uint128_round_to_zero(float128, float_status *status);
+uint32_t float128_to_uint32(float128, float_status *status);
uint32_t float128_to_uint32_round_to_zero(float128, float_status *status);
float32 float128_to_float32(float128, float_status *status);
float64 float128_to_float64(float128, float_status *status);
@@ -890,21 +1239,23 @@ float128 float128_round_to_int(float128, float_status *status);
float128 float128_add(float128, float128, float_status *status);
float128 float128_sub(float128, float128, float_status *status);
float128 float128_mul(float128, float128, float_status *status);
+float128 float128_muladd(float128, float128, float128, int,
+ float_status *status);
float128 float128_div(float128, float128, float_status *status);
float128 float128_rem(float128, float128, float_status *status);
float128 float128_sqrt(float128, float_status *status);
-int float128_eq(float128, float128, float_status *status);
-int float128_le(float128, float128, float_status *status);
-int float128_lt(float128, float128, float_status *status);
-int float128_unordered(float128, float128, float_status *status);
-int float128_eq_quiet(float128, float128, float_status *status);
-int float128_le_quiet(float128, float128, float_status *status);
-int float128_lt_quiet(float128, float128, float_status *status);
-int float128_unordered_quiet(float128, float128, float_status *status);
-int float128_compare(float128, float128, float_status *status);
-int float128_compare_quiet(float128, float128, float_status *status);
-int float128_is_quiet_nan(float128, float_status *status);
-int float128_is_signaling_nan(float128, float_status *status);
+FloatRelation float128_compare(float128, float128, float_status *status);
+FloatRelation float128_compare_quiet(float128, float128, float_status *status);
+float128 float128_min(float128, float128, float_status *status);
+float128 float128_max(float128, float128, float_status *status);
+float128 float128_minnum(float128, float128, float_status *status);
+float128 float128_maxnum(float128, float128, float_status *status);
+float128 float128_minnummag(float128, float128, float_status *status);
+float128 float128_maxnummag(float128, float128, float_status *status);
+float128 float128_minimum_number(float128, float128, float_status *status);
+float128 float128_maximum_number(float128, float128, float_status *status);
+bool float128_is_quiet_nan(float128, float_status *status);
+bool float128_is_signaling_nan(float128, float_status *status);
float128 float128_silence_nan(float128, float_status *status);
float128 float128_scalbn(float128, int, float_status *status);
@@ -920,32 +1271,83 @@ static inline float128 float128_chs(float128 a)
return a;
}
-static inline int float128_is_infinity(float128 a)
+static inline bool float128_is_infinity(float128 a)
{
return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0;
}
-static inline int float128_is_neg(float128 a)
+static inline bool float128_is_neg(float128 a)
{
return a.high >> 63;
}
-static inline int float128_is_zero(float128 a)
+static inline bool float128_is_zero(float128 a)
{
return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0;
}
-static inline int float128_is_zero_or_denormal(float128 a)
+static inline bool float128_is_zero_or_denormal(float128 a)
{
return (a.high & 0x7fff000000000000LL) == 0;
}
-static inline int float128_is_any_nan(float128 a)
+static inline bool float128_is_normal(float128 a)
+{
+ return (((a.high >> 48) + 1) & 0x7fff) >= 2;
+}
+
+static inline bool float128_is_denormal(float128 a)
+{
+ return float128_is_zero_or_denormal(a) && !float128_is_zero(a);
+}
+
+static inline bool float128_is_any_nan(float128 a)
{
return ((a.high >> 48) & 0x7fff) == 0x7fff &&
((a.low != 0) || ((a.high & 0xffffffffffffLL) != 0));
}
+static inline bool float128_eq(float128 a, float128 b, float_status *s)
+{
+ return float128_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool float128_le(float128 a, float128 b, float_status *s)
+{
+ return float128_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float128_lt(float128 a, float128 b, float_status *s)
+{
+ return float128_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool float128_unordered(float128 a, float128 b, float_status *s)
+{
+ return float128_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool float128_eq_quiet(float128 a, float128 b, float_status *s)
+{
+ return float128_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool float128_le_quiet(float128 a, float128 b, float_status *s)
+{
+ return float128_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float128_lt_quiet(float128 a, float128 b, float_status *s)
+{
+ return float128_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool float128_unordered_quiet(float128 a, float128 b,
+ float_status *s)
+{
+ return float128_compare_quiet(a, b, s) == float_relation_unordered;
+}
+
#define float128_zero make_float128(0, 0)
/*----------------------------------------------------------------------------
diff --git a/include/gdbstub/helpers.h b/include/gdbstub/helpers.h
new file mode 100644
index 0000000000..c573aef2dc
--- /dev/null
+++ b/include/gdbstub/helpers.h
@@ -0,0 +1,103 @@
+/*
+ * gdbstub helpers
+ *
+ * These are all used by the various frontends and have to be host
+ * aware to ensure things are stored in target order.
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _GDBSTUB_HELPERS_H_
+#define _GDBSTUB_HELPERS_H_
+
+#ifdef NEED_CPU_H
+#include "cpu.h"
+
+/*
+ * The GDB remote protocol transfers values in target byte order. As
+ * the gdbstub may be batching up several register values, we always
+ * append to the array.
+ */
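+
+/*
+ * For example, a target's gdb_read_register() hook can simply append the
+ * requested value and return the number of bytes added. This is only a
+ * sketch: "FooCPU", FOO_CPU() and the regs[] array are hypothetical.
+ *
+ *     static int foo_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n)
+ *     {
+ *         FooCPU *cpu = FOO_CPU(cs);
+ *
+ *         return gdb_get_regl(buf, cpu->env.regs[n]);
+ *     }
+ */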
+
+static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
+{
+ g_byte_array_append(buf, &val, 1);
+ return 1;
+}
+
+static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
+{
+ uint16_t to_word = tswap16(val);
+ g_byte_array_append(buf, (uint8_t *) &to_word, 2);
+ return 2;
+}
+
+static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
+{
+ uint32_t to_long = tswap32(val);
+ g_byte_array_append(buf, (uint8_t *) &to_long, 4);
+ return 4;
+}
+
+static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
+{
+ uint64_t to_quad = tswap64(val);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ return 8;
+}
+
+static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
+ uint64_t val_lo)
+{
+ uint64_t to_quad;
+#if TARGET_BIG_ENDIAN
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#else
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#endif
+ return 16;
+}
+
+static inline int gdb_get_zeroes(GByteArray *array, size_t len)
+{
+ guint oldlen = array->len;
+ g_byte_array_set_size(array, oldlen + len);
+ memset(array->data + oldlen, 0, len);
+
+ return len;
+}
+
+/**
+ * gdb_get_reg_ptr: get pointer to start of last element
+ * @buf: the GByteArray the element was appended to
+ * @len: length of element
+ *
+ * This is a helper function to extract the pointer to the last
+ * element for additional processing. Some front-ends do additional
+ * dynamic swapping of the elements based on CPU state.
+ */
+static inline uint8_t *gdb_get_reg_ptr(GByteArray *buf, int len)
+{
+ return buf->data + buf->len - len;
+}
+
+#if TARGET_LONG_BITS == 64
+#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
+#define ldtul_p(addr) ldq_p(addr)
+#else
+#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
+#define ldtul_p(addr) ldl_p(addr)
+#endif
+
+#else
+#error "gdbstub helpers should only be included by target specific code"
+#endif
+
+#endif /* _GDBSTUB_HELPERS_H_ */
diff --git a/include/gdbstub/syscalls.h b/include/gdbstub/syscalls.h
new file mode 100644
index 0000000000..54ff7245a1
--- /dev/null
+++ b/include/gdbstub/syscalls.h
@@ -0,0 +1,122 @@
+/*
+ * GDB Syscall support
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef _SYSCALLS_H_
+#define _SYSCALLS_H_
+
+/* For gdb file i/o remote protocol open flags. */
+#define GDB_O_RDONLY 0
+#define GDB_O_WRONLY 1
+#define GDB_O_RDWR 2
+#define GDB_O_APPEND 8
+#define GDB_O_CREAT 0x200
+#define GDB_O_TRUNC 0x400
+#define GDB_O_EXCL 0x800
+
+/* For gdb file i/o remote protocol errno values */
+#define GDB_EPERM 1
+#define GDB_ENOENT 2
+#define GDB_EINTR 4
+#define GDB_EBADF 9
+#define GDB_EACCES 13
+#define GDB_EFAULT 14
+#define GDB_EBUSY 16
+#define GDB_EEXIST 17
+#define GDB_ENODEV 19
+#define GDB_ENOTDIR 20
+#define GDB_EISDIR 21
+#define GDB_EINVAL 22
+#define GDB_ENFILE 23
+#define GDB_EMFILE 24
+#define GDB_EFBIG 27
+#define GDB_ENOSPC 28
+#define GDB_ESPIPE 29
+#define GDB_EROFS 30
+#define GDB_ENAMETOOLONG 91
+#define GDB_EUNKNOWN 9999
+
+/* For gdb file i/o remote protocol lseek whence. */
+#define GDB_SEEK_SET 0
+#define GDB_SEEK_CUR 1
+#define GDB_SEEK_END 2
+
+/* For gdb file i/o stat/fstat. */
+typedef uint32_t gdb_mode_t;
+typedef uint32_t gdb_time_t;
+
+struct gdb_stat {
+ uint32_t gdb_st_dev; /* device */
+ uint32_t gdb_st_ino; /* inode */
+ gdb_mode_t gdb_st_mode; /* protection */
+ uint32_t gdb_st_nlink; /* number of hard links */
+ uint32_t gdb_st_uid; /* user ID of owner */
+ uint32_t gdb_st_gid; /* group ID of owner */
+ uint32_t gdb_st_rdev; /* device type (if inode device) */
+ uint64_t gdb_st_size; /* total size, in bytes */
+ uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */
+ uint64_t gdb_st_blocks; /* number of blocks allocated */
+ gdb_time_t gdb_st_atime; /* time of last access */
+ gdb_time_t gdb_st_mtime; /* time of last modification */
+ gdb_time_t gdb_st_ctime; /* time of last change */
+} QEMU_PACKED;
+
+struct gdb_timeval {
+ gdb_time_t tv_sec; /* second */
+ uint64_t tv_usec; /* microsecond */
+} QEMU_PACKED;
+
+typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, uint64_t ret, int err);
+
+/**
+ * gdb_do_syscall:
+ * @cb: function to call when the system call has completed
+ * @fmt: gdb syscall format string
+ * ...: list of arguments to interpolate into @fmt
+ *
+ * Send a GDB syscall request. This function will return immediately;
+ * the callback function will be called later when the remote system
+ * call has completed.
+ *
+ * @fmt should be in the 'call-id,parameter,parameter...' format documented
+ * for the F request packet in the GDB remote protocol. A limited set of
+ * printf-style format specifiers is supported:
+ * %x - target_ulong argument printed in hex
+ * %lx - 64-bit argument printed in hex
+ * %s - string pointer (target_ulong) and length (int) pair
+ */
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
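+
+/*
+ * Hedged example (the completion callback and the guest-address variables
+ * are hypothetical): ask the remote gdb to open a file whose name lives at
+ * guest address fname_addr and has length fname_len:
+ *
+ *     gdb_do_syscall(my_complete_cb, "open,%s,%x,%x",
+ *                    fname_addr, (int)fname_len,
+ *                    (target_ulong)GDB_O_RDONLY, (target_ulong)0);
+ */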
+
+/**
+ * use_gdb_syscalls() - report if GDB should be used for syscalls
+ *
+ * This is mostly driven by the semihosting mode the user configures;
+ * assuming that mode allows GDB, we report true if GDB is
+ * connected to the stub.
+ */
+int use_gdb_syscalls(void);
+
+/**
+ * gdb_exit: exit gdb session, reporting inferior status
+ * @code: exit code reported
+ *
+ * This closes the session and sends a final packet to GDB reporting
+ * the exit status of the program. It also cleans up any connections
+ * detritus before returning.
+ */
+void gdb_exit(int code);
+
+/**
+ * gdb_qemu_exit: ask qemu to exit
+ * @code: exit code reported
+ *
+ * This requests qemu to exit. This function is allowed to return as
+ * the exit request might be processed asynchronously by the qemu backend.
+ */
+void gdb_qemu_exit(int code);
+
+#endif /* _SYSCALLS_H_ */
diff --git a/include/gdbstub/user.h b/include/gdbstub/user.h
new file mode 100644
index 0000000000..3b8358e3da
--- /dev/null
+++ b/include/gdbstub/user.h
@@ -0,0 +1,67 @@
+/*
+ * gdbstub user-mode only APIs
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef GDBSTUB_USER_H
+#define GDBSTUB_USER_H
+
+#define MAX_SIGINFO_LENGTH 128
+
+/**
+ * gdb_handlesig() - yield control to gdb
+ * @cpu: CPU
+ * @sig: if non-zero, the signal number which caused us to stop
+ * @reason: stop reason for stop reply packet or NULL
+ * @siginfo: target-specific siginfo struct
+ * @siginfo_len: target-specific siginfo struct length
+ *
+ * This function yields control to gdb, when a user-mode-only target
+ * needs to stop execution. If @sig is non-zero, then we will send a
+ * stop packet to tell gdb that we have stopped because of this signal.
+ *
+ * This function will block (handling protocol requests from gdb)
+ * until gdb tells us to continue target execution. When it does
+ * return, the return value is a signal to deliver to the target,
+ * or 0 if no signal should be delivered, ie the signal that caused
+ * us to stop should be ignored.
+ */
+int gdb_handlesig(CPUState *, int, const char *, void *, int);
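+
+/*
+ * Sketch of a typical call from a user-mode signal path ("cs", "info" and
+ * the delivery helper are assumed from the surrounding code):
+ *
+ *     sig = gdb_handlesig(cs, sig, NULL, &info, sizeof(info));
+ *     if (sig) {
+ *         deliver_signal_to_guest(cs, sig, &info);
+ *     }
+ */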
+
+/**
+ * gdb_signalled() - inform remote gdb of sig exit
+ * @as: current CPUArchState
+ * @sig: signal number
+ */
+void gdb_signalled(CPUArchState *as, int sig);
+
+/**
+ * gdbserver_fork_start() - inform gdb of the upcoming fork()
+ */
+void gdbserver_fork_start(void);
+
+/**
+ * gdbserver_fork_end() - inform gdb of the completed fork()
+ * @cs: CPU
+ * @pid: 0 if in child process, -1 if fork failed, child process pid otherwise
+ */
+void gdbserver_fork_end(CPUState *cs, pid_t pid);
+
+/**
+ * gdb_syscall_entry() - inform gdb of syscall entry and yield control to it
+ * @cs: CPU
+ * @num: syscall number
+ */
+void gdb_syscall_entry(CPUState *cs, int num);
+
+/**
+ * gdb_syscall_return() - inform gdb of syscall return and yield control to it
+ * @cs: CPU
+ * @num: syscall number
+ */
+void gdb_syscall_return(CPUState *cs, int num);
+
+#endif /* GDBSTUB_USER_H */
diff --git a/include/glib-compat.h b/include/glib-compat.h
index 8a078c5288..43a562974d 100644
--- a/include/glib-compat.h
+++ b/include/glib-compat.h
@@ -19,17 +19,22 @@
/* Ask for warnings for anything that was marked deprecated in
* the defined version, or before. It is a candidate for rewrite.
*/
-#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_40
+#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_56
/* Ask for warnings if code tries to use function that did not
* exist in the defined version. These risk breaking builds
*/
-#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_40
+#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_56
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include <glib.h>
+#if defined(G_OS_UNIX)
+#include <glib-unix.h>
+#include <sys/types.h>
+#include <pwd.h>
+#endif
/*
* Note that because of the GLIB_VERSION_MAX_ALLOWED constant above, allowing
@@ -41,9 +46,9 @@
* int g_foo(const char *wibble)
*
* We must define a static inline function with the same signature that does
- * what we need, but with a "_qemu" suffix e.g.
+ * what we need, but with a "_compat" suffix e.g.
*
- * static inline void g_foo_qemu(const char *wibble)
+ * static inline void g_foo_compat(const char *wibble)
* {
* #if GLIB_CHECK_VERSION(X, Y, 0)
* g_foo(wibble)
@@ -56,117 +61,94 @@
* ensuring this wrapper function impl doesn't trigger the compiler warning
* about using too new glib APIs. Finally we can do
*
- * #define g_foo(a) g_foo_qemu(a)
+ * #define g_foo(a) g_foo_compat(a)
*
* So now the code elsewhere in QEMU, which *does* have the
* -Wdeprecated-declarations warning active, can call g_foo(...) as normal,
* without generating warnings.
*/
-static inline gboolean g_strv_contains_qemu(const gchar *const *strv,
- const gchar *str)
+/*
+ * g_memdup2_qemu:
+ * @mem: (nullable): the memory to copy.
+ * @byte_size: the number of bytes to copy.
+ *
+ * Allocates @byte_size bytes of memory, and copies @byte_size bytes into it
+ * from @mem. If @mem is %NULL it returns %NULL.
+ *
+ * This replaces g_memdup(), which was prone to integer overflows when
+ * converting the argument from a #gsize to a #guint.
+ *
+ * This static inline version is a backport of the new public API from
+ * GLib 2.68, which GLib keeps internal for backporting to older stable
+ * releases. See https://gitlab.gnome.org/GNOME/glib/-/issues/2319.
+ *
+ * Returns: (nullable): a pointer to the newly-allocated copy of the memory,
+ * or %NULL if @mem is %NULL.
+ */
+static inline gpointer g_memdup2_qemu(gconstpointer mem, gsize byte_size)
{
-#if GLIB_CHECK_VERSION(2, 44, 0)
- return g_strv_contains(strv, str);
+#if GLIB_CHECK_VERSION(2, 68, 0)
+ return g_memdup2(mem, byte_size);
#else
- g_return_val_if_fail(strv != NULL, FALSE);
- g_return_val_if_fail(str != NULL, FALSE);
+ gpointer new_mem;
- for (; *strv != NULL; strv++) {
- if (g_str_equal(str, *strv)) {
- return TRUE;
- }
+ if (mem && byte_size != 0) {
+ new_mem = g_malloc(byte_size);
+ memcpy(new_mem, mem, byte_size);
+ } else {
+ new_mem = NULL;
}
- return FALSE;
+ return new_mem;
#endif
}
-#define g_strv_contains(a, b) g_strv_contains_qemu(a, b)
-
-#if !GLIB_CHECK_VERSION(2, 58, 0)
-typedef struct QemuGSpawnFds {
- GSpawnChildSetupFunc child_setup;
- gpointer user_data;
- gint stdin_fd;
- gint stdout_fd;
- gint stderr_fd;
-} QemuGSpawnFds;
-
-static inline void
-qemu_gspawn_fds_setup(gpointer user_data)
-{
- QemuGSpawnFds *q = (QemuGSpawnFds *)user_data;
-
- dup2(q->stdin_fd, 0);
- dup2(q->stdout_fd, 1);
- dup2(q->stderr_fd, 2);
- q->child_setup(q->user_data);
-}
-#endif
+#define g_memdup2(m, s) g_memdup2_qemu(m, s)
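+
+/*
+ * Illustrative use (hypothetical variables): copy a buffer whose length is
+ * a gsize without the truncation risk of the old g_memdup():
+ *
+ *     uint8_t *copy = g_memdup2(payload, payload_len);
+ */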
-static inline gboolean
-g_spawn_async_with_fds_qemu(const gchar *working_directory,
- gchar **argv,
- gchar **envp,
- GSpawnFlags flags,
- GSpawnChildSetupFunc child_setup,
- gpointer user_data,
- GPid *child_pid,
- gint stdin_fd,
- gint stdout_fd,
- gint stderr_fd,
- GError **error)
+#if defined(G_OS_UNIX)
+/*
+ * Note: The fallback implementation is not MT-safe, and it returns a copy of
+ * the libc passwd struct (which must be g_free()'d after use) but not of the
+ * strings it points to. Because of these important differences, which the
+ * caller must be aware of, it is not #define'd as a GLib API substitution.
+ */
+static inline struct passwd *
+g_unix_get_passwd_entry_qemu(const gchar *user_name, GError **error)
{
-#if GLIB_CHECK_VERSION(2, 58, 0)
- return g_spawn_async_with_fds(working_directory, argv, envp, flags,
- child_setup, user_data,
- child_pid, stdin_fd, stdout_fd, stderr_fd,
- error);
+#if GLIB_CHECK_VERSION(2, 64, 0)
+ return g_unix_get_passwd_entry(user_name, error);
#else
- QemuGSpawnFds setup = {
- .child_setup = child_setup,
- .user_data = user_data,
- .stdin_fd = stdin_fd,
- .stdout_fd = stdout_fd,
- .stderr_fd = stderr_fd,
- };
-
- return g_spawn_async(working_directory, argv, envp, flags,
- qemu_gspawn_fds_setup, &setup,
- child_pid, error);
+ struct passwd *p = getpwnam(user_name);
+ if (!p) {
+ g_set_error_literal(error, G_UNIX_ERROR, 0, g_strerror(errno));
+ return NULL;
+ }
+ return (struct passwd *)g_memdup(p, sizeof(*p));
#endif
}
+#endif /* G_OS_UNIX */
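+
+/*
+ * Hedged usage sketch (error handling elided): free only the returned
+ * struct, not the strings it points to:
+ *
+ *     g_autoptr(GError) err = NULL;
+ *     struct passwd *pw = g_unix_get_passwd_entry_qemu("nobody", &err);
+ *
+ *     if (pw) {
+ *         uid_t uid = pw->pw_uid;
+ *         g_free(pw);
+ *     }
+ */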
-#define g_spawn_async_with_fds(wd, argv, env, f, c, d, p, ifd, ofd, efd, err) \
- g_spawn_async_with_fds_qemu(wd, argv, env, f, c, d, p, ifd, ofd, efd, err)
+static inline bool
+qemu_g_test_slow(void)
+{
+ static int cached = -1;
+ if (cached == -1) {
+ cached = g_test_slow() || getenv("G_TEST_SLOW") != NULL;
+ }
+ return cached;
+}
-#if defined(_WIN32) && !GLIB_CHECK_VERSION(2, 50, 0)
-/*
- * g_poll has a problem on Windows when using
- * timeouts < 10ms, so use wrapper.
- */
-#define g_poll(fds, nfds, timeout) g_poll_fixed(fds, nfds, timeout)
-gint g_poll_fixed(GPollFD *fds, guint nfds, gint timeout);
-#endif
+#undef g_test_slow
+#undef g_test_thorough
+#undef g_test_quick
+#define g_test_slow() qemu_g_test_slow()
+#define g_test_thorough() qemu_g_test_slow()
+#define g_test_quick() (!qemu_g_test_slow())
+#pragma GCC diagnostic pop
-#ifndef g_assert_cmpmem
-#define g_assert_cmpmem(m1, l1, m2, l2) \
- do { \
- gconstpointer __m1 = m1, __m2 = m2; \
- int __l1 = l1, __l2 = l2; \
- if (__l1 != __l2) { \
- g_assertion_message_cmpnum( \
- G_LOG_DOMAIN, __FILE__, __LINE__, G_STRFUNC, \
- #l1 " (len(" #m1 ")) == " #l2 " (len(" #m2 "))", __l1, "==", \
- __l2, 'i'); \
- } else if (memcmp(__m1, __m2, __l1) != 0) { \
- g_assertion_message(G_LOG_DOMAIN, __FILE__, __LINE__, G_STRFUNC, \
- "assertion failed (" #m1 " == " #m2 ")"); \
- } \
- } while (0)
+#ifndef G_NORETURN
+#define G_NORETURN G_GNUC_NORETURN
#endif
-#pragma GCC diagnostic pop
-
#endif
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index df37f68757..0e6e82b339 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -41,35 +41,13 @@ enum {
};
typedef struct AcpiRsdpData {
- uint8_t oem_id[6]; /* OEM identification */
- uint8_t revision; /* Must be 0 for 1.0, 2 for 2.0 */
+ char *oem_id; /* OEM identification */
+ uint8_t revision; /* Must be 0 for 1.0, 2 for 2.0 */
unsigned *rsdt_tbl_offset;
unsigned *xsdt_tbl_offset;
} AcpiRsdpData;
-/* Table structure from Linux kernel (the ACPI tables are under the
- BSD license) */
-
-
-#define ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \
- uint32_t signature; /* ACPI signature (4 ASCII characters) */ \
- uint32_t length; /* Length of table, in bytes, including header */ \
- uint8_t revision; /* ACPI Specification minor version # */ \
- uint8_t checksum; /* To make sum of entire table == 0 */ \
- uint8_t oem_id [6]; /* OEM identification */ \
- uint8_t oem_table_id [8]; /* OEM table identification */ \
- uint32_t oem_revision; /* OEM revision number */ \
- uint8_t asl_compiler_id [4]; /* ASL compiler vendor ID */ \
- uint32_t asl_compiler_revision; /* ASL compiler revision number */
-
-
-/* ACPI common table header */
-struct AcpiTableHeader {
- ACPI_TABLE_HEADER_DEF
-} QEMU_PACKED;
-typedef struct AcpiTableHeader AcpiTableHeader;
-
struct AcpiGenericAddress {
uint8_t space_id; /* Address space where struct or register exists */
uint8_t bit_width; /* Size in bits of given register */
@@ -77,7 +55,7 @@ struct AcpiGenericAddress {
uint8_t access_width; /* ACPI 3.0: Minimum Access size (ACPI 3.0),
ACPI 2.0: Reserved, Table 5-1 */
uint64_t address; /* 64-bit address of struct or register */
-} QEMU_PACKED;
+};
typedef struct AcpiFadtData {
struct AcpiGenericAddress pm1a_cnt; /* PM1a_CNT_BLK */
@@ -85,6 +63,8 @@ typedef struct AcpiFadtData {
struct AcpiGenericAddress pm_tmr; /* PM_TMR_BLK */
struct AcpiGenericAddress gpe0_blk; /* GPE0_BLK */
struct AcpiGenericAddress reset_reg; /* RESET_REG */
+ struct AcpiGenericAddress sleep_ctl; /* SLEEP_CONTROL_REG */
+ struct AcpiGenericAddress sleep_sts; /* SLEEP_STATUS_REG */
uint8_t reset_val; /* RESET_VALUE */
uint8_t rev; /* Revision */
uint32_t flags; /* Flags */
@@ -97,6 +77,7 @@ typedef struct AcpiFadtData {
uint16_t plvl2_lat; /* P_LVL2_LAT */
uint16_t plvl3_lat; /* P_LVL3_LAT */
uint16_t arm_boot_arch; /* ARM_BOOT_ARCH */
+ uint16_t iapc_boot_arch; /* IAPC_BOOT_ARCH */
uint8_t minor_ver; /* FADT Minor Version */
/*
@@ -109,544 +90,40 @@ typedef struct AcpiFadtData {
unsigned *xdsdt_tbl_offset;
} AcpiFadtData;
-#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0)
-#define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1)
-
-/*
- * Serial Port Console Redirection Table (SPCR), Rev. 1.02
- *
- * For .interface_type see Debug Port Table 2 (DBG2) serial port
- * subtypes in Table 3, Rev. May 22, 2012
- */
-struct AcpiSerialPortConsoleRedirection {
- ACPI_TABLE_HEADER_DEF
- uint8_t interface_type;
- uint8_t reserved1[3];
- struct AcpiGenericAddress base_address;
- uint8_t interrupt_types;
- uint8_t irq;
- uint32_t gsi;
- uint8_t baud;
- uint8_t parity;
- uint8_t stopbits;
- uint8_t flowctrl;
- uint8_t term_type;
- uint8_t reserved2;
- uint16_t pci_device_id;
- uint16_t pci_vendor_id;
- uint8_t pci_bus;
- uint8_t pci_slot;
- uint8_t pci_func;
+typedef struct AcpiGas {
+ uint8_t id; /* Address space ID */
+ uint8_t width; /* Register bit width */
+ uint8_t offset; /* Register bit offset */
+ uint8_t size; /* Access size */
+ uint64_t addr; /* Address */
+} AcpiGas;
+
+/* SPCR (Serial Port Console Redirection table) */
+typedef struct AcpiSpcrData {
+ uint8_t interface_type;
+ uint8_t reserved[3];
+ struct AcpiGas base_addr;
+ uint8_t interrupt_type;
+ uint8_t pc_interrupt;
+ uint32_t interrupt; /* Global system interrupt */
+ uint8_t baud_rate;
+ uint8_t parity;
+ uint8_t stop_bits;
+ uint8_t flow_control;
+ uint8_t terminal_type;
+ uint8_t language;
+ uint8_t reserved1;
+ uint16_t pci_device_id; /* Must be 0xffff if not PCI device */
+ uint16_t pci_vendor_id; /* Must be 0xffff if not PCI device */
+ uint8_t pci_bus;
+ uint8_t pci_device;
+ uint8_t pci_function;
uint32_t pci_flags;
- uint8_t pci_seg;
- uint32_t reserved3;
-} QEMU_PACKED;
-typedef struct AcpiSerialPortConsoleRedirection
- AcpiSerialPortConsoleRedirection;
-
-/*
- * ACPI 1.0 Root System Description Table (RSDT)
- */
-struct AcpiRsdtDescriptorRev1 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t table_offset_entry[0]; /* Array of pointers to other */
- /* ACPI tables */
-} QEMU_PACKED;
-typedef struct AcpiRsdtDescriptorRev1 AcpiRsdtDescriptorRev1;
-
-/*
- * ACPI 2.0 eXtended System Description Table (XSDT)
- */
-struct AcpiXsdtDescriptorRev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint64_t table_offset_entry[0]; /* Array of pointers to other */
- /* ACPI tables */
-} QEMU_PACKED;
-typedef struct AcpiXsdtDescriptorRev2 AcpiXsdtDescriptorRev2;
-
-/*
- * ACPI 1.0 Firmware ACPI Control Structure (FACS)
- */
-struct AcpiFacsDescriptorRev1 {
- uint32_t signature; /* ACPI Signature */
- uint32_t length; /* Length of structure, in bytes */
- uint32_t hardware_signature; /* Hardware configuration signature */
- uint32_t firmware_waking_vector; /* ACPI OS waking vector */
- uint32_t global_lock; /* Global Lock */
- uint32_t flags;
- uint8_t resverved3 [40]; /* Reserved - must be zero */
-} QEMU_PACKED;
-typedef struct AcpiFacsDescriptorRev1 AcpiFacsDescriptorRev1;
-
-/*
- * Differentiated System Description Table (DSDT)
- */
-
-/*
- * MADT values and structures
- */
-
-/* Values for MADT PCATCompat */
-
-#define ACPI_DUAL_PIC 0
-#define ACPI_MULTIPLE_APIC 1
-
-/* Master MADT */
-
-struct AcpiMultipleApicTable {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t local_apic_address; /* Physical address of local APIC */
- uint32_t flags;
-} QEMU_PACKED;
-typedef struct AcpiMultipleApicTable AcpiMultipleApicTable;
-
-/* Values for Type in APIC sub-headers */
-
-#define ACPI_APIC_PROCESSOR 0
-#define ACPI_APIC_IO 1
-#define ACPI_APIC_XRUPT_OVERRIDE 2
-#define ACPI_APIC_NMI 3
-#define ACPI_APIC_LOCAL_NMI 4
-#define ACPI_APIC_ADDRESS_OVERRIDE 5
-#define ACPI_APIC_IO_SAPIC 6
-#define ACPI_APIC_LOCAL_SAPIC 7
-#define ACPI_APIC_XRUPT_SOURCE 8
-#define ACPI_APIC_LOCAL_X2APIC 9
-#define ACPI_APIC_LOCAL_X2APIC_NMI 10
-#define ACPI_APIC_GENERIC_CPU_INTERFACE 11
-#define ACPI_APIC_GENERIC_DISTRIBUTOR 12
-#define ACPI_APIC_GENERIC_MSI_FRAME 13
-#define ACPI_APIC_GENERIC_REDISTRIBUTOR 14
-#define ACPI_APIC_GENERIC_TRANSLATOR 15
-#define ACPI_APIC_RESERVED 16 /* 16 and greater are reserved */
-
-/*
- * MADT sub-structures (Follow MULTIPLE_APIC_DESCRIPTION_TABLE)
- */
-#define ACPI_SUB_HEADER_DEF /* Common ACPI sub-structure header */\
- uint8_t type; \
- uint8_t length;
-
-/* Sub-structures for MADT */
-
-struct AcpiMadtProcessorApic {
- ACPI_SUB_HEADER_DEF
- uint8_t processor_id; /* ACPI processor id */
- uint8_t local_apic_id; /* Processor's local APIC id */
- uint32_t flags;
-} QEMU_PACKED;
-typedef struct AcpiMadtProcessorApic AcpiMadtProcessorApic;
-
-struct AcpiMadtIoApic {
- ACPI_SUB_HEADER_DEF
- uint8_t io_apic_id; /* I/O APIC ID */
- uint8_t reserved; /* Reserved - must be zero */
- uint32_t address; /* APIC physical address */
- uint32_t interrupt; /* Global system interrupt where INTI
- * lines start */
-} QEMU_PACKED;
-typedef struct AcpiMadtIoApic AcpiMadtIoApic;
-
-struct AcpiMadtIntsrcovr {
- ACPI_SUB_HEADER_DEF
- uint8_t bus;
- uint8_t source;
- uint32_t gsi;
- uint16_t flags;
-} QEMU_PACKED;
-typedef struct AcpiMadtIntsrcovr AcpiMadtIntsrcovr;
-
-struct AcpiMadtLocalNmi {
- ACPI_SUB_HEADER_DEF
- uint8_t processor_id; /* ACPI processor id */
- uint16_t flags; /* MPS INTI flags */
- uint8_t lint; /* Local APIC LINT# */
-} QEMU_PACKED;
-typedef struct AcpiMadtLocalNmi AcpiMadtLocalNmi;
-
-struct AcpiMadtProcessorX2Apic {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t x2apic_id; /* Processor's local x2APIC ID */
- uint32_t flags;
- uint32_t uid; /* Processor object _UID */
-} QEMU_PACKED;
-typedef struct AcpiMadtProcessorX2Apic AcpiMadtProcessorX2Apic;
-
-struct AcpiMadtLocalX2ApicNmi {
- ACPI_SUB_HEADER_DEF
- uint16_t flags; /* MPS INTI flags */
- uint32_t uid; /* Processor object _UID */
- uint8_t lint; /* Local APIC LINT# */
- uint8_t reserved[3]; /* Local APIC LINT# */
-} QEMU_PACKED;
-typedef struct AcpiMadtLocalX2ApicNmi AcpiMadtLocalX2ApicNmi;
-
-struct AcpiMadtGenericCpuInterface {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t cpu_interface_number;
- uint32_t uid;
- uint32_t flags;
- uint32_t parking_version;
- uint32_t performance_interrupt;
- uint64_t parked_address;
- uint64_t base_address;
- uint64_t gicv_base_address;
- uint64_t gich_base_address;
- uint32_t vgic_interrupt;
- uint64_t gicr_base_address;
- uint64_t arm_mpidr;
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericCpuInterface AcpiMadtGenericCpuInterface;
-
-/* GICC CPU Interface Flags */
-#define ACPI_MADT_GICC_ENABLED 1
-
-struct AcpiMadtGenericDistributor {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t gic_id;
- uint64_t base_address;
- uint32_t global_irq_base;
- /* ACPI 5.1 Errata 1228 Present GIC version in MADT table */
- uint8_t version;
- uint8_t reserved2[3];
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericDistributor AcpiMadtGenericDistributor;
-
-struct AcpiMadtGenericMsiFrame {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t gic_msi_frame_id;
- uint64_t base_address;
- uint32_t flags;
- uint16_t spi_count;
- uint16_t spi_base;
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericMsiFrame AcpiMadtGenericMsiFrame;
-
-struct AcpiMadtGenericRedistributor {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint64_t base_address;
- uint32_t range_length;
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericRedistributor AcpiMadtGenericRedistributor;
-
-struct AcpiMadtGenericTranslator {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t translation_id;
- uint64_t base_address;
+ uint8_t pci_segment;
uint32_t reserved2;
-} QEMU_PACKED;
+} AcpiSpcrData;
-typedef struct AcpiMadtGenericTranslator AcpiMadtGenericTranslator;
-
-/*
- * Generic Timer Description Table (GTDT)
- */
-#define ACPI_GTDT_INTERRUPT_MODE_LEVEL (0 << 0)
-#define ACPI_GTDT_INTERRUPT_MODE_EDGE (1 << 0)
-#define ACPI_GTDT_CAP_ALWAYS_ON (1 << 2)
-
-struct AcpiGenericTimerTable {
- ACPI_TABLE_HEADER_DEF
- uint64_t counter_block_addresss;
- uint32_t reserved;
- uint32_t secure_el1_interrupt;
- uint32_t secure_el1_flags;
- uint32_t non_secure_el1_interrupt;
- uint32_t non_secure_el1_flags;
- uint32_t virtual_timer_interrupt;
- uint32_t virtual_timer_flags;
- uint32_t non_secure_el2_interrupt;
- uint32_t non_secure_el2_flags;
- uint64_t counter_read_block_address;
- uint32_t platform_timer_count;
- uint32_t platform_timer_offset;
-} QEMU_PACKED;
-typedef struct AcpiGenericTimerTable AcpiGenericTimerTable;
-
-/*
- * HPET Description Table
- */
-struct Acpi20Hpet {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t timer_block_id;
- struct AcpiGenericAddress addr;
- uint8_t hpet_number;
- uint16_t min_tick;
- uint8_t page_protect;
-} QEMU_PACKED;
-typedef struct Acpi20Hpet Acpi20Hpet;
-
-/*
- * SRAT (NUMA topology description) table
- */
-
-struct AcpiSystemResourceAffinityTable {
- ACPI_TABLE_HEADER_DEF
- uint32_t reserved1;
- uint32_t reserved2[2];
-} QEMU_PACKED;
-typedef struct AcpiSystemResourceAffinityTable AcpiSystemResourceAffinityTable;
-
-#define ACPI_SRAT_PROCESSOR_APIC 0
-#define ACPI_SRAT_MEMORY 1
-#define ACPI_SRAT_PROCESSOR_x2APIC 2
-#define ACPI_SRAT_PROCESSOR_GICC 3
-
-struct AcpiSratProcessorAffinity {
- ACPI_SUB_HEADER_DEF
- uint8_t proximity_lo;
- uint8_t local_apic_id;
- uint32_t flags;
- uint8_t local_sapic_eid;
- uint8_t proximity_hi[3];
- uint32_t reserved;
-} QEMU_PACKED;
-typedef struct AcpiSratProcessorAffinity AcpiSratProcessorAffinity;
-
-struct AcpiSratProcessorX2ApicAffinity {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t proximity_domain;
- uint32_t x2apic_id;
- uint32_t flags;
- uint32_t clk_domain;
- uint32_t reserved2;
-} QEMU_PACKED;
-typedef struct AcpiSratProcessorX2ApicAffinity AcpiSratProcessorX2ApicAffinity;
-
-struct AcpiSratMemoryAffinity {
- ACPI_SUB_HEADER_DEF
- uint32_t proximity;
- uint16_t reserved1;
- uint64_t base_addr;
- uint64_t range_length;
- uint32_t reserved2;
- uint32_t flags;
- uint32_t reserved3[2];
-} QEMU_PACKED;
-typedef struct AcpiSratMemoryAffinity AcpiSratMemoryAffinity;
-
-struct AcpiSratProcessorGiccAffinity {
- ACPI_SUB_HEADER_DEF
- uint32_t proximity;
- uint32_t acpi_processor_uid;
- uint32_t flags;
- uint32_t clock_domain;
-} QEMU_PACKED;
-
-typedef struct AcpiSratProcessorGiccAffinity AcpiSratProcessorGiccAffinity;
-
-/* PCI fw r3.0 MCFG table. */
-/* Subtable */
-struct AcpiMcfgAllocation {
- uint64_t address; /* Base address, processor-relative */
- uint16_t pci_segment; /* PCI segment group number */
- uint8_t start_bus_number; /* Starting PCI Bus number */
- uint8_t end_bus_number; /* Final PCI Bus number */
- uint32_t reserved;
-} QEMU_PACKED;
-typedef struct AcpiMcfgAllocation AcpiMcfgAllocation;
-
-struct AcpiTableMcfg {
- ACPI_TABLE_HEADER_DEF;
- uint8_t reserved[8];
- AcpiMcfgAllocation allocation[0];
-} QEMU_PACKED;
-typedef struct AcpiTableMcfg AcpiTableMcfg;
-
-/*
- * TCPA Description Table
- *
- * Following Level 00, Rev 00.37 of specs:
- * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification
- */
-struct Acpi20Tcpa {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint16_t platform_class;
- uint32_t log_area_minimum_length;
- uint64_t log_area_start_address;
-} QEMU_PACKED;
-typedef struct Acpi20Tcpa Acpi20Tcpa;
-
-/*
- * TPM2
- *
- * Following Version 1.2, Revision 8 of specs:
- * https://trustedcomputinggroup.org/tcg-acpi-specification/
- */
-struct Acpi20TPM2 {
- ACPI_TABLE_HEADER_DEF
- uint16_t platform_class;
- uint16_t reserved;
- uint64_t control_area_address;
- uint32_t start_method;
- uint8_t start_method_params[12];
- uint32_t log_area_minimum_length;
- uint64_t log_area_start_address;
-} QEMU_PACKED;
-typedef struct Acpi20TPM2 Acpi20TPM2;
-
-/* DMAR - DMA Remapping table r2.2 */
-struct AcpiTableDmar {
- ACPI_TABLE_HEADER_DEF
- uint8_t host_address_width; /* Maximum DMA physical addressability */
- uint8_t flags;
- uint8_t reserved[10];
-} QEMU_PACKED;
-typedef struct AcpiTableDmar AcpiTableDmar;
-
-/* Masks for Flags field above */
-#define ACPI_DMAR_INTR_REMAP 1
-#define ACPI_DMAR_X2APIC_OPT_OUT (1 << 1)
-
-/* Values for sub-structure type for DMAR */
-enum {
- ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, /* DRHD */
- ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, /* RMRR */
- ACPI_DMAR_TYPE_ATSR = 2, /* ATSR */
- ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, /* RHSR */
- ACPI_DMAR_TYPE_ANDD = 4, /* ANDD */
- ACPI_DMAR_TYPE_RESERVED = 5 /* Reserved for furture use */
-};
-
-/*
- * Sub-structures for DMAR
- */
-
-/* Device scope structure for DRHD. */
-struct AcpiDmarDeviceScope {
- uint8_t entry_type;
- uint8_t length;
- uint16_t reserved;
- uint8_t enumeration_id;
- uint8_t bus;
- struct {
- uint8_t device;
- uint8_t function;
- } path[0];
-} QEMU_PACKED;
-typedef struct AcpiDmarDeviceScope AcpiDmarDeviceScope;
-
-/* Type 0: Hardware Unit Definition */
-struct AcpiDmarHardwareUnit {
- uint16_t type;
- uint16_t length;
- uint8_t flags;
- uint8_t reserved;
- uint16_t pci_segment; /* The PCI Segment associated with this unit */
- uint64_t address; /* Base address of remapping hardware register-set */
- AcpiDmarDeviceScope scope[0];
-} QEMU_PACKED;
-typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit;
-
-/* Type 2: Root Port ATS Capability Reporting Structure */
-struct AcpiDmarRootPortATS {
- uint16_t type;
- uint16_t length;
- uint8_t flags;
- uint8_t reserved;
- uint16_t pci_segment;
- AcpiDmarDeviceScope scope[0];
-} QEMU_PACKED;
-typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS;
-
-/* Masks for Flags field above */
-#define ACPI_DMAR_INCLUDE_PCI_ALL 1
-#define ACPI_DMAR_ATSR_ALL_PORTS 1
-
-/*
- * Input Output Remapping Table (IORT)
- * Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049B, October 2015
- */
-
-struct AcpiIortTable {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t node_count;
- uint32_t node_offset;
- uint32_t reserved;
-} QEMU_PACKED;
-typedef struct AcpiIortTable AcpiIortTable;
-
-/*
- * IORT node types
- */
-
-#define ACPI_IORT_NODE_HEADER_DEF /* Node format common fields */ \
- uint8_t type; \
- uint16_t length; \
- uint8_t revision; \
- uint32_t reserved; \
- uint32_t mapping_count; \
- uint32_t mapping_offset;
-
-/* Values for node Type above */
-enum {
- ACPI_IORT_NODE_ITS_GROUP = 0x00,
- ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
- ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
- ACPI_IORT_NODE_SMMU = 0x03,
- ACPI_IORT_NODE_SMMU_V3 = 0x04
-};
-
-struct AcpiIortIdMapping {
- uint32_t input_base;
- uint32_t id_count;
- uint32_t output_base;
- uint32_t output_reference;
- uint32_t flags;
-} QEMU_PACKED;
-typedef struct AcpiIortIdMapping AcpiIortIdMapping;
-
-struct AcpiIortMemoryAccess {
- uint32_t cache_coherency;
- uint8_t hints;
- uint16_t reserved;
- uint8_t memory_flags;
-} QEMU_PACKED;
-typedef struct AcpiIortMemoryAccess AcpiIortMemoryAccess;
-
-struct AcpiIortItsGroup {
- ACPI_IORT_NODE_HEADER_DEF
- uint32_t its_count;
- uint32_t identifiers[0];
-} QEMU_PACKED;
-typedef struct AcpiIortItsGroup AcpiIortItsGroup;
-
-#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE 1
-
-struct AcpiIortSmmu3 {
- ACPI_IORT_NODE_HEADER_DEF
- uint64_t base_address;
- uint32_t flags;
- uint32_t reserved2;
- uint64_t vatos_address;
- uint32_t model;
- uint32_t event_gsiv;
- uint32_t pri_gsiv;
- uint32_t gerr_gsiv;
- uint32_t sync_gsiv;
- AcpiIortIdMapping id_mapping_array[0];
-} QEMU_PACKED;
-typedef struct AcpiIortSmmu3 AcpiIortSmmu3;
-
-struct AcpiIortRC {
- ACPI_IORT_NODE_HEADER_DEF
- AcpiIortMemoryAccess memory_properties;
- uint32_t ats_attribute;
- uint32_t pci_segment_number;
- AcpiIortIdMapping id_mapping_array[0];
-} QEMU_PACKED;
-typedef struct AcpiIortRC AcpiIortRC;
+#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0)
+#define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1)
#endif
diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h
index bbf541263a..e0e51e85b4 100644
--- a/include/hw/acpi/acpi.h
+++ b/include/hw/acpi/acpi.h
@@ -8,7 +8,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,7 +22,6 @@
#include "qemu/notify.h"
#include "exec/memory.h"
-#include "hw/irq.h"
#include "hw/acpi/acpi_dev_interface.h"
/*
@@ -48,6 +47,8 @@
#define ACPI_PM_PROP_PM_IO_BASE "pm_io_base"
#define ACPI_PM_PROP_GPE0_BLK "gpe0_blk"
#define ACPI_PM_PROP_GPE0_BLK_LEN "gpe0_blk_len"
+#define ACPI_PM_PROP_ACPI_PCIHP_BRIDGE "acpi-pci-hotplug-with-bridge-support"
+#define ACPI_PM_PROP_ACPI_PCI_ROOTHP "acpi-root-pci-hotplug"
/* PM Timer ticks per second (HZ) */
#define PM_TIMER_FREQUENCY 3579545
@@ -65,7 +66,7 @@
#define ACPI_BITMASK_POWER_BUTTON_STATUS 0x0100
#define ACPI_BITMASK_SLEEP_BUTTON_STATUS 0x0200
#define ACPI_BITMASK_RT_CLOCK_STATUS 0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
+#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_WAKE_STATUS 0x8000
#define ACPI_BITMASK_ALL_FIXED_STATUS (\
@@ -83,7 +84,7 @@
#define ACPI_BITMASK_POWER_BUTTON_ENABLE 0x0100
#define ACPI_BITMASK_SLEEP_BUTTON_ENABLE 0x0200
#define ACPI_BITMASK_RT_CLOCK_ENABLE 0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */
+#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_PM1_COMMON_ENABLED ( \
ACPI_BITMASK_RT_CLOCK_ENABLE | \
@@ -129,6 +130,7 @@ struct ACPIPM1CNT {
MemoryRegion io;
uint16_t cnt;
uint8_t s4_val;
+ bool acpi_only;
};
struct ACPIGPE {
@@ -164,7 +166,8 @@ void acpi_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
/* PM1a_CNT: piix and ich9 don't implement PM1b CNT. */
void acpi_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent,
- bool disable_s3, bool disable_s4, uint8_t s4_val);
+ bool disable_s3, bool disable_s4, uint8_t s4_val,
+ bool acpi_only);
void acpi_pm1_cnt_update(ACPIREGS *ar,
bool sci_enable, bool sci_disable);
void acpi_pm1_cnt_reset(ACPIREGS *ar);
@@ -182,7 +185,6 @@ void acpi_send_gpe_event(ACPIREGS *ar, qemu_irq irq,
void acpi_update_sci(ACPIREGS *acpi_regs, qemu_irq irq);
/* acpi.c */
-extern int acpi_enabled;
extern char unsigned *acpi_tables;
extern size_t acpi_tables_len;
@@ -190,7 +192,6 @@ uint8_t *acpi_table_first(void);
uint8_t *acpi_table_next(uint8_t *current);
unsigned acpi_table_len(void *current);
void acpi_table_add(const QemuOpts *opts, Error **errp);
-void acpi_table_add_builtin(const QemuOpts *opts, Error **errp);
typedef struct AcpiSlicOem AcpiSlicOem;
struct AcpiSlicOem {
diff --git a/include/hw/acpi/acpi_aml_interface.h b/include/hw/acpi/acpi_aml_interface.h
new file mode 100644
index 0000000000..11748a8866
--- /dev/null
+++ b/include/hw/acpi/acpi_aml_interface.h
@@ -0,0 +1,52 @@
+#ifndef ACPI_AML_INTERFACE_H
+#define ACPI_AML_INTERFACE_H
+
+#include "qom/object.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_ACPI_DEV_AML_IF "acpi-dev-aml-interface"
+typedef struct AcpiDevAmlIfClass AcpiDevAmlIfClass;
+DECLARE_CLASS_CHECKERS(AcpiDevAmlIfClass, ACPI_DEV_AML_IF, TYPE_ACPI_DEV_AML_IF)
+#define ACPI_DEV_AML_IF(obj) \
+ INTERFACE_CHECK(AcpiDevAmlIf, (obj), TYPE_ACPI_DEV_AML_IF)
+
+typedef struct AcpiDevAmlIf AcpiDevAmlIf;
+typedef void (*dev_aml_fn)(AcpiDevAmlIf *adev, Aml *scope);
+
+/**
+ * AcpiDevAmlIfClass:
+ *
+ * build_dev_aml: adds a device-specific AML blob to the provided scope
+ *
+ * The interface provides a generic callback through which a device can
+ * build its device-specific AML blob.
+ */
+struct AcpiDevAmlIfClass {
+ /* <private> */
+ InterfaceClass parent_class;
+
+ /* <public> */
+ dev_aml_fn build_dev_aml;
+};
+
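+/*
+ * A device type opts in by listing TYPE_ACPI_DEV_AML_IF in its
+ * InterfaceInfo array and filling in build_dev_aml in its class_init.
+ * Hedged sketch ("MYDV", "QEMU0001" and the my_dev_* names are
+ * hypothetical):
+ *
+ *     static void my_dev_build_aml(AcpiDevAmlIf *adev, Aml *scope)
+ *     {
+ *         Aml *dev = aml_device("MYDV");
+ *
+ *         aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001")));
+ *         aml_append(scope, dev);
+ *     }
+ *
+ *     static void my_dev_class_init(ObjectClass *oc, void *data)
+ *     {
+ *         AcpiDevAmlIfClass *amlc = ACPI_DEV_AML_IF_CLASS(oc);
+ *
+ *         amlc->build_dev_aml = my_dev_build_aml;
+ *     }
+ */
+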
+static inline dev_aml_fn get_dev_aml_func(DeviceState *dev)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_ACPI_DEV_AML_IF)) {
+ AcpiDevAmlIfClass *klass = ACPI_DEV_AML_IF_GET_CLASS(dev);
+ return klass->build_dev_aml;
+ }
+ return NULL;
+}
+
+static inline void call_dev_aml_func(DeviceState *dev, Aml *scope)
+{
+ dev_aml_fn fn = get_dev_aml_func(dev);
+ if (fn) {
+ fn(ACPI_DEV_AML_IF(dev), scope);
+ }
+}
+
+void qbus_build_aml(BusState *bus, Aml *scope);
+
+#endif
diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h
index 43ff119179..68d9d15f50 100644
--- a/include/hw/acpi/acpi_dev_interface.h
+++ b/include/hw/acpi/acpi_dev_interface.h
@@ -1,8 +1,9 @@
#ifndef ACPI_DEV_INTERFACE_H
#define ACPI_DEV_INTERFACE_H
+#include "qapi/qapi-types-acpi.h"
#include "qom/object.h"
-#include "hw/boards.h"
+#include "hw/qdev-core.h"
/* These values are part of guest ABI, and can not be changed */
typedef enum {
@@ -11,16 +12,14 @@ typedef enum {
ACPI_MEMORY_HOTPLUG_STATUS = 8,
ACPI_NVDIMM_HOTPLUG_STATUS = 16,
ACPI_VMGENID_CHANGE_STATUS = 32,
+ ACPI_POWER_DOWN_STATUS = 64,
} AcpiEventStatusBits;
#define TYPE_ACPI_DEVICE_IF "acpi-device-interface"
-#define ACPI_DEVICE_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(AcpiDeviceIfClass, (klass), \
- TYPE_ACPI_DEVICE_IF)
-#define ACPI_DEVICE_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AcpiDeviceIfClass, (obj), \
- TYPE_ACPI_DEVICE_IF)
+typedef struct AcpiDeviceIfClass AcpiDeviceIfClass;
+DECLARE_CLASS_CHECKERS(AcpiDeviceIfClass, ACPI_DEVICE_IF,
+ TYPE_ACPI_DEVICE_IF)
#define ACPI_DEVICE_IF(obj) \
INTERFACE_CHECK(AcpiDeviceIf, (obj), \
TYPE_ACPI_DEVICE_IF)
@@ -45,14 +44,12 @@ void acpi_send_event(DeviceState *dev, AcpiEventStatusBits event);
* knowledge about internals of actual device that implements
* ACPI interface.
*/
-typedef struct AcpiDeviceIfClass {
+struct AcpiDeviceIfClass {
/* <private> */
InterfaceClass parent_class;
/* <public> */
void (*ospm_status)(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
void (*send_event)(AcpiDeviceIf *adev, AcpiEventStatusBits ev);
- void (*madt_cpu)(AcpiDeviceIf *adev, int uid,
- const CPUArchIdList *apic_ids, GArray *entry);
-} AcpiDeviceIfClass;
+};
#endif
diff --git a/include/hw/acpi/acpi_generic_initiator.h b/include/hw/acpi/acpi_generic_initiator.h
new file mode 100644
index 0000000000..a304bad73e
--- /dev/null
+++ b/include/hw/acpi/acpi_generic_initiator.h
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef ACPI_GENERIC_INITIATOR_H
+#define ACPI_GENERIC_INITIATOR_H
+
+#include "qom/object_interfaces.h"
+
+#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator"
+
+typedef struct AcpiGenericInitiator {
+ /* private */
+ Object parent;
+
+ /* public */
+ char *pci_dev;
+ uint16_t node;
+} AcpiGenericInitiator;
+
+/*
+ * ACPI 6.3:
+ * Table 5-81 Flags – Generic Initiator Affinity Structure
+ */
+typedef enum {
+ /*
+ * If clear, the OSPM ignores the contents of the Generic
+ * Initiator/Port Affinity Structure. This allows system firmware
+ * to populate the SRAT with a static number of structures, but only
+ * enable them as necessary.
+ */
+ GEN_AFFINITY_ENABLED = (1 << 0),
+} GenericAffinityFlags;
+
+/*
+ * ACPI 6.3:
+ * Table 5-80 Device Handle - PCI
+ */
+typedef struct PCIDeviceHandle {
+ uint16_t segment;
+ uint16_t bdf;
+} PCIDeviceHandle;
+
+void build_srat_generic_pci_initiator(GArray *table_data);
+
+#endif
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index 1a563ad756..a3784155cb 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -4,15 +4,13 @@
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/bios-linker-loader.h"
-/* Reserve RAM space for tables: add another order of magnitude. */
-#define ACPI_BUILD_TABLE_MAX_SIZE 0x200000
-
#define ACPI_BUILD_APPNAME6 "BOCHS "
-#define ACPI_BUILD_APPNAME4 "BXPC"
+#define ACPI_BUILD_APPNAME8 "BXPC "
#define ACPI_BUILD_TABLE_FILE "etc/acpi/tables"
#define ACPI_BUILD_RSDP_FILE "etc/acpi/rsdp"
#define ACPI_BUILD_TPMLOG_FILE "etc/tpm/log"
+#define ACPI_BUILD_LOADER_FILE "etc/table-loader"
#define AML_NOTIFY_METHOD "NTFY"
@@ -32,7 +30,6 @@ struct Aml {
uint8_t op;
AmlBlockFlags block_flags;
};
-typedef struct Aml Aml;
typedef enum {
AML_COMPATIBILITY = 0,
@@ -220,9 +217,41 @@ struct AcpiBuildTables {
GArray *rsdp;
GArray *tcpalog;
GArray *vmgenid;
+ GArray *hardware_errors;
BIOSLinker *linker;
} AcpiBuildTables;
+typedef
+struct CrsRangeEntry {
+ uint64_t base;
+ uint64_t limit;
+} CrsRangeEntry;
+
+typedef
+struct CrsRangeSet {
+ GPtrArray *io_ranges;
+ GPtrArray *mem_ranges;
+ GPtrArray *mem_64bit_ranges;
+} CrsRangeSet;
+
+
+/*
+ * ACPI 5.0: 6.4.3.8.2 Serial Bus Connection Descriptors
+ * Serial Bus Type
+ */
+#define AML_SERIAL_BUS_TYPE_I2C 1
+#define AML_SERIAL_BUS_TYPE_SPI 2
+#define AML_SERIAL_BUS_TYPE_UART 3
+
+/*
+ * ACPI 5.0: 6.4.3.8.2 Serial Bus Connection Descriptors
+ * General Flags
+ */
+/* Slave Mode */
+#define AML_SERIAL_BUS_FLAG_MASTER_DEVICE (1 << 0)
+/* Consumer/Producer */
+#define AML_SERIAL_BUS_FLAG_CONSUME_ONLY (1 << 1)
+
/**
* init_aml_allocator:
*
@@ -248,7 +277,7 @@ void free_aml_allocator(void);
* @child: element that is copied into @parent_ctx context
*
* Joins Aml elements together and helps to construct AML tables
- * Examle of usage:
+ * Example of usage:
* Aml *table = aml_def_block("SSDT", ...);
* Aml *sb = aml_scope("\\_SB");
* Aml *dev = aml_device("PCI0");
@@ -260,7 +289,7 @@ void free_aml_allocator(void);
void aml_append(Aml *parent_ctx, Aml *child);
/* non block AML object primitives */
-Aml *aml_name(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
+Aml *aml_name(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
Aml *aml_name_decl(const char *name, Aml *val);
Aml *aml_debug(void);
Aml *aml_return(Aml *val);
@@ -269,9 +298,11 @@ Aml *aml_arg(int pos);
Aml *aml_to_integer(Aml *arg);
Aml *aml_to_hexstring(Aml *src, Aml *dst);
Aml *aml_to_buffer(Aml *src, Aml *dst);
+Aml *aml_to_decimalstring(Aml *src, Aml *dst);
Aml *aml_store(Aml *val, Aml *target);
Aml *aml_and(Aml *arg1, Aml *arg2, Aml *dst);
Aml *aml_or(Aml *arg1, Aml *arg2, Aml *dst);
+Aml *aml_land(Aml *arg1, Aml *arg2);
Aml *aml_lor(Aml *arg1, Aml *arg2);
Aml *aml_shiftleft(Aml *arg1, Aml *count);
Aml *aml_shiftright(Aml *arg1, Aml *count, Aml *dst);
@@ -282,6 +313,7 @@ Aml *aml_increment(Aml *arg);
Aml *aml_decrement(Aml *arg);
Aml *aml_index(Aml *arg1, Aml *idx);
Aml *aml_notify(Aml *arg1, Aml *arg2);
+Aml *aml_break(void);
Aml *aml_call0(const char *method);
Aml *aml_call1(const char *method, Aml *arg1);
Aml *aml_call2(const char *method, Aml *arg1, Aml *arg2);
@@ -289,6 +321,8 @@ Aml *aml_call3(const char *method, Aml *arg1, Aml *arg2, Aml *arg3);
Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4);
Aml *aml_call5(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
Aml *arg5);
+Aml *aml_call6(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
+ Aml *arg5, Aml *arg6);
Aml *aml_gpio_int(AmlConsumerAndProducer con_and_pro,
AmlLevelAndEdge edge_level,
AmlActiveHighAndLow active_level, AmlShared shared,
@@ -310,13 +344,13 @@ Aml *aml_irq_no_flags(uint8_t irq);
Aml *aml_named_field(const char *name, unsigned length);
Aml *aml_reserved_field(unsigned length);
Aml *aml_local(int num);
-Aml *aml_string(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
+Aml *aml_string(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
Aml *aml_lnot(Aml *arg);
Aml *aml_equal(Aml *arg1, Aml *arg2);
Aml *aml_lgreater(Aml *arg1, Aml *arg2);
Aml *aml_lgreater_equal(Aml *arg1, Aml *arg2);
Aml *aml_processor(uint8_t proc_id, uint32_t pblk_addr, uint8_t pblk_len,
- const char *name_format, ...) GCC_FMT_ATTR(4, 5);
+ const char *name_format, ...) G_GNUC_PRINTF(4, 5);
Aml *aml_eisaid(const char *str);
Aml *aml_word_bus_number(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
AmlDecode dec, uint16_t addr_gran,
@@ -347,10 +381,11 @@ Aml *aml_qword_memory(AmlDecode dec, AmlMinFixed min_fixed,
Aml *aml_dma(AmlDmaType typ, AmlDmaBusMaster bm, AmlTransferSize sz,
uint8_t channel);
Aml *aml_sleep(uint64_t msec);
+Aml *aml_i2c_serial_bus_device(uint16_t address, const char *resource_source);
/* Block AML object primitives */
-Aml *aml_scope(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
-Aml *aml_device(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
+Aml *aml_scope(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
+Aml *aml_device(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
Aml *aml_method(const char *name, int arg_count, AmlSerializeFlag sflag);
Aml *aml_if(Aml *predicate);
Aml *aml_else(void);
@@ -378,10 +413,37 @@ Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target);
Aml *aml_object_type(Aml *object);
void build_append_int_noprefix(GArray *table, uint64_t value, int size);
-void
-build_header(BIOSLinker *linker, GArray *table_data,
- AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
- const char *oem_id, const char *oem_table_id);
+
+typedef struct AcpiTable {
+ const char *sig;
+ const uint8_t rev;
+ const char *oem_id;
+ const char *oem_table_id;
+ /* private vars tracking table state */
+ GArray *array;
+ unsigned table_offset;
+} AcpiTable;
+
+/**
+ * acpi_table_begin:
+ * initializes the table header and keeps track of
+ * the table data/offsets
+ * @desc: ACPI table descriptor
+ * @array: blob where the ACPI table will be composed/stored.
+ */
+void acpi_table_begin(AcpiTable *desc, GArray *array);
+
+/**
+ * acpi_table_end:
+ * sets the actual table length and tells the BIOS loader
+ * where the table is, for later initialization on the
+ * guest side.
+ * @linker: reference to BIOSLinker object to use for the table
+ * @table: ACPI table descriptor that was passed to the
+ * @acpi_table_begin counterpart
+ */
+void acpi_table_end(BIOSLinker *linker, AcpiTable *table);
+
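+/*
+ * Typical pattern (sketch): table_data, linker and the oem ids are assumed
+ * to come from the surrounding table-build function; the
+ * build_append_int_noprefix() line stands in for the table-specific payload.
+ *
+ *     AcpiTable table = { .sig = "XXXX", .rev = 1,
+ *                         .oem_id = oem_id, .oem_table_id = oem_table_id };
+ *
+ *     acpi_table_begin(&table, table_data);
+ *     build_append_int_noprefix(table_data, 0, 4);
+ *     acpi_table_end(linker, &table);
+ */
+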
void *acpi_data_push(GArray *table_data, unsigned size);
unsigned acpi_data_len(GArray *table);
void acpi_add_table(GArray *table_offsets, GArray *table_data);
@@ -398,7 +460,7 @@ build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
int
build_append_named_dword(GArray *array, const char *name_format, ...)
-GCC_FMT_ATTR(2, 3);
+G_GNUC_PRINTF(2, 3);
void build_append_gas(GArray *table, AmlAddressSpace as,
uint8_t bit_width, uint8_t bit_offset,
@@ -411,11 +473,32 @@ build_append_gas_from_struct(GArray *table, const struct AcpiGenericAddress *s)
s->access_width, s->address);
}
-void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
+void crs_range_insert(GPtrArray *ranges, uint64_t base, uint64_t limit);
+void crs_replace_with_free_ranges(GPtrArray *ranges,
+ uint64_t start, uint64_t end);
+void crs_range_set_init(CrsRangeSet *range_set);
+void crs_range_set_free(CrsRangeSet *range_set);
+
+Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset,
+ uint32_t mmio32_offset, uint64_t mmio64_offset,
+ uint16_t bus_nr_offset);
+
+void build_srat_memory(GArray *table_data, uint64_t base,
uint64_t len, int node, MemoryAffinityFlags flags);
-void build_slit(GArray *table_data, BIOSLinker *linker);
+void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+ const char *oem_id, const char *oem_table_id);
+
+void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+ const char *oem_id, const char *oem_table_id);
void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
const char *oem_id, const char *oem_table_id);
+
+void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
+ const char *oem_id, const char *oem_table_id);
+
+void build_spcr(GArray *table_data, BIOSLinker *linker,
+ const AcpiSpcrData *f, const uint8_t rev,
+ const char *oem_id, const char *oem_table_id);
#endif
diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
index 89ce172941..e6e1a9ef59 100644
--- a/include/hw/acpi/cpu.h
+++ b/include/hw/acpi/cpu.h
@@ -12,16 +12,19 @@
#ifndef ACPI_CPU_H
#define ACPI_CPU_H
+#include "qapi/qapi-types-acpi.h"
#include "hw/qdev-core.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
+#include "hw/boards.h"
#include "hw/hotplug.h"
typedef struct AcpiCpuStatus {
- struct CPUState *cpu;
+ CPUState *cpu;
uint64_t arch_id;
bool is_inserting;
bool is_removing;
+ bool fw_remove;
uint32_t ost_event;
uint32_t ost_status;
} AcpiCpuStatus;
@@ -48,12 +51,17 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
CPUHotplugState *state, hwaddr base_addr);
typedef struct CPUHotplugFeatures {
- bool apci_1_compatible;
+ bool acpi_1_compatible;
bool has_legacy_cphp;
+ bool fw_unplugs_cpu;
+ const char *smi_path;
} CPUHotplugFeatures;
+typedef void (*build_madt_cpu_fn)(int uid, const CPUArchIdList *apic_ids,
+ GArray *entry, bool force_enabled);
+
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
- hwaddr io_base,
+ build_madt_cpu_fn build_madt_cpu, hwaddr io_base,
const char *res_root,
const char *event_handler_method);
diff --git a/include/hw/acpi/cxl.h b/include/hw/acpi/cxl.h
new file mode 100644
index 0000000000..8f22c71530
--- /dev/null
+++ b/include/hw/acpi/cxl.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ACPI_CXL_H
+#define HW_ACPI_CXL_H
+
+#include "hw/acpi/bios-linker-loader.h"
+#include "hw/cxl/cxl.h"
+
+void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
+ BIOSLinker *linker, const char *oem_id,
+ const char *oem_table_id, CXLState *cxl_state);
+void build_cxl_osc_method(Aml *dev);
+void build_cxl_dsm_method(Aml *dev);
+
+#endif
diff --git a/include/hw/acpi/erst.h b/include/hw/acpi/erst.h
new file mode 100644
index 0000000000..b2ff663ddc
--- /dev/null
+++ b/include/hw/acpi/erst.h
@@ -0,0 +1,27 @@
+/*
+ * ACPI Error Record Serialization Table, ERST, Implementation
+ *
+ * ACPI ERST introduced in ACPI 4.0, June 16, 2009.
+ * ACPI Platform Error Interfaces : Error Serialization
+ *
+ * Copyright (c) 2021 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_ACPI_ERST_H
+#define HW_ACPI_ERST_H
+
+#include "hw/acpi/bios-linker-loader.h"
+#include "qom/object.h"
+
+void build_erst(GArray *table_data, BIOSLinker *linker, Object *erst_dev,
+ const char *oem_id, const char *oem_table_id);
+
+#define TYPE_ACPI_ERST "acpi-erst"
+
+/* returns NULL unless there is exactly one device */
+static inline Object *find_erst_dev(void)
+{
+ return object_resolve_path_type("", TYPE_ACPI_ERST, NULL);
+}
+#endif
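
Editor's note: the find_erst_dev() helper above pairs naturally with build_erst() during ACPI table assembly. A minimal sketch of how a machine's table-build code might use it; the table_offsets/tables_blob/oem_* names follow the usual QEMU acpi-build pattern and are assumptions here, not part of this patch:

    Object *erst_dev = find_erst_dev();

    if (erst_dev) {
        /* Only emit an ERST table when exactly one acpi-erst device exists. */
        acpi_add_table(table_offsets, tables_blob);
        build_erst(tables_blob, linker, erst_dev, oem_id, oem_table_id);
    }
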
diff --git a/include/hw/acpi/generic_event_device.h b/include/hw/acpi/generic_event_device.h
new file mode 100644
index 0000000000..ba84ce0214
--- /dev/null
+++ b/include/hw/acpi/generic_event_device.h
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright (c) 2018 Intel Corporation
+ * Copyright (c) 2019 Huawei Technologies R & D (UK) Ltd
+ * Written by Samuel Ortiz, Shameer Kolothum
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * The ACPI Generic Event Device (GED) is a device specific to
+ * hardware-reduced ACPI platforms [ACPI v6.1 Section 5.6.9] that handles
+ * all platform events, including the hotplug ones. The Generic Event
+ * Device allows platforms to handle interrupts in ACPI ASL statements.
+ * It follows an approach very similar to the _EVT method used for GPIO
+ * events: all interrupts are listed in _CRS and the handler is written
+ * in the _EVT method. Here, we use a single interrupt for the GED
+ * device, relying on an IO memory region to communicate the type of
+ * device affected by the interrupt. This way, we can support up to 32
+ * events with a single interrupt.
+ *
+ * Here is an example.
+ *
+ * Device (\_SB.GED)
+ * {
+ * Name (_HID, "ACPI0013")
+ * Name (_UID, Zero)
+ * Name (_CRS, ResourceTemplate ()
+ * {
+ * Interrupt (ResourceConsumer, Edge, ActiveHigh, Exclusive, ,, )
+ * {
+ * 0x00000029,
+ * }
+ * })
+ * OperationRegion (EREG, SystemMemory, 0x09080000, 0x04)
+ * Field (EREG, DWordAcc, NoLock, WriteAsZeros)
+ * {
+ * ESEL, 32
+ * }
+ *
+ * Method (_EVT, 1, Serialized) // _EVT: Event
+ * {
+ * Local0 = ESEL // ESEL = IO memory region which specifies the
+ * // device type.
+ * If (((Local0 & One) == One))
+ * {
+ * MethodEvent1()
+ * }
+ * If ((Local0 & 0x2) == 0x2)
+ * {
+ * MethodEvent2()
+ * }
+ * ...
+ * }
+ * }
+ *
+ */
+
+#ifndef HW_ACPI_GENERIC_EVENT_DEVICE_H
+#define HW_ACPI_GENERIC_EVENT_DEVICE_H
+
+#include "hw/sysbus.h"
+#include "hw/acpi/memory_hotplug.h"
+#include "hw/acpi/ghes.h"
+#include "qom/object.h"
+
+#define ACPI_POWER_BUTTON_DEVICE "PWRB"
+
+#define TYPE_ACPI_GED "acpi-ged"
+OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED)
+
+#define ACPI_GED_EVT_SEL_OFFSET 0x0
+#define ACPI_GED_EVT_SEL_LEN 0x4
+
+#define ACPI_GED_REG_SLEEP_CTL 0x00
+#define ACPI_GED_REG_SLEEP_STS 0x01
+#define ACPI_GED_REG_RESET 0x02
+#define ACPI_GED_REG_COUNT 0x03
+
+/* ACPI_GED_REG_RESET value for reset */
+#define ACPI_GED_RESET_VALUE 0x42
+
+/* ACPI_GED_REG_SLEEP_CTL.SLP_TYP value for S5 (aka poweroff) */
+#define ACPI_GED_SLP_TYP_S5 0x05
+
+#define GED_DEVICE "GED"
+#define AML_GED_EVT_REG "EREG"
+#define AML_GED_EVT_SEL "ESEL"
+
+/*
+ * Platforms need to specify the GED event bitmap
+ * to describe what kind of events they want to support
+ * through GED.
+ */
+#define ACPI_GED_MEM_HOTPLUG_EVT 0x1
+#define ACPI_GED_PWR_DOWN_EVT 0x2
+#define ACPI_GED_NVDIMM_HOTPLUG_EVT 0x4
+
+typedef struct GEDState {
+ MemoryRegion evt;
+ MemoryRegion regs;
+ uint32_t sel;
+} GEDState;
+
+struct AcpiGedState {
+ SysBusDevice parent_obj;
+ MemHotplugState memhp_state;
+ MemoryRegion container_memhp;
+ GEDState ged_state;
+ uint32_t ged_event_bitmap;
+ qemu_irq irq;
+ AcpiGhesState ghes_state;
+};
+
+void build_ged_aml(Aml *table, const char* name, HotplugHandler *hotplug_dev,
+ uint32_t ged_irq, AmlRegionSpace rs, hwaddr ged_base);
+void acpi_dsdt_add_power_button(Aml *scope);
+
+#endif
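
Editor's note: a board selects which GED events it wants when creating the device; only bits set in the event bitmap are advertised and routed through ESEL. A hedged sketch of that wiring, loosely following Arm virt-style board code; the local variables, the "ged-event" property name (which the GED implementation uses to expose ged_event_bitmap), and the ged_base/ged_irq sources are assumptions:

    DeviceState *dev = qdev_new(TYPE_ACPI_GED);
    uint32_t event = ACPI_GED_PWR_DOWN_EVT;

    if (machine_supports_memhp) {                  /* hypothetical flag */
        event |= ACPI_GED_MEM_HOTPLUG_EVT;
    }
    qdev_prop_set_uint32(dev, "ged-event", event); /* property name assumed */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ged_base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, ged_irq);
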
diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h
new file mode 100644
index 0000000000..674f6958e9
--- /dev/null
+++ b/include/hw/acpi/ghes.h
@@ -0,0 +1,84 @@
+/*
+ * Support for generating APEI tables and recording CPER for Guests
+ *
+ * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Author: Dongjiu Geng <gengdongjiu@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ACPI_GHES_H
+#define ACPI_GHES_H
+
+#include "hw/acpi/bios-linker-loader.h"
+
+/*
+ * Values for Hardware Error Notification Type field
+ */
+enum AcpiGhesNotifyType {
+ /* Polled */
+ ACPI_GHES_NOTIFY_POLLED = 0,
+ /* External Interrupt */
+ ACPI_GHES_NOTIFY_EXTERNAL = 1,
+ /* Local Interrupt */
+ ACPI_GHES_NOTIFY_LOCAL = 2,
+ /* SCI */
+ ACPI_GHES_NOTIFY_SCI = 3,
+ /* NMI */
+ ACPI_GHES_NOTIFY_NMI = 4,
+ /* CMCI, ACPI 5.0: 18.3.2.7, Table 18-290 */
+ ACPI_GHES_NOTIFY_CMCI = 5,
+ /* MCE, ACPI 5.0: 18.3.2.7, Table 18-290 */
+ ACPI_GHES_NOTIFY_MCE = 6,
+ /* GPIO-Signal, ACPI 6.0: 18.3.2.7, Table 18-332 */
+ ACPI_GHES_NOTIFY_GPIO = 7,
+ /* ARMv8 SEA, ACPI 6.1: 18.3.2.9, Table 18-345 */
+ ACPI_GHES_NOTIFY_SEA = 8,
+ /* ARMv8 SEI, ACPI 6.1: 18.3.2.9, Table 18-345 */
+ ACPI_GHES_NOTIFY_SEI = 9,
+ /* External Interrupt - GSIV, ACPI 6.1: 18.3.2.9, Table 18-345 */
+ ACPI_GHES_NOTIFY_GSIV = 10,
+ /* Software Delegated Exception, ACPI 6.2: 18.3.2.9, Table 18-383 */
+ ACPI_GHES_NOTIFY_SDEI = 11,
+ /* 12 and greater are reserved */
+ ACPI_GHES_NOTIFY_RESERVED = 12
+};
+
+enum {
+ ACPI_HEST_SRC_ID_SEA = 0,
+ /* future ids go here */
+ ACPI_HEST_SRC_ID_RESERVED,
+};
+
+typedef struct AcpiGhesState {
+ uint64_t ghes_addr_le;
+ bool present; /* True if GHES is present at all on this board */
+} AcpiGhesState;
+
+void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker);
+void acpi_build_hest(GArray *table_data, BIOSLinker *linker,
+ const char *oem_id, const char *oem_table_id);
+void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s,
+ GArray *hardware_errors);
+int acpi_ghes_record_errors(uint8_t notify, uint64_t error_physical_addr);
+
+/**
+ * acpi_ghes_present: Report whether ACPI GHES table is present
+ *
+ * Returns: true if the system has an ACPI GHES table and it is
+ * safe to call acpi_ghes_record_errors() to record a memory error.
+ */
+bool acpi_ghes_present(void);
+#endif
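
Editor's note: the intended calling convention follows from the comment on acpi_ghes_present(): callers check for the table before recording an error. A minimal sketch; fault_paddr is an assumed local holding the faulting physical address:

    if (acpi_ghes_present()) {
        /* Record a CPER entry for an ARMv8 synchronous external abort. */
        acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, fault_paddr);
    }
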
diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h
index 59aeb06393..2faf7f0cae 100644
--- a/include/hw/acpi/ich9.h
+++ b/include/hw/acpi/ich9.h
@@ -7,7 +7,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,9 +24,12 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/cpu_hotplug.h"
#include "hw/acpi/cpu.h"
+#include "hw/acpi/pcihp.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/acpi_dev_interface.h"
-#include "hw/acpi/tco.h"
+#include "hw/acpi/ich9_tco.h"
+
+#define ACPI_PCIHP_ADDR_ICH9 0x0cc0
typedef struct ICH9LPCPMRegs {
/*
@@ -53,33 +56,38 @@ typedef struct ICH9LPCPMRegs {
AcpiCpuHotplug gpe_cpu;
CPUHotplugState cpuhp_state;
+ bool keep_pci_slot_hpc;
+ bool use_acpi_hotplug_bridge;
+ AcpiPciHpState acpi_pci_hotplug;
MemHotplugState acpi_memory_hotplug;
uint8_t disable_s3;
uint8_t disable_s4;
uint8_t s4_val;
- uint8_t smm_enabled;
+ bool smm_enabled;
+ bool smm_compat;
bool enable_tco;
TCOIORegs tco_regs;
} ICH9LPCPMRegs;
#define ACPI_PM_PROP_TCO_ENABLED "enable_tco"
-void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
- bool smm_enabled,
- qemu_irq sci_irq);
+void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq);
void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base);
extern const VMStateDescription vmstate_ich9_pm;
-void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp);
+void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm);
+void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp);
void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
+bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus);
void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
#endif /* HW_ACPI_ICH9_H */
diff --git a/include/hw/acpi/tco.h b/include/hw/acpi/ich9_tco.h
index 52ad767ddd..2562a7cf39 100644
--- a/include/hw/acpi/tco.h
+++ b/include/hw/acpi/ich9_tco.h
@@ -1,15 +1,17 @@
/*
- * QEMU ICH9 TCO emulation
+ * QEMU ICH9 TCO emulation (total cost of ownership)
*
* Copyright (c) 2015 Paulo Alcantara <pcacjr@zytor.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
+
#ifndef HW_ACPI_TCO_H
#define HW_ACPI_TCO_H
-#include "qemu-common.h"
+#include "exec/memory.h"
+#include "migration/vmstate.h"
/* As per ICH9 spec, the internal timer has an error of ~0.6s on every tick */
#define TCO_TICK_NSEC 600000000LL
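
Editor's note: TCO_TICK_NSEC is the per-tick period used when converting the guest-programmed TCO timer count into a QEMU timer deadline. A hedged sketch of that conversion; field names such as tco.tmr, expire_time and tco_timer are assumptions about the TCOIORegs layout, which is not visible in this hunk:

static void tco_timer_reload_sketch(TCOIORegs *tr)
{
    /* Arm the watchdog 'tmr' ticks (~0.6 s each) from now. */
    tr->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                      (int64_t)tr->tco.tmr * TCO_TICK_NSEC;
    timer_mod(tr->tco_timer, tr->expire_time);
}
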
diff --git a/include/hw/acpi/ipmi.h b/include/hw/acpi/ipmi.h
index c38483565c..6c8079c97a 100644
--- a/include/hw/acpi/ipmi.h
+++ b/include/hw/acpi/ipmi.h
@@ -9,13 +9,8 @@
#ifndef HW_ACPI_IPMI_H
#define HW_ACPI_IPMI_H
-#include "hw/acpi/aml-build.h"
+#include "hw/acpi/acpi_aml_interface.h"
-/*
- * Add ACPI IPMI entries for all registered IPMI devices whose parent
- * bus matches the given bus. The resource is the ACPI resource that
- * contains the IPMI device, this is required for the I2C CRS.
- */
-void build_acpi_ipmi_devices(Aml *table, BusState *bus);
+void build_ipmi_dev_aml(AcpiDevAmlIf *adev, Aml *scope);
#endif /* HW_ACPI_IPMI_H */
diff --git a/include/hw/acpi/memory_hotplug.h b/include/hw/acpi/memory_hotplug.h
index 77c65765d6..38841d7b06 100644
--- a/include/hw/acpi/memory_hotplug.h
+++ b/include/hw/acpi/memory_hotplug.h
@@ -1,10 +1,15 @@
#ifndef QEMU_HW_ACPI_MEMORY_HOTPLUG_H
#define QEMU_HW_ACPI_MEMORY_HOTPLUG_H
+#include "qapi/qapi-types-acpi.h"
#include "hw/qdev-core.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
+#define MEMORY_SLOT_SCAN_METHOD "MSCN"
+#define MEMORY_DEVICES_CONTAINER "\\_SB.MHPC"
+#define MEMORY_HOTPLUG_IO_LEN 24
+
/**
* MemStatus:
* @is_removing: the memory device in slot has been requested to be ejected.
@@ -29,7 +34,7 @@ typedef struct MemHotplugState {
} MemHotplugState;
void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
- MemHotplugState *state, uint16_t io_base);
+ MemHotplugState *state, hwaddr io_base);
void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
DeviceState *dev, Error **errp);
@@ -48,5 +53,6 @@ void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list);
void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem,
const char *res_root,
- const char *event_handler_method);
+ const char *event_handler_method,
+ AmlRegionSpace rs, hwaddr memhp_io_base);
#endif
diff --git a/include/hw/acpi/pc-hotplug.h b/include/hw/acpi/pc-hotplug.h
index 31bc9191c3..8a654248e9 100644
--- a/include/hw/acpi/pc-hotplug.h
+++ b/include/hw/acpi/pc-hotplug.h
@@ -13,7 +13,7 @@
#define PC_HOTPLUG_H
/*
- * ONLY DEFINEs are permited in this file since it's shared
+ * ONLY DEFINEs are permitted in this file since it's shared
* between C and ASL code.
*/
diff --git a/include/hw/acpi/pci.h b/include/hw/acpi/pci.h
new file mode 100644
index 0000000000..467a99461c
--- /dev/null
+++ b/include/hw/acpi/pci.h
@@ -0,0 +1,43 @@
+/*
+ * Support for generating PCI related ACPI tables and passing them to Guests
+ *
+ * Copyright (C) 2006 Fabrice Bellard
+ * Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net>
+ * Copyright (C) 2013-2019 Red Hat Inc
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author: Wei Yang <richardw.yang@linux.intel.com>
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ACPI_PCI_H
+#define HW_ACPI_PCI_H
+
+#include "hw/acpi/bios-linker-loader.h"
+#include "hw/acpi/acpi_aml_interface.h"
+
+typedef struct AcpiMcfgInfo {
+ uint64_t base;
+ uint32_t size;
+} AcpiMcfgInfo;
+
+void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info,
+ const char *oem_id, const char *oem_table_id);
+Aml *aml_pci_device_dsm(void);
+
+void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus);
+void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope);
+#endif
diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h
index 8bc4a4c01d..ac21a95913 100644
--- a/include/hw/acpi/pcihp.h
+++ b/include/hw/acpi/pcihp.h
@@ -10,7 +10,7 @@
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
- * License version 2 as published by the Free Software Foundation.
+ * License version 2.1 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -46,16 +46,19 @@ typedef struct AcpiPciHpPciStatus {
typedef struct AcpiPciHpState {
AcpiPciHpPciStatus acpi_pcihp_pci_status[ACPI_PCIHP_MAX_HOTPLUG_BUS];
uint32_t hotplug_select;
+ uint32_t acpi_index;
PCIBus *root;
MemoryRegion io;
- bool legacy_piix;
uint16_t io_base;
uint16_t io_len;
+ bool use_acpi_hotplug_bridge;
+ bool use_acpi_root_pci_hotplug;
} AcpiPciHpState;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root,
- MemoryRegion *address_space_io, bool bridges_enabled);
+ MemoryRegion *io, uint16_t io_base);
+bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus);
void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
@@ -69,15 +72,19 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
/* Called on reset */
void acpi_pcihp_reset(AcpiPciHpState *s);
+void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus);
+
extern const VMStateDescription vmstate_acpi_pcihp_pci_status;
-#define VMSTATE_PCI_HOTPLUG(pcihp, state, test_pcihp) \
+#define VMSTATE_PCI_HOTPLUG(pcihp, state, test_pcihp, test_acpi_index) \
VMSTATE_UINT32_TEST(pcihp.hotplug_select, state, \
test_pcihp), \
VMSTATE_STRUCT_ARRAY_TEST(pcihp.acpi_pcihp_pci_status, state, \
ACPI_PCIHP_MAX_HOTPLUG_BUS, \
test_pcihp, 1, \
vmstate_acpi_pcihp_pci_status, \
- AcpiPciHpPciStatus)
+ AcpiPciHpPciStatus), \
+ VMSTATE_UINT32_TEST(pcihp.acpi_index, state, \
+ test_acpi_index)
#endif
diff --git a/include/hw/acpi/piix4.h b/include/hw/acpi/piix4.h
index 26c2370e30..eb1c122d80 100644
--- a/include/hw/acpi/piix4.h
+++ b/include/hw/acpi/piix4.h
@@ -1,6 +1,73 @@
+/*
+ * ACPI implementation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
#ifndef HW_ACPI_PIIX4_H
#define HW_ACPI_PIIX4_H
-Object *piix4_pm_find(void);
+#include "hw/pci/pci_device.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/cpu_hotplug.h"
+#include "hw/acpi/memory_hotplug.h"
+#include "hw/acpi/pcihp.h"
+#include "hw/i2c/pm_smbus.h"
+#include "hw/isa/apm.h"
+
+#define TYPE_PIIX4_PM "PIIX4_PM"
+OBJECT_DECLARE_SIMPLE_TYPE(PIIX4PMState, PIIX4_PM)
+
+struct PIIX4PMState {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion io;
+ uint32_t io_base;
+
+ MemoryRegion io_gpe;
+ ACPIREGS ar;
+
+ APMState apm;
+
+ PMSMBus smb;
+ uint32_t smb_io_base;
+
+ qemu_irq irq;
+ qemu_irq smi_irq;
+ bool smm_enabled;
+ bool smm_compat;
+ Notifier machine_ready;
+ Notifier powerdown_notifier;
+
+ AcpiPciHpState acpi_pci_hotplug;
+ bool not_migrate_acpi_index;
+
+ uint8_t disable_s3;
+ uint8_t disable_s4;
+ uint8_t s4_val;
+
+ bool cpu_hotplug_legacy;
+ AcpiCpuHotplug gpe_cpu;
+ CPUHotplugState cpuhp_state;
+
+ MemHotplugState acpi_memory_hotplug;
+};
#endif
diff --git a/include/hw/acpi/tpm.h b/include/hw/acpi/tpm.h
index 3580ffd50c..579c45f5ba 100644
--- a/include/hw/acpi/tpm.h
+++ b/include/hw/acpi/tpm.h
@@ -18,6 +18,10 @@
#include "qemu/units.h"
#include "hw/registerfields.h"
+#include "hw/acpi/aml-build.h"
+#include "sysemu/tpm.h"
+
+#ifdef CONFIG_TPM
#define TPM_TIS_ADDR_BASE 0xFED40000
#define TPM_TIS_ADDR_SIZE 0x5000
@@ -89,6 +93,7 @@
#define TPM_TIS_CAP_DATA_TRANSFER_64B (3 << 9)
#define TPM_TIS_CAP_DATA_TRANSFER_LEGACY (0 << 9)
#define TPM_TIS_CAP_BURST_COUNT_DYNAMIC (0 << 8)
+#define TPM_TIS_CAP_BURST_COUNT_STATIC (1 << 8)
#define TPM_TIS_CAP_INTERRUPT_LOW_LEVEL (1 << 4) /* support is mandatory */
#define TPM_TIS_CAPABILITIES_SUPPORTED1_3 \
(TPM_TIS_CAP_INTERRUPT_LOW_LEVEL | \
@@ -188,4 +193,65 @@ REG32(CRB_DATA_BUFFER, 0x80)
#define TPM2_START_METHOD_MMIO 6
#define TPM2_START_METHOD_CRB 7
+/*
+ * Physical Presence Interface
+ */
+#define TPM_PPI_ADDR_SIZE 0x400
+#define TPM_PPI_ADDR_BASE 0xFED45000
+
+#define TPM_PPI_VERSION_NONE 0
+#define TPM_PPI_VERSION_1_30 1
+
+/* whether function is blocked by BIOS settings; bits 0, 1, 2 */
+#define TPM_PPI_FUNC_NOT_IMPLEMENTED (0 << 0)
+#define TPM_PPI_FUNC_BIOS_ONLY (1 << 0)
+#define TPM_PPI_FUNC_BLOCKED (2 << 0)
+#define TPM_PPI_FUNC_ALLOWED_USR_REQ (3 << 0)
+#define TPM_PPI_FUNC_ALLOWED_USR_NOT_REQ (4 << 0)
+#define TPM_PPI_FUNC_MASK (7 << 0)
+
+/* TPM TIS I2C registers */
+#define TPM_I2C_REG_LOC_SEL 0x00
+#define TPM_I2C_REG_ACCESS 0x04
+#define TPM_I2C_REG_INT_ENABLE 0x08
+#define TPM_I2C_REG_INT_CAPABILITY 0x14
+#define TPM_I2C_REG_STS 0x18
+#define TPM_I2C_REG_DATA_FIFO 0x24
+#define TPM_I2C_REG_INTF_CAPABILITY 0x30
+#define TPM_I2C_REG_I2C_DEV_ADDRESS 0x38
+#define TPM_I2C_REG_DATA_CSUM_ENABLE 0x40
+#define TPM_I2C_REG_DATA_CSUM_GET 0x44
+#define TPM_I2C_REG_DID_VID 0x48
+#define TPM_I2C_REG_RID 0x4c
+#define TPM_I2C_REG_UNKNOWN 0xff
+
+/* I2C specific interface capabilities */
+#define TPM_I2C_CAP_INTERFACE_TYPE (0x2 << 0) /* FIFO interface */
+#define TPM_I2C_CAP_INTERFACE_VER (0x0 << 4) /* TCG I2C intf 1.0 */
+#define TPM_I2C_CAP_TPM2_FAMILY (0x1 << 7) /* TPM 2.0 family. */
+#define TPM_I2C_CAP_DEV_ADDR_CHANGE (0x0 << 27) /* No dev addr chng */
+#define TPM_I2C_CAP_BURST_COUNT_STATIC (0x1 << 29) /* Burst count static */
+#define TPM_I2C_CAP_LOCALITY_CAP (0x1 << 25) /* 0-5 locality */
+#define TPM_I2C_CAP_BUS_SPEED (3 << 21) /* std and fast mode */
+
+/*
+ * TPM_I2C_STS masks for read/writing bits from/to TIS
+ * TPM_STS mask for read bits 31:26 must be zero
+ */
+#define TPM_I2C_STS_READ_MASK 0x00ffffdd
+#define TPM_I2C_STS_WRITE_MASK 0x03000062
+
+/* Checksum enabled. */
+#define TPM_DATA_CSUM_ENABLED 0x1
+
+/*
+ * TPM_I2C_INT_ENABLE mask. The Linux kernel does not support
+ * interrupts, hence the mask is set to 0.
+ */
+#define TPM_I2C_INT_ENABLE_MASK 0x0
+
+void tpm_build_ppi_acpi(TPMIf *tpm, Aml *dev);
+
+#endif /* CONFIG_TPM */
+
#endif /* HW_ACPI_TPM_H */
diff --git a/include/hw/acpi/utils.h b/include/hw/acpi/utils.h
new file mode 100644
index 0000000000..0022df027d
--- /dev/null
+++ b/include/hw/acpi/utils.h
@@ -0,0 +1,8 @@
+#ifndef HW_ACPI_UTILS_H
+#define HW_ACPI_UTILS_H
+
+#include "hw/nvram/fw_cfg.h"
+
+MemoryRegion *acpi_add_rom_blob(FWCfgCallback update, void *opaque,
+ GArray *blob, const char *name);
+#endif
diff --git a/include/hw/acpi/vmgenid.h b/include/hw/acpi/vmgenid.h
index 38586ecbdf..fb135d5bcb 100644
--- a/include/hw/acpi/vmgenid.h
+++ b/include/hw/acpi/vmgenid.h
@@ -2,35 +2,36 @@
#define ACPI_VMGENID_H
#include "hw/acpi/bios-linker-loader.h"
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
#include "qemu/uuid.h"
+#include "qom/object.h"
-#define VMGENID_DEVICE "vmgenid"
+#define TYPE_VMGENID "vmgenid"
#define VMGENID_GUID "guid"
#define VMGENID_GUID_FW_CFG_FILE "etc/vmgenid_guid"
#define VMGENID_ADDR_FW_CFG_FILE "etc/vmgenid_addr"
#define VMGENID_FW_CFG_SIZE 4096 /* Occupy a page of memory */
#define VMGENID_GUID_OFFSET 40 /* allow space for
- * OVMF SDT Header Probe Supressor
+ * OVMF SDT Header Probe Suppressor
*/
-#define VMGENID(obj) OBJECT_CHECK(VmGenIdState, (obj), VMGENID_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(VmGenIdState, VMGENID)
-typedef struct VmGenIdState {
- DeviceClass parent_obj;
+struct VmGenIdState {
+ DeviceState parent_obj;
QemuUUID guid; /* The 128-bit GUID seen by the guest */
uint8_t vmgenid_addr_le[8]; /* Address of the GUID (little-endian) */
-} VmGenIdState;
+};
/* returns NULL unless there is exactly one device */
static inline Object *find_vmgenid_dev(void)
{
- return object_resolve_path_type("", VMGENID_DEVICE, NULL);
+ return object_resolve_path_type("", TYPE_VMGENID, NULL);
}
void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid,
- BIOSLinker *linker);
+ BIOSLinker *linker, const char *oem_id);
void vmgenid_add_fw_cfg(VmGenIdState *vms, FWCfgState *s, GArray *guid);
#endif
diff --git a/include/hw/adc/aspeed_adc.h b/include/hw/adc/aspeed_adc.h
new file mode 100644
index 0000000000..ff1d06ea91
--- /dev/null
+++ b/include/hw/adc/aspeed_adc.h
@@ -0,0 +1,56 @@
+/*
+ * Aspeed ADC
+ *
+ * Copyright 2017-2021 IBM Corp.
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_ADC_ASPEED_ADC_H
+#define HW_ADC_ASPEED_ADC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_ADC "aspeed.adc"
+#define TYPE_ASPEED_2400_ADC TYPE_ASPEED_ADC "-ast2400"
+#define TYPE_ASPEED_2500_ADC TYPE_ASPEED_ADC "-ast2500"
+#define TYPE_ASPEED_2600_ADC TYPE_ASPEED_ADC "-ast2600"
+#define TYPE_ASPEED_1030_ADC TYPE_ASPEED_ADC "-ast1030"
+OBJECT_DECLARE_TYPE(AspeedADCState, AspeedADCClass, ASPEED_ADC)
+
+#define TYPE_ASPEED_ADC_ENGINE "aspeed.adc.engine"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedADCEngineState, ASPEED_ADC_ENGINE)
+
+#define ASPEED_ADC_NR_CHANNELS 16
+#define ASPEED_ADC_NR_REGS (0xD0 >> 2)
+
+struct AspeedADCEngineState {
+ /* <private> */
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+ uint32_t engine_id;
+ uint32_t nr_channels;
+ uint32_t regs[ASPEED_ADC_NR_REGS];
+};
+
+struct AspeedADCState {
+ /* <private> */
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+
+ AspeedADCEngineState engines[2];
+};
+
+struct AspeedADCClass {
+ SysBusDeviceClass parent_class;
+
+ uint32_t nr_engines;
+};
+
+#endif /* HW_ADC_ASPEED_ADC_H */
diff --git a/include/hw/adc/max111x.h b/include/hw/adc/max111x.h
new file mode 100644
index 0000000000..beff59c815
--- /dev/null
+++ b/include/hw/adc/max111x.h
@@ -0,0 +1,56 @@
+/*
+ * Maxim MAX1110/1111 ADC chip emulation.
+ *
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Written by Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * This code is licensed under the GNU GPLv2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#ifndef HW_MISC_MAX111X_H
+#define HW_MISC_MAX111X_H
+
+#include "hw/ssi/ssi.h"
+#include "qom/object.h"
+
+/*
+ * This is a model of the Maxim MAX1110/1111 ADC chip, which for QEMU
+ * is an SSI slave device. It has either 4 (max1110) or 8 (max1111)
+ * 8-bit ADC channels.
+ *
+ * QEMU interface:
+ * + GPIO inputs 0..3 (for max1110) or 0..7 (for max1111): set the value
+ * of each ADC input, as an unsigned 8-bit value
+ * + GPIO output 0: interrupt line
+ * + Properties "input0" to "input3" (max1110) or "input0" to "input7"
+ * (max1111): initial reset values for ADC inputs.
+ *
+ * Known bugs:
+ * + the interrupt line is not correctly implemented, and will never
+ * be lowered once it has been asserted.
+ */
+struct MAX111xState {
+ SSIPeripheral parent_obj;
+
+ qemu_irq interrupt;
+ /* Values of inputs at system reset (settable by QOM property) */
+ uint8_t reset_input[8];
+
+ uint8_t tb1, rb2, rb3;
+ int cycle;
+
+ uint8_t input[8];
+ int inputs, com;
+};
+
+#define TYPE_MAX_111X "max111x"
+
+OBJECT_DECLARE_SIMPLE_TYPE(MAX111xState, MAX_111X)
+
+#define TYPE_MAX_1110 "max1110"
+#define TYPE_MAX_1111 "max1111"
+
+#endif
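
Editor's note: since the ADC inputs are plain GPIO input lines, board or test code can drive a channel with an 8-bit sample at runtime. A minimal sketch; dev stands for an already-created max1110/max1111 DeviceState and is an assumption:

    /* Present 0x80 (mid-scale) on ADC channel 0 of the chip. */
    qemu_set_irq(qdev_get_gpio_in(dev, 0), 0x80);
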
diff --git a/include/hw/adc/npcm7xx_adc.h b/include/hw/adc/npcm7xx_adc.h
new file mode 100644
index 0000000000..93330a408d
--- /dev/null
+++ b/include/hw/adc/npcm7xx_adc.h
@@ -0,0 +1,68 @@
+/*
+ * Nuvoton NPCM7xx ADC Module
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_ADC_H
+#define NPCM7XX_ADC_H
+
+#include "hw/clock.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+
+#define NPCM7XX_ADC_NUM_INPUTS 8
+/**
+ * This value should not be changed unless the write_adc_calibration()
+ * function in hw/arm/npcm7xx.c is also changed.
+ */
+#define NPCM7XX_ADC_NUM_CALIB 2
+
+/**
+ * struct NPCM7xxADCState - Analog to Digital Converter Module device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @conv_timer: The timer counts down remaining cycles for the conversion.
+ * @irq: GIC interrupt line to fire on expiration (if enabled).
+ * @con: The Control Register.
+ * @data: The Data Buffer.
+ * @clock: The ADC Clock.
+ * @adci: The input voltage in units of uV. 1uV = 1e-6V.
+ * @vref: The external reference voltage.
+ * @iref: The internal reference voltage, initialized at launch time.
+ * @calibration_r_values: The calibrated output values of 0.5V and 1.5V for the ADC.
+ */
+struct NPCM7xxADCState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ QEMUTimer conv_timer;
+
+ qemu_irq irq;
+ uint32_t con;
+ uint32_t data;
+ Clock *clock;
+
+ /* Voltages are in units of uV. 1V = 1000000uV. */
+ uint32_t adci[NPCM7XX_ADC_NUM_INPUTS];
+ uint32_t vref;
+ uint32_t iref;
+
+ uint16_t calibration_r_values[NPCM7XX_ADC_NUM_CALIB];
+};
+
+#define TYPE_NPCM7XX_ADC "npcm7xx-adc"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxADCState, NPCM7XX_ADC)
+
+#endif /* NPCM7XX_ADC_H */
diff --git a/include/hw/adc/stm32f2xx_adc.h b/include/hw/adc/stm32f2xx_adc.h
index a72f734eb1..42b48981f2 100644
--- a/include/hw/adc/stm32f2xx_adc.h
+++ b/include/hw/adc/stm32f2xx_adc.h
@@ -25,6 +25,9 @@
#ifndef HW_STM32F2XX_ADC_H
#define HW_STM32F2XX_ADC_H
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
#define ADC_SR 0x00
#define ADC_CR1 0x04
#define ADC_CR2 0x08
@@ -56,10 +59,9 @@
#define ADC_COMMON_ADDRESS 0x100
#define TYPE_STM32F2XX_ADC "stm32f2xx-adc"
-#define STM32F2XX_ADC(obj) \
- OBJECT_CHECK(STM32F2XXADCState, (obj), TYPE_STM32F2XX_ADC)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXADCState, STM32F2XX_ADC)
-typedef struct {
+struct STM32F2XXADCState {
/* <private> */
SysBusDevice parent_obj;
@@ -82,6 +84,6 @@ typedef struct {
uint32_t adc_dr;
qemu_irq irq;
-} STM32F2XXADCState;
+};
#endif /* HW_STM32F2XX_ADC_H */
diff --git a/include/hw/misc/zynq-xadc.h b/include/hw/adc/zynq-xadc.h
index f1a410a376..c10cc4c379 100644
--- a/include/hw/misc/zynq-xadc.h
+++ b/include/hw/adc/zynq-xadc.h
@@ -16,17 +16,17 @@
#define ZYNQ_XADC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define ZYNQ_XADC_MMIO_SIZE 0x0020
#define ZYNQ_XADC_NUM_IO_REGS (ZYNQ_XADC_MMIO_SIZE / 4)
#define ZYNQ_XADC_NUM_ADC_REGS 128
#define ZYNQ_XADC_FIFO_DEPTH 15
-#define TYPE_ZYNQ_XADC "xlnx,zynq-xadc"
-#define ZYNQ_XADC(obj) \
- OBJECT_CHECK(ZynqXADCState, (obj), TYPE_ZYNQ_XADC)
+#define TYPE_ZYNQ_XADC "xlnx-zynq-xadc"
+OBJECT_DECLARE_SIMPLE_TYPE(ZynqXADCState, ZYNQ_XADC)
-typedef struct ZynqXADCState {
+struct ZynqXADCState {
/*< private >*/
SysBusDevice parent_obj;
@@ -39,8 +39,7 @@ typedef struct ZynqXADCState {
uint16_t xadc_dfifo[ZYNQ_XADC_FIFO_DEPTH];
uint16_t xadc_dfifo_entries;
- struct IRQState *qemu_irq;
-
-} ZynqXADCState;
+ qemu_irq irq;
+};
#endif /* ZYNQ_XADC_H */
diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h
index 389e128d0f..67a9a17b86 100644
--- a/include/hw/arm/allwinner-a10.h
+++ b/include/hw/arm/allwinner-a10.h
@@ -1,42 +1,70 @@
-#ifndef ALLWINNER_H_
+#ifndef HW_ARM_ALLWINNER_A10_H
+#define HW_ARM_ALLWINNER_A10_H
-#include "qemu-common.h"
-#include "qemu/error-report.h"
-#include "hw/char/serial.h"
-#include "hw/arm/arm.h"
#include "hw/timer/allwinner-a10-pit.h"
#include "hw/intc/allwinner-a10-pic.h"
#include "hw/net/allwinner_emac.h"
-#include "hw/ide/pci.h"
-#include "hw/ide/ahci.h"
+#include "hw/sd/allwinner-sdhost.h"
+#include "hw/ide/ahci-sysbus.h"
+#include "hw/usb/hcd-ohci.h"
+#include "hw/usb/hcd-ehci.h"
+#include "hw/rtc/allwinner-rtc.h"
+#include "hw/misc/allwinner-a10-ccm.h"
+#include "hw/misc/allwinner-a10-dramc.h"
+#include "hw/i2c/allwinner-i2c.h"
+#include "hw/watchdog/allwinner-wdt.h"
+#include "sysemu/block-backend.h"
-#include "sysemu/sysemu.h"
+#include "target/arm/cpu.h"
+#include "qom/object.h"
-#define AW_A10_PIC_REG_BASE 0x01c20400
-#define AW_A10_PIT_REG_BASE 0x01c20c00
-#define AW_A10_UART0_REG_BASE 0x01c28000
-#define AW_A10_EMAC_BASE 0x01c0b000
-#define AW_A10_SATA_BASE 0x01c18000
-
#define AW_A10_SDRAM_BASE 0x40000000
+#define AW_A10_NUM_USB 2
+
#define TYPE_AW_A10 "allwinner-a10"
-#define AW_A10(obj) OBJECT_CHECK(AwA10State, (obj), TYPE_AW_A10)
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10State, AW_A10)
-typedef struct AwA10State {
+struct AwA10State {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
ARMCPU cpu;
- qemu_irq irq[AW_A10_PIC_INT_NR];
+ AwA10ClockCtlState ccm;
+ AwA10DramControllerState dramc;
AwA10PITState timer;
AwA10PICState intc;
AwEmacState emac;
AllwinnerAHCIState sata;
+ AwSdHostState mmc0;
+ AWI2CState i2c0;
+ AwRtcState rtc;
+ AwWdtState wdt;
MemoryRegion sram_a;
-} AwA10State;
+ EHCISysBusState ehci[AW_A10_NUM_USB];
+ OHCISysBusState ohci[AW_A10_NUM_USB];
+};
+
+/**
+ * Emulate Boot ROM firmware setup functionality.
+ *
+ * A real Allwinner A10 SoC contains a Boot ROM
+ * which is the first code that runs right after
+ * the SoC is powered on. The Boot ROM is responsible
+ * for loading user code (e.g. a bootloader) from any
+ * of the supported external devices and writing the
+ * downloaded code to internal SRAM. After loading, the SoC
+ * begins executing the code written to SRAM.
+ *
+ * This function emulates the Boot ROM by copying 32 KiB of data,
+ * starting at offset 8 KiB in the given block device, and writing it
+ * to the start of the first internal SRAM memory.
+ *
+ * @s: Allwinner A10 state object pointer
+ * @blk: Block backend device object pointer
+ */
+void allwinner_a10_bootrom_setup(AwA10State *s, BlockBackend *blk);
-#define ALLWINNER_H_
#endif
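
Editor's note: board code is expected to call allwinner_a10_bootrom_setup() only when a backing drive is actually present. A hedged sketch of how a cubieboard-style machine might do this; the a10 pointer to the instantiated SoC is an assumption:

    DriveInfo *di = drive_get(IF_SD, 0, 0);
    BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;

    if (blk) {
        /* Copy 32 KiB of boot code (at an 8 KiB offset) from SD into SRAM A. */
        allwinner_a10_bootrom_setup(a10, blk);
    }
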
diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h
new file mode 100644
index 0000000000..24ba4e1bf4
--- /dev/null
+++ b/include/hw/arm/allwinner-h3.h
@@ -0,0 +1,172 @@
+/*
+ * Allwinner H3 System on Chip emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * The Allwinner H3 is a System on Chip containing four ARM Cortex-A7
+ * processor cores. Features and specifications include DDR2/DDR3 memory,
+ * SD/MMC storage cards, 10/100/1000Mbit Ethernet, USB 2.0, HDMI and
+ * various I/O modules.
+ *
+ * This implementation is based on the following datasheet:
+ *
+ * https://linux-sunxi.org/File:Allwinner_H3_Datasheet_V1.2.pdf
+ *
+ * The latest datasheet and more info can be found on the Linux Sunxi wiki:
+ *
+ * https://linux-sunxi.org/H3
+ */
+
+#ifndef HW_ARM_ALLWINNER_H3_H
+#define HW_ARM_ALLWINNER_H3_H
+
+#include "qom/object.h"
+#include "hw/timer/allwinner-a10-pit.h"
+#include "hw/intc/arm_gic.h"
+#include "hw/misc/allwinner-h3-ccu.h"
+#include "hw/misc/allwinner-cpucfg.h"
+#include "hw/misc/allwinner-h3-dramc.h"
+#include "hw/misc/allwinner-h3-sysctrl.h"
+#include "hw/misc/allwinner-sid.h"
+#include "hw/sd/allwinner-sdhost.h"
+#include "hw/net/allwinner-sun8i-emac.h"
+#include "hw/rtc/allwinner-rtc.h"
+#include "hw/i2c/allwinner-i2c.h"
+#include "hw/watchdog/allwinner-wdt.h"
+#include "target/arm/cpu.h"
+#include "sysemu/block-backend.h"
+
+/**
+ * Allwinner H3 device list
+ *
+ * This enumeration can be used to refer to a particular device in the
+ * Allwinner H3 SoC. For example, the physical memory base address of
+ * each device can be found in the AwH3State object via its memmap member,
+ * using the device enum value as the index.
+ *
+ * @see AwH3State
+ */
+enum {
+ AW_H3_DEV_SRAM_A1,
+ AW_H3_DEV_SRAM_A2,
+ AW_H3_DEV_SRAM_C,
+ AW_H3_DEV_SYSCTRL,
+ AW_H3_DEV_MMC0,
+ AW_H3_DEV_SID,
+ AW_H3_DEV_EHCI0,
+ AW_H3_DEV_OHCI0,
+ AW_H3_DEV_EHCI1,
+ AW_H3_DEV_OHCI1,
+ AW_H3_DEV_EHCI2,
+ AW_H3_DEV_OHCI2,
+ AW_H3_DEV_EHCI3,
+ AW_H3_DEV_OHCI3,
+ AW_H3_DEV_CCU,
+ AW_H3_DEV_PIT,
+ AW_H3_DEV_UART0,
+ AW_H3_DEV_UART1,
+ AW_H3_DEV_UART2,
+ AW_H3_DEV_UART3,
+ AW_H3_DEV_EMAC,
+ AW_H3_DEV_TWI0,
+ AW_H3_DEV_TWI1,
+ AW_H3_DEV_TWI2,
+ AW_H3_DEV_DRAMCOM,
+ AW_H3_DEV_DRAMCTL,
+ AW_H3_DEV_DRAMPHY,
+ AW_H3_DEV_GIC_DIST,
+ AW_H3_DEV_GIC_CPU,
+ AW_H3_DEV_GIC_HYP,
+ AW_H3_DEV_GIC_VCPU,
+ AW_H3_DEV_RTC,
+ AW_H3_DEV_CPUCFG,
+ AW_H3_DEV_R_TWI,
+ AW_H3_DEV_SDRAM,
+ AW_H3_DEV_WDT
+};
+
+/** Total number of CPU cores in the H3 SoC */
+#define AW_H3_NUM_CPUS (4)
+
+/**
+ * Allwinner H3 object model
+ * @{
+ */
+
+/** Object type for the Allwinner H3 SoC */
+#define TYPE_AW_H3 "allwinner-h3"
+
+/** Convert input object to Allwinner H3 state object */
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3State, AW_H3)
+
+/** @} */
+
+/**
+ * Allwinner H3 object
+ *
+ * This struct contains the state of all the devices
+ * which are currently emulated by the H3 SoC code.
+ */
+struct AwH3State {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ ARMCPU cpus[AW_H3_NUM_CPUS];
+ const hwaddr *memmap;
+ AwA10PITState timer;
+ AwH3ClockCtlState ccu;
+ AwCpuCfgState cpucfg;
+ AwH3DramCtlState dramc;
+ AwH3SysCtrlState sysctrl;
+ AwSidState sid;
+ AwSdHostState mmc0;
+ AWI2CState i2c0;
+ AWI2CState i2c1;
+ AWI2CState i2c2;
+ AWI2CState r_twi;
+ AwSun8iEmacState emac;
+ AwRtcState rtc;
+ AwWdtState wdt;
+ GICState gic;
+ MemoryRegion sram_a1;
+ MemoryRegion sram_a2;
+ MemoryRegion sram_c;
+};
+
+/**
+ * Emulate Boot ROM firmware setup functionality.
+ *
+ * A real Allwinner H3 SoC contains a Boot ROM
+ * which is the first code that runs right after
+ * the SoC is powered on. The Boot ROM is responsible
+ * for loading user code (e.g. a bootloader) from any
+ * of the supported external devices and writing the
+ * downloaded code to internal SRAM. After loading, the SoC
+ * begins executing the code written to SRAM.
+ *
+ * This function emulates the Boot ROM by copying 32 KiB of data
+ * from the given block device and writing it to the start of the
+ * first internal SRAM memory.
+ *
+ * @s: Allwinner H3 state object pointer
+ * @blk: Block backend device object pointer
+ */
+void allwinner_h3_bootrom_setup(AwH3State *s, BlockBackend *blk);
+
+#endif /* HW_ARM_ALLWINNER_H3_H */
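
Editor's note: the device enum at the top of this header is documented as an index into the SoC's memmap table. A one-line sketch of the lookup it describes; s is an assumed AwH3State pointer:

    /* Resolve the MMIO base of UART0 from the per-SoC memory map. */
    hwaddr uart0_base = s->memmap[AW_H3_DEV_UART0];
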
diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h
new file mode 100644
index 0000000000..614e74b7ed
--- /dev/null
+++ b/include/hw/arm/allwinner-r40.h
@@ -0,0 +1,157 @@
+/*
+ * Allwinner R40/A40i/T3 System on Chip emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ARM_ALLWINNER_R40_H
+#define HW_ARM_ALLWINNER_R40_H
+
+#include "qom/object.h"
+#include "hw/timer/allwinner-a10-pit.h"
+#include "hw/ide/ahci-sysbus.h"
+#include "hw/intc/arm_gic.h"
+#include "hw/sd/allwinner-sdhost.h"
+#include "hw/misc/allwinner-r40-ccu.h"
+#include "hw/misc/allwinner-r40-dramc.h"
+#include "hw/misc/allwinner-sramc.h"
+#include "hw/i2c/allwinner-i2c.h"
+#include "hw/net/allwinner_emac.h"
+#include "hw/net/allwinner-sun8i-emac.h"
+#include "hw/usb/hcd-ohci.h"
+#include "hw/usb/hcd-ehci.h"
+#include "hw/watchdog/allwinner-wdt.h"
+#include "target/arm/cpu.h"
+#include "sysemu/block-backend.h"
+
+enum {
+ AW_R40_DEV_SRAM_A1,
+ AW_R40_DEV_SRAM_A2,
+ AW_R40_DEV_SRAM_A3,
+ AW_R40_DEV_SRAM_A4,
+ AW_R40_DEV_SRAMC,
+ AW_R40_DEV_EMAC,
+ AW_R40_DEV_MMC0,
+ AW_R40_DEV_MMC1,
+ AW_R40_DEV_MMC2,
+ AW_R40_DEV_MMC3,
+ AW_R40_DEV_AHCI,
+ AW_R40_DEV_EHCI1,
+ AW_R40_DEV_OHCI1,
+ AW_R40_DEV_EHCI2,
+ AW_R40_DEV_OHCI2,
+ AW_R40_DEV_CCU,
+ AW_R40_DEV_PIT,
+ AW_R40_DEV_WDT,
+ AW_R40_DEV_UART0,
+ AW_R40_DEV_UART1,
+ AW_R40_DEV_UART2,
+ AW_R40_DEV_UART3,
+ AW_R40_DEV_UART4,
+ AW_R40_DEV_UART5,
+ AW_R40_DEV_UART6,
+ AW_R40_DEV_UART7,
+ AW_R40_DEV_TWI0,
+ AW_R40_DEV_GMAC,
+ AW_R40_DEV_GIC_DIST,
+ AW_R40_DEV_GIC_CPU,
+ AW_R40_DEV_GIC_HYP,
+ AW_R40_DEV_GIC_VCPU,
+ AW_R40_DEV_SDRAM,
+ AW_R40_DEV_DRAMCOM,
+ AW_R40_DEV_DRAMCTL,
+ AW_R40_DEV_DRAMPHY,
+};
+
+#define AW_R40_NUM_CPUS (4)
+
+/**
+ * Allwinner R40 object model
+ * @{
+ */
+
+/** Object type for the Allwinner R40 SoC */
+#define TYPE_AW_R40 "allwinner-r40"
+
+/** Convert input object to Allwinner R40 state object */
+OBJECT_DECLARE_SIMPLE_TYPE(AwR40State, AW_R40)
+
+/** @} */
+
+/**
+ * Allwinner R40 object
+ *
+ * This struct contains the state of all the devices
+ * which are currently emulated by the R40 SoC code.
+ */
+#define AW_R40_NUM_MMCS 4
+#define AW_R40_NUM_USB 2
+#define AW_R40_NUM_UARTS 8
+
+struct AwR40State {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ /** Physical base address for start of RAM */
+ hwaddr ram_addr;
+
+ /** Total RAM size in megabytes */
+ uint32_t ram_size;
+
+ ARMCPU cpus[AW_R40_NUM_CPUS];
+ const hwaddr *memmap;
+ AwSRAMCState sramc;
+ AwA10PITState timer;
+ AwWdtState wdt;
+ AllwinnerAHCIState sata;
+ AwSdHostState mmc[AW_R40_NUM_MMCS];
+ EHCISysBusState ehci[AW_R40_NUM_USB];
+ OHCISysBusState ohci[AW_R40_NUM_USB];
+ AwR40ClockCtlState ccu;
+ AwR40DramCtlState dramc;
+ AWI2CState i2c0;
+ AwEmacState emac;
+ AwSun8iEmacState gmac;
+ GICState gic;
+ MemoryRegion sram_a1;
+ MemoryRegion sram_a2;
+ MemoryRegion sram_a3;
+ MemoryRegion sram_a4;
+};
+
+/**
+ * Emulate Boot ROM firmware setup functionality.
+ *
+ * A real Allwinner R40 SoC contains a Boot ROM
+ * which is the first code that runs right after
+ * the SoC is powered on. The Boot ROM is responsible
+ * for loading user code (e.g. a bootloader) from any
+ * of the supported external devices and writing the
+ * downloaded code to internal SRAM. After loading, the SoC
+ * begins executing the code written to SRAM.
+ *
+ * This function emulates the Boot ROM by copying 32 KiB of data
+ * from the given block device and writing it to the start of the
+ * first internal SRAM memory.
+ *
+ * @s: Allwinner R40 state object pointer
+ * @blk: Block backend device object pointer
+ * @unit: the MMC controller unit to load the boot code from
+ */
+bool allwinner_r40_bootrom_setup(AwR40State *s, BlockBackend *blk, int unit);
+
+#endif /* HW_ARM_ALLWINNER_R40_H */
diff --git a/include/hw/arm/armsse-version.h b/include/hw/arm/armsse-version.h
new file mode 100644
index 0000000000..60780fa984
--- /dev/null
+++ b/include/hw/arm/armsse-version.h
@@ -0,0 +1,42 @@
+/*
+ * ARM SSE (Subsystems for Embedded): IoTKit, SSE-200
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+#ifndef ARMSSE_VERSION_H
+#define ARMSSE_VERSION_H
+
+
+/*
+ * Define an enumeration of the possible values of the sse-version
+ * property implemented by various sub-devices of the SSE, and
+ * a validation function that checks that a valid value has been passed.
+ * These are arbitrary QEMU-internal values (nobody should be creating
+ * the sub-devices of the SSE except for the SSE object itself), but
+ * we pick obvious numbers for the benefit of people debugging with gdb.
+ */
+enum {
+ ARMSSE_IOTKIT = 0,
+ ARMSSE_SSE200 = 200,
+ ARMSSE_SSE300 = 300,
+};
+
+static inline bool armsse_version_valid(uint32_t sse_version)
+{
+ switch (sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#endif
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
new file mode 100644
index 0000000000..88b3b759c5
--- /dev/null
+++ b/include/hw/arm/armsse.h
@@ -0,0 +1,241 @@
+/*
+ * ARM SSE (Subsystems for Embedded): IoTKit, SSE-200
+ *
+ * Copyright (c) 2018 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the Arm "Subsystems for Embedded" family of
+ * hardware, which includes the IoT Kit and the SSE-050, SSE-100 and
+ * SSE-200. Currently we model:
+ * - the Arm IoT Kit which is documented in
+ * https://developer.arm.com/documentation/ecm0601256/latest
+ * - the SSE-200 which is documented in
+ * https://developer.arm.com/documentation/101104/latest/
+ *
+ * The IoTKit contains:
+ * a Cortex-M33
+ * the IDAU
+ * some timers and watchdogs
+ * two peripheral protection controllers
+ * a memory protection controller
+ * a security controller
+ * a bus fabric which arranges that some parts of the address
+ * space are secure and non-secure aliases of each other
+ * The SSE-200 additionally contains:
+ * a second Cortex-M33
+ * two Message Handling Units (MHUs)
+ * an optional CryptoCell (which we do not model)
+ * more SRAM banks with associated MPCs
+ * multiple Power Policy Units (PPUs)
+ * a control interface for an icache for each CPU
+ * per-CPU identity and control register blocks
+ *
+ * QEMU interface:
+ * + Clock input "MAINCLK": clock for CPUs and most peripherals
+ * + Clock input "S32KCLK": slow 32KHz clock used for a few peripherals
+ * + QOM property "memory" is a MemoryRegion containing the devices provided
+ * by the board model.
+ * + QOM property "EXP_NUMIRQ" sets the number of expansion interrupts.
+ * (In hardware, the SSE-200 permits the number of expansion interrupts
+ * for the two CPUs to be configured separately, but we restrict it to
+ * being the same for both, to avoid having to have separate Property
+ * lists for different variants. This restriction can be relaxed later
+ * if necessary.)
+ * + QOM property "SRAM_ADDR_WIDTH" sets the number of bits used for the
+ * address of each SRAM bank (and thus the total amount of internal SRAM)
+ * + QOM property "init-svtor" sets the initial value of the CPU SVTOR register
+ * (where it expects to load the PC and SP from the vector table on reset)
+ * + QOM properties "CPU0_FPU", "CPU0_DSP", "CPU1_FPU" and "CPU1_DSP" which
+ * set whether the CPUs have the FPU and DSP features present. The default
+ * (matching the hardware) is that for CPU0 in an IoTKit and CPU1 in an
+ * SSE-200 both are present; CPU0 in an SSE-200 has neither.
+ * Since the IoTKit has only one CPU, it does not have the CPU1_* properties.
+ * + QOM properties "CPU0_MPU_NS", "CPU0_MPU_S", "CPU1_MPU_NS" and "CPU1_MPU_S"
+ * which set the number of MPU regions on the CPUs. If there is only one
+ * CPU the CPU1 properties are not present.
+ * + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts for CPU 0,
+ * which are wired to its NVIC lines 32 .. n+32
+ * + Named GPIO inputs "EXP_CPU1_IRQ" 0..n are the expansion interrupts for
+ * CPU 1, which are wired to its NVIC lines 32 .. n+32
+ * + sysbus MMIO region 0 is the "AHB Slave Expansion" which allows
+ * bus master devices in the board model to make transactions into
+ * all the devices and memory areas in the IoTKit
+ * Controlling up to 4 AHB expansion PPBs which a system using the IoTKit
+ * might provide:
+ * + named GPIO outputs apb_ppcexp{0,1,2,3}_nonsec[0..15]
+ * + named GPIO outputs apb_ppcexp{0,1,2,3}_ap[0..15]
+ * + named GPIO outputs apb_ppcexp{0,1,2,3}_irq_enable
+ * + named GPIO outputs apb_ppcexp{0,1,2,3}_irq_clear
+ * + named GPIO inputs apb_ppcexp{0,1,2,3}_irq_status
+ * Controlling each of the 4 expansion AHB PPCs which a system using the IoTKit
+ * might provide:
+ * + named GPIO outputs ahb_ppcexp{0,1,2,3}_nonsec[0..15]
+ * + named GPIO outputs ahb_ppcexp{0,1,2,3}_ap[0..15]
+ * + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_enable
+ * + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_clear
+ * + named GPIO inputs ahb_ppcexp{0,1,2,3}_irq_status
+ * Controlling each of the 16 expansion MPCs which a system using the IoTKit
+ * might provide:
+ * + named GPIO inputs mpcexp_status[0..15]
+ * Controlling each of the 16 expansion MSCs which a system using the IoTKit
+ * might provide:
+ * + named GPIO inputs mscexp_status[0..15]
+ * + named GPIO outputs mscexp_clear[0..15]
+ * + named GPIO outputs mscexp_ns[0..15]
+ */
+
+#ifndef ARMSSE_H
+#define ARMSSE_H
+
+#include "hw/sysbus.h"
+#include "hw/arm/armv7m.h"
+#include "hw/misc/iotkit-secctl.h"
+#include "hw/misc/tz-ppc.h"
+#include "hw/misc/tz-mpc.h"
+#include "hw/timer/cmsdk-apb-timer.h"
+#include "hw/timer/cmsdk-apb-dualtimer.h"
+#include "hw/timer/sse-counter.h"
+#include "hw/timer/sse-timer.h"
+#include "hw/watchdog/cmsdk-apb-watchdog.h"
+#include "hw/misc/iotkit-sysctl.h"
+#include "hw/misc/iotkit-sysinfo.h"
+#include "hw/misc/armsse-cpuid.h"
+#include "hw/misc/armsse-mhu.h"
+#include "hw/misc/armsse-cpu-pwrctrl.h"
+#include "hw/misc/unimp.h"
+#include "hw/or-irq.h"
+#include "hw/clock.h"
+#include "hw/core/split-irq.h"
+#include "hw/cpu/cluster.h"
+#include "qom/object.h"
+
+#define TYPE_ARM_SSE "arm-sse"
+OBJECT_DECLARE_TYPE(ARMSSE, ARMSSEClass,
+ ARM_SSE)
+
+/*
+ * These type names are for specific IoTKit subsystems; other than
+ * instantiating them, code using these devices should always handle
+ * them via the ARMSSE base class, so they have no IOTKIT() etc macros.
+ */
+#define TYPE_IOTKIT "iotkit"
+#define TYPE_SSE200 "sse-200"
+#define TYPE_SSE300 "sse-300"
+
+/* We have an IRQ splitter and an OR gate input for each external PPC
+ * and the 2 internal PPCs
+ */
+#define NUM_INTERNAL_PPCS 2
+#define NUM_EXTERNAL_PPCS (IOTS_NUM_AHB_EXP_PPC + IOTS_NUM_APB_EXP_PPC)
+#define NUM_PPCS (NUM_EXTERNAL_PPCS + NUM_INTERNAL_PPCS)
+
+#define MAX_SRAM_BANKS 4
+#if MAX_SRAM_BANKS > IOTS_NUM_MPC
+#error Too many SRAM banks
+#endif
+
+#define SSE_MAX_CPUS 2
+
+#define NUM_PPUS 8
+
+/* Number of CPU IRQs used by the SSE itself */
+#define NUM_SSE_IRQS 32
+
+struct ARMSSE {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ ARMv7MState armv7m[SSE_MAX_CPUS];
+ CPUClusterState cluster[SSE_MAX_CPUS];
+ IoTKitSecCtl secctl;
+ TZPPC apb_ppc[NUM_INTERNAL_PPCS];
+ TZMPC mpc[IOTS_NUM_MPC];
+ CMSDKAPBTimer timer[3];
+ OrIRQState ppc_irq_orgate;
+ SplitIRQ sec_resp_splitter;
+ SplitIRQ ppc_irq_splitter[NUM_PPCS];
+ SplitIRQ mpc_irq_splitter[IOTS_NUM_EXP_MPC + IOTS_NUM_MPC];
+ OrIRQState mpc_irq_orgate;
+ OrIRQState nmi_orgate;
+
+ SplitIRQ cpu_irq_splitter[NUM_SSE_IRQS];
+
+ CMSDKAPBDualTimer dualtimer;
+
+ CMSDKAPBWatchdog cmsdk_watchdog[3];
+
+ SSECounter sse_counter;
+ SSETimer sse_timer[4];
+
+ IoTKitSysCtl sysctl;
+ IoTKitSysCtl sysinfo;
+
+ ARMSSEMHU mhu[2];
+ UnimplementedDeviceState unimp[NUM_PPUS];
+ UnimplementedDeviceState cachectrl[SSE_MAX_CPUS];
+ UnimplementedDeviceState cpusecctrl[SSE_MAX_CPUS];
+
+ ARMSSECPUID cpuid[SSE_MAX_CPUS];
+
+ ARMSSECPUPwrCtrl cpu_pwrctrl[SSE_MAX_CPUS];
+
+ /*
+ * 'container' holds all devices seen by all CPUs.
+ * 'cpu_container[i]' is the view that CPU i has: it holds the
+ * per-CPU devices of that CPU, with 'container' (or an alias of it,
+ * since we can only use it directly once) as the background.
+ * container_alias[i] is the alias of 'container' used by CPU i+1;
+ * CPU 0 can use 'container' directly.
+ */
+ MemoryRegion container;
+ MemoryRegion container_alias[SSE_MAX_CPUS - 1];
+ MemoryRegion cpu_container[SSE_MAX_CPUS];
+ MemoryRegion alias1;
+ MemoryRegion alias2;
+ MemoryRegion alias3[SSE_MAX_CPUS];
+ MemoryRegion sram[MAX_SRAM_BANKS];
+ MemoryRegion itcm;
+ MemoryRegion dtcm;
+
+ qemu_irq *exp_irqs[SSE_MAX_CPUS];
+ qemu_irq ppc0_irq;
+ qemu_irq ppc1_irq;
+ qemu_irq sec_resp_cfg;
+ qemu_irq sec_resp_cfg_in;
+ qemu_irq nsc_cfg_in;
+
+ qemu_irq irq_status_in[NUM_EXTERNAL_PPCS];
+ qemu_irq mpcexp_status_in[IOTS_NUM_EXP_MPC];
+
+ uint32_t nsccfg;
+
+ Clock *mainclk;
+ Clock *s32kclk;
+
+ /* Properties */
+ MemoryRegion *board_memory;
+ uint32_t exp_numirq;
+ uint32_t sram_addr_width;
+ uint32_t init_svtor;
+ uint32_t cpu_mpu_ns[SSE_MAX_CPUS];
+ uint32_t cpu_mpu_s[SSE_MAX_CPUS];
+ bool cpu_fpu[SSE_MAX_CPUS];
+ bool cpu_dsp[SSE_MAX_CPUS];
+};
+
+typedef struct ARMSSEInfo ARMSSEInfo;
+
+struct ARMSSEClass {
+ SysBusDeviceClass parent_class;
+ const ARMSSEInfo *info;
+};
+
+
+#endif
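
Editor's note: the "QEMU interface" comment above lists the clocks, properties and GPIO names a board must wire up before realizing the SSE. A hedged sketch of the minimum wiring, loosely modelled on MPS2/MPS3-style board code; the mms state, its clock objects and the IRQ count are assumptions:

    object_initialize_child(OBJECT(mms), "sse", &mms->sse, TYPE_SSE200);
    DeviceState *ssedev = DEVICE(&mms->sse);

    /* Background memory, expansion IRQ count and the two input clocks. */
    object_property_set_link(OBJECT(ssedev), "memory",
                             OBJECT(system_memory), &error_abort);
    qdev_prop_set_uint32(ssedev, "EXP_NUMIRQ", 64);
    qdev_connect_clock_in(ssedev, "MAINCLK", mms->sysclk);
    qdev_connect_clock_in(ssedev, "S32KCLK", mms->s32kclk);
    sysbus_realize(SYS_BUS_DEVICE(ssedev), &error_fatal);

    /* Expansion interrupt n from a board device lands on NVIC line n + 32. */
    qemu_irq exp5 = qdev_get_gpio_in_named(ssedev, "EXP_IRQ", 5);
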
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
index 2ba24953b6..5c057ab2ec 100644
--- a/include/hw/arm/armv7m.h
+++ b/include/hw/arm/armv7m.h
@@ -12,12 +12,15 @@
#include "hw/sysbus.h"
#include "hw/intc/armv7m_nvic.h"
+#include "hw/misc/armv7m_ras.h"
#include "target/arm/idau.h"
+#include "qom/object.h"
+#include "hw/clock.h"
-#define TYPE_BITBAND "ARM,bitband-memory"
-#define BITBAND(obj) OBJECT_CHECK(BitBandState, (obj), TYPE_BITBAND)
+#define TYPE_BITBAND "ARM-bitband-memory"
+OBJECT_DECLARE_SIMPLE_TYPE(BitBandState, BITBAND)
-typedef struct {
+struct BitBandState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -26,37 +29,71 @@ typedef struct {
MemoryRegion iomem;
uint32_t base;
MemoryRegion *source_memory;
-} BitBandState;
+};
#define TYPE_ARMV7M "armv7m"
-#define ARMV7M(obj) OBJECT_CHECK(ARMv7MState, (obj), TYPE_ARMV7M)
+OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M)
#define ARMV7M_NUM_BITBANDS 2
/* ARMv7M container object.
* + Unnamed GPIO input lines: external IRQ lines for the NVIC
- * + Named GPIO output SYSRESETREQ: signalled for guest AIRCR.SYSRESETREQ
+ * + Named GPIO output SYSRESETREQ: signalled for guest AIRCR.SYSRESETREQ.
+ * If this GPIO is not wired up then the NVIC will default to performing
+ * a qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET).
* + Property "cpu-type": CPU type to instantiate
* + Property "num-irq": number of external IRQ lines
+ * + Property "num-prio-bits": number of priority bits in the NVIC
* + Property "memory": MemoryRegion defining the physical address space
* that CPU accesses see. (The NVIC, bitbanding and other CPU-internal
* devices will be automatically layered on top of this view.)
* + Property "idau": IDAU interface (forwarded to CPU object)
* + Property "init-svtor": secure VTOR reset value (forwarded to CPU object)
+ * + Property "init-nsvtor": non-secure VTOR reset value (forwarded to CPU object)
+ * + Property "vfp": enable VFP (forwarded to CPU object)
+ * + Property "dsp": enable DSP (forwarded to CPU object)
* + Property "enable-bitband": expose bitbanded IO
+ * + Property "mpu-ns-regions": number of Non-Secure MPU regions (forwarded
+ * to CPU object pmsav7-dregion property; default is whatever the default
+ * for the CPU is)
+ * + Property "mpu-s-regions": number of Secure MPU regions (default is
+ * whatever the default for the CPU is; must currently be set to the same
+ * value as mpu-ns-regions if the CPU implements the Security Extension)
+ * + Clock input "refclk" is the external reference clock for the systick timers
+ * + Clock input "cpuclk" is the main CPU clock
*/
-typedef struct ARMv7MState {
+struct ARMv7MState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
NVICState nvic;
BitBandState bitband[ARMV7M_NUM_BITBANDS];
ARMCPU *cpu;
+ ARMv7MRAS ras;
+ SysTickState systick[M_REG_NUM_BANKS];
/* MemoryRegion we pass to the CPU, with our devices layered on
* top of the ones the board provides in board_memory.
*/
MemoryRegion container;
+ /*
+ * MemoryRegion which passes the transaction to either the S or the
+ * NS systick device depending on the transaction attributes
+ */
+ MemoryRegion systickmem;
+ /*
+ * MemoryRegion which enforces the S/NS handling of the systick
+ * device NS alias region and passes the transaction to the
+ * NS systick device if appropriate.
+ */
+ MemoryRegion systick_ns_mem;
+ /* Ditto, for the sysregs region provided by the NVIC */
+ MemoryRegion sysreg_ns_mem;
+ /* MR providing default PPB behaviour */
+ MemoryRegion defaultmem;
+
+ Clock *refclk;
+ Clock *cpuclk;
/* Properties */
char *cpu_type;
@@ -64,7 +101,13 @@ typedef struct ARMv7MState {
MemoryRegion *board_memory;
Object *idau;
uint32_t init_svtor;
+ uint32_t init_nsvtor;
+ uint32_t mpu_ns_regions;
+ uint32_t mpu_s_regions;
bool enable_bitband;
-} ARMv7MState;
+ bool start_powered_off;
+ bool vfp;
+ bool dsp;
+};
#endif
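
Editor's note: the property and clock list in the comment above maps directly onto how an SoC model configures the container before realize. A short hedged sketch using only names documented in that comment; the soc state, its clock sources and the CPU model are placeholders, and the device is assumed to have been created with object_initialize_child() beforehand:

    DeviceState *armv7m = DEVICE(&soc->armv7m);

    qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m33"));
    qdev_prop_set_bit(armv7m, "vfp", true);
    qdev_prop_set_bit(armv7m, "dsp", true);
    qdev_connect_clock_in(armv7m, "cpuclk", soc->cpuclk);
    qdev_connect_clock_in(armv7m, "refclk", soc->refclk);
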
diff --git a/include/hw/arm/aspeed.h b/include/hw/arm/aspeed.h
index 325c091d09..cbeacb214c 100644
--- a/include/hw/arm/aspeed.h
+++ b/include/hw/arm/aspeed.h
@@ -10,37 +10,36 @@
#define ARM_ASPEED_H
#include "hw/boards.h"
+#include "qom/object.h"
-typedef struct AspeedBoardState AspeedBoardState;
+typedef struct AspeedMachineState AspeedMachineState;
+
+#define TYPE_ASPEED_MACHINE MACHINE_TYPE_NAME("aspeed")
+typedef struct AspeedMachineClass AspeedMachineClass;
+DECLARE_OBJ_CHECKERS(AspeedMachineState, AspeedMachineClass,
+ ASPEED_MACHINE, TYPE_ASPEED_MACHINE)
+
+#define ASPEED_MAC0_ON (1 << 0)
+#define ASPEED_MAC1_ON (1 << 1)
+#define ASPEED_MAC2_ON (1 << 2)
+#define ASPEED_MAC3_ON (1 << 3)
+
+
+struct AspeedMachineClass {
+ MachineClass parent_obj;
-typedef struct AspeedBoardConfig {
const char *name;
const char *desc;
const char *soc_name;
uint32_t hw_strap1;
+ uint32_t hw_strap2;
const char *fmc_model;
const char *spi_model;
uint32_t num_cs;
- void (*i2c_init)(AspeedBoardState *bmc);
-} AspeedBoardConfig;
-
-#define TYPE_ASPEED_MACHINE MACHINE_TYPE_NAME("aspeed")
-#define ASPEED_MACHINE(obj) \
- OBJECT_CHECK(AspeedMachine, (obj), TYPE_ASPEED_MACHINE)
-
-typedef struct AspeedMachine {
- MachineState parent_obj;
-} AspeedMachine;
-
-#define ASPEED_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedMachineClass, (klass), TYPE_ASPEED_MACHINE)
-#define ASPEED_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedMachineClass, (obj), TYPE_ASPEED_MACHINE)
-
-typedef struct AspeedMachineClass {
- MachineClass parent_obj;
- const AspeedBoardConfig *board;
-} AspeedMachineClass;
+ uint32_t macs_mask;
+ void (*i2c_init)(AspeedMachineState *bmc);
+ uint32_t uart_default;
+};
#endif
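
For context, the AspeedMachineClass fields above are normally filled in by a machine class_init hook. A hedged sketch, with the SoC name, strap value and flash models chosen purely for illustration:

/* Hypothetical machine class; every value here is illustrative only. */
static void example_bmc_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);

    mc->desc       = "Example BMC";
    amc->soc_name  = "ast2400-a1";          /* assumed SoC type name */
    amc->hw_strap1 = 0;                     /* placeholder strap value */
    amc->fmc_model = "mx25l25635e";         /* illustrative flash models */
    amc->spi_model = "mx25l25635e";
    amc->num_cs    = 1;
    amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON;
    amc->i2c_init  = NULL;                  /* or a board-specific hook */
}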
diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h
index 11ec0179db..c60fac900a 100644
--- a/include/hw/arm/aspeed_soc.h
+++ b/include/hw/arm/aspeed_soc.h
@@ -12,61 +12,242 @@
#ifndef ASPEED_SOC_H
#define ASPEED_SOC_H
-#include "hw/arm/arm.h"
+#include "hw/cpu/a15mpcore.h"
+#include "hw/arm/armv7m.h"
#include "hw/intc/aspeed_vic.h"
#include "hw/misc/aspeed_scu.h"
+#include "hw/adc/aspeed_adc.h"
#include "hw/misc/aspeed_sdmc.h"
+#include "hw/misc/aspeed_xdma.h"
#include "hw/timer/aspeed_timer.h"
+#include "hw/rtc/aspeed_rtc.h"
#include "hw/i2c/aspeed_i2c.h"
+#include "hw/misc/aspeed_i3c.h"
#include "hw/ssi/aspeed_smc.h"
+#include "hw/misc/aspeed_hace.h"
+#include "hw/misc/aspeed_sbc.h"
#include "hw/watchdog/wdt_aspeed.h"
#include "hw/net/ftgmac100.h"
+#include "target/arm/cpu.h"
+#include "hw/gpio/aspeed_gpio.h"
+#include "hw/sd/aspeed_sdhci.h"
+#include "hw/usb/hcd-ehci.h"
+#include "qom/object.h"
+#include "hw/misc/aspeed_lpc.h"
+#include "hw/misc/unimp.h"
+#include "hw/misc/aspeed_peci.h"
+#include "hw/fsi/aspeed_apb2opb.h"
+#include "hw/char/serial.h"
#define ASPEED_SPIS_NUM 2
-#define ASPEED_WDTS_NUM 3
+#define ASPEED_EHCIS_NUM 2
+#define ASPEED_WDTS_NUM 4
+#define ASPEED_CPUS_NUM 2
+#define ASPEED_MACS_NUM 4
+#define ASPEED_UARTS_NUM 13
+#define ASPEED_JTAG_NUM 2
-typedef struct AspeedSoCState {
- /*< private >*/
+struct AspeedSoCState {
DeviceState parent;
- /*< public >*/
- ARMCPU cpu;
+ MemoryRegion *memory;
+ MemoryRegion *dram_mr;
+ MemoryRegion dram_container;
MemoryRegion sram;
- AspeedVICState vic;
+ MemoryRegion spi_boot_container;
+ MemoryRegion spi_boot;
+ AspeedRtcState rtc;
AspeedTimerCtrlState timerctrl;
AspeedI2CState i2c;
+ AspeedI3CState i3c;
AspeedSCUState scu;
+ AspeedHACEState hace;
+ AspeedXDMAState xdma;
+ AspeedADCState adc;
AspeedSMCState fmc;
AspeedSMCState spi[ASPEED_SPIS_NUM];
+ EHCISysBusState ehci[ASPEED_EHCIS_NUM];
+ AspeedSBCState sbc;
+ MemoryRegion secsram;
+ UnimplementedDeviceState sbc_unimplemented;
AspeedSDMCState sdmc;
AspeedWDTState wdt[ASPEED_WDTS_NUM];
- FTGMAC100State ftgmac100;
-} AspeedSoCState;
+ FTGMAC100State ftgmac100[ASPEED_MACS_NUM];
+ AspeedMiiState mii[ASPEED_MACS_NUM];
+ AspeedGPIOState gpio;
+ AspeedGPIOState gpio_1_8v;
+ AspeedSDHCIState sdhci;
+ AspeedSDHCIState emmc;
+ AspeedLPCState lpc;
+ AspeedPECIState peci;
+ SerialMM uart[ASPEED_UARTS_NUM];
+ Clock *sysclk;
+ UnimplementedDeviceState iomem;
+ UnimplementedDeviceState video;
+ UnimplementedDeviceState emmc_boot_controller;
+ UnimplementedDeviceState dpmcu;
+ UnimplementedDeviceState pwm;
+ UnimplementedDeviceState espi;
+ UnimplementedDeviceState udc;
+ UnimplementedDeviceState sgpiom;
+ UnimplementedDeviceState jtag[ASPEED_JTAG_NUM];
+ AspeedAPB2OPBState fsi[2];
+};
#define TYPE_ASPEED_SOC "aspeed-soc"
-#define ASPEED_SOC(obj) OBJECT_CHECK(AspeedSoCState, (obj), TYPE_ASPEED_SOC)
+OBJECT_DECLARE_TYPE(AspeedSoCState, AspeedSoCClass, ASPEED_SOC)
+
+struct Aspeed2400SoCState {
+ AspeedSoCState parent;
+
+ ARMCPU cpu[ASPEED_CPUS_NUM];
+ AspeedVICState vic;
+};
+
+#define TYPE_ASPEED2400_SOC "aspeed2400-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2400SoCState, ASPEED2400_SOC)
+
+struct Aspeed2600SoCState {
+ AspeedSoCState parent;
+
+ A15MPPrivState a7mpcore;
+ ARMCPU cpu[ASPEED_CPUS_NUM]; /* XXX belong to a7mpcore */
+};
+
+#define TYPE_ASPEED2600_SOC "aspeed2600-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2600SoCState, ASPEED2600_SOC)
+
+struct Aspeed10x0SoCState {
+ AspeedSoCState parent;
+
+ ARMv7MState armv7m;
+};
+
+#define TYPE_ASPEED10X0_SOC "aspeed10x0-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed10x0SoCState, ASPEED10X0_SOC)
+
+struct AspeedSoCClass {
+ DeviceClass parent_class;
-typedef struct AspeedSoCInfo {
const char *name;
- const char *cpu_type;
+ /** valid_cpu_types: NULL terminated array of a single CPU type. */
+ const char * const *valid_cpu_types;
uint32_t silicon_rev;
- hwaddr sdram_base;
uint64_t sram_size;
+ uint64_t secsram_size;
int spis_num;
- const hwaddr *spi_bases;
- const char *fmc_typename;
- const char **spi_typename;
+ int ehcis_num;
int wdts_num;
-} AspeedSoCInfo;
+ int macs_num;
+ int uarts_num;
+ int uarts_base;
+ const int *irqmap;
+ const hwaddr *memmap;
+ uint32_t num_cpus;
+ qemu_irq (*get_irq)(AspeedSoCState *s, int dev);
+};
-typedef struct AspeedSoCClass {
- DeviceClass parent_class;
- AspeedSoCInfo *info;
-} AspeedSoCClass;
+const char *aspeed_soc_cpu_type(AspeedSoCClass *sc);
+
+enum {
+ ASPEED_DEV_SPI_BOOT,
+ ASPEED_DEV_IOMEM,
+ ASPEED_DEV_UART0,
+ ASPEED_DEV_UART1,
+ ASPEED_DEV_UART2,
+ ASPEED_DEV_UART3,
+ ASPEED_DEV_UART4,
+ ASPEED_DEV_UART5,
+ ASPEED_DEV_UART6,
+ ASPEED_DEV_UART7,
+ ASPEED_DEV_UART8,
+ ASPEED_DEV_UART9,
+ ASPEED_DEV_UART10,
+ ASPEED_DEV_UART11,
+ ASPEED_DEV_UART12,
+ ASPEED_DEV_UART13,
+ ASPEED_DEV_VUART,
+ ASPEED_DEV_FMC,
+ ASPEED_DEV_SPI1,
+ ASPEED_DEV_SPI2,
+ ASPEED_DEV_EHCI1,
+ ASPEED_DEV_EHCI2,
+ ASPEED_DEV_VIC,
+ ASPEED_DEV_SDMC,
+ ASPEED_DEV_SCU,
+ ASPEED_DEV_ADC,
+ ASPEED_DEV_SBC,
+ ASPEED_DEV_SECSRAM,
+ ASPEED_DEV_EMMC_BC,
+ ASPEED_DEV_VIDEO,
+ ASPEED_DEV_SRAM,
+ ASPEED_DEV_SDHCI,
+ ASPEED_DEV_GPIO,
+ ASPEED_DEV_GPIO_1_8V,
+ ASPEED_DEV_RTC,
+ ASPEED_DEV_TIMER1,
+ ASPEED_DEV_TIMER2,
+ ASPEED_DEV_TIMER3,
+ ASPEED_DEV_TIMER4,
+ ASPEED_DEV_TIMER5,
+ ASPEED_DEV_TIMER6,
+ ASPEED_DEV_TIMER7,
+ ASPEED_DEV_TIMER8,
+ ASPEED_DEV_WDT,
+ ASPEED_DEV_PWM,
+ ASPEED_DEV_LPC,
+ ASPEED_DEV_IBT,
+ ASPEED_DEV_I2C,
+ ASPEED_DEV_PECI,
+ ASPEED_DEV_ETH1,
+ ASPEED_DEV_ETH2,
+ ASPEED_DEV_ETH3,
+ ASPEED_DEV_ETH4,
+ ASPEED_DEV_MII1,
+ ASPEED_DEV_MII2,
+ ASPEED_DEV_MII3,
+ ASPEED_DEV_MII4,
+ ASPEED_DEV_SDRAM,
+ ASPEED_DEV_XDMA,
+ ASPEED_DEV_EMMC,
+ ASPEED_DEV_KCS,
+ ASPEED_DEV_HACE,
+ ASPEED_DEV_DPMCU,
+ ASPEED_DEV_DP,
+ ASPEED_DEV_I3C,
+ ASPEED_DEV_ESPI,
+ ASPEED_DEV_UDC,
+ ASPEED_DEV_SGPIOM,
+ ASPEED_DEV_JTAG0,
+ ASPEED_DEV_JTAG1,
+ ASPEED_DEV_FSI1,
+ ASPEED_DEV_FSI2,
+};
+
+qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev);
+bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp);
+void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr);
+bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp);
+void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr);
+void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev,
+ const char *name, hwaddr addr,
+ uint64_t size);
+void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
+ unsigned int count, int unit0);
+
+static inline int aspeed_uart_index(int uart_dev)
+{
+ return uart_dev - ASPEED_DEV_UART0;
+}
+
+static inline int aspeed_uart_first(AspeedSoCClass *sc)
+{
+ return aspeed_uart_index(sc->uarts_base);
+}
-#define ASPEED_SOC_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedSoCClass, (klass), TYPE_ASPEED_SOC)
-#define ASPEED_SOC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedSoCClass, (obj), TYPE_ASPEED_SOC)
+static inline int aspeed_uart_last(AspeedSoCClass *sc)
+{
+ return aspeed_uart_first(sc) + sc->uarts_num - 1;
+}
#endif /* ASPEED_SOC_H */
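
The UART helpers declared above are meant to be used together; a hedged sketch of how a board might attach host serial backends across the SoC's UART range (the serial_hd() index mapping is an assumption made for the example, not mandated by this header):

/* Sketch only: hand out host serial backends to all SoC UARTs, using the
 * helpers and class fields declared above. */
static void example_connect_uarts(AspeedSoCState *s)
{
    AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);

    for (int i = aspeed_uart_first(sc); i <= aspeed_uart_last(sc); i++) {
        aspeed_soc_uart_set_chr(s, ASPEED_DEV_UART0 + i, serial_hd(i));
    }
}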
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index f5b193f670..636203baa5 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -5,30 +5,44 @@
 * Raspberry Pi 2 emulation and refactoring Copyright (c) 2015, Microsoft
* Written by Andrew Baumann
*
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_PERIPHERALS_H
#define BCM2835_PERIPHERALS_H
-#include "qemu-common.h"
#include "hw/sysbus.h"
+#include "hw/char/pl011.h"
#include "hw/char/bcm2835_aux.h"
#include "hw/display/bcm2835_fb.h"
#include "hw/dma/bcm2835_dma.h"
+#include "hw/or-irq.h"
#include "hw/intc/bcm2835_ic.h"
#include "hw/misc/bcm2835_property.h"
#include "hw/misc/bcm2835_rng.h"
#include "hw/misc/bcm2835_mbox.h"
+#include "hw/misc/bcm2835_mphi.h"
+#include "hw/misc/bcm2835_thermal.h"
+#include "hw/misc/bcm2835_cprman.h"
+#include "hw/misc/bcm2835_powermgt.h"
#include "hw/sd/sdhci.h"
#include "hw/sd/bcm2835_sdhost.h"
#include "hw/gpio/bcm2835_gpio.h"
+#include "hw/timer/bcm2835_systmr.h"
+#include "hw/usb/hcd-dwc2.h"
+#include "hw/ssi/bcm2835_spi.h"
+#include "hw/i2c/bcm2835_i2c.h"
+#include "hw/misc/unimp.h"
+#include "qom/object.h"
+#define TYPE_BCM_SOC_PERIPHERALS_BASE "bcm-soc-peripherals-base"
+OBJECT_DECLARE_TYPE(BCMSocPeripheralBaseState, BCMSocPeripheralBaseClass,
+ BCM_SOC_PERIPHERALS_BASE)
#define TYPE_BCM2835_PERIPHERALS "bcm2835-peripherals"
-#define BCM2835_PERIPHERALS(obj) \
- OBJECT_CHECK(BCM2835PeripheralState, (obj), TYPE_BCM2835_PERIPHERALS)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PeripheralState, BCM2835_PERIPHERALS)
-typedef struct BCM2835PeripheralState {
+struct BCMSocPeripheralBaseState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -37,17 +51,55 @@ typedef struct BCM2835PeripheralState {
MemoryRegion ram_alias[4];
qemu_irq irq, fiq;
- SysBusDevice *uart0;
+ BCM2835SystemTimerState systmr;
+ BCM2835MphiState mphi;
+ UnimplementedDeviceState txp;
+ UnimplementedDeviceState armtmr;
+ BCM2835PowerMgtState powermgt;
+ BCM2835CprmanState cprman;
+ PL011State uart0;
BCM2835AuxState aux;
BCM2835FBState fb;
BCM2835DMAState dma;
+ OrIRQState orgated_dma_irq;
BCM2835ICState ic;
BCM2835PropertyState property;
- BCM2835RngState rng;
BCM2835MboxState mboxes;
SDHCIState sdhci;
BCM2835SDHostState sdhost;
+ UnimplementedDeviceState i2s;
+ BCM2835SPIState spi[1];
+ BCM2835I2CState i2c[3];
+ OrIRQState orgated_i2c_irq;
+ UnimplementedDeviceState otp;
+ UnimplementedDeviceState dbus;
+ UnimplementedDeviceState ave0;
+ UnimplementedDeviceState v3d;
+ UnimplementedDeviceState bscsl;
+ UnimplementedDeviceState smi;
+ DWC2State dwc2;
+ UnimplementedDeviceState sdramc;
+};
+
+struct BCMSocPeripheralBaseClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+ uint64_t peri_size; /* Peripheral range size */
+};
+
+struct BCM2835PeripheralState {
+ /*< private >*/
+ BCMSocPeripheralBaseState parent_obj;
+ /*< public >*/
+ BCM2835RngState rng;
+ Bcm2835ThermalState thermal;
BCM2835GpioState gpio;
-} BCM2835PeripheralState;
+};
+
+void create_unimp(BCMSocPeripheralBaseState *ps,
+ UnimplementedDeviceState *uds,
+ const char *name, hwaddr ofs, hwaddr size);
+void bcm_soc_peripherals_common_realize(DeviceState *dev, Error **errp);
#endif /* BCM2835_PERIPHERALS_H */
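
create_unimp() declared above is how subclasses stub out blocks they do not model yet. A hedged sketch of a subclass realize hook; the device name, offset and size are placeholders rather than values from this header:

/* Sketch of a subclass realize hook built on the declarations above. */
static void example_peripherals_realize(DeviceState *dev, Error **errp)
{
    BCMSocPeripheralBaseState *ps = BCM_SOC_PERIPHERALS_BASE(dev);
    Error *err = NULL;

    bcm_soc_peripherals_common_realize(dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* Placeholder name, offset and size for a not-yet-modelled block. */
    create_unimp(ps, &ps->txp, "bcm2835-txp", 0x004000, 0x1000);
}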
diff --git a/include/hw/arm/bcm2836.h b/include/hw/arm/bcm2836.h
index 93248399ba..918fb3bf14 100644
--- a/include/hw/arm/bcm2836.h
+++ b/include/hw/arm/bcm2836.h
@@ -5,18 +5,22 @@
 * Raspberry Pi 2 emulation and refactoring Copyright (c) 2015, Microsoft
* Written by Andrew Baumann
*
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2836_H
#define BCM2836_H
-#include "hw/arm/arm.h"
#include "hw/arm/bcm2835_peripherals.h"
#include "hw/intc/bcm2836_control.h"
+#include "target/arm/cpu.h"
+#include "qom/object.h"
+#define TYPE_BCM283X_BASE "bcm283x-base"
+OBJECT_DECLARE_TYPE(BCM283XBaseState, BCM283XBaseClass, BCM283X_BASE)
#define TYPE_BCM283X "bcm283x"
-#define BCM283X(obj) OBJECT_CHECK(BCM283XState, (obj), TYPE_BCM283X)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM283XState, BCM283X)
#define BCM283X_NCPUS 4
@@ -24,32 +28,43 @@
* them, code using these devices should always handle them via the
* BCM283x base class, so they have no BCM2836(obj) etc macros.
*/
+#define TYPE_BCM2835 "bcm2835"
#define TYPE_BCM2836 "bcm2836"
#define TYPE_BCM2837 "bcm2837"
-typedef struct BCM283XState {
+struct BCM283XBaseState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
- char *cpu_type;
uint32_t enabled_cpus;
- ARMCPU cpus[BCM283X_NCPUS];
+ struct {
+ ARMCPU core;
+ } cpu[BCM283X_NCPUS];
BCM2836ControlState control;
- BCM2835PeripheralState peripherals;
-} BCM283XState;
-
-typedef struct BCM283XInfo BCM283XInfo;
+};
-typedef struct BCM283XClass {
+struct BCM283XBaseClass {
+ /*< private >*/
DeviceClass parent_class;
- const BCM283XInfo *info;
-} BCM283XClass;
+ /*< public >*/
+ const char *name;
+ const char *cpu_type;
+ unsigned core_count;
+ hwaddr peri_base; /* Peripheral base address seen by the CPU */
+ hwaddr ctrl_base; /* Interrupt controller and mailboxes etc. */
+ int clusterid;
+};
+
+struct BCM283XState {
+ /*< private >*/
+ BCM283XBaseState parent_obj;
+ /*< public >*/
+ BCM2835PeripheralState peripherals;
+};
-#define BCM283X_CLASS(klass) \
- OBJECT_CLASS_CHECK(BCM283XClass, (klass), TYPE_BCM283X)
-#define BCM283X_GET_CLASS(obj) \
- OBJECT_GET_CLASS(BCM283XClass, (obj), TYPE_BCM283X)
+bool bcm283x_common_realize(DeviceState *dev, BCMSocPeripheralBaseState *ps,
+ Error **errp);
#endif /* BCM2836_H */
diff --git a/include/hw/arm/bcm2838.h b/include/hw/arm/bcm2838.h
new file mode 100644
index 0000000000..e53c7bedf9
--- /dev/null
+++ b/include/hw/arm/bcm2838.h
@@ -0,0 +1,31 @@
+/*
+ * BCM2838 SoC emulation
+ *
+ * Copyright (C) 2022 Ovchinnikov Vitalii <vitalii.ovchinnikov@auriga.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef BCM2838_H
+#define BCM2838_H
+
+#include "hw/arm/bcm2836.h"
+#include "hw/intc/arm_gic.h"
+#include "hw/arm/bcm2838_peripherals.h"
+
+#define BCM2838_PERI_LOW_BASE 0xfc000000
+#define BCM2838_GIC_BASE 0x40000
+
+#define TYPE_BCM2838 "bcm2838"
+
+OBJECT_DECLARE_TYPE(BCM2838State, BCM2838Class, BCM2838)
+
+struct BCM2838State {
+ /*< private >*/
+ BCM283XBaseState parent_obj;
+ /*< public >*/
+ BCM2838PeripheralState peripherals;
+ GICState gic;
+};
+
+#endif /* BCM2838_H */
diff --git a/include/hw/arm/bcm2838_peripherals.h b/include/hw/arm/bcm2838_peripherals.h
new file mode 100644
index 0000000000..7ee1bd066f
--- /dev/null
+++ b/include/hw/arm/bcm2838_peripherals.h
@@ -0,0 +1,84 @@
+/*
+ * BCM2838 peripherals emulation
+ *
+ * Copyright (C) 2022 Ovchinnikov Vitalii <vitalii.ovchinnikov@auriga.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef BCM2838_PERIPHERALS_H
+#define BCM2838_PERIPHERALS_H
+
+#include "hw/arm/bcm2835_peripherals.h"
+#include "hw/sd/sdhci.h"
+#include "hw/gpio/bcm2838_gpio.h"
+
+/* SPI */
+#define GIC_SPI_INTERRUPT_MBOX 33
+#define GIC_SPI_INTERRUPT_MPHI 40
+#define GIC_SPI_INTERRUPT_DWC2 73
+#define GIC_SPI_INTERRUPT_DMA_0 80
+#define GIC_SPI_INTERRUPT_DMA_6 86
+#define GIC_SPI_INTERRUPT_DMA_7_8 87
+#define GIC_SPI_INTERRUPT_DMA_9_10 88
+#define GIC_SPI_INTERRUPT_AUX_UART1 93
+#define GIC_SPI_INTERRUPT_SDHOST 120
+#define GIC_SPI_INTERRUPT_UART0 121
+#define GIC_SPI_INTERRUPT_RNG200 125
+#define GIC_SPI_INTERRUPT_EMMC_EMMC2 126
+#define GIC_SPI_INTERRUPT_PCI_INT_A 143
+#define GIC_SPI_INTERRUPT_GENET_A 157
+#define GIC_SPI_INTERRUPT_GENET_B 158
+
+
+/* GPU (legacy) DMA interrupts */
+#define GPU_INTERRUPT_DMA0 16
+#define GPU_INTERRUPT_DMA1 17
+#define GPU_INTERRUPT_DMA2 18
+#define GPU_INTERRUPT_DMA3 19
+#define GPU_INTERRUPT_DMA4 20
+#define GPU_INTERRUPT_DMA5 21
+#define GPU_INTERRUPT_DMA6 22
+#define GPU_INTERRUPT_DMA7_8 23
+#define GPU_INTERRUPT_DMA9_10 24
+#define GPU_INTERRUPT_DMA11 25
+#define GPU_INTERRUPT_DMA12 26
+#define GPU_INTERRUPT_DMA13 27
+#define GPU_INTERRUPT_DMA14 28
+#define GPU_INTERRUPT_DMA15 31
+
+#define BCM2838_MPHI_OFFSET 0xb200
+#define BCM2838_MPHI_SIZE 0x200
+
+#define TYPE_BCM2838_PERIPHERALS "bcm2838-peripherals"
+OBJECT_DECLARE_TYPE(BCM2838PeripheralState, BCM2838PeripheralClass,
+ BCM2838_PERIPHERALS)
+
+struct BCM2838PeripheralState {
+ /*< private >*/
+ BCMSocPeripheralBaseState parent_obj;
+
+ /*< public >*/
+ MemoryRegion peri_low_mr;
+ MemoryRegion peri_low_mr_alias;
+ MemoryRegion mphi_mr_alias;
+
+ SDHCIState emmc2;
+ BCM2838GpioState gpio;
+
+ OrIRQState mmc_irq_orgate;
+ OrIRQState dma_7_8_irq_orgate;
+ OrIRQState dma_9_10_irq_orgate;
+
+ UnimplementedDeviceState asb;
+ UnimplementedDeviceState clkisp;
+};
+
+struct BCM2838PeripheralClass {
+ /*< private >*/
+ BCMSocPeripheralBaseClass parent_class;
+ /*< public >*/
+ uint64_t peri_low_size; /* Peripheral lower range size */
+};
+
+#endif /* BCM2838_PERIPHERALS_H */
diff --git a/include/hw/arm/arm.h b/include/hw/arm/boot.h
index ffed39252d..80c492d742 100644
--- a/include/hw/arm/arm.h
+++ b/include/hw/arm/boot.h
@@ -1,5 +1,5 @@
/*
- * Misc ARM declarations
+ * ARM kernel loader.
*
* Copyright (c) 2006 CodeSourcery.
* Written by Paul Brook
@@ -8,12 +8,10 @@
*
*/
-#ifndef HW_ARM_H
-#define HW_ARM_H
+#ifndef HW_ARM_BOOT_H
+#define HW_ARM_BOOT_H
-#include "exec/memory.h"
#include "target/arm/cpu-qom.h"
-#include "hw/irq.h"
#include "qemu/notify.h"
typedef enum {
@@ -27,13 +25,16 @@ typedef enum {
* armv7m_load_kernel:
* @cpu: CPU
* @kernel_filename: file to load
+ * @mem_base: base address to load image at (should be where the
+ * CPU expects to find its vector table on reset)
 * @mem_size: maximum image size to load
*
* Load the guest image for an ARMv7M system. This must be called by
* any ARMv7M board. (This is necessary to ensure that the CPU resets
* correctly on system reset, as well as for kernel loading.)
*/
-void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size);
+void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename,
+ hwaddr mem_base, int mem_size);
/* arm_boot.c */
struct arm_boot_info {
@@ -58,7 +59,6 @@ struct arm_boot_info {
hwaddr smp_loader_start;
hwaddr smp_bootreg_addr;
hwaddr gic_cpu_if_addr;
- int nb_cpus;
int board_id;
/* ARM machines that support the ARM Security Extensions use this field to
* control whether Linux is booted as secure(true) or non-secure(false).
@@ -72,6 +72,9 @@ struct arm_boot_info {
* boot loader/boot ROM code, and secondary_cpu_reset_hook() should
* perform any necessary CPU reset handling and set the PC for the
* secondary CPUs to point at this boot blob.
+ *
+ * These hooks won't be called if secondary CPUs are booting via
+ * emulated PSCI (see psci_conduit below).
*/
void (*write_secondary_boot)(ARMCPU *cpu,
const struct arm_boot_info *info);
@@ -88,6 +91,16 @@ struct arm_boot_info {
* the user it should implement this hook.
*/
void (*modify_dtb)(const struct arm_boot_info *info, void *fdt);
+ /*
+ * If a board wants to use the QEMU emulated-firmware PSCI support,
+ * it should set this to QEMU_PSCI_CONDUIT_HVC or QEMU_PSCI_CONDUIT_SMC
+ * as appropriate. arm_load_kernel() will set the psci-conduit and
+ * start-powered-off properties on the CPUs accordingly.
+ * Note that if the guest image is started at the same exception level
+ * as the conduit specifies calls should go to (eg guest firmware booted
+ * to EL3) then PSCI will not be enabled.
+ */
+ int psci_conduit;
/* Used internally by arm_boot.c */
int is_linux;
hwaddr initrd_start;
@@ -109,9 +122,12 @@ struct arm_boot_info {
void (*write_board_setup)(ARMCPU *cpu,
const struct arm_boot_info *info);
- /* If set, the board specific loader/setup blob will be run from secure
+ /*
+ * If set, the board specific loader/setup blob will be run from secure
* mode, regardless of secure_boot. The blob becomes responsible for
- * changing to non-secure state if implementing a non-secure boot
+ * changing to non-secure state if implementing a non-secure boot,
+ * including setting up EL3/Secure registers such as the NSACR as
+ * required by the Linux booting ABI before the switch to non-secure.
*/
bool secure_board_setup;
@@ -133,7 +149,7 @@ struct arm_boot_info {
* before sysbus-fdt arm_register_platform_bus_fdt_creator. Indeed the
* machine init done notifiers are called in registration reverse order.
*/
-void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info);
+void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info);
AddressSpace *arm_boot_address_space(ARMCPU *cpu,
const struct arm_boot_info *info);
@@ -160,15 +176,60 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu,
* Note: Must not be called unless have_dtb(binfo) is true.
*/
int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
- hwaddr addr_limit, AddressSpace *as);
+ hwaddr addr_limit, AddressSpace *as, MachineState *ms);
/* Write a secure board setup routine with a dummy handler for SMCs */
void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
const struct arm_boot_info *info,
hwaddr mvbar_addr);
-/* Multiplication factor to convert from system clock ticks to qemu timer
- ticks. */
-extern int system_clock_scale;
+typedef enum {
+ FIXUP_NONE = 0, /* do nothing */
+ FIXUP_TERMINATOR, /* end of insns */
+ FIXUP_BOARDID, /* overwrite with board ID number */
+ FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */
+ FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */
+ FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */
+ FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */
+ FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */
+ FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */
+ FIXUP_BOOTREG, /* overwrite with boot register address */
+ FIXUP_DSB, /* overwrite with correct DSB insn for cpu */
+ FIXUP_MAX,
+} FixupType;
+
+typedef struct ARMInsnFixup {
+ uint32_t insn;
+ FixupType fixup;
+} ARMInsnFixup;
+
+/**
+ * arm_write_bootloader - write a bootloader to guest memory
+ * @name: name of the bootloader blob
+ * @as: AddressSpace to write the bootloader
+ * @addr: guest address to write it
+ * @insns: the blob to be loaded
+ * @fixupcontext: context to be used for any fixups in @insns
+ *
+ * Write a bootloader to guest memory at address @addr in the address
+ * space @as. @name is the name to use for the resulting ROM blob, so
+ * it should be unique in the system and reasonably identifiable for debugging.
+ *
+ * @insns must be an array of ARMInsnFixup structs, each of which has
+ * one 32-bit value to be written to the guest memory, and a fixup to be
+ * applied to the value. FIXUP_NONE (do nothing) is value 0, so effectively
+ * the fixup is optional when writing a struct initializer.
+ * The final entry in the array must be { 0, FIXUP_TERMINATOR }.
+ *
+ * All other supported fixup types have the semantics "ignore insn
+ * and instead use the value from the array element @fixupcontext[fixup]".
+ * The caller should therefore provide @fixupcontext as an array of
+ * size FIXUP_MAX whose elements have been initialized for at least
+ * the entries that @insns refers to.
+ */
+void arm_write_bootloader(const char *name,
+ AddressSpace *as, hwaddr addr,
+ const ARMInsnFixup *insns,
+ const uint32_t *fixupcontext);
-#endif /* HW_ARM_H */
+#endif /* HW_ARM_BOOT_H */
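
The arm_write_bootloader() contract documented above is easiest to see with a tiny blob: one real instruction, one literal patched from the fixup context, and the mandatory terminator. A hedged sketch with placeholder addresses:

/* Illustrative two-word blob: jump to an entry point patched in at write
 * time. The entry point and load address below are placeholders. */
static const ARMInsnFixup example_blob[] = {
    { 0xe51ff004 },             /* ldr pc, [pc, #-4]: branch via next word */
    { 0, FIXUP_ENTRYPOINT_LO }, /* literal overwritten from fixupcontext */
    { 0, FIXUP_TERMINATOR },    /* mandatory end-of-array marker */
};

static void example_write_blob(ARMCPU *cpu, const struct arm_boot_info *info)
{
    uint32_t fixupcontext[FIXUP_MAX] = { 0 };

    fixupcontext[FIXUP_ENTRYPOINT_LO] = 0x00010000;  /* placeholder entry */
    arm_write_bootloader("example-bootloader",
                         arm_boot_address_space(cpu, info),
                         info->smp_loader_start,     /* example address */
                         example_blob, fixupcontext);
}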
diff --git a/include/hw/arm/bsa.h b/include/hw/arm/bsa.h
new file mode 100644
index 0000000000..8eaab603c0
--- /dev/null
+++ b/include/hw/arm/bsa.h
@@ -0,0 +1,35 @@
+/*
+ * Common definitions for Arm Base System Architecture (BSA) platforms.
+ *
+ * Copyright (c) 2015 Linaro Limited
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QEMU_ARM_BSA_H
+#define QEMU_ARM_BSA_H
+
+/* These are architectural INTID values */
+#define VIRTUAL_PMU_IRQ 23
+#define ARCH_GIC_MAINT_IRQ 25
+#define ARCH_TIMER_NS_EL2_IRQ 26
+#define ARCH_TIMER_VIRT_IRQ 27
+#define ARCH_TIMER_NS_EL2_VIRT_IRQ 28
+#define ARCH_TIMER_S_EL1_IRQ 29
+#define ARCH_TIMER_NS_EL1_IRQ 30
+
+#define INTID_TO_PPI(irq) ((irq) - 16)
+
+#endif /* QEMU_ARM_BSA_H */
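
INTID_TO_PPI() simply rebases an architectural INTID onto per-CPU PPI numbering (PPIs occupy INTIDs 16-31). A one-line worked example:

/* The virtual timer INTID defined above is PPI 11 on every CPU. */
static inline int example_vtimer_ppi(void)
{
    return INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ);   /* 27 - 16 == 11 */
}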
diff --git a/include/hw/arm/digic.h b/include/hw/arm/digic.h
index 63785baaa8..8f2735c284 100644
--- a/include/hw/arm/digic.h
+++ b/include/hw/arm/digic.h
@@ -21,14 +21,15 @@
#include "cpu.h"
#include "hw/timer/digic-timer.h"
#include "hw/char/digic-uart.h"
+#include "qom/object.h"
#define TYPE_DIGIC "digic"
-#define DIGIC(obj) OBJECT_CHECK(DigicState, (obj), TYPE_DIGIC)
+OBJECT_DECLARE_SIMPLE_TYPE(DigicState, DIGIC)
#define DIGIC4_NB_TIMERS 3
-typedef struct DigicState {
+struct DigicState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
@@ -37,6 +38,6 @@ typedef struct DigicState {
DigicTimerState timer[DIGIC4_NB_TIMERS];
DigicUartState uart;
-} DigicState;
+};
#endif /* HW_ARM_DIGIC_H */
diff --git a/include/hw/arm/exynos4210.h b/include/hw/arm/exynos4210.h
index 098a69ec73..d33fe38586 100644
--- a/include/hw/arm/exynos4210.h
+++ b/include/hw/arm/exynos4210.h
@@ -19,15 +19,19 @@
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
- *
*/
#ifndef EXYNOS4210_H
#define EXYNOS4210_H
-#include "qemu-common.h"
-#include "exec/memory.h"
-#include "target/arm/cpu-qom.h"
+#include "hw/or-irq.h"
+#include "hw/sysbus.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/intc/exynos4210_gic.h"
+#include "hw/intc/exynos4210_combiner.h"
+#include "hw/core/split-irq.h"
+#include "hw/arm/boot.h"
+#include "qom/object.h"
#define EXYNOS4210_NCPUS 2
@@ -65,29 +69,25 @@
#define EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ \
(EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ * 8)
-#define EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit) ((grp)*8 + (bit))
-#define EXYNOS4210_COMBINER_GET_GRP_NUM(irq) ((irq) / 8)
-#define EXYNOS4210_COMBINER_GET_BIT_NUM(irq) \
- ((irq) - 8 * EXYNOS4210_COMBINER_GET_GRP_NUM(irq))
-
-/* IRQs number for external and internal GIC */
-#define EXYNOS4210_EXT_GIC_NIRQ (160-32)
-#define EXYNOS4210_INT_GIC_NIRQ 64
-
#define EXYNOS4210_I2C_NUMBER 9
-typedef struct Exynos4210Irq {
- qemu_irq int_combiner_irq[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ];
- qemu_irq ext_combiner_irq[EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ];
- qemu_irq int_gic_irq[EXYNOS4210_INT_GIC_NIRQ];
- qemu_irq ext_gic_irq[EXYNOS4210_EXT_GIC_NIRQ];
- qemu_irq board_irqs[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ];
-} Exynos4210Irq;
+#define EXYNOS4210_NUM_DMA 3
+
+/*
+ * We need one splitter for every external combiner input, plus
+ * one for every non-zero entry in combiner_grp_to_gic_id[],
+ * minus one for every external combiner ID in second or later
+ * places in a combinermap[] line.
+ * We'll assert in exynos4210_init_board_irqs() if this is wrong.
+ */
+#define EXYNOS4210_NUM_SPLITTERS (EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ + 38)
-typedef struct Exynos4210State {
+struct Exynos4210State {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
ARMCPU *cpu[EXYNOS4210_NCPUS];
- Exynos4210Irq irqs;
- qemu_irq *irq_table;
+ qemu_irq irq_table[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ];
MemoryRegion chipid_mem;
MemoryRegion iram_mem;
@@ -96,20 +96,21 @@ typedef struct Exynos4210State {
MemoryRegion boot_secondary;
MemoryRegion bootreg_mem;
I2CBus *i2c_if[EXYNOS4210_I2C_NUMBER];
-} Exynos4210State;
+ OrIRQState pl330_irq_orgate[EXYNOS4210_NUM_DMA];
+ OrIRQState cpu_irq_orgate[EXYNOS4210_NCPUS];
+ A9MPPrivState a9mpcore;
+ Exynos4210GicState ext_gic;
+ Exynos4210CombinerState int_combiner;
+ Exynos4210CombinerState ext_combiner;
+ SplitIRQ splitter[EXYNOS4210_NUM_SPLITTERS];
+};
+
+#define TYPE_EXYNOS4210_SOC "exynos4210"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210State, EXYNOS4210_SOC)
void exynos4210_write_secondary(ARMCPU *cpu,
const struct arm_boot_info *info);
-Exynos4210State *exynos4210_init(MemoryRegion *system_mem);
-
-/* Initialize exynos4210 IRQ subsystem stub */
-qemu_irq *exynos4210_init_irq(Exynos4210Irq *env);
-
-/* Initialize board IRQs.
- * These IRQs contain splitted Int/External Combiner and External Gic IRQs */
-void exynos4210_init_board_irqs(Exynos4210Irq *s);
-
/* Get IRQ number from exynos4210 IRQ subsystem stub.
* To identify IRQ source use internal combiner group and bit number
* grp - group number
@@ -117,12 +118,6 @@ void exynos4210_init_board_irqs(Exynos4210Irq *s);
uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit);
/*
- * Get Combiner input GPIO into irqs structure
- */
-void exynos4210_combiner_get_gpioin(Exynos4210Irq *irqs, DeviceState *dev,
- int ext);
-
-/*
* exynos4210 UART
*/
DeviceState *exynos4210_uart_create(hwaddr addr,
diff --git a/include/hw/arm/fsl-imx25.h b/include/hw/arm/fsl-imx25.h
index 65a73714ef..df2f83980f 100644
--- a/include/hw/arm/fsl-imx25.h
+++ b/include/hw/arm/fsl-imx25.h
@@ -17,27 +17,34 @@
#ifndef FSL_IMX25_H
#define FSL_IMX25_H
-#include "hw/arm/arm.h"
#include "hw/intc/imx_avic.h"
#include "hw/misc/imx25_ccm.h"
#include "hw/char/imx_serial.h"
#include "hw/timer/imx_gpt.h"
#include "hw/timer/imx_epit.h"
#include "hw/net/imx_fec.h"
+#include "hw/misc/imx_rngc.h"
#include "hw/i2c/imx_i2c.h"
#include "hw/gpio/imx_gpio.h"
+#include "hw/sd/sdhci.h"
+#include "hw/usb/chipidea.h"
+#include "hw/watchdog/wdt_imx2.h"
#include "exec/memory.h"
+#include "target/arm/cpu.h"
+#include "qom/object.h"
-#define TYPE_FSL_IMX25 "fsl,imx25"
-#define FSL_IMX25(obj) OBJECT_CHECK(FslIMX25State, (obj), TYPE_FSL_IMX25)
+#define TYPE_FSL_IMX25 "fsl-imx25"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX25State, FSL_IMX25)
#define FSL_IMX25_NUM_UARTS 5
#define FSL_IMX25_NUM_GPTS 4
#define FSL_IMX25_NUM_EPITS 2
#define FSL_IMX25_NUM_I2CS 3
#define FSL_IMX25_NUM_GPIOS 4
+#define FSL_IMX25_NUM_ESDHCS 2
+#define FSL_IMX25_NUM_USBS 2
-typedef struct FslIMX25State {
+struct FslIMX25State {
/*< private >*/
DeviceState parent_obj;
@@ -49,12 +56,17 @@ typedef struct FslIMX25State {
IMXGPTState gpt[FSL_IMX25_NUM_GPTS];
IMXEPITState epit[FSL_IMX25_NUM_EPITS];
IMXFECState fec;
+ IMXRNGCState rngc;
IMXI2CState i2c[FSL_IMX25_NUM_I2CS];
IMXGPIOState gpio[FSL_IMX25_NUM_GPIOS];
+ SDHCIState esdhc[FSL_IMX25_NUM_ESDHCS];
+ ChipideaState usb[FSL_IMX25_NUM_USBS];
+ IMX2WdtState wdt;
MemoryRegion rom[2];
MemoryRegion iram;
MemoryRegion iram_alias;
-} FslIMX25State;
+ uint32_t phy_num;
+};
/**
* i.MX25 memory map
@@ -166,7 +178,7 @@ typedef struct FslIMX25State {
* 0xBB00_0000 0xBB00_0FFF 4 Kbytes NAND flash main area buffer
* 0xBB00_1000 0xBB00_11FF 512 B NAND flash spare area buffer
* 0xBB00_1200 0xBB00_1DFF 3 Kbytes Reserved
- * 0xBB00_1E00 0xBB00_1FFF 512 B NAND flash control regisers
+ * 0xBB00_1E00 0xBB00_1FFF 512 B NAND flash control registers
* 0xBB01_2000 0xBFFF_FFFF 96 Mbytes (minus 8 Kbytes) Reserved
* 0xC000_0000 0xFFFF_FFFF 1024 Mbytes Reserved
*/
@@ -210,10 +222,22 @@ typedef struct FslIMX25State {
#define FSL_IMX25_GPIO4_SIZE 0x4000
#define FSL_IMX25_GPIO3_ADDR 0x53FA4000
#define FSL_IMX25_GPIO3_SIZE 0x4000
+#define FSL_IMX25_RNGC_ADDR 0x53FB0000
+#define FSL_IMX25_RNGC_SIZE 0x4000
+#define FSL_IMX25_ESDHC1_ADDR 0x53FB4000
+#define FSL_IMX25_ESDHC1_SIZE 0x4000
+#define FSL_IMX25_ESDHC2_ADDR 0x53FB8000
+#define FSL_IMX25_ESDHC2_SIZE 0x4000
#define FSL_IMX25_GPIO1_ADDR 0x53FCC000
#define FSL_IMX25_GPIO1_SIZE 0x4000
#define FSL_IMX25_GPIO2_ADDR 0x53FD0000
#define FSL_IMX25_GPIO2_SIZE 0x4000
+#define FSL_IMX25_WDT_ADDR 0x53FDC000
+#define FSL_IMX25_WDT_SIZE 0x4000
+#define FSL_IMX25_USB1_ADDR 0x53FF4000
+#define FSL_IMX25_USB1_SIZE 0x0200
+#define FSL_IMX25_USB2_ADDR 0x53FF4400
+#define FSL_IMX25_USB2_SIZE 0x0200
#define FSL_IMX25_AVIC_ADDR 0x68000000
#define FSL_IMX25_AVIC_SIZE 0x4000
#define FSL_IMX25_IRAM_ADDR 0x78000000
@@ -237,6 +261,7 @@ typedef struct FslIMX25State {
#define FSL_IMX25_EPIT1_IRQ 28
#define FSL_IMX25_EPIT2_IRQ 27
#define FSL_IMX25_FEC_IRQ 57
+#define FSL_IMX25_RNGC_IRQ 22
#define FSL_IMX25_I2C1_IRQ 3
#define FSL_IMX25_I2C2_IRQ 4
#define FSL_IMX25_I2C3_IRQ 10
@@ -244,5 +269,10 @@ typedef struct FslIMX25State {
#define FSL_IMX25_GPIO2_IRQ 51
#define FSL_IMX25_GPIO3_IRQ 16
#define FSL_IMX25_GPIO4_IRQ 23
+#define FSL_IMX25_ESDHC1_IRQ 9
+#define FSL_IMX25_ESDHC2_IRQ 8
+#define FSL_IMX25_USB1_IRQ 37
+#define FSL_IMX25_USB2_IRQ 35
+#define FSL_IMX25_WDT_IRQ 55
#endif /* FSL_IMX25_H */
diff --git a/include/hw/arm/fsl-imx31.h b/include/hw/arm/fsl-imx31.h
index d408abbba0..40c593a5cf 100644
--- a/include/hw/arm/fsl-imx31.h
+++ b/include/hw/arm/fsl-imx31.h
@@ -17,7 +17,6 @@
#ifndef FSL_IMX31_H
#define FSL_IMX31_H
-#include "hw/arm/arm.h"
#include "hw/intc/imx_avic.h"
#include "hw/misc/imx31_ccm.h"
#include "hw/char/imx_serial.h"
@@ -25,17 +24,20 @@
#include "hw/timer/imx_epit.h"
#include "hw/i2c/imx_i2c.h"
#include "hw/gpio/imx_gpio.h"
+#include "hw/watchdog/wdt_imx2.h"
#include "exec/memory.h"
+#include "target/arm/cpu.h"
+#include "qom/object.h"
-#define TYPE_FSL_IMX31 "fsl,imx31"
-#define FSL_IMX31(obj) OBJECT_CHECK(FslIMX31State, (obj), TYPE_FSL_IMX31)
+#define TYPE_FSL_IMX31 "fsl-imx31"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX31State, FSL_IMX31)
#define FSL_IMX31_NUM_UARTS 2
#define FSL_IMX31_NUM_EPITS 2
#define FSL_IMX31_NUM_I2CS 3
#define FSL_IMX31_NUM_GPIOS 3
-typedef struct FslIMX31State {
+struct FslIMX31State {
/*< private >*/
DeviceState parent_obj;
@@ -48,11 +50,12 @@ typedef struct FslIMX31State {
IMXEPITState epit[FSL_IMX31_NUM_EPITS];
IMXI2CState i2c[FSL_IMX31_NUM_I2CS];
IMXGPIOState gpio[FSL_IMX31_NUM_GPIOS];
+ IMX2WdtState wdt;
MemoryRegion secure_rom;
MemoryRegion rom;
MemoryRegion iram;
MemoryRegion iram_alias;
-} FslIMX31State;
+};
#define FSL_IMX31_SECURE_ROM_ADDR 0x00000000
#define FSL_IMX31_SECURE_ROM_SIZE 0x4000
@@ -86,6 +89,8 @@ typedef struct FslIMX31State {
#define FSL_IMX31_GPIO1_SIZE 0x4000
#define FSL_IMX31_GPIO2_ADDR 0x53FD0000
#define FSL_IMX31_GPIO2_SIZE 0x4000
+#define FSL_IMX31_WDT_ADDR 0x53FDC000
+#define FSL_IMX31_WDT_SIZE 0x4000
#define FSL_IMX31_AVIC_ADDR 0x68000000
#define FSL_IMX31_AVIC_SIZE 0x100
#define FSL_IMX31_SDRAM0_ADDR 0x80000000
diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h
index 06f8aaeda4..61c593ffd2 100644
--- a/include/hw/arm/fsl-imx6.h
+++ b/include/hw/arm/fsl-imx6.h
@@ -17,10 +17,11 @@
#ifndef FSL_IMX6_H
#define FSL_IMX6_H
-#include "hw/arm/arm.h"
#include "hw/cpu/a9mpcore.h"
#include "hw/misc/imx6_ccm.h"
#include "hw/misc/imx6_src.h"
+#include "hw/misc/imx7_snvs.h"
+#include "hw/watchdog/wdt_imx2.h"
#include "hw/char/imx_serial.h"
#include "hw/timer/imx_gpt.h"
#include "hw/timer/imx_epit.h"
@@ -29,11 +30,15 @@
#include "hw/sd/sdhci.h"
#include "hw/ssi/imx_spi.h"
#include "hw/net/imx_fec.h"
+#include "hw/usb/chipidea.h"
+#include "hw/usb/imx-usb-phy.h"
+#include "hw/pci-host/designware.h"
#include "exec/memory.h"
#include "cpu.h"
+#include "qom/object.h"
-#define TYPE_FSL_IMX6 "fsl,imx6"
-#define FSL_IMX6(obj) OBJECT_CHECK(FslIMX6State, (obj), TYPE_FSL_IMX6)
+#define TYPE_FSL_IMX6 "fsl-imx6"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX6State, FSL_IMX6)
#define FSL_IMX6_NUM_CPUS 4
#define FSL_IMX6_NUM_UARTS 5
@@ -42,29 +47,38 @@
#define FSL_IMX6_NUM_GPIOS 7
#define FSL_IMX6_NUM_ESDHCS 4
#define FSL_IMX6_NUM_ECSPIS 5
+#define FSL_IMX6_NUM_WDTS 2
+#define FSL_IMX6_NUM_USB_PHYS 2
+#define FSL_IMX6_NUM_USBS 4
-typedef struct FslIMX6State {
+struct FslIMX6State {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
- ARMCPU cpu[FSL_IMX6_NUM_CPUS];
- A9MPPrivState a9mpcore;
- IMX6CCMState ccm;
- IMX6SRCState src;
- IMXSerialState uart[FSL_IMX6_NUM_UARTS];
- IMXGPTState gpt;
- IMXEPITState epit[FSL_IMX6_NUM_EPITS];
- IMXI2CState i2c[FSL_IMX6_NUM_I2CS];
- IMXGPIOState gpio[FSL_IMX6_NUM_GPIOS];
- SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS];
- IMXSPIState spi[FSL_IMX6_NUM_ECSPIS];
- IMXFECState eth;
- MemoryRegion rom;
- MemoryRegion caam;
- MemoryRegion ocram;
- MemoryRegion ocram_alias;
-} FslIMX6State;
+ ARMCPU cpu[FSL_IMX6_NUM_CPUS];
+ A9MPPrivState a9mpcore;
+ IMX6CCMState ccm;
+ IMX6SRCState src;
+ IMX7SNVSState snvs;
+ IMXSerialState uart[FSL_IMX6_NUM_UARTS];
+ IMXGPTState gpt;
+ IMXEPITState epit[FSL_IMX6_NUM_EPITS];
+ IMXI2CState i2c[FSL_IMX6_NUM_I2CS];
+ IMXGPIOState gpio[FSL_IMX6_NUM_GPIOS];
+ SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS];
+ IMXSPIState spi[FSL_IMX6_NUM_ECSPIS];
+ IMX2WdtState wdt[FSL_IMX6_NUM_WDTS];
+ IMXUSBPHYState usbphy[FSL_IMX6_NUM_USB_PHYS];
+ ChipideaState usb[FSL_IMX6_NUM_USBS];
+ IMXFECState eth;
+ DesignwarePCIEHost pcie;
+ MemoryRegion rom;
+ MemoryRegion caam;
+ MemoryRegion ocram;
+ MemoryRegion ocram_alias;
+ uint32_t phy_num;
+};
#define FSL_IMX6_MMDC_ADDR 0x10000000
diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
index 5897217194..8277b0e8b2 100644
--- a/include/hw/arm/fsl-imx6ul.h
+++ b/include/hw/arm/fsl-imx6ul.h
@@ -17,28 +17,29 @@
#ifndef FSL_IMX6UL_H
#define FSL_IMX6UL_H
-#include "hw/arm/arm.h"
#include "hw/cpu/a15mpcore.h"
#include "hw/misc/imx6ul_ccm.h"
#include "hw/misc/imx6_src.h"
#include "hw/misc/imx7_snvs.h"
-#include "hw/misc/imx7_gpr.h"
#include "hw/intc/imx_gpcv2.h"
-#include "hw/misc/imx2_wdt.h"
+#include "hw/watchdog/wdt_imx2.h"
#include "hw/gpio/imx_gpio.h"
#include "hw/char/imx_serial.h"
#include "hw/timer/imx_gpt.h"
#include "hw/timer/imx_epit.h"
#include "hw/i2c/imx_i2c.h"
-#include "hw/gpio/imx_gpio.h"
#include "hw/sd/sdhci.h"
#include "hw/ssi/imx_spi.h"
#include "hw/net/imx_fec.h"
+#include "hw/usb/chipidea.h"
+#include "hw/usb/imx-usb-phy.h"
#include "exec/memory.h"
#include "cpu.h"
+#include "qom/object.h"
+#include "qemu/units.h"
-#define TYPE_FSL_IMX6UL "fsl,imx6ul"
-#define FSL_IMX6UL(obj) OBJECT_CHECK(FslIMX6ULState, (obj), TYPE_FSL_IMX6UL)
+#define TYPE_FSL_IMX6UL "fsl-imx6ul"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX6ULState, FSL_IMX6UL)
enum FslIMX6ULConfiguration {
FSL_IMX6UL_NUM_CPUS = 1,
@@ -54,14 +55,19 @@ enum FslIMX6ULConfiguration {
FSL_IMX6UL_NUM_I2CS = 4,
FSL_IMX6UL_NUM_ECSPIS = 4,
FSL_IMX6UL_NUM_ADCS = 2,
+ FSL_IMX6UL_NUM_USB_PHYS = 2,
+ FSL_IMX6UL_NUM_USBS = 2,
+ FSL_IMX6UL_NUM_SAIS = 3,
+ FSL_IMX6UL_NUM_CANS = 2,
+ FSL_IMX6UL_NUM_PWMS = 8,
};
-typedef struct FslIMX6ULState {
+struct FslIMX6ULState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
- ARMCPU cpu[FSL_IMX6UL_NUM_CPUS];
+ ARMCPU cpu;
A15MPPrivState a7mpcore;
IMXGPTState gpt[FSL_IMX6UL_NUM_GPTS];
IMXEPITState epit[FSL_IMX6UL_NUM_EPITS];
@@ -70,130 +76,248 @@ typedef struct FslIMX6ULState {
IMX6SRCState src;
IMX7SNVSState snvs;
IMXGPCv2State gpcv2;
- IMX7GPRState gpr;
IMXSPIState spi[FSL_IMX6UL_NUM_ECSPIS];
IMXI2CState i2c[FSL_IMX6UL_NUM_I2CS];
IMXSerialState uart[FSL_IMX6UL_NUM_UARTS];
IMXFECState eth[FSL_IMX6UL_NUM_ETHS];
SDHCIState usdhc[FSL_IMX6UL_NUM_USDHCS];
IMX2WdtState wdt[FSL_IMX6UL_NUM_WDTS];
+ IMXUSBPHYState usbphy[FSL_IMX6UL_NUM_USB_PHYS];
+ ChipideaState usb[FSL_IMX6UL_NUM_USBS];
MemoryRegion rom;
MemoryRegion caam;
MemoryRegion ocram;
MemoryRegion ocram_alias;
-} FslIMX6ULState;
+
+ uint32_t phy_num[FSL_IMX6UL_NUM_ETHS];
+ bool phy_connected[FSL_IMX6UL_NUM_ETHS];
+};
enum FslIMX6ULMemoryMap {
FSL_IMX6UL_MMDC_ADDR = 0x80000000,
- FSL_IMX6UL_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL,
+ FSL_IMX6UL_MMDC_SIZE = (2 * GiB),
FSL_IMX6UL_QSPI1_MEM_ADDR = 0x60000000,
+ FSL_IMX6UL_QSPI1_MEM_SIZE = (256 * MiB),
+
FSL_IMX6UL_EIM_ALIAS_ADDR = 0x58000000,
+ FSL_IMX6UL_EIM_ALIAS_SIZE = (128 * MiB),
+
FSL_IMX6UL_EIM_CS_ADDR = 0x50000000,
+ FSL_IMX6UL_EIM_CS_SIZE = (128 * MiB),
+
FSL_IMX6UL_AES_ENCRYPT_ADDR = 0x10000000,
+ FSL_IMX6UL_AES_ENCRYPT_SIZE = (1 * MiB),
+
FSL_IMX6UL_QSPI1_RX_ADDR = 0x0C000000,
+ FSL_IMX6UL_QSPI1_RX_SIZE = (32 * MiB),
- /* AIPS-2 */
+ /* AIPS-2 Begin */
FSL_IMX6UL_UART6_ADDR = 0x021FC000,
+
FSL_IMX6UL_I2C4_ADDR = 0x021F8000,
+
FSL_IMX6UL_UART5_ADDR = 0x021F4000,
FSL_IMX6UL_UART4_ADDR = 0x021F0000,
FSL_IMX6UL_UART3_ADDR = 0x021EC000,
FSL_IMX6UL_UART2_ADDR = 0x021E8000,
+
FSL_IMX6UL_WDOG3_ADDR = 0x021E4000,
+
FSL_IMX6UL_QSPI_ADDR = 0x021E0000,
+ FSL_IMX6UL_QSPI_SIZE = 0x500,
+
FSL_IMX6UL_SYS_CNT_CTRL_ADDR = 0x021DC000,
+ FSL_IMX6UL_SYS_CNT_CTRL_SIZE = (16 * KiB),
+
FSL_IMX6UL_SYS_CNT_CMP_ADDR = 0x021D8000,
+ FSL_IMX6UL_SYS_CNT_CMP_SIZE = (16 * KiB),
+
FSL_IMX6UL_SYS_CNT_RD_ADDR = 0x021D4000,
+ FSL_IMX6UL_SYS_CNT_RD_SIZE = (16 * KiB),
+
FSL_IMX6UL_TZASC_ADDR = 0x021D0000,
+ FSL_IMX6UL_TZASC_SIZE = (16 * KiB),
+
FSL_IMX6UL_PXP_ADDR = 0x021CC000,
+ FSL_IMX6UL_PXP_SIZE = (16 * KiB),
+
FSL_IMX6UL_LCDIF_ADDR = 0x021C8000,
+ FSL_IMX6UL_LCDIF_SIZE = 0x100,
+
FSL_IMX6UL_CSI_ADDR = 0x021C4000,
+ FSL_IMX6UL_CSI_SIZE = 0x100,
+
FSL_IMX6UL_CSU_ADDR = 0x021C0000,
+ FSL_IMX6UL_CSU_SIZE = (16 * KiB),
+
FSL_IMX6UL_OCOTP_CTRL_ADDR = 0x021BC000,
+ FSL_IMX6UL_OCOTP_CTRL_SIZE = (4 * KiB),
+
FSL_IMX6UL_EIM_ADDR = 0x021B8000,
+ FSL_IMX6UL_EIM_SIZE = 0x100,
+
FSL_IMX6UL_SIM2_ADDR = 0x021B4000,
+
FSL_IMX6UL_MMDC_CFG_ADDR = 0x021B0000,
+ FSL_IMX6UL_MMDC_CFG_SIZE = (4 * KiB),
+
FSL_IMX6UL_ROMCP_ADDR = 0x021AC000,
+ FSL_IMX6UL_ROMCP_SIZE = 0x300,
+
FSL_IMX6UL_I2C3_ADDR = 0x021A8000,
FSL_IMX6UL_I2C2_ADDR = 0x021A4000,
FSL_IMX6UL_I2C1_ADDR = 0x021A0000,
+
FSL_IMX6UL_ADC2_ADDR = 0x0219C000,
FSL_IMX6UL_ADC1_ADDR = 0x02198000,
+ FSL_IMX6UL_ADCn_SIZE = 0x100,
+
FSL_IMX6UL_USDHC2_ADDR = 0x02194000,
FSL_IMX6UL_USDHC1_ADDR = 0x02190000,
+
FSL_IMX6UL_SIM1_ADDR = 0x0218C000,
+ FSL_IMX6UL_SIMn_SIZE = (16 * KiB),
+
FSL_IMX6UL_ENET1_ADDR = 0x02188000,
+
FSL_IMX6UL_USBO2_USBMISC_ADDR = 0x02184800,
- FSL_IMX6UL_USBO2_USB_ADDR = 0x02184000,
+ FSL_IMX6UL_USBO2_USBMISC_SIZE = 0x200,
+
+ FSL_IMX6UL_USBO2_USB1_ADDR = 0x02184000,
+ FSL_IMX6UL_USBO2_USB2_ADDR = 0x02184200,
+
FSL_IMX6UL_USBO2_PL301_ADDR = 0x02180000,
+ FSL_IMX6UL_USBO2_PL301_SIZE = (16 * KiB),
+
FSL_IMX6UL_AIPS2_CFG_ADDR = 0x0217C000,
+ FSL_IMX6UL_AIPS2_CFG_SIZE = 0x100,
+
FSL_IMX6UL_CAAM_ADDR = 0x02140000,
+ FSL_IMX6UL_CAAM_SIZE = (16 * KiB),
+
FSL_IMX6UL_A7MPCORE_DAP_ADDR = 0x02100000,
+ FSL_IMX6UL_A7MPCORE_DAP_SIZE = (4 * KiB),
+ /* AIPS-2 End */
- /* AIPS-1 */
+ /* AIPS-1 Begin */
FSL_IMX6UL_PWM8_ADDR = 0x020FC000,
FSL_IMX6UL_PWM7_ADDR = 0x020F8000,
FSL_IMX6UL_PWM6_ADDR = 0x020F4000,
FSL_IMX6UL_PWM5_ADDR = 0x020F0000,
+
FSL_IMX6UL_SDMA_ADDR = 0x020EC000,
+ FSL_IMX6UL_SDMA_SIZE = 0x300,
+
FSL_IMX6UL_GPT2_ADDR = 0x020E8000,
+
FSL_IMX6UL_IOMUXC_GPR_ADDR = 0x020E4000,
+ FSL_IMX6UL_IOMUXC_GPR_SIZE = 0x40,
+
FSL_IMX6UL_IOMUXC_ADDR = 0x020E0000,
+ FSL_IMX6UL_IOMUXC_SIZE = 0x700,
+
FSL_IMX6UL_GPC_ADDR = 0x020DC000,
+
FSL_IMX6UL_SRC_ADDR = 0x020D8000,
+
FSL_IMX6UL_EPIT2_ADDR = 0x020D4000,
FSL_IMX6UL_EPIT1_ADDR = 0x020D0000,
+
FSL_IMX6UL_SNVS_HP_ADDR = 0x020CC000,
+
+ FSL_IMX6UL_USBPHY2_ADDR = 0x020CA000,
+ FSL_IMX6UL_USBPHY1_ADDR = 0x020C9000,
+
FSL_IMX6UL_ANALOG_ADDR = 0x020C8000,
+ FSL_IMX6UL_ANALOG_SIZE = 0x300,
+
FSL_IMX6UL_CCM_ADDR = 0x020C4000,
+
FSL_IMX6UL_WDOG2_ADDR = 0x020C0000,
FSL_IMX6UL_WDOG1_ADDR = 0x020BC000,
+
FSL_IMX6UL_KPP_ADDR = 0x020B8000,
+ FSL_IMX6UL_KPP_SIZE = 0x10,
+
FSL_IMX6UL_ENET2_ADDR = 0x020B4000,
+
FSL_IMX6UL_SNVS_LP_ADDR = 0x020B0000,
+ FSL_IMX6UL_SNVS_LP_SIZE = (16 * KiB),
+
FSL_IMX6UL_GPIO5_ADDR = 0x020AC000,
FSL_IMX6UL_GPIO4_ADDR = 0x020A8000,
FSL_IMX6UL_GPIO3_ADDR = 0x020A4000,
FSL_IMX6UL_GPIO2_ADDR = 0x020A0000,
FSL_IMX6UL_GPIO1_ADDR = 0x0209C000,
+
FSL_IMX6UL_GPT1_ADDR = 0x02098000,
+
FSL_IMX6UL_CAN2_ADDR = 0x02094000,
FSL_IMX6UL_CAN1_ADDR = 0x02090000,
+ FSL_IMX6UL_CANn_SIZE = (4 * KiB),
+
FSL_IMX6UL_PWM4_ADDR = 0x0208C000,
FSL_IMX6UL_PWM3_ADDR = 0x02088000,
FSL_IMX6UL_PWM2_ADDR = 0x02084000,
FSL_IMX6UL_PWM1_ADDR = 0x02080000,
+ FSL_IMX6UL_PWMn_SIZE = 0x20,
+
FSL_IMX6UL_AIPS1_CFG_ADDR = 0x0207C000,
+ FSL_IMX6UL_AIPS1_CFG_SIZE = (16 * KiB),
+
FSL_IMX6UL_BEE_ADDR = 0x02044000,
+ FSL_IMX6UL_BEE_SIZE = (16 * KiB),
+
FSL_IMX6UL_TOUCH_CTRL_ADDR = 0x02040000,
+ FSL_IMX6UL_TOUCH_CTRL_SIZE = 0x100,
+
FSL_IMX6UL_SPBA_ADDR = 0x0203C000,
+ FSL_IMX6UL_SPBA_SIZE = 0x100,
+
FSL_IMX6UL_ASRC_ADDR = 0x02034000,
+ FSL_IMX6UL_ASRC_SIZE = 0x100,
+
FSL_IMX6UL_SAI3_ADDR = 0x02030000,
FSL_IMX6UL_SAI2_ADDR = 0x0202C000,
FSL_IMX6UL_SAI1_ADDR = 0x02028000,
+ FSL_IMX6UL_SAIn_SIZE = 0x200,
+
FSL_IMX6UL_UART8_ADDR = 0x02024000,
FSL_IMX6UL_UART1_ADDR = 0x02020000,
FSL_IMX6UL_UART7_ADDR = 0x02018000,
+
FSL_IMX6UL_ECSPI4_ADDR = 0x02014000,
FSL_IMX6UL_ECSPI3_ADDR = 0x02010000,
FSL_IMX6UL_ECSPI2_ADDR = 0x0200C000,
FSL_IMX6UL_ECSPI1_ADDR = 0x02008000,
+
FSL_IMX6UL_SPDIF_ADDR = 0x02004000,
+ FSL_IMX6UL_SPDIF_SIZE = 0x100,
+ /* AIPS-1 End */
+
+ FSL_IMX6UL_BCH_ADDR = 0x01808000,
+ FSL_IMX6UL_BCH_SIZE = 0x200,
+
+ FSL_IMX6UL_GPMI_ADDR = 0x01806000,
+ FSL_IMX6UL_GPMI_SIZE = 0x200,
FSL_IMX6UL_APBH_DMA_ADDR = 0x01804000,
- FSL_IMX6UL_APBH_DMA_SIZE = (32 * 1024),
+ FSL_IMX6UL_APBH_DMA_SIZE = (4 * KiB),
FSL_IMX6UL_A7MPCORE_ADDR = 0x00A00000,
FSL_IMX6UL_OCRAM_ALIAS_ADDR = 0x00920000,
- FSL_IMX6UL_OCRAM_ALIAS_SIZE = 0x00060000,
+ FSL_IMX6UL_OCRAM_ALIAS_SIZE = (384 * KiB),
+
FSL_IMX6UL_OCRAM_MEM_ADDR = 0x00900000,
- FSL_IMX6UL_OCRAM_MEM_SIZE = 0x00020000,
+ FSL_IMX6UL_OCRAM_MEM_SIZE = (128 * KiB),
+
FSL_IMX6UL_CAAM_MEM_ADDR = 0x00100000,
- FSL_IMX6UL_CAAM_MEM_SIZE = 0x00008000,
+ FSL_IMX6UL_CAAM_MEM_SIZE = (32 * KiB),
+
FSL_IMX6UL_ROM_ADDR = 0x00000000,
- FSL_IMX6UL_ROM_SIZE = 0x00018000,
+ FSL_IMX6UL_ROM_SIZE = (96 * KiB),
};
enum FslIMX6ULIRQs {
@@ -241,10 +365,10 @@ enum FslIMX6ULIRQs {
FSL_IMX6UL_UART7_IRQ = 39,
FSL_IMX6UL_UART8_IRQ = 40,
- FSL_IMX6UL_USB1_IRQ = 42,
- FSL_IMX6UL_USB2_IRQ = 43,
+ FSL_IMX6UL_USB1_IRQ = 43,
+ FSL_IMX6UL_USB2_IRQ = 42,
FSL_IMX6UL_USB_PHY1_IRQ = 44,
- FSL_IMX6UL_USB_PHY2_IRQ = 44,
+ FSL_IMX6UL_USB_PHY2_IRQ = 45,
FSL_IMX6UL_CAAM_JQ2_IRQ = 46,
FSL_IMX6UL_CAAM_ERR_IRQ = 47,
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
index d848262bfd..411fa1c2e3 100644
--- a/include/hw/arm/fsl-imx7.h
+++ b/include/hw/arm/fsl-imx7.h
@@ -19,30 +19,29 @@
#ifndef FSL_IMX7_H
#define FSL_IMX7_H
-#include "hw/arm/arm.h"
#include "hw/cpu/a15mpcore.h"
#include "hw/intc/imx_gpcv2.h"
#include "hw/misc/imx7_ccm.h"
#include "hw/misc/imx7_snvs.h"
#include "hw/misc/imx7_gpr.h"
-#include "hw/misc/imx6_src.h"
-#include "hw/misc/imx2_wdt.h"
+#include "hw/misc/imx7_src.h"
+#include "hw/watchdog/wdt_imx2.h"
#include "hw/gpio/imx_gpio.h"
#include "hw/char/imx_serial.h"
#include "hw/timer/imx_gpt.h"
#include "hw/timer/imx_epit.h"
#include "hw/i2c/imx_i2c.h"
-#include "hw/gpio/imx_gpio.h"
#include "hw/sd/sdhci.h"
#include "hw/ssi/imx_spi.h"
#include "hw/net/imx_fec.h"
#include "hw/pci-host/designware.h"
#include "hw/usb/chipidea.h"
-#include "exec/memory.h"
#include "cpu.h"
+#include "qom/object.h"
+#include "qemu/units.h"
-#define TYPE_FSL_IMX7 "fsl,imx7"
-#define FSL_IMX7(obj) OBJECT_CHECK(FslIMX7State, (obj), TYPE_FSL_IMX7)
+#define TYPE_FSL_IMX7 "fsl-imx7"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX7State, FSL_IMX7)
enum FslIMX7Configuration {
FSL_IMX7_NUM_CPUS = 2,
@@ -58,9 +57,12 @@ enum FslIMX7Configuration {
FSL_IMX7_NUM_ECSPIS = 4,
FSL_IMX7_NUM_USBS = 3,
FSL_IMX7_NUM_ADCS = 2,
+ FSL_IMX7_NUM_SAIS = 3,
+ FSL_IMX7_NUM_CANS = 2,
+ FSL_IMX7_NUM_PWMS = 4,
};
-typedef struct FslIMX7State {
+struct FslIMX7State {
/*< private >*/
DeviceState parent_obj;
@@ -72,6 +74,7 @@ typedef struct FslIMX7State {
IMX7CCMState ccm;
IMX7AnalogState analog;
IMX7SNVSState snvs;
+ IMX7SRCState src;
IMXGPCv2State gpcv2;
IMXSPIState spi[FSL_IMX7_NUM_ECSPIS];
IMXI2CState i2c[FSL_IMX7_NUM_I2CS];
@@ -82,103 +85,293 @@ typedef struct FslIMX7State {
IMX7GPRState gpr;
ChipideaState usb[FSL_IMX7_NUM_USBS];
DesignwarePCIEHost pcie;
-} FslIMX7State;
+ MemoryRegion rom;
+ MemoryRegion caam;
+ MemoryRegion ocram;
+ MemoryRegion ocram_epdc;
+ MemoryRegion ocram_pxp;
+ MemoryRegion ocram_s;
+
+ uint32_t phy_num[FSL_IMX7_NUM_ETHS];
+ bool phy_connected[FSL_IMX7_NUM_ETHS];
+};
enum FslIMX7MemoryMap {
FSL_IMX7_MMDC_ADDR = 0x80000000,
- FSL_IMX7_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL,
+ FSL_IMX7_MMDC_SIZE = (2 * GiB),
- FSL_IMX7_GPIO1_ADDR = 0x30200000,
- FSL_IMX7_GPIO2_ADDR = 0x30210000,
- FSL_IMX7_GPIO3_ADDR = 0x30220000,
- FSL_IMX7_GPIO4_ADDR = 0x30230000,
- FSL_IMX7_GPIO5_ADDR = 0x30240000,
- FSL_IMX7_GPIO6_ADDR = 0x30250000,
- FSL_IMX7_GPIO7_ADDR = 0x30260000,
+ FSL_IMX7_QSPI1_MEM_ADDR = 0x60000000,
+ FSL_IMX7_QSPI1_MEM_SIZE = (256 * MiB),
- FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000,
+ FSL_IMX7_PCIE1_MEM_ADDR = 0x40000000,
+ FSL_IMX7_PCIE1_MEM_SIZE = (256 * MiB),
- FSL_IMX7_WDOG1_ADDR = 0x30280000,
- FSL_IMX7_WDOG2_ADDR = 0x30290000,
- FSL_IMX7_WDOG3_ADDR = 0x302A0000,
- FSL_IMX7_WDOG4_ADDR = 0x302B0000,
+ FSL_IMX7_QSPI1_RX_BUF_ADDR = 0x34000000,
+ FSL_IMX7_QSPI1_RX_BUF_SIZE = (32 * MiB),
- FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000,
+ /* PCIe Peripherals */
+ FSL_IMX7_PCIE_REG_ADDR = 0x33800000,
- FSL_IMX7_GPT1_ADDR = 0x302D0000,
- FSL_IMX7_GPT2_ADDR = 0x302E0000,
- FSL_IMX7_GPT3_ADDR = 0x302F0000,
- FSL_IMX7_GPT4_ADDR = 0x30300000,
+ /* MMAP Peripherals */
+ FSL_IMX7_DMA_APBH_ADDR = 0x33000000,
+ FSL_IMX7_DMA_APBH_SIZE = 0x8000,
+
+ /* GPV configuration */
+ FSL_IMX7_GPV6_ADDR = 0x32600000,
+ FSL_IMX7_GPV5_ADDR = 0x32500000,
+ FSL_IMX7_GPV4_ADDR = 0x32400000,
+ FSL_IMX7_GPV3_ADDR = 0x32300000,
+ FSL_IMX7_GPV2_ADDR = 0x32200000,
+ FSL_IMX7_GPV1_ADDR = 0x32100000,
+ FSL_IMX7_GPV0_ADDR = 0x32000000,
+ FSL_IMX7_GPVn_SIZE = (1 * MiB),
+
+ /* Arm Peripherals */
+ FSL_IMX7_A7MPCORE_ADDR = 0x31000000,
- FSL_IMX7_IOMUXC_ADDR = 0x30330000,
- FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000,
- FSL_IMX7_IOMUXCn_SIZE = 0x1000,
+ /* AIPS-3 Begin */
- FSL_IMX7_ANALOG_ADDR = 0x30360000,
- FSL_IMX7_SNVS_ADDR = 0x30370000,
- FSL_IMX7_CCM_ADDR = 0x30380000,
+ FSL_IMX7_ENET2_ADDR = 0x30BF0000,
+ FSL_IMX7_ENET1_ADDR = 0x30BE0000,
- FSL_IMX7_SRC_ADDR = 0x30390000,
- FSL_IMX7_SRC_SIZE = 0x1000,
+ FSL_IMX7_SDMA_ADDR = 0x30BD0000,
+ FSL_IMX7_SDMA_SIZE = (4 * KiB),
- FSL_IMX7_ADC1_ADDR = 0x30610000,
- FSL_IMX7_ADC2_ADDR = 0x30620000,
- FSL_IMX7_ADCn_SIZE = 0x1000,
+ FSL_IMX7_EIM_ADDR = 0x30BC0000,
+ FSL_IMX7_EIM_SIZE = (4 * KiB),
- FSL_IMX7_GPC_ADDR = 0x303A0000,
+ FSL_IMX7_QSPI_ADDR = 0x30BB0000,
+ FSL_IMX7_QSPI_SIZE = 0x8000,
+
+ FSL_IMX7_SIM2_ADDR = 0x30BA0000,
+ FSL_IMX7_SIM1_ADDR = 0x30B90000,
+ FSL_IMX7_SIMn_SIZE = (4 * KiB),
+
+ FSL_IMX7_USDHC3_ADDR = 0x30B60000,
+ FSL_IMX7_USDHC2_ADDR = 0x30B50000,
+ FSL_IMX7_USDHC1_ADDR = 0x30B40000,
+
+ FSL_IMX7_USB3_ADDR = 0x30B30000,
+ FSL_IMX7_USBMISC3_ADDR = 0x30B30200,
+ FSL_IMX7_USB2_ADDR = 0x30B20000,
+ FSL_IMX7_USBMISC2_ADDR = 0x30B20200,
+ FSL_IMX7_USB1_ADDR = 0x30B10000,
+ FSL_IMX7_USBMISC1_ADDR = 0x30B10200,
+ FSL_IMX7_USBMISCn_SIZE = 0x200,
+
+ FSL_IMX7_USB_PL301_ADDR = 0x30AD0000,
+ FSL_IMX7_USB_PL301_SIZE = (64 * KiB),
+
+ FSL_IMX7_SEMAPHORE_HS_ADDR = 0x30AC0000,
+ FSL_IMX7_SEMAPHORE_HS_SIZE = (64 * KiB),
+
+ FSL_IMX7_MUB_ADDR = 0x30AB0000,
+ FSL_IMX7_MUA_ADDR = 0x30AA0000,
+ FSL_IMX7_MUn_SIZE = (KiB),
+
+ FSL_IMX7_UART7_ADDR = 0x30A90000,
+ FSL_IMX7_UART6_ADDR = 0x30A80000,
+ FSL_IMX7_UART5_ADDR = 0x30A70000,
+ FSL_IMX7_UART4_ADDR = 0x30A60000,
- FSL_IMX7_I2C1_ADDR = 0x30A20000,
- FSL_IMX7_I2C2_ADDR = 0x30A30000,
- FSL_IMX7_I2C3_ADDR = 0x30A40000,
FSL_IMX7_I2C4_ADDR = 0x30A50000,
+ FSL_IMX7_I2C3_ADDR = 0x30A40000,
+ FSL_IMX7_I2C2_ADDR = 0x30A30000,
+ FSL_IMX7_I2C1_ADDR = 0x30A20000,
- FSL_IMX7_ECSPI1_ADDR = 0x30820000,
- FSL_IMX7_ECSPI2_ADDR = 0x30830000,
- FSL_IMX7_ECSPI3_ADDR = 0x30840000,
- FSL_IMX7_ECSPI4_ADDR = 0x30630000,
+ FSL_IMX7_CAN2_ADDR = 0x30A10000,
+ FSL_IMX7_CAN1_ADDR = 0x30A00000,
+ FSL_IMX7_CANn_SIZE = (4 * KiB),
- FSL_IMX7_LCDIF_ADDR = 0x30730000,
- FSL_IMX7_LCDIF_SIZE = 0x1000,
+ FSL_IMX7_AIPS3_CONF_ADDR = 0x309F0000,
+ FSL_IMX7_AIPS3_CONF_SIZE = (64 * KiB),
- FSL_IMX7_UART1_ADDR = 0x30860000,
+ FSL_IMX7_CAAM_ADDR = 0x30900000,
+ FSL_IMX7_CAAM_SIZE = (256 * KiB),
+
+ FSL_IMX7_SPBA_ADDR = 0x308F0000,
+ FSL_IMX7_SPBA_SIZE = (4 * KiB),
+
+ FSL_IMX7_SAI3_ADDR = 0x308C0000,
+ FSL_IMX7_SAI2_ADDR = 0x308B0000,
+ FSL_IMX7_SAI1_ADDR = 0x308A0000,
+ FSL_IMX7_SAIn_SIZE = (4 * KiB),
+
+ FSL_IMX7_UART3_ADDR = 0x30880000,
/*
* Some versions of the reference manual claim that UART2 is @
* 0x30870000, but experiments with HW + DT files in upstream
* Linux kernel show that not to be true and that block is
- * acutally located @ 0x30890000
+ * actually located @ 0x30890000
*/
FSL_IMX7_UART2_ADDR = 0x30890000,
- FSL_IMX7_UART3_ADDR = 0x30880000,
- FSL_IMX7_UART4_ADDR = 0x30A60000,
- FSL_IMX7_UART5_ADDR = 0x30A70000,
- FSL_IMX7_UART6_ADDR = 0x30A80000,
- FSL_IMX7_UART7_ADDR = 0x30A90000,
+ FSL_IMX7_UART1_ADDR = 0x30860000,
- FSL_IMX7_ENET1_ADDR = 0x30BE0000,
- FSL_IMX7_ENET2_ADDR = 0x30BF0000,
+ FSL_IMX7_ECSPI3_ADDR = 0x30840000,
+ FSL_IMX7_ECSPI2_ADDR = 0x30830000,
+ FSL_IMX7_ECSPI1_ADDR = 0x30820000,
+ FSL_IMX7_ECSPIn_SIZE = (4 * KiB),
- FSL_IMX7_USB1_ADDR = 0x30B10000,
- FSL_IMX7_USBMISC1_ADDR = 0x30B10200,
- FSL_IMX7_USB2_ADDR = 0x30B20000,
- FSL_IMX7_USBMISC2_ADDR = 0x30B20200,
- FSL_IMX7_USB3_ADDR = 0x30B30000,
- FSL_IMX7_USBMISC3_ADDR = 0x30B30200,
- FSL_IMX7_USBMISCn_SIZE = 0x200,
+ /* AIPS-3 End */
- FSL_IMX7_USDHC1_ADDR = 0x30B40000,
- FSL_IMX7_USDHC2_ADDR = 0x30B50000,
- FSL_IMX7_USDHC3_ADDR = 0x30B60000,
+ /* AIPS-2 Begin */
- FSL_IMX7_SDMA_ADDR = 0x30BD0000,
- FSL_IMX7_SDMA_SIZE = 0x1000,
+ FSL_IMX7_AXI_DEBUG_MON_ADDR = 0x307E0000,
+ FSL_IMX7_AXI_DEBUG_MON_SIZE = (64 * KiB),
+
+ FSL_IMX7_PERFMON2_ADDR = 0x307D0000,
+ FSL_IMX7_PERFMON1_ADDR = 0x307C0000,
+ FSL_IMX7_PERFMONn_SIZE = (64 * KiB),
+
+ FSL_IMX7_DDRC_ADDR = 0x307A0000,
+ FSL_IMX7_DDRC_SIZE = (4 * KiB),
+
+ FSL_IMX7_DDRC_PHY_ADDR = 0x30790000,
+ FSL_IMX7_DDRC_PHY_SIZE = (4 * KiB),
+
+ FSL_IMX7_TZASC_ADDR = 0x30780000,
+ FSL_IMX7_TZASC_SIZE = (64 * KiB),
+
+ FSL_IMX7_MIPI_DSI_ADDR = 0x30760000,
+ FSL_IMX7_MIPI_DSI_SIZE = (4 * KiB),
+
+ FSL_IMX7_MIPI_CSI_ADDR = 0x30750000,
+ FSL_IMX7_MIPI_CSI_SIZE = 0x4000,
+
+ FSL_IMX7_LCDIF_ADDR = 0x30730000,
+ FSL_IMX7_LCDIF_SIZE = 0x8000,
+
+ FSL_IMX7_CSI_ADDR = 0x30710000,
+ FSL_IMX7_CSI_SIZE = (4 * KiB),
+
+ FSL_IMX7_PXP_ADDR = 0x30700000,
+ FSL_IMX7_PXP_SIZE = 0x4000,
+
+ FSL_IMX7_EPDC_ADDR = 0x306F0000,
+ FSL_IMX7_EPDC_SIZE = (4 * KiB),
+
+ FSL_IMX7_PCIE_PHY_ADDR = 0x306D0000,
+ FSL_IMX7_PCIE_PHY_SIZE = (4 * KiB),
+
+ FSL_IMX7_SYSCNT_CTRL_ADDR = 0x306C0000,
+ FSL_IMX7_SYSCNT_CMP_ADDR = 0x306B0000,
+ FSL_IMX7_SYSCNT_RD_ADDR = 0x306A0000,
+
+ FSL_IMX7_PWM4_ADDR = 0x30690000,
+ FSL_IMX7_PWM3_ADDR = 0x30680000,
+ FSL_IMX7_PWM2_ADDR = 0x30670000,
+ FSL_IMX7_PWM1_ADDR = 0x30660000,
+ FSL_IMX7_PWMn_SIZE = (4 * KiB),
+
+ FSL_IMX7_FlEXTIMER2_ADDR = 0x30650000,
+ FSL_IMX7_FlEXTIMER1_ADDR = 0x30640000,
+ FSL_IMX7_FLEXTIMERn_SIZE = (4 * KiB),
+
+ FSL_IMX7_ECSPI4_ADDR = 0x30630000,
+
+ FSL_IMX7_ADC2_ADDR = 0x30620000,
+ FSL_IMX7_ADC1_ADDR = 0x30610000,
+ FSL_IMX7_ADCn_SIZE = (4 * KiB),
+
+ FSL_IMX7_AIPS2_CONF_ADDR = 0x305F0000,
+ FSL_IMX7_AIPS2_CONF_SIZE = (64 * KiB),
+
+ /* AIPS-2 End */
+
+ /* AIPS-1 Begin */
+
+ FSL_IMX7_CSU_ADDR = 0x303E0000,
+ FSL_IMX7_CSU_SIZE = (64 * KiB),
+
+ FSL_IMX7_RDC_ADDR = 0x303D0000,
+ FSL_IMX7_RDC_SIZE = (4 * KiB),
+
+ FSL_IMX7_SEMAPHORE2_ADDR = 0x303C0000,
+ FSL_IMX7_SEMAPHORE1_ADDR = 0x303B0000,
+ FSL_IMX7_SEMAPHOREn_SIZE = (4 * KiB),
+
+ FSL_IMX7_GPC_ADDR = 0x303A0000,
+
+ FSL_IMX7_SRC_ADDR = 0x30390000,
+
+ FSL_IMX7_CCM_ADDR = 0x30380000,
+
+ FSL_IMX7_SNVS_HP_ADDR = 0x30370000,
+
+ FSL_IMX7_ANALOG_ADDR = 0x30360000,
+
+ FSL_IMX7_OCOTP_ADDR = 0x30350000,
+ FSL_IMX7_OCOTP_SIZE = 0x10000,
+
+ FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000,
+ FSL_IMX7_IOMUXC_GPR_SIZE = (4 * KiB),
+
+ FSL_IMX7_IOMUXC_ADDR = 0x30330000,
+ FSL_IMX7_IOMUXC_SIZE = (4 * KiB),
+
+ FSL_IMX7_KPP_ADDR = 0x30320000,
+ FSL_IMX7_KPP_SIZE = (4 * KiB),
+
+ FSL_IMX7_ROMCP_ADDR = 0x30310000,
+ FSL_IMX7_ROMCP_SIZE = (4 * KiB),
+
+ FSL_IMX7_GPT4_ADDR = 0x30300000,
+ FSL_IMX7_GPT3_ADDR = 0x302F0000,
+ FSL_IMX7_GPT2_ADDR = 0x302E0000,
+ FSL_IMX7_GPT1_ADDR = 0x302D0000,
+
+ FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000,
+ FSL_IMX7_IOMUXC_LPSR_SIZE = (4 * KiB),
+
+ FSL_IMX7_WDOG4_ADDR = 0x302B0000,
+ FSL_IMX7_WDOG3_ADDR = 0x302A0000,
+ FSL_IMX7_WDOG2_ADDR = 0x30290000,
+ FSL_IMX7_WDOG1_ADDR = 0x30280000,
+
+ FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000,
+
+ FSL_IMX7_GPIO7_ADDR = 0x30260000,
+ FSL_IMX7_GPIO6_ADDR = 0x30250000,
+ FSL_IMX7_GPIO5_ADDR = 0x30240000,
+ FSL_IMX7_GPIO4_ADDR = 0x30230000,
+ FSL_IMX7_GPIO3_ADDR = 0x30220000,
+ FSL_IMX7_GPIO2_ADDR = 0x30210000,
+ FSL_IMX7_GPIO1_ADDR = 0x30200000,
+
+ FSL_IMX7_AIPS1_CONF_ADDR = 0x301F0000,
+ FSL_IMX7_AIPS1_CONF_SIZE = (64 * KiB),
- FSL_IMX7_A7MPCORE_ADDR = 0x31000000,
FSL_IMX7_A7MPCORE_DAP_ADDR = 0x30000000,
+ FSL_IMX7_A7MPCORE_DAP_SIZE = (1 * MiB),
- FSL_IMX7_PCIE_REG_ADDR = 0x33800000,
- FSL_IMX7_PCIE_REG_SIZE = 16 * 1024,
+ /* AIPS-1 End */
+
+ FSL_IMX7_EIM_CS0_ADDR = 0x28000000,
+ FSL_IMX7_EIM_CS0_SIZE = (128 * MiB),
+
+ FSL_IMX7_OCRAM_PXP_ADDR = 0x00940000,
+ FSL_IMX7_OCRAM_PXP_SIZE = (32 * KiB),
+
+ FSL_IMX7_OCRAM_EPDC_ADDR = 0x00920000,
+ FSL_IMX7_OCRAM_EPDC_SIZE = (128 * KiB),
+
+ FSL_IMX7_OCRAM_MEM_ADDR = 0x00900000,
+ FSL_IMX7_OCRAM_MEM_SIZE = (128 * KiB),
+
+ FSL_IMX7_TCMU_ADDR = 0x00800000,
+ FSL_IMX7_TCMU_SIZE = (32 * KiB),
+
+ FSL_IMX7_TCML_ADDR = 0x007F8000,
+ FSL_IMX7_TCML_SIZE = (32 * KiB),
+
+ FSL_IMX7_OCRAM_S_ADDR = 0x00180000,
+ FSL_IMX7_OCRAM_S_SIZE = (32 * KiB),
+
+ FSL_IMX7_CAAM_MEM_ADDR = 0x00100000,
+ FSL_IMX7_CAAM_MEM_SIZE = (32 * KiB),
- FSL_IMX7_GPR_ADDR = 0x30340000,
+ FSL_IMX7_ROM_ADDR = 0x00000000,
+ FSL_IMX7_ROM_SIZE = (96 * KiB),
};
enum FslIMX7IRQs {
@@ -207,10 +400,35 @@ enum FslIMX7IRQs {
FSL_IMX7_USB2_IRQ = 42,
FSL_IMX7_USB3_IRQ = 40,
- FSL_IMX7_PCI_INTA_IRQ = 122,
- FSL_IMX7_PCI_INTB_IRQ = 123,
- FSL_IMX7_PCI_INTC_IRQ = 124,
- FSL_IMX7_PCI_INTD_IRQ = 125,
+ FSL_IMX7_GPT1_IRQ = 55,
+ FSL_IMX7_GPT2_IRQ = 54,
+ FSL_IMX7_GPT3_IRQ = 53,
+ FSL_IMX7_GPT4_IRQ = 52,
+
+ FSL_IMX7_GPIO1_LOW_IRQ = 64,
+ FSL_IMX7_GPIO1_HIGH_IRQ = 65,
+ FSL_IMX7_GPIO2_LOW_IRQ = 66,
+ FSL_IMX7_GPIO2_HIGH_IRQ = 67,
+ FSL_IMX7_GPIO3_LOW_IRQ = 68,
+ FSL_IMX7_GPIO3_HIGH_IRQ = 69,
+ FSL_IMX7_GPIO4_LOW_IRQ = 70,
+ FSL_IMX7_GPIO4_HIGH_IRQ = 71,
+ FSL_IMX7_GPIO5_LOW_IRQ = 72,
+ FSL_IMX7_GPIO5_HIGH_IRQ = 73,
+ FSL_IMX7_GPIO6_LOW_IRQ = 74,
+ FSL_IMX7_GPIO6_HIGH_IRQ = 75,
+ FSL_IMX7_GPIO7_LOW_IRQ = 76,
+ FSL_IMX7_GPIO7_HIGH_IRQ = 77,
+
+ FSL_IMX7_WDOG1_IRQ = 78,
+ FSL_IMX7_WDOG2_IRQ = 79,
+ FSL_IMX7_WDOG3_IRQ = 10,
+ FSL_IMX7_WDOG4_IRQ = 109,
+
+ FSL_IMX7_PCI_INTA_IRQ = 125,
+ FSL_IMX7_PCI_INTB_IRQ = 124,
+ FSL_IMX7_PCI_INTC_IRQ = 123,
+ FSL_IMX7_PCI_INTD_IRQ = 122,
FSL_IMX7_UART7_IRQ = 126,
diff --git a/include/hw/arm/iotkit.h b/include/hw/arm/iotkit.h
deleted file mode 100644
index 3a8ee63908..0000000000
--- a/include/hw/arm/iotkit.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * ARM IoT Kit
- *
- * Copyright (c) 2018 Linaro Limited
- * Written by Peter Maydell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 or
- * (at your option) any later version.
- */
-
-/* This is a model of the Arm IoT Kit which is documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
- * It contains:
- * a Cortex-M33
- * the IDAU
- * some timers and watchdogs
- * two peripheral protection controllers
- * a memory protection controller
- * a security controller
- * a bus fabric which arranges that some parts of the address
- * space are secure and non-secure aliases of each other
- *
- * QEMU interface:
- * + QOM property "memory" is a MemoryRegion containing the devices provided
- * by the board model.
- * + QOM property "MAINCLK" is the frequency of the main system clock
- * + QOM property "EXP_NUMIRQ" sets the number of expansion interrupts
- * + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts, which
- * are wired to the NVIC lines 32 .. n+32
- * + sysbus MMIO region 0 is the "AHB Slave Expansion" which allows
- * bus master devices in the board model to make transactions into
- * all the devices and memory areas in the IoTKit
- * Controlling up to 4 AHB expansion PPBs which a system using the IoTKit
- * might provide:
- * + named GPIO outputs apb_ppcexp{0,1,2,3}_nonsec[0..15]
- * + named GPIO outputs apb_ppcexp{0,1,2,3}_ap[0..15]
- * + named GPIO outputs apb_ppcexp{0,1,2,3}_irq_enable
- * + named GPIO outputs apb_ppcexp{0,1,2,3}_irq_clear
- * + named GPIO inputs apb_ppcexp{0,1,2,3}_irq_status
- * Controlling each of the 4 expansion AHB PPCs which a system using the IoTKit
- * might provide:
- * + named GPIO outputs ahb_ppcexp{0,1,2,3}_nonsec[0..15]
- * + named GPIO outputs ahb_ppcexp{0,1,2,3}_ap[0..15]
- * + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_enable
- * + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_clear
- * + named GPIO inputs ahb_ppcexp{0,1,2,3}_irq_status
- * Controlling each of the 16 expansion MPCs which a system using the IoTKit
- * might provide:
- * + named GPIO inputs mpcexp_status[0..15]
- * Controlling each of the 16 expansion MSCs which a system using the IoTKit
- * might provide:
- * + named GPIO inputs mscexp_status[0..15]
- * + named GPIO outputs mscexp_clear[0..15]
- * + named GPIO outputs mscexp_ns[0..15]
- */
-
-#ifndef IOTKIT_H
-#define IOTKIT_H
-
-#include "hw/sysbus.h"
-#include "hw/arm/armv7m.h"
-#include "hw/misc/iotkit-secctl.h"
-#include "hw/misc/tz-ppc.h"
-#include "hw/misc/tz-mpc.h"
-#include "hw/timer/cmsdk-apb-timer.h"
-#include "hw/timer/cmsdk-apb-dualtimer.h"
-#include "hw/watchdog/cmsdk-apb-watchdog.h"
-#include "hw/misc/iotkit-sysctl.h"
-#include "hw/misc/iotkit-sysinfo.h"
-#include "hw/or-irq.h"
-#include "hw/core/split-irq.h"
-
-#define TYPE_IOTKIT "iotkit"
-#define IOTKIT(obj) OBJECT_CHECK(IoTKit, (obj), TYPE_IOTKIT)
-
-/* We have an IRQ splitter and an OR gate input for each external PPC
- * and the 2 internal PPCs
- */
-#define NUM_EXTERNAL_PPCS (IOTS_NUM_AHB_EXP_PPC + IOTS_NUM_APB_EXP_PPC)
-#define NUM_PPCS (NUM_EXTERNAL_PPCS + 2)
-
-typedef struct IoTKit {
- /*< private >*/
- SysBusDevice parent_obj;
-
- /*< public >*/
- ARMv7MState armv7m;
- IoTKitSecCtl secctl;
- TZPPC apb_ppc0;
- TZPPC apb_ppc1;
- TZMPC mpc;
- CMSDKAPBTIMER timer0;
- CMSDKAPBTIMER timer1;
- CMSDKAPBTIMER s32ktimer;
- qemu_or_irq ppc_irq_orgate;
- SplitIRQ sec_resp_splitter;
- SplitIRQ ppc_irq_splitter[NUM_PPCS];
- SplitIRQ mpc_irq_splitter[IOTS_NUM_EXP_MPC + IOTS_NUM_MPC];
- qemu_or_irq mpc_irq_orgate;
- qemu_or_irq nmi_orgate;
-
- CMSDKAPBDualTimer dualtimer;
-
- CMSDKAPBWatchdog s32kwatchdog;
- CMSDKAPBWatchdog nswatchdog;
- CMSDKAPBWatchdog swatchdog;
-
- IoTKitSysCtl sysctl;
- IoTKitSysCtl sysinfo;
-
- MemoryRegion container;
- MemoryRegion alias1;
- MemoryRegion alias2;
- MemoryRegion alias3;
- MemoryRegion sram0;
-
- qemu_irq *exp_irqs;
- qemu_irq ppc0_irq;
- qemu_irq ppc1_irq;
- qemu_irq sec_resp_cfg;
- qemu_irq sec_resp_cfg_in;
- qemu_irq nsc_cfg_in;
-
- qemu_irq irq_status_in[NUM_EXTERNAL_PPCS];
- qemu_irq mpcexp_status_in[IOTS_NUM_EXP_MPC];
-
- uint32_t nsccfg;
-
- /* Properties */
- MemoryRegion *board_memory;
- uint32_t exp_numirq;
- uint32_t mainclk_frq;
-} IoTKit;
-
-#endif
diff --git a/include/hw/arm/linux-boot-if.h b/include/hw/arm/linux-boot-if.h
index 7bbdfd1cc6..c85f33b2c5 100644
--- a/include/hw/arm/linux-boot-if.h
+++ b/include/hw/arm/linux-boot-if.h
@@ -9,16 +9,15 @@
#include "qom/object.h"
#define TYPE_ARM_LINUX_BOOT_IF "arm-linux-boot-if"
-#define ARM_LINUX_BOOT_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMLinuxBootIfClass, (klass), TYPE_ARM_LINUX_BOOT_IF)
-#define ARM_LINUX_BOOT_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMLinuxBootIfClass, (obj), TYPE_ARM_LINUX_BOOT_IF)
+typedef struct ARMLinuxBootIfClass ARMLinuxBootIfClass;
+DECLARE_CLASS_CHECKERS(ARMLinuxBootIfClass, ARM_LINUX_BOOT_IF,
+ TYPE_ARM_LINUX_BOOT_IF)
#define ARM_LINUX_BOOT_IF(obj) \
INTERFACE_CHECK(ARMLinuxBootIf, (obj), TYPE_ARM_LINUX_BOOT_IF)
typedef struct ARMLinuxBootIf ARMLinuxBootIf;
-typedef struct ARMLinuxBootIfClass {
+struct ARMLinuxBootIfClass {
/*< private >*/
InterfaceClass parent_class;
@@ -35,6 +34,6 @@ typedef struct ARMLinuxBootIfClass {
* (or for a CPU which doesn't support TrustZone)
*/
void (*arm_linux_init)(ARMLinuxBootIf *obj, bool secure_boot);
-} ARMLinuxBootIfClass;
+};
#endif
diff --git a/include/hw/arm/msf2-soc.h b/include/hw/arm/msf2-soc.h
index 3cfe5c76ee..9300664e8e 100644
--- a/include/hw/arm/msf2-soc.h
+++ b/include/hw/arm/msf2-soc.h
@@ -29,9 +29,12 @@
#include "hw/timer/mss-timer.h"
#include "hw/misc/msf2-sysreg.h"
#include "hw/ssi/mss-spi.h"
+#include "hw/net/msf2-emac.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_MSF2_SOC "msf2-soc"
-#define MSF2_SOC(obj) OBJECT_CHECK(MSF2State, (obj), TYPE_MSF2_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(MSF2State, MSF2_SOC)
#define MSF2_NUM_SPIS 2
#define MSF2_NUM_UARTS 2
@@ -43,25 +46,28 @@
*/
#define MSF2_NUM_TIMERS 2
-typedef struct MSF2State {
- /*< private >*/
+struct MSF2State {
SysBusDevice parent_obj;
- /*< public >*/
ARMv7MState armv7m;
- char *cpu_type;
char *part_name;
uint64_t envm_size;
uint64_t esram_size;
- uint32_t m3clk;
+ Clock *m3clk;
+ Clock *refclk;
uint8_t apb0div;
uint8_t apb1div;
MSF2SysregState sysreg;
MSSTimerState timer;
MSSSpiState spi[MSF2_NUM_SPIS];
-} MSF2State;
+ MSF2EmacState emac;
+
+ MemoryRegion nvm;
+ MemoryRegion nvm_alias;
+ MemoryRegion sram;
+};
#endif
diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h
new file mode 100644
index 0000000000..4e0d210188
--- /dev/null
+++ b/include/hw/arm/npcm7xx.h
@@ -0,0 +1,139 @@
+/*
+ * Nuvoton NPCM7xx SoC family.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_H
+#define NPCM7XX_H
+
+#include "hw/boards.h"
+#include "hw/adc/npcm7xx_adc.h"
+#include "hw/core/split-irq.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/gpio/npcm7xx_gpio.h"
+#include "hw/i2c/npcm7xx_smbus.h"
+#include "hw/mem/npcm7xx_mc.h"
+#include "hw/misc/npcm7xx_clk.h"
+#include "hw/misc/npcm7xx_gcr.h"
+#include "hw/misc/npcm7xx_mft.h"
+#include "hw/misc/npcm7xx_pwm.h"
+#include "hw/misc/npcm7xx_rng.h"
+#include "hw/net/npcm7xx_emc.h"
+#include "hw/net/npcm_gmac.h"
+#include "hw/nvram/npcm7xx_otp.h"
+#include "hw/timer/npcm7xx_timer.h"
+#include "hw/ssi/npcm7xx_fiu.h"
+#include "hw/ssi/npcm_pspi.h"
+#include "hw/usb/hcd-ehci.h"
+#include "hw/usb/hcd-ohci.h"
+#include "target/arm/cpu.h"
+#include "hw/sd/npcm7xx_sdhci.h"
+
+#define NPCM7XX_MAX_NUM_CPUS (2)
+
+/* The first half of the address space is reserved for DDR4 DRAM. */
+#define NPCM7XX_DRAM_BA (0x00000000)
+#define NPCM7XX_DRAM_SZ (2 * GiB)
+
+/* Magic addresses for setting up direct kernel booting and SMP boot stubs. */
+#define NPCM7XX_LOADER_START (0x00000000) /* Start of SDRAM */
+#define NPCM7XX_SMP_LOADER_START (0xffff0000) /* Boot ROM */
+#define NPCM7XX_SMP_BOOTREG_ADDR (0xf080013c) /* GCR.SCRPAD */
+#define NPCM7XX_GIC_CPU_IF_ADDR (0xf03fe100) /* GIC within A9 */
+#define NPCM7XX_BOARD_SETUP_ADDR (0xffff1000) /* Boot ROM */
+
+#define NPCM7XX_NR_PWM_MODULES 2
+
+struct NPCM7xxMachine {
+ MachineState parent;
+ /*
+     * PWM fan splitter. Each splitter connects to one PWM output and
+ * multiple MFT inputs.
+ */
+ SplitIRQ fan_splitter[NPCM7XX_NR_PWM_MODULES *
+ NPCM7XX_PWM_PER_MODULE];
+};
+
+#define TYPE_NPCM7XX_MACHINE MACHINE_TYPE_NAME("npcm7xx")
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxMachine, NPCM7XX_MACHINE)
+
+typedef struct NPCM7xxMachineClass {
+ MachineClass parent;
+
+ const char *soc_type;
+} NPCM7xxMachineClass;
+
+#define NPCM7XX_MACHINE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(NPCM7xxMachineClass, (klass), TYPE_NPCM7XX_MACHINE)
+#define NPCM7XX_MACHINE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(NPCM7xxMachineClass, (obj), TYPE_NPCM7XX_MACHINE)
+
+struct NPCM7xxState {
+ DeviceState parent;
+
+ ARMCPU cpu[NPCM7XX_MAX_NUM_CPUS];
+ A9MPPrivState a9mpcore;
+
+ MemoryRegion sram;
+ MemoryRegion irom;
+ MemoryRegion ram3;
+ MemoryRegion *dram;
+
+ NPCM7xxGCRState gcr;
+ NPCM7xxCLKState clk;
+ NPCM7xxTimerCtrlState tim[3];
+ NPCM7xxADCState adc;
+ NPCM7xxPWMState pwm[NPCM7XX_NR_PWM_MODULES];
+ NPCM7xxMFTState mft[8];
+ NPCM7xxOTPState key_storage;
+ NPCM7xxOTPState fuse_array;
+ NPCM7xxMCState mc;
+ NPCM7xxRNGState rng;
+ NPCM7xxGPIOState gpio[8];
+ NPCM7xxSMBusState smbus[16];
+ EHCISysBusState ehci;
+ OHCISysBusState ohci;
+ NPCM7xxFIUState fiu[2];
+ NPCM7xxEMCState emc[2];
+ NPCMGMACState gmac[2];
+ NPCM7xxSDHCIState mmc;
+ NPCMPSPIState pspi[2];
+};
+
+#define TYPE_NPCM7XX "npcm7xx"
+OBJECT_DECLARE_TYPE(NPCM7xxState, NPCM7xxClass, NPCM7XX)
+
+#define TYPE_NPCM730 "npcm730"
+#define TYPE_NPCM750 "npcm750"
+
+typedef struct NPCM7xxClass {
+ DeviceClass parent;
+
+ /* Bitmask of modules that are permanently disabled on this chip. */
+ uint32_t disabled_modules;
+ /* Number of CPU cores enabled in this SoC class (may be 1 or 2). */
+ uint32_t num_cpus;
+} NPCM7xxClass;
+
+/**
+ * npcm7xx_load_kernel - Loads memory with everything needed to boot
+ * @machine - The machine containing the SoC to be booted.
+ * @soc - The SoC containing the CPU to be booted.
+ *
+ * This will set up the ARM boot info structure for the specific NPCM7xx
+ * derivative and call arm_load_kernel() to set up loading of the kernel, etc.
+ * into memory, if requested by the user.
+ */
+void npcm7xx_load_kernel(MachineState *machine, NPCM7xxState *soc);
+
+#endif /* NPCM7XX_H */
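
As a usage sketch of the npcm7xx_load_kernel() helper declared above: a board model would normally create the SoC object, realize it, and only then call the helper so that -kernel/-initrd/-dtb options are honoured. This is a minimal illustration in the spirit of existing QEMU board code, not part of the patch; the board function name and the "dram-mr" link property name are assumptions.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "hw/qdev-core.h"
    #include "hw/arm/npcm7xx.h"

    static void example_npcm750_machine_init(MachineState *machine)
    {
        NPCM7xxState *soc = NPCM7XX(object_new(TYPE_NPCM750));

        /* Attach the SoC to the machine and hand it the board DRAM region
         * ("dram-mr" is assumed to be the SoC's link property name). */
        object_property_add_child(OBJECT(machine), "soc", OBJECT(soc));
        object_property_set_link(OBJECT(soc), "dram-mr", OBJECT(machine->ram),
                                 &error_abort);
        qdev_realize(DEVICE(soc), NULL, &error_fatal);

        /* Sets up arm_boot_info for this NPCM7xx derivative and loads the
         * kernel/initrd/dtb into memory if the user asked for them. */
        npcm7xx_load_kernel(machine, soc);
    }
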
diff --git a/include/hw/arm/nrf51.h b/include/hw/arm/nrf51.h
index 175bb6c301..de836beaa4 100644
--- a/include/hw/arm/nrf51.h
+++ b/include/hw/arm/nrf51.h
@@ -24,9 +24,10 @@
#define NRF51_IOMEM_BASE 0x40000000
#define NRF51_IOMEM_SIZE 0x20000000
+#define NRF51_PERIPHERAL_SIZE 0x00001000
#define NRF51_UART_BASE 0x40002000
+#define NRF51_TWI_BASE 0x40003000
#define NRF51_TIMER_BASE 0x40008000
-#define NRF51_TIMER_SIZE 0x00001000
#define NRF51_RNG_BASE 0x4000D000
#define NRF51_NVMC_BASE 0x4001E000
#define NRF51_GPIO_BASE 0x50000000
diff --git a/include/hw/arm/nrf51_soc.h b/include/hw/arm/nrf51_soc.h
index e06f0304b4..e52a56e75e 100644
--- a/include/hw/arm/nrf51_soc.h
+++ b/include/hw/arm/nrf51_soc.h
@@ -15,15 +15,17 @@
#include "hw/char/nrf51_uart.h"
#include "hw/misc/nrf51_rng.h"
#include "hw/gpio/nrf51_gpio.h"
+#include "hw/nvram/nrf51_nvm.h"
#include "hw/timer/nrf51_timer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_NRF51_SOC "nrf51-soc"
-#define NRF51_SOC(obj) \
- OBJECT_CHECK(NRF51State, (obj), TYPE_NRF51_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51State, NRF51_SOC)
#define NRF51_NUM_TIMERS 3
-typedef struct NRF51State {
+struct NRF51State {
/*< private >*/
SysBusDevice parent_obj;
@@ -32,6 +34,7 @@ typedef struct NRF51State {
NRF51UARTState uart;
NRF51RNGState rng;
+ NRF51NVMState nvm;
NRF51GPIOState gpio;
NRF51TimerState timer[NRF51_NUM_TIMERS];
@@ -39,6 +42,7 @@ typedef struct NRF51State {
MemoryRegion sram;
MemoryRegion flash;
MemoryRegion clock;
+ MemoryRegion twi;
uint32_t sram_size;
uint32_t flash_size;
@@ -47,7 +51,7 @@ typedef struct NRF51State {
MemoryRegion container;
-} NRF51State;
+ Clock *sysclk;
+};
#endif
-
diff --git a/include/hw/arm/omap.h b/include/hw/arm/omap.h
index e7fbd340f3..40ee8ea9e5 100644
--- a/include/hw/arm/omap.h
+++ b/include/hw/arm/omap.h
@@ -16,12 +16,15 @@
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef hw_omap_h
+
+#ifndef HW_ARM_OMAP_H
+#define HW_ARM_OMAP_H
+
#include "exec/memory.h"
-# define hw_omap_h "omap.h"
-#include "hw/irq.h"
+#include "hw/input/tsc2xxx.h"
#include "target/arm/cpu-qom.h"
#include "qemu/log.h"
+#include "qom/object.h"
# define OMAP_EMIFS_BASE 0x00000000
# define OMAP2_Q0_BASE 0x00000000
@@ -65,6 +68,55 @@ void omap_clk_setrate(omap_clk clk, int divide, int multiply);
int64_t omap_clk_getrate(omap_clk clk);
void omap_clk_reparent(omap_clk clk, omap_clk parent);
+/* omap_intc.c */
+#define TYPE_OMAP_INTC "common-omap-intc"
+typedef struct OMAPIntcState OMAPIntcState;
+DECLARE_INSTANCE_CHECKER(OMAPIntcState, OMAP_INTC, TYPE_OMAP_INTC)
+
+
+/*
+ * TODO: Ideally we should have a clock framework that
+ * lets us wire these clocks up with QOM properties or links.
+ *
+ * qdev should support a generic means of defining a 'port' with
+ * an arbitrary interface for connecting two devices. Then we
+ * could reframe the omap clock API in terms of clock ports,
+ * and get some type safety. For now the best qdev provides is
+ * passing an arbitrary pointer.
+ * (It's not possible to pass in the string which is the clock
+ * name, because this device does not have the necessary information
+ * (ie the struct omap_mpu_state_s*) to do the clockname to pointer
+ * translation.)
+ */
+void omap_intc_set_iclk(OMAPIntcState *intc, omap_clk clk);
+void omap_intc_set_fclk(OMAPIntcState *intc, omap_clk clk);
+
+/* omap_i2c.c */
+#define TYPE_OMAP_I2C "omap_i2c"
+OBJECT_DECLARE_SIMPLE_TYPE(OMAPI2CState, OMAP_I2C)
+
+
+/* TODO: clock framework (see above) */
+void omap_i2c_set_iclk(OMAPI2CState *i2c, omap_clk clk);
+void omap_i2c_set_fclk(OMAPI2CState *i2c, omap_clk clk);
+
+/* omap_gpio.c */
+#define TYPE_OMAP1_GPIO "omap-gpio"
+typedef struct Omap1GpioState Omap1GpioState;
+DECLARE_INSTANCE_CHECKER(Omap1GpioState, OMAP1_GPIO,
+ TYPE_OMAP1_GPIO)
+
+#define TYPE_OMAP2_GPIO "omap2-gpio"
+typedef struct Omap2GpioState Omap2GpioState;
+DECLARE_INSTANCE_CHECKER(Omap2GpioState, OMAP2_GPIO,
+ TYPE_OMAP2_GPIO)
+
+/* TODO: clock framework (see above) */
+void omap_gpio_set_clk(Omap1GpioState *gpio, omap_clk clk);
+
+void omap2_gpio_set_iclk(Omap2GpioState *gpio, omap_clk clk);
+void omap2_gpio_set_fclk(Omap2GpioState *gpio, uint8_t i, omap_clk clk);
+
/* OMAP2 l4 Interconnect */
struct omap_l4_s;
struct omap_l4_region_s {
@@ -672,18 +724,12 @@ struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem,
qemu_irq txdma, qemu_irq rxdma,
const char *label, Chardev *chr);
void omap_uart_reset(struct omap_uart_s *s);
-void omap_uart_attach(struct omap_uart_s *s, Chardev *chr);
struct omap_mpuio_s;
qemu_irq *omap_mpuio_in_get(struct omap_mpuio_s *s);
void omap_mpuio_out_set(struct omap_mpuio_s *s, int line, qemu_irq handler);
void omap_mpuio_key(struct omap_mpuio_s *s, int row, int col, int down);
-struct uWireSlave {
- uint16_t (*receive)(void *opaque);
- void (*send)(void *opaque, uint16_t data);
- void *opaque;
-};
struct omap_uwire_s;
void omap_uwire_attach(struct omap_uwire_s *s,
uWireSlave *slave, int chipselect);
@@ -826,8 +872,6 @@ struct omap_mpu_state_s {
MemoryRegion mpui_io_iomem;
MemoryRegion tap_iomem;
MemoryRegion imif_ram;
- MemoryRegion emiff_ram;
- MemoryRegion sdram;
MemoryRegion sram;
struct omap_dma_port_if_s {
@@ -839,7 +883,7 @@ struct omap_mpu_state_s {
hwaddr addr);
} port[__omap_dma_port_last];
- unsigned long sdram_size;
+ uint64_t sdram_size;
unsigned long sram_size;
/* MPUI-TIPB peripherals */
@@ -936,13 +980,11 @@ struct omap_mpu_state_s {
};
/* omap1.c */
-struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *system_memory,
- unsigned long sdram_size,
+struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *sdram,
const char *core);
/* omap2.c */
-struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sysmem,
- unsigned long sdram_size,
+struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sdram,
const char *core);
uint32_t omap_badwidth_read8(void *opaque, hwaddr addr);
@@ -966,7 +1008,8 @@ void omap_mpu_wakeup(void *opaque, int irq, int req);
__func__, paddr)
/* OMAP-specific Linux bootloader tags for the ATAG_BOARD area
- (Board-specifc tags are not here) */
+ * (Board-specific tags are not here)
+ */
#define OMAP_TAG_CLOCK 0x4f01
#define OMAP_TAG_MMC 0x4f02
#define OMAP_TAG_SERIAL_CONSOLE 0x4f03
@@ -995,4 +1038,4 @@ enum {
# define OMAP_MPUI_REG_MASK 0x000007ff
-#endif /* hw_omap_h */
+#endif
diff --git a/include/hw/arm/pxa.h b/include/hw/arm/pxa.h
index 0df1199caa..4c6caee113 100644
--- a/include/hw/arm/pxa.h
+++ b/include/hw/arm/pxa.h
@@ -12,6 +12,8 @@
#include "exec/memory.h"
#include "target/arm/cpu-qom.h"
+#include "hw/pcmcia.h"
+#include "qom/object.h"
/* Interrupt numbers */
# define PXA2XX_PIC_SSP3 0
@@ -85,18 +87,19 @@ PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem,
void pxa2xx_lcd_vsync_notifier(PXA2xxLCDState *s, qemu_irq handler);
/* pxa2xx_mmci.c */
-typedef struct PXA2xxMMCIState PXA2xxMMCIState;
+#define TYPE_PXA2XX_MMCI "pxa2xx-mmci"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxMMCIState, PXA2XX_MMCI)
+
PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem,
hwaddr base,
- BlockBackend *blk, qemu_irq irq,
- qemu_irq rx_dma, qemu_irq tx_dma);
+ qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma);
void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly,
qemu_irq coverswitch);
/* pxa2xx_pcmcia.c */
-typedef struct PXA2xxPCMCIAState PXA2xxPCMCIAState;
-PXA2xxPCMCIAState *pxa2xx_pcmcia_init(MemoryRegion *sysmem,
- hwaddr base);
+#define TYPE_PXA2XX_PCMCIA "pxa2xx-pcmcia"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPCMCIAState, PXA2XX_PCMCIA)
+
int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card);
int pxa2xx_pcmcia_detach(void *opaque);
void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq);
@@ -114,13 +117,17 @@ void pxa27x_register_keypad(PXA2xxKeyPadState *kp,
const struct keymap *map, int size);
/* pxa2xx.c */
-typedef struct PXA2xxI2CState PXA2xxI2CState;
+#define TYPE_PXA2XX_I2C "pxa2xx_i2c"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxI2CState, PXA2XX_I2C)
+
PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base,
qemu_irq irq, uint32_t page_size);
I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s);
typedef struct PXA2xxI2SState PXA2xxI2SState;
-typedef struct PXA2xxFIrState PXA2xxFIrState;
+
+#define TYPE_PXA2XX_FIR "pxa2xx-fir"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxFIrState, PXA2XX_FIR)
typedef struct {
ARMCPU *cpu;
@@ -183,10 +190,8 @@ struct PXA2xxI2SState {
};
# define PA_FMT "0x%08lx"
-# define REG_FMT "0x" TARGET_FMT_plx
-PXA2xxState *pxa270_init(MemoryRegion *address_space, unsigned int sdram_size,
- const char *revision);
-PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size);
+PXA2xxState *pxa270_init(unsigned int sdram_size, const char *revision);
+PXA2xxState *pxa255_init(unsigned int sdram_size);
#endif /* PXA_H */
diff --git a/include/hw/arm/raspberrypi-fw-defs.h b/include/hw/arm/raspberrypi-fw-defs.h
new file mode 100644
index 0000000000..8b404e0533
--- /dev/null
+++ b/include/hw/arm/raspberrypi-fw-defs.h
@@ -0,0 +1,173 @@
+/*
+ * Raspberry Pi firmware definitions
+ *
+ * Copyright (C) 2022 Auriga LLC, based on Linux kernel
+ * `include/soc/bcm2835/raspberrypi-firmware.h` (Copyright © 2015 Broadcom)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_
+#define INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_
+
+
+enum rpi_firmware_property_tag {
+ RPI_FWREQ_PROPERTY_END = 0,
+ RPI_FWREQ_GET_FIRMWARE_REVISION = 0x00000001,
+ RPI_FWREQ_GET_FIRMWARE_VARIANT = 0x00000002,
+ RPI_FWREQ_GET_FIRMWARE_HASH = 0x00000003,
+
+ RPI_FWREQ_SET_CURSOR_INFO = 0x00008010,
+ RPI_FWREQ_SET_CURSOR_STATE = 0x00008011,
+
+ RPI_FWREQ_GET_BOARD_MODEL = 0x00010001,
+ RPI_FWREQ_GET_BOARD_REVISION = 0x00010002,
+ RPI_FWREQ_GET_BOARD_MAC_ADDRESS = 0x00010003,
+ RPI_FWREQ_GET_BOARD_SERIAL = 0x00010004,
+ RPI_FWREQ_GET_ARM_MEMORY = 0x00010005,
+ RPI_FWREQ_GET_VC_MEMORY = 0x00010006,
+ RPI_FWREQ_GET_CLOCKS = 0x00010007,
+ RPI_FWREQ_GET_POWER_STATE = 0x00020001,
+ RPI_FWREQ_GET_TIMING = 0x00020002,
+ RPI_FWREQ_SET_POWER_STATE = 0x00028001,
+ RPI_FWREQ_GET_CLOCK_STATE = 0x00030001,
+ RPI_FWREQ_GET_CLOCK_RATE = 0x00030002,
+ RPI_FWREQ_GET_VOLTAGE = 0x00030003,
+ RPI_FWREQ_GET_MAX_CLOCK_RATE = 0x00030004,
+ RPI_FWREQ_GET_MAX_VOLTAGE = 0x00030005,
+ RPI_FWREQ_GET_TEMPERATURE = 0x00030006,
+ RPI_FWREQ_GET_MIN_CLOCK_RATE = 0x00030007,
+ RPI_FWREQ_GET_MIN_VOLTAGE = 0x00030008,
+ RPI_FWREQ_GET_TURBO = 0x00030009,
+ RPI_FWREQ_GET_MAX_TEMPERATURE = 0x0003000a,
+ RPI_FWREQ_GET_STC = 0x0003000b,
+ RPI_FWREQ_ALLOCATE_MEMORY = 0x0003000c,
+ RPI_FWREQ_LOCK_MEMORY = 0x0003000d,
+ RPI_FWREQ_UNLOCK_MEMORY = 0x0003000e,
+ RPI_FWREQ_RELEASE_MEMORY = 0x0003000f,
+ RPI_FWREQ_EXECUTE_CODE = 0x00030010,
+ RPI_FWREQ_EXECUTE_QPU = 0x00030011,
+ RPI_FWREQ_SET_ENABLE_QPU = 0x00030012,
+ RPI_FWREQ_GET_DISPMANX_RESOURCE_MEM_HANDLE = 0x00030014,
+ RPI_FWREQ_GET_EDID_BLOCK = 0x00030020,
+ RPI_FWREQ_GET_CUSTOMER_OTP = 0x00030021,
+ RPI_FWREQ_GET_EDID_BLOCK_DISPLAY = 0x00030023,
+ RPI_FWREQ_GET_DOMAIN_STATE = 0x00030030,
+ RPI_FWREQ_GET_THROTTLED = 0x00030046,
+ RPI_FWREQ_GET_CLOCK_MEASURED = 0x00030047,
+ RPI_FWREQ_NOTIFY_REBOOT = 0x00030048,
+ RPI_FWREQ_SET_CLOCK_STATE = 0x00038001,
+ RPI_FWREQ_SET_CLOCK_RATE = 0x00038002,
+ RPI_FWREQ_SET_VOLTAGE = 0x00038003,
+ RPI_FWREQ_SET_MAX_CLOCK_RATE = 0x00038004,
+ RPI_FWREQ_SET_MIN_CLOCK_RATE = 0x00038007,
+ RPI_FWREQ_SET_TURBO = 0x00038009,
+ RPI_FWREQ_SET_CUSTOMER_OTP = 0x00038021,
+ RPI_FWREQ_SET_DOMAIN_STATE = 0x00038030,
+ RPI_FWREQ_GET_GPIO_STATE = 0x00030041,
+ RPI_FWREQ_SET_GPIO_STATE = 0x00038041,
+ RPI_FWREQ_SET_SDHOST_CLOCK = 0x00038042,
+ RPI_FWREQ_GET_GPIO_CONFIG = 0x00030043,
+ RPI_FWREQ_SET_GPIO_CONFIG = 0x00038043,
+ RPI_FWREQ_GET_PERIPH_REG = 0x00030045,
+ RPI_FWREQ_SET_PERIPH_REG = 0x00038045,
+ RPI_FWREQ_GET_POE_HAT_VAL = 0x00030049,
+ RPI_FWREQ_SET_POE_HAT_VAL = 0x00038049,
+ RPI_FWREQ_SET_POE_HAT_VAL_OLD = 0x00030050,
+ RPI_FWREQ_NOTIFY_XHCI_RESET = 0x00030058,
+ RPI_FWREQ_GET_REBOOT_FLAGS = 0x00030064,
+ RPI_FWREQ_SET_REBOOT_FLAGS = 0x00038064,
+ RPI_FWREQ_NOTIFY_DISPLAY_DONE = 0x00030066,
+
+ /* Dispmanx TAGS */
+ RPI_FWREQ_FRAMEBUFFER_ALLOCATE = 0x00040001,
+ RPI_FWREQ_FRAMEBUFFER_BLANK = 0x00040002,
+ RPI_FWREQ_FRAMEBUFFER_GET_PHYSICAL_WIDTH_HEIGHT = 0x00040003,
+ RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_WIDTH_HEIGHT = 0x00040004,
+ RPI_FWREQ_FRAMEBUFFER_GET_DEPTH = 0x00040005,
+ RPI_FWREQ_FRAMEBUFFER_GET_PIXEL_ORDER = 0x00040006,
+ RPI_FWREQ_FRAMEBUFFER_GET_ALPHA_MODE = 0x00040007,
+ RPI_FWREQ_FRAMEBUFFER_GET_PITCH = 0x00040008,
+ RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_OFFSET = 0x00040009,
+ RPI_FWREQ_FRAMEBUFFER_GET_OVERSCAN = 0x0004000a,
+ RPI_FWREQ_FRAMEBUFFER_GET_PALETTE = 0x0004000b,
+ RPI_FWREQ_FRAMEBUFFER_GET_LAYER = 0x0004000c,
+ RPI_FWREQ_FRAMEBUFFER_GET_TRANSFORM = 0x0004000d,
+ RPI_FWREQ_FRAMEBUFFER_GET_VSYNC = 0x0004000e,
+ RPI_FWREQ_FRAMEBUFFER_GET_TOUCHBUF = 0x0004000f,
+ RPI_FWREQ_FRAMEBUFFER_GET_GPIOVIRTBUF = 0x00040010,
+ RPI_FWREQ_FRAMEBUFFER_RELEASE = 0x00048001,
+ RPI_FWREQ_FRAMEBUFFER_GET_DISPLAY_ID = 0x00040016,
+ RPI_FWREQ_FRAMEBUFFER_SET_DISPLAY_NUM = 0x00048013,
+ RPI_FWREQ_FRAMEBUFFER_GET_NUM_DISPLAYS = 0x00040013,
+ RPI_FWREQ_FRAMEBUFFER_GET_DISPLAY_SETTINGS = 0x00040014,
+ RPI_FWREQ_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT = 0x00044003,
+ RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT = 0x00044004,
+ RPI_FWREQ_FRAMEBUFFER_TEST_DEPTH = 0x00044005,
+ RPI_FWREQ_FRAMEBUFFER_TEST_PIXEL_ORDER = 0x00044006,
+ RPI_FWREQ_FRAMEBUFFER_TEST_ALPHA_MODE = 0x00044007,
+ RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_OFFSET = 0x00044009,
+ RPI_FWREQ_FRAMEBUFFER_TEST_OVERSCAN = 0x0004400a,
+ RPI_FWREQ_FRAMEBUFFER_TEST_PALETTE = 0x0004400b,
+ RPI_FWREQ_FRAMEBUFFER_TEST_LAYER = 0x0004400c,
+ RPI_FWREQ_FRAMEBUFFER_TEST_TRANSFORM = 0x0004400d,
+ RPI_FWREQ_FRAMEBUFFER_TEST_VSYNC = 0x0004400e,
+ RPI_FWREQ_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT = 0x00048003,
+ RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT = 0x00048004,
+ RPI_FWREQ_FRAMEBUFFER_SET_DEPTH = 0x00048005,
+ RPI_FWREQ_FRAMEBUFFER_SET_PIXEL_ORDER = 0x00048006,
+ RPI_FWREQ_FRAMEBUFFER_SET_ALPHA_MODE = 0x00048007,
+ RPI_FWREQ_FRAMEBUFFER_SET_PITCH = 0x00048008,
+ RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_OFFSET = 0x00048009,
+ RPI_FWREQ_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
+ RPI_FWREQ_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
+
+ RPI_FWREQ_FRAMEBUFFER_SET_TOUCHBUF = 0x0004801f,
+ RPI_FWREQ_FRAMEBUFFER_SET_GPIOVIRTBUF = 0x00048020,
+ RPI_FWREQ_FRAMEBUFFER_SET_VSYNC = 0x0004800e,
+ RPI_FWREQ_FRAMEBUFFER_SET_LAYER = 0x0004800c,
+ RPI_FWREQ_FRAMEBUFFER_SET_TRANSFORM = 0x0004800d,
+ RPI_FWREQ_FRAMEBUFFER_SET_BACKLIGHT = 0x0004800f,
+
+ RPI_FWREQ_VCHIQ_INIT = 0x00048010,
+
+ RPI_FWREQ_SET_PLANE = 0x00048015,
+ RPI_FWREQ_GET_DISPLAY_TIMING = 0x00040017,
+ RPI_FWREQ_SET_TIMING = 0x00048017,
+ RPI_FWREQ_GET_DISPLAY_CFG = 0x00040018,
+ RPI_FWREQ_SET_DISPLAY_POWER = 0x00048019,
+ RPI_FWREQ_GET_COMMAND_LINE = 0x00050001,
+ RPI_FWREQ_GET_DMA_CHANNELS = 0x00060001,
+};
+
+enum rpi_firmware_clk_id {
+ RPI_FIRMWARE_EMMC_CLK_ID = 1,
+ RPI_FIRMWARE_UART_CLK_ID,
+ RPI_FIRMWARE_ARM_CLK_ID,
+ RPI_FIRMWARE_CORE_CLK_ID,
+ RPI_FIRMWARE_V3D_CLK_ID,
+ RPI_FIRMWARE_H264_CLK_ID,
+ RPI_FIRMWARE_ISP_CLK_ID,
+ RPI_FIRMWARE_SDRAM_CLK_ID,
+ RPI_FIRMWARE_PIXEL_CLK_ID,
+ RPI_FIRMWARE_PWM_CLK_ID,
+ RPI_FIRMWARE_HEVC_CLK_ID,
+ RPI_FIRMWARE_EMMC2_CLK_ID,
+ RPI_FIRMWARE_M2MC_CLK_ID,
+ RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
+ RPI_FIRMWARE_VEC_CLK_ID,
+ RPI_FIRMWARE_NUM_CLK_ID,
+};
+
+struct rpi_firmware_property_tag_header {
+ uint32_t tag;
+ uint32_t buf_size;
+ uint32_t req_resp_size;
+};
+
+typedef struct rpi_firmware_prop_request {
+ struct rpi_firmware_property_tag_header hdr;
+ uint8_t payload[0];
+} rpi_firmware_prop_request_t;
+
+#endif /* INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_ */
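
The tags above travel over the BCM2835 mailbox property channel: a request buffer starts with its total size and a request code of zero, then carries one or more tags, each introduced by the rpi_firmware_property_tag_header defined above, and ends with RPI_FWREQ_PROPERTY_END. Below is a hedged sketch of that framing for a single GET_ARM_MEMORY query; the buffer-size/code words and the 8-byte (base, size) response payload follow the general mailbox property convention and are not stated by this header itself.

    #include "qemu/osdep.h"
    #include "hw/arm/raspberrypi-fw-defs.h"

    /* One GET_ARM_MEMORY query, as a guest would lay it out in memory. */
    struct example_get_arm_memory_req {
        uint32_t buf_size;                            /* total buffer size  */
        uint32_t code;                                /* 0 = request        */
        struct rpi_firmware_property_tag_header hdr;  /* tag framing        */
        uint32_t base;                                /* filled by firmware */
        uint32_t size;                                /* filled by firmware */
        uint32_t end_tag;                             /* PROPERTY_END       */
    };

    static const struct example_get_arm_memory_req req = {
        .buf_size = sizeof(struct example_get_arm_memory_req),
        .code     = 0,
        .hdr      = {
            .tag           = RPI_FWREQ_GET_ARM_MEMORY,
            .buf_size      = 2 * sizeof(uint32_t),  /* room for base + size */
            .req_resp_size = 0,                     /* request: no data yet */
        },
        .end_tag  = RPI_FWREQ_PROPERTY_END,
    };
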
diff --git a/include/hw/arm/raspi_platform.h b/include/hw/arm/raspi_platform.h
index 6467e88ae6..7bc4807fa5 100644
--- a/include/hw/arm/raspi_platform.h
+++ b/include/hw/arm/raspi_platform.h
@@ -18,38 +18,117 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ * Various undocumented addresses and names come from Herman Hermitage's VC4
+ * documentation:
+ * https://github.com/hermanhermitage/videocoreiv/wiki/MMIO-Register-map
*/
-#define MCORE_OFFSET 0x0000 /* Fake frame buffer device
- * (the multicore sync block) */
-#define IC0_OFFSET 0x2000
+#ifndef HW_ARM_RASPI_PLATFORM_H
+#define HW_ARM_RASPI_PLATFORM_H
+
+#include "hw/boards.h"
+#include "hw/arm/boot.h"
+
+/* Registered machine type (matches RPi Foundation bootloader and U-Boot) */
+#define MACH_TYPE_BCM2708 3138
+
+#define TYPE_RASPI_BASE_MACHINE MACHINE_TYPE_NAME("raspi-base")
+OBJECT_DECLARE_TYPE(RaspiBaseMachineState, RaspiBaseMachineClass,
+ RASPI_BASE_MACHINE)
+
+struct RaspiBaseMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+ /*< public >*/
+ struct arm_boot_info binfo;
+};
+
+struct RaspiBaseMachineClass {
+ /*< private >*/
+ MachineClass parent_obj;
+ /*< public >*/
+ uint32_t board_rev;
+};
+
+/* Common functions for raspberry pi machines */
+const char *board_soc_type(uint32_t board_rev);
+void raspi_machine_init(MachineState *machine);
+
+typedef struct BCM283XBaseState BCM283XBaseState;
+void raspi_base_machine_init(MachineState *machine,
+ BCM283XBaseState *soc);
+
+void raspi_machine_class_common_init(MachineClass *mc,
+ uint32_t board_rev);
+uint64_t board_ram_size(uint32_t board_rev);
+
+#define MSYNC_OFFSET 0x0000 /* Multicore Sync Block */
+#define CCPT_OFFSET 0x1000 /* Compact Camera Port 2 TX */
+#define INTE_OFFSET 0x2000 /* VC Interrupt controller */
#define ST_OFFSET 0x3000 /* System Timer */
+#define TXP_OFFSET 0x4000 /* Transposer */
+#define JPEG_OFFSET 0x5000
#define MPHI_OFFSET 0x6000 /* Message-based Parallel Host Intf. */
#define DMA_OFFSET 0x7000 /* DMA controller, channels 0-14 */
-#define ARM_OFFSET 0xB000 /* BCM2708 ARM control block */
+#define ARBA_OFFSET 0x9000
+#define BRDG_OFFSET 0xa000 /* RPiVid ASB for BCM2838 (BCM2711) */
+#define ARM_OFFSET 0xB000 /* ARM control block */
#define ARMCTRL_OFFSET (ARM_OFFSET + 0x000)
#define ARMCTRL_IC_OFFSET (ARM_OFFSET + 0x200) /* Interrupt controller */
-#define ARMCTRL_TIMER0_1_OFFSET (ARM_OFFSET + 0x400) /* Timer 0 and 1 */
+#define ARMCTRL_TIMER0_1_OFFSET (ARM_OFFSET + 0x400) /* Timer 0 and 1 (SP804) */
#define ARMCTRL_0_SBM_OFFSET (ARM_OFFSET + 0x800) /* User 0 (ARM) Semaphores
* Doorbells & Mailboxes */
-#define PM_OFFSET 0x100000 /* Power Management, Reset controller
- * and Watchdog registers */
-#define PCM_CLOCK_OFFSET 0x101098
+#define PM_OFFSET 0x100000 /* Power Management */
+#define CPRMAN_OFFSET 0x101000 /* Clock Management */
+#define AVS_OFFSET 0x103000 /* Audio Video Standard */
#define RNG_OFFSET 0x104000
#define GPIO_OFFSET 0x200000
-#define UART0_OFFSET 0x201000
-#define MMCI0_OFFSET 0x202000
-#define I2S_OFFSET 0x203000
-#define SPI0_OFFSET 0x204000
+#define UART0_OFFSET 0x201000 /* PL011 */
+#define MMCI0_OFFSET 0x202000 /* Legacy MMC */
+#define I2S_OFFSET 0x203000 /* PCM */
+#define SPI0_OFFSET 0x204000 /* SPI master */
#define BSC0_OFFSET 0x205000 /* BSC0 I2C/TWI */
-#define UART1_OFFSET 0x215000
-#define EMMC_OFFSET 0x300000
+#define PIXV0_OFFSET 0x206000
+#define PIXV1_OFFSET 0x207000
+#define DPI_OFFSET 0x208000
+#define DSI0_OFFSET 0x209000 /* Display Serial Interface */
+#define PWM_OFFSET 0x20c000
+#define PERM_OFFSET 0x20d000
+#define TEC_OFFSET 0x20e000
+#define OTP_OFFSET 0x20f000
+#define SLIM_OFFSET 0x210000 /* SLIMbus */
+#define CPG_OFFSET 0x211000
+#define THERMAL_OFFSET 0x212000
+#define AVSP_OFFSET 0x213000
+#define BSC_SL_OFFSET 0x214000 /* SPI slave (bootrom) */
+#define AUX_OFFSET 0x215000 /* AUX: UART1/SPI1/SPI2 */
+#define EMMC1_OFFSET 0x300000
+#define EMMC2_OFFSET 0x340000
+#define HVS_OFFSET 0x400000
#define SMI_OFFSET 0x600000
+#define DSI1_OFFSET 0x700000
+#define UCAM_OFFSET 0x800000
+#define CMI_OFFSET 0x802000
#define BSC1_OFFSET 0x804000 /* BSC1 I2C/TWI */
-#define USB_OFFSET 0x980000 /* DTC_OTG USB controller */
+#define BSC2_OFFSET 0x805000 /* BSC2 I2C/TWI */
+#define VECA_OFFSET 0x806000
+#define PIXV2_OFFSET 0x807000
+#define HDMI_OFFSET 0x808000
+#define HDCP_OFFSET 0x809000
+#define ARBR0_OFFSET 0x80a000
+#define DBUS_OFFSET 0x900000
+#define AVE0_OFFSET 0x910000
+#define USB_OTG_OFFSET 0x980000 /* DTC_OTG USB controller */
+#define V3D_OFFSET 0xc00000
+#define SDRAMC_OFFSET 0xe00000
+#define L2CC_OFFSET 0xe01000 /* Level 2 Cache controller */
+#define L1CC_OFFSET 0xe02000 /* Level 1 Cache controller */
+#define ARBR1_OFFSET 0xe04000
#define DMA15_OFFSET 0xE05000 /* DMA controller, channel 15 */
+#define DCRC_OFFSET 0xe07000
+#define AXIP_OFFSET 0xe08000
/* GPU interrupts */
#define INTERRUPT_TIMER0 0
@@ -109,7 +188,7 @@
#define INTERRUPT_SPI 54
#define INTERRUPT_I2SPCM 55
#define INTERRUPT_SDIO 56
-#define INTERRUPT_UART 57
+#define INTERRUPT_UART0 57
#define INTERRUPT_SLIMBUS 58
#define INTERRUPT_VEC 59
#define INTERRUPT_CPG 60
@@ -126,3 +205,15 @@
#define INTERRUPT_VPU1_HALTED 5
#define INTERRUPT_ILLEGAL_TYPE0 6
#define INTERRUPT_ILLEGAL_TYPE1 7
+
+/* Clock rates */
+#define RPI_FIRMWARE_EMMC_CLK_RATE 50000000
+#define RPI_FIRMWARE_UART_CLK_RATE 3000000
+/*
+ * TODO: this is really SoC-specific; we might want to
+ * set it per-SoC if it turns out any guests care.
+ */
+#define RPI_FIRMWARE_CORE_CLK_RATE 350000000
+#define RPI_FIRMWARE_DEFAULT_CLK_RATE 700000000
+
+#endif
diff --git a/include/hw/arm/sharpsl.h b/include/hw/arm/sharpsl.h
index 5bf6db1fa2..e986b28c52 100644
--- a/include/hw/arm/sharpsl.h
+++ b/include/hw/arm/sharpsl.h
@@ -3,11 +3,11 @@
*
* This file is licensed under the GNU GPL.
*/
+
#ifndef QEMU_SHARPSL_H
#define QEMU_SHARPSL_H
-#define zaurus_printf(format, ...) \
- fprintf(stderr, "%s: " format, __func__, ##__VA_ARGS__)
+#include "exec/hwaddr.h"
/* zaurus.c */
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index b07cadd0ef..5ec2e6c1a4 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -21,12 +21,21 @@
#include "hw/sysbus.h"
#include "hw/pci/pci.h"
+#include "qom/object.h"
-#define SMMU_PCI_BUS_MAX 256
-#define SMMU_PCI_DEVFN_MAX 256
-#define SMMU_PCI_DEVFN(sid) (sid & 0xFF)
+#define SMMU_PCI_BUS_MAX 256
+#define SMMU_PCI_DEVFN_MAX 256
+#define SMMU_PCI_DEVFN(sid) (sid & 0xFF)
-#define SMMU_MAX_VA_BITS 48
+/* VMSAv8-64 Translation constants and functions */
+#define VMSA_LEVELS 4
+#define VMSA_MAX_S2_CONCAT 16
+
+#define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1)
+#define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \
+ (VMSA_LEVELS - (lvl)))
+#define VMSA_IDXMSK(isz, strd, lvl) ((1ULL << \
+ VMSA_BIT_LVL(isz, strd, lvl)) - 1)
/*
* Page table walk error types
@@ -41,6 +50,7 @@ typedef enum {
} SMMUPTWEventType;
typedef struct SMMUPTWEventInfo {
+ int stage;
SMMUPTWEventType type;
dma_addr_t addr; /* fetched address that induced an abort, if any */
} SMMUPTWEventInfo;
@@ -50,26 +60,51 @@ typedef struct SMMUTransTableInfo {
uint64_t ttb; /* TT base address */
uint8_t tsz; /* input range, ie. 2^(64 -tsz)*/
uint8_t granule_sz; /* granule page shift */
+ bool had; /* hierarchical attribute disable */
} SMMUTransTableInfo;
+typedef struct SMMUTLBEntry {
+ IOMMUTLBEntry entry;
+ uint8_t level;
+ uint8_t granule;
+} SMMUTLBEntry;
+
+/* Stage-2 configuration. */
+typedef struct SMMUS2Cfg {
+ uint8_t tsz; /* Size of IPA input region (S2T0SZ) */
+ uint8_t sl0; /* Start level of translation (S2SL0) */
+ bool affd; /* AF Fault Disable (S2AFFD) */
+ bool record_faults; /* Record fault events (S2R) */
+ uint8_t granule_sz; /* Granule page shift (based on S2TG) */
+ uint8_t eff_ps; /* Effective PA output range (based on S2PS) */
+ uint16_t vmid; /* Virtual Machine ID (S2VMID) */
+ uint64_t vttb; /* Address of translation table base (S2TTB) */
+} SMMUS2Cfg;
+
/*
* Generic structure populated by derived SMMU devices
* after decoding the configuration information and used as
* input to the page table walk
*/
typedef struct SMMUTransCfg {
+ /* Shared fields between stage-1 and stage-2. */
int stage; /* translation stage */
- bool aa64; /* arch64 or aarch32 translation table */
bool disabled; /* smmu is disabled */
bool bypassed; /* translation is bypassed */
bool aborted; /* translation is aborted */
+ bool affd; /* AF fault disable */
+ uint32_t iotlb_hits; /* counts IOTLB hits */
+ uint32_t iotlb_misses; /* counts IOTLB misses*/
+ /* Used by stage-1 only. */
+ bool aa64; /* arch64 or aarch32 translation table */
+ bool record_faults; /* record fault events */
uint64_t ttb; /* TT base address */
uint8_t oas; /* output address width */
uint8_t tbi; /* Top Byte Ignore */
uint16_t asid;
SMMUTransTableInfo tt[2];
- uint32_t iotlb_hits; /* counts IOTLB hits for this asid */
- uint32_t iotlb_misses; /* counts IOTLB misses for this asid */
+ /* Used by stage-2 only. */
+ struct SMMUS2Cfg s2cfg;
} SMMUTransCfg;
typedef struct SMMUDevice {
@@ -80,24 +115,23 @@ typedef struct SMMUDevice {
AddressSpace as;
uint32_t cfg_cache_hits;
uint32_t cfg_cache_misses;
+ QLIST_ENTRY(SMMUDevice) next;
} SMMUDevice;
-typedef struct SMMUNotifierNode {
- SMMUDevice *sdev;
- QLIST_ENTRY(SMMUNotifierNode) next;
-} SMMUNotifierNode;
-
typedef struct SMMUPciBus {
PCIBus *bus;
- SMMUDevice *pbdev[0]; /* Parent array is sparse, so dynamically alloc */
+ SMMUDevice *pbdev[]; /* Parent array is sparse, so dynamically alloc */
} SMMUPciBus;
typedef struct SMMUIOTLBKey {
uint64_t iova;
uint16_t asid;
+ uint16_t vmid;
+ uint8_t tg;
+ uint8_t level;
} SMMUIOTLBKey;
-typedef struct SMMUState {
+struct SMMUState {
/* <private> */
SysBusDevice dev;
const char *mrtypename;
@@ -108,12 +142,12 @@ typedef struct SMMUState {
GHashTable *iotlb;
SMMUPciBus *smmu_pcibus_by_bus_num[SMMU_PCI_BUS_MAX];
PCIBus *pci_bus;
- QLIST_HEAD(, SMMUNotifierNode) notifiers_list;
+ QLIST_HEAD(, SMMUDevice) devices_with_notifiers;
uint8_t bus_num;
PCIBus *primary_bus;
-} SMMUState;
+};
-typedef struct {
+struct SMMUBaseClass {
/* <private> */
SysBusDeviceClass parent_class;
@@ -121,14 +155,10 @@ typedef struct {
DeviceRealize parent_realize;
-} SMMUBaseClass;
+};
#define TYPE_ARM_SMMU "arm-smmu"
-#define ARM_SMMU(obj) OBJECT_CHECK(SMMUState, (obj), TYPE_ARM_SMMU)
-#define ARM_SMMU_CLASS(klass) \
- OBJECT_CLASS_CHECK(SMMUBaseClass, (klass), TYPE_ARM_SMMU)
-#define ARM_SMMU_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SMMUBaseClass, (obj), TYPE_ARM_SMMU)
+OBJECT_DECLARE_TYPE(SMMUState, SMMUBaseClass, ARM_SMMU)
/* Return the SMMUPciBus handle associated to a PCI bus number */
SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num);
@@ -144,7 +174,7 @@ static inline uint16_t smmu_get_sid(SMMUDevice *sdev)
* pair, according to @cfg translation config
*/
int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
- IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info);
+ SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info);
/**
* select_tt - compute which translation table shall be used according to
@@ -157,14 +187,18 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid);
#define SMMU_IOTLB_MAX_SIZE 256
+SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
+ SMMUTransTableInfo *tt, hwaddr iova);
+void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *entry);
+SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova,
+ uint8_t tg, uint8_t level);
void smmu_iotlb_inv_all(SMMUState *s);
void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid);
-void smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova);
+void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid);
+void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
+ uint8_t tg, uint64_t num_pages, uint8_t ttl);
/* Unmap the range of all the notifiers registered to any IOMMU mr */
void smmu_inv_notifiers_all(SMMUState *s);
-/* Unmap the range of all the notifiers registered to @mr */
-void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr);
-
-#endif /* HW_ARM_SMMU_COMMON */
+#endif /* HW_ARM_SMMU_COMMON_H */
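
A quick worked example of the VMSAv8-64 constants introduced above: with a 4 KiB translation granule the granule shift is 12, so VMSA_STRIDE(12) = 12 - VMSA_LEVELS + 1 = 9, i.e. each of the four lookup levels resolves 9 input-address bits (a 4 KiB table holds 512 eight-byte descriptors); a 64 KiB granule gives a stride of 13. The checks below only restate the macro arithmetic and are illustrative, not part of the patch.

    #include "qemu/osdep.h"
    #include "hw/arm/smmu-common.h"

    /* 4 KiB granule: 512 descriptors x 8 bytes per table -> 9-bit stride. */
    QEMU_BUILD_BUG_ON(VMSA_STRIDE(12) != 9);
    /* 64 KiB granule: 8192 descriptors x 8 bytes per table -> 13-bit stride. */
    QEMU_BUILD_BUG_ON(VMSA_STRIDE(16) != 13);
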
diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h
index 36b2f45253..d183a62766 100644
--- a/include/hw/arm/smmuv3.h
+++ b/include/hw/arm/smmuv3.h
@@ -20,7 +20,7 @@
#define HW_ARM_SMMUV3_H
#include "hw/arm/smmu-common.h"
-#include "hw/registerfields.h"
+#include "qom/object.h"
#define TYPE_SMMUV3_IOMMU_MEMORY_REGION "smmuv3-iommu-memory-region"
@@ -32,7 +32,7 @@ typedef struct SMMUQueue {
uint8_t log2size;
} SMMUQueue;
-typedef struct SMMUv3State {
+struct SMMUv3State {
SMMUState smmu_state;
uint32_t features;
@@ -41,9 +41,11 @@ typedef struct SMMUv3State {
uint32_t idr[6];
uint32_t iidr;
+ uint32_t aidr;
uint32_t cr[3];
uint32_t cr0ack;
uint32_t statusr;
+ uint32_t gbpa;
uint32_t irq_ctrl;
uint32_t gerror;
uint32_t gerrorn;
@@ -60,7 +62,8 @@ typedef struct SMMUv3State {
qemu_irq irq[4];
QemuMutex mutex;
-} SMMUv3State;
+ char *stage;
+};
typedef enum {
SMMU_IRQ_EVTQ,
@@ -69,20 +72,19 @@ typedef enum {
SMMU_IRQ_GERROR,
} SMMUIrq;
-typedef struct {
+struct SMMUv3Class {
/*< private >*/
SMMUBaseClass smmu_base_class;
/*< public >*/
DeviceRealize parent_realize;
- DeviceReset parent_reset;
-} SMMUv3Class;
+ ResettablePhases parent_phases;
+};
#define TYPE_ARM_SMMUV3 "arm-smmuv3"
-#define ARM_SMMUV3(obj) OBJECT_CHECK(SMMUv3State, (obj), TYPE_ARM_SMMUV3)
-#define ARM_SMMUV3_CLASS(klass) \
- OBJECT_CLASS_CHECK(SMMUv3Class, (klass), TYPE_ARM_SMMUV3)
-#define ARM_SMMUV3_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SMMUv3Class, (obj), TYPE_ARM_SMMUV3)
+OBJECT_DECLARE_TYPE(SMMUv3State, SMMUv3Class, ARM_SMMUV3)
+
+#define STAGE1_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S1P)
+#define STAGE2_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S2P)
#endif
diff --git a/include/hw/arm/soc_dma.h b/include/hw/arm/soc_dma.h
index fae322997e..e93a7499a8 100644
--- a/include/hw/arm/soc_dma.h
+++ b/include/hw/arm/soc_dma.h
@@ -21,8 +21,7 @@
#ifndef HW_SOC_DMA_H
#define HW_SOC_DMA_H
-#include "exec/memory.h"
-#include "hw/irq.h"
+#include "exec/hwaddr.h"
struct soc_dma_s;
struct soc_dma_ch_s;
diff --git a/include/qemu/acl.h b/include/hw/arm/stm32f100_soc.h
index 73d2a71c8d..a74d7b369c 100644
--- a/include/qemu/acl.h
+++ b/include/hw/arm/stm32f100_soc.h
@@ -1,7 +1,7 @@
/*
- * QEMU access control list management
+ * STM32F100 SoC
*
- * Copyright (C) 2009 Red Hat, Inc
+ * Copyright (c) 2021 Alexandre Iooss <erdnaxe@crans.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -22,45 +22,40 @@
* THE SOFTWARE.
*/
-#ifndef QEMU_ACL_H
-#define QEMU_ACL_H
+#ifndef HW_ARM_STM32F100_SOC_H
+#define HW_ARM_STM32F100_SOC_H
-#include "qemu/queue.h"
+#include "hw/char/stm32f2xx_usart.h"
+#include "hw/ssi/stm32f2xx_spi.h"
+#include "hw/arm/armv7m.h"
+#include "qom/object.h"
+#include "hw/clock.h"
-typedef struct qemu_acl_entry qemu_acl_entry;
-typedef struct qemu_acl qemu_acl;
+#define TYPE_STM32F100_SOC "stm32f100-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F100State, STM32F100_SOC)
-struct qemu_acl_entry {
- char *match;
- int deny;
+#define STM_NUM_USARTS 3
+#define STM_NUM_SPIS 2
- QTAILQ_ENTRY(qemu_acl_entry) next;
-};
-
-struct qemu_acl {
- char *aclname;
- unsigned int nentries;
- QTAILQ_HEAD(,qemu_acl_entry) entries;
- int defaultDeny;
-};
+#define FLASH_BASE_ADDRESS 0x08000000
+#define FLASH_SIZE (128 * 1024)
+#define SRAM_BASE_ADDRESS 0x20000000
+#define SRAM_SIZE (8 * 1024)
-qemu_acl *qemu_acl_init(const char *aclname);
+struct STM32F100State {
+ SysBusDevice parent_obj;
-qemu_acl *qemu_acl_find(const char *aclname);
+ ARMv7MState armv7m;
-int qemu_acl_party_is_allowed(qemu_acl *acl,
- const char *party);
+ STM32F2XXUsartState usart[STM_NUM_USARTS];
+ STM32F2XXSPIState spi[STM_NUM_SPIS];
-void qemu_acl_reset(qemu_acl *acl);
+ MemoryRegion sram;
+ MemoryRegion flash;
+ MemoryRegion flash_alias;
-int qemu_acl_append(qemu_acl *acl,
- int deny,
- const char *match);
-int qemu_acl_insert(qemu_acl *acl,
- int deny,
- const char *match,
- int index);
-int qemu_acl_remove(qemu_acl *acl,
- const char *match);
+ Clock *sysclk;
+ Clock *refclk;
+};
-#endif /* QEMU_ACL_H */
+#endif
diff --git a/include/hw/arm/stm32f205_soc.h b/include/hw/arm/stm32f205_soc.h
index 922a733f88..4f4c8bbebc 100644
--- a/include/hw/arm/stm32f205_soc.h
+++ b/include/hw/arm/stm32f205_soc.h
@@ -32,10 +32,11 @@
#include "hw/or-irq.h"
#include "hw/ssi/stm32f2xx_spi.h"
#include "hw/arm/armv7m.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_STM32F205_SOC "stm32f205-soc"
-#define STM32F205_SOC(obj) \
- OBJECT_CHECK(STM32F205State, (obj), TYPE_STM32F205_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F205State, STM32F205_SOC)
#define STM_NUM_USARTS 6
#define STM_NUM_TIMERS 4
@@ -47,12 +48,8 @@
#define SRAM_BASE_ADDRESS 0x20000000
#define SRAM_SIZE (128 * 1024)
-typedef struct STM32F205State {
- /*< private >*/
+struct STM32F205State {
SysBusDevice parent_obj;
- /*< public >*/
-
- char *cpu_type;
ARMv7MState armv7m;
@@ -62,7 +59,14 @@ typedef struct STM32F205State {
STM32F2XXADCState adc[STM_NUM_ADCS];
STM32F2XXSPIState spi[STM_NUM_SPIS];
- qemu_or_irq *adc_irqs;
-} STM32F205State;
+ OrIRQState *adc_irqs;
+
+ MemoryRegion sram;
+ MemoryRegion flash;
+ MemoryRegion flash_alias;
+
+ Clock *sysclk;
+ Clock *refclk;
+};
#endif
diff --git a/include/hw/arm/stm32f405_soc.h b/include/hw/arm/stm32f405_soc.h
new file mode 100644
index 0000000000..d15c03c4b5
--- /dev/null
+++ b/include/hw/arm/stm32f405_soc.h
@@ -0,0 +1,75 @@
+/*
+ * STM32F405 SoC
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_ARM_STM32F405_SOC_H
+#define HW_ARM_STM32F405_SOC_H
+
+#include "hw/misc/stm32f4xx_syscfg.h"
+#include "hw/timer/stm32f2xx_timer.h"
+#include "hw/char/stm32f2xx_usart.h"
+#include "hw/adc/stm32f2xx_adc.h"
+#include "hw/misc/stm32f4xx_exti.h"
+#include "hw/or-irq.h"
+#include "hw/ssi/stm32f2xx_spi.h"
+#include "hw/arm/armv7m.h"
+#include "qom/object.h"
+
+#define TYPE_STM32F405_SOC "stm32f405-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F405State, STM32F405_SOC)
+
+#define STM_NUM_USARTS 7
+#define STM_NUM_TIMERS 4
+#define STM_NUM_ADCS 6
+#define STM_NUM_SPIS 6
+
+#define FLASH_BASE_ADDRESS 0x08000000
+#define FLASH_SIZE (1024 * 1024)
+#define SRAM_BASE_ADDRESS 0x20000000
+#define SRAM_SIZE (128 * 1024)
+#define CCM_BASE_ADDRESS 0x10000000
+#define CCM_SIZE (64 * 1024)
+
+struct STM32F405State {
+ SysBusDevice parent_obj;
+
+ ARMv7MState armv7m;
+
+ STM32F4xxSyscfgState syscfg;
+ STM32F4xxExtiState exti;
+ STM32F2XXUsartState usart[STM_NUM_USARTS];
+ STM32F2XXTimerState timer[STM_NUM_TIMERS];
+ OrIRQState adc_irqs;
+ STM32F2XXADCState adc[STM_NUM_ADCS];
+ STM32F2XXSPIState spi[STM_NUM_SPIS];
+
+ MemoryRegion ccm;
+ MemoryRegion sram;
+ MemoryRegion flash;
+ MemoryRegion flash_alias;
+
+ Clock *sysclk;
+ Clock *refclk;
+};
+
+#endif
diff --git a/include/hw/arm/stm32l4x5_soc.h b/include/hw/arm/stm32l4x5_soc.h
new file mode 100644
index 0000000000..c243fb0e7f
--- /dev/null
+++ b/include/hw/arm/stm32l4x5_soc.h
@@ -0,0 +1,74 @@
+/*
+ * STM32L4x5 SoC family
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This work is heavily inspired by the stm32f405_soc by Alistair Francis.
+ * Original code is licensed under the MIT License:
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_ARM_STM32L4x5_SOC_H
+#define HW_ARM_STM32L4x5_SOC_H
+
+#include "exec/memory.h"
+#include "hw/arm/armv7m.h"
+#include "hw/or-irq.h"
+#include "hw/misc/stm32l4x5_syscfg.h"
+#include "hw/misc/stm32l4x5_exti.h"
+#include "hw/misc/stm32l4x5_rcc.h"
+#include "hw/gpio/stm32l4x5_gpio.h"
+#include "hw/char/stm32l4x5_usart.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_SOC "stm32l4x5-soc"
+#define TYPE_STM32L4X5XC_SOC "stm32l4x5xc-soc"
+#define TYPE_STM32L4X5XE_SOC "stm32l4x5xe-soc"
+#define TYPE_STM32L4X5XG_SOC "stm32l4x5xg-soc"
+OBJECT_DECLARE_TYPE(Stm32l4x5SocState, Stm32l4x5SocClass, STM32L4X5_SOC)
+
+#define NUM_EXTI_OR_GATES 4
+
+#define STM_NUM_USARTS 3
+#define STM_NUM_UARTS 2
+
+struct Stm32l4x5SocState {
+ SysBusDevice parent_obj;
+
+ ARMv7MState armv7m;
+
+ Stm32l4x5ExtiState exti;
+ OrIRQState exti_or_gates[NUM_EXTI_OR_GATES];
+ Stm32l4x5SyscfgState syscfg;
+ Stm32l4x5RccState rcc;
+ Stm32l4x5GpioState gpio[NUM_GPIOS];
+ Stm32l4x5UsartBaseState usart[STM_NUM_USARTS];
+ Stm32l4x5UsartBaseState uart[STM_NUM_UARTS];
+ Stm32l4x5UsartBaseState lpuart;
+
+ MemoryRegion sram1;
+ MemoryRegion sram2;
+ MemoryRegion flash;
+ MemoryRegion flash_alias;
+};
+
+struct Stm32l4x5SocClass {
+ SysBusDeviceClass parent_class;
+
+ size_t flash_size;
+};
+
+#endif
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 4cc57a7ef6..bb486d36b1 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -30,28 +30,22 @@
#ifndef QEMU_ARM_VIRT_H
#define QEMU_ARM_VIRT_H
-#include "qemu-common.h"
#include "exec/hwaddr.h"
#include "qemu/notify.h"
#include "hw/boards.h"
-#include "hw/arm/arm.h"
+#include "hw/arm/boot.h"
+#include "hw/arm/bsa.h"
+#include "hw/block/flash.h"
#include "sysemu/kvm.h"
#include "hw/intc/arm_gicv3_common.h"
+#include "qom/object.h"
#define NUM_GICV2M_SPIS 64
#define NUM_VIRTIO_TRANSPORTS 32
#define NUM_SMMU_IRQS 4
-#define ARCH_GIC_MAINT_IRQ 9
-
-#define ARCH_TIMER_VIRT_IRQ 11
-#define ARCH_TIMER_S_EL1_IRQ 13
-#define ARCH_TIMER_NS_EL1_IRQ 14
-#define ARCH_TIMER_NS_EL2_IRQ 10
-
-#define VIRTUAL_PMU_IRQ 7
-
-#define PPI(irq) ((irq) + 16)
+/* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */
+#define PVTIME_SIZE_PER_CPU 64
enum {
VIRT_FLASH,
@@ -64,7 +58,6 @@ enum {
VIRT_GIC_VCPU,
VIRT_GIC_ITS,
VIRT_GIC_REDIST,
- VIRT_GIC_REDIST2,
VIRT_SMMU,
VIRT_UART,
VIRT_MMIO,
@@ -74,12 +67,23 @@ enum {
VIRT_PCIE_MMIO,
VIRT_PCIE_PIO,
VIRT_PCIE_ECAM,
- VIRT_PCIE_ECAM_HIGH,
VIRT_PLATFORM_BUS,
- VIRT_PCIE_MMIO_HIGH,
VIRT_GPIO,
VIRT_SECURE_UART,
VIRT_SECURE_MEM,
+ VIRT_SECURE_GPIO,
+ VIRT_PCDIMM_ACPI,
+ VIRT_ACPI_GED,
+ VIRT_NVDIMM_ACPI,
+ VIRT_PVTIME,
+ VIRT_LOWMEMMAP_LAST,
+};
+
+/* indices of IO regions located after the RAM */
+enum {
+ VIRT_HIGH_GIC_REDIST2 = VIRT_LOWMEMMAP_LAST,
+ VIRT_HIGH_PCIE_ECAM,
+ VIRT_HIGH_PCIE_MMIO,
};
typedef enum VirtIOMMUType {
@@ -88,67 +92,121 @@ typedef enum VirtIOMMUType {
VIRT_IOMMU_VIRTIO,
} VirtIOMMUType;
-typedef struct MemMapEntry {
- hwaddr base;
- hwaddr size;
-} MemMapEntry;
-
-typedef struct {
+typedef enum VirtMSIControllerType {
+ VIRT_MSI_CTRL_NONE,
+ VIRT_MSI_CTRL_GICV2M,
+ VIRT_MSI_CTRL_ITS,
+} VirtMSIControllerType;
+
+typedef enum VirtGICType {
+ VIRT_GIC_VERSION_MAX = 0,
+ VIRT_GIC_VERSION_HOST = 1,
+ /* The concrete GIC values have to match the GIC version number */
+ VIRT_GIC_VERSION_2 = 2,
+ VIRT_GIC_VERSION_3 = 3,
+ VIRT_GIC_VERSION_4 = 4,
+ VIRT_GIC_VERSION_NOSEL,
+} VirtGICType;
+
+#define VIRT_GIC_VERSION_2_MASK BIT(VIRT_GIC_VERSION_2)
+#define VIRT_GIC_VERSION_3_MASK BIT(VIRT_GIC_VERSION_3)
+#define VIRT_GIC_VERSION_4_MASK BIT(VIRT_GIC_VERSION_4)
+
+struct VirtMachineClass {
MachineClass parent;
bool disallow_affinity_adjustment;
bool no_its;
+ bool no_tcg_its;
bool no_pmu;
bool claim_edge_triggered_timers;
bool smbios_old_sys_ver;
+ bool no_highmem_compact;
bool no_highmem_ecam;
-} VirtMachineClass;
+ bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */
+ bool kvm_no_adjvtime;
+ bool no_kvm_steal_time;
+ bool acpi_expose_flash;
+ bool no_secure_gpio;
+ /* Machines < 6.2 have no support for describing cpu topology to guest */
+ bool no_cpu_topology;
+ bool no_tcg_lpa2;
+ bool no_ns_el2_virt_timer_irq;
+};
-typedef struct {
+struct VirtMachineState {
MachineState parent;
Notifier machine_done;
DeviceState *platform_bus_dev;
FWCfgState *fw_cfg;
+ PFlashCFI01 *flash[2];
bool secure;
bool highmem;
+ bool highmem_compact;
bool highmem_ecam;
+ bool highmem_mmio;
+ bool highmem_redists;
bool its;
+ bool tcg_its;
bool virt;
- int32_t gic_version;
+ bool ras;
+ bool mte;
+ bool dtb_randomness;
+ OnOffAuto acpi;
+ VirtGICType gic_version;
VirtIOMMUType iommu;
+ bool default_bus_bypass_iommu;
+ VirtMSIControllerType msi_controller;
+ uint16_t virtio_iommu_bdf;
struct arm_boot_info bootinfo;
- const MemMapEntry *memmap;
+ MemMapEntry *memmap;
+ char *pciehb_nodename;
const int *irqmap;
- int smp_cpus;
- void *fdt;
int fdt_size;
uint32_t clock_phandle;
uint32_t gic_phandle;
uint32_t msi_phandle;
uint32_t iommu_phandle;
int psci_conduit;
-} VirtMachineState;
+ hwaddr highest_gpa;
+ DeviceState *gic;
+ DeviceState *acpi_dev;
+ Notifier powerdown_notifier;
+ PCIBus *bus;
+ char *oem_id;
+ char *oem_table_id;
+ bool ns_el2_virt_timer_irq;
+};
-#define VIRT_ECAM_ID(high) (high ? VIRT_PCIE_ECAM_HIGH : VIRT_PCIE_ECAM)
+#define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
-#define VIRT_MACHINE(obj) \
- OBJECT_CHECK(VirtMachineState, (obj), TYPE_VIRT_MACHINE)
-#define VIRT_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtMachineClass, obj, TYPE_VIRT_MACHINE)
-#define VIRT_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtMachineClass, klass, TYPE_VIRT_MACHINE)
+OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE)
void virt_acpi_setup(VirtMachineState *vms);
+bool virt_is_acpi_enabled(VirtMachineState *vms);
+
+/* Return number of redistributors that fit in the specified region */
+static uint32_t virt_redist_capacity(VirtMachineState *vms, int region)
+{
+ uint32_t redist_size;
+
+ if (vms->gic_version == VIRT_GIC_VERSION_3) {
+ redist_size = GICV3_REDIST_SIZE;
+ } else {
+ redist_size = GICV4_REDIST_SIZE;
+ }
+ return vms->memmap[region].size / redist_size;
+}
/* Return the number of used redistributor regions */
static inline int virt_gicv3_redist_region_count(VirtMachineState *vms)
{
- uint32_t redist0_capacity =
- vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
+ uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST);
- assert(vms->gic_version == 3);
+ assert(vms->gic_version != VIRT_GIC_VERSION_2);
- return vms->smp_cpus > redist0_capacity ? 2 : 1;
+ return (MACHINE(vms)->smp.cpus > redist0_capacity &&
+ vms->highmem_redists) ? 2 : 1;
}
#endif /* QEMU_ARM_VIRT_H */
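A minimal sketch, not taken from the patch, of how board code might combine the helpers above: it relies on the VIRT_MACHINE() cast generated by OBJECT_DECLARE_TYPE(), the VIRT_GIC_REDIST memmap index already used in this header, and the MachineState smp topology; the function name itself is hypothetical.

    /* Hypothetical helper: does the configured vCPU count fit in the
     * redistributor space this machine will provide? */
    static bool virt_smp_cpus_fit_redists(MachineState *machine)
    {
        VirtMachineState *vms = VIRT_MACHINE(machine);
        uint32_t capacity;

        if (vms->gic_version == VIRT_GIC_VERSION_2) {
            return true;   /* GICv2 has no redistributors */
        }
        capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST);
        /* If a second (high) region is in use, the remainder is assumed to
         * fit there, mirroring virt_gicv3_redist_region_count() above. */
        return machine->smp.cpus <= capacity ||
               virt_gicv3_redist_region_count(vms) == 2;
    }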
diff --git a/include/hw/arm/xen_arch_hvm.h b/include/hw/arm/xen_arch_hvm.h
new file mode 100644
index 0000000000..8fd645e723
--- /dev/null
+++ b/include/hw/arm/xen_arch_hvm.h
@@ -0,0 +1,9 @@
+#ifndef HW_XEN_ARCH_ARM_HVM_H
+#define HW_XEN_ARCH_ARM_HVM_H
+
+#include <xen/hvm/ioreq.h>
+void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
+void arch_xen_set_memory(XenIOState *state,
+ MemoryRegionSection *section,
+ bool add);
+#endif
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index ec7c859d08..025beb5532 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -13,18 +13,45 @@
#define XLNX_VERSAL_H
#include "hw/sysbus.h"
-#include "hw/arm/arm.h"
+#include "hw/cpu/cluster.h"
+#include "hw/or-irq.h"
+#include "hw/sd/sdhci.h"
#include "hw/intc/arm_gicv3.h"
+#include "hw/char/pl011.h"
+#include "hw/dma/xlnx-zdma.h"
+#include "hw/net/cadence_gem.h"
+#include "hw/rtc/xlnx-zynqmp-rtc.h"
+#include "qom/object.h"
+#include "hw/usb/xlnx-usb-subsystem.h"
+#include "hw/misc/xlnx-versal-xramc.h"
+#include "hw/nvram/xlnx-bbram.h"
+#include "hw/nvram/xlnx-versal-efuse.h"
+#include "hw/ssi/xlnx-versal-ospi.h"
+#include "hw/dma/xlnx_csu_dma.h"
+#include "hw/misc/xlnx-versal-crl.h"
+#include "hw/misc/xlnx-versal-pmc-iou-slcr.h"
+#include "hw/misc/xlnx-versal-trng.h"
+#include "hw/net/xlnx-versal-canfd.h"
+#include "hw/misc/xlnx-versal-cfu.h"
+#include "hw/misc/xlnx-versal-cframe-reg.h"
+#include "target/arm/cpu.h"
#define TYPE_XLNX_VERSAL "xlnx-versal"
-#define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
+OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
#define XLNX_VERSAL_NR_ACPUS 2
+#define XLNX_VERSAL_NR_RCPUS 2
#define XLNX_VERSAL_NR_UARTS 2
#define XLNX_VERSAL_NR_GEMS 2
+#define XLNX_VERSAL_NR_ADMAS 8
+#define XLNX_VERSAL_NR_SDS 2
+#define XLNX_VERSAL_NR_XRAM 4
#define XLNX_VERSAL_NR_IRQS 192
+#define XLNX_VERSAL_NR_CANFD 2
+#define XLNX_VERSAL_CANFD_REF_CLK (24 * 1000 * 1000)
+#define XLNX_VERSAL_NR_CFRAME 15
-typedef struct Versal {
+struct Versal {
/*< private >*/
SysBusDevice parent_obj;
@@ -32,7 +59,8 @@ typedef struct Versal {
struct {
struct {
MemoryRegion mr;
- ARMCPU *cpu[XLNX_VERSAL_NR_ACPUS];
+ CPUClusterState cluster;
+ ARMCPU cpu[XLNX_VERSAL_NR_ACPUS];
GICv3State gic;
} apu;
} fpd;
@@ -48,16 +76,65 @@ typedef struct Versal {
MemoryRegion mr_ocm;
struct {
- SysBusDevice *uart[XLNX_VERSAL_NR_UARTS];
- SysBusDevice *gem[XLNX_VERSAL_NR_GEMS];
+ PL011State uart[XLNX_VERSAL_NR_UARTS];
+ CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
+ XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS];
+ VersalUsb2 usb;
+ CanBusState *canbus[XLNX_VERSAL_NR_CANFD];
+ XlnxVersalCANFDState canfd[XLNX_VERSAL_NR_CANFD];
} iou;
+
+ /* Real-time Processing Unit. */
+ struct {
+ MemoryRegion mr;
+ MemoryRegion mr_ps_alias;
+
+ CPUClusterState cluster;
+ ARMCPU cpu[XLNX_VERSAL_NR_RCPUS];
+ } rpu;
+
+ struct {
+ OrIRQState irq_orgate;
+ XlnxXramCtrl ctrl[XLNX_VERSAL_NR_XRAM];
+ } xram;
+
+ XlnxVersalCRL crl;
} lpd;
+ /* The Platform Management Controller subsystem. */
+ struct {
+ struct {
+ SDHCIState sd[XLNX_VERSAL_NR_SDS];
+ XlnxVersalPmcIouSlcr slcr;
+
+ struct {
+ XlnxVersalOspi ospi;
+ XlnxCSUDMA dma_src;
+ XlnxCSUDMA dma_dst;
+ MemoryRegion linear_mr;
+ OrIRQState irq_orgate;
+ } ospi;
+ } iou;
+
+ XlnxZynqMPRTC rtc;
+ XlnxVersalTRng trng;
+ XlnxBBRam bbram;
+ XlnxEFuse efuse;
+ XlnxVersalEFuseCtrl efuse_ctrl;
+ XlnxVersalEFuseCache efuse_cache;
+ XlnxVersalCFUAPB cfu_apb;
+ XlnxVersalCFUFDRO cfu_fdro;
+ XlnxVersalCFUSFR cfu_sfr;
+ XlnxVersalCFrameReg cframe[XLNX_VERSAL_NR_CFRAME];
+ XlnxVersalCFrameBcastReg cframe_bcast;
+
+ OrIRQState apb_irq_orgate;
+ } pmc;
+
struct {
MemoryRegion *mr_ddr;
- uint32_t psci_conduit;
} cfg;
-} Versal;
+};
/* Memory-map and IRQ definitions. Copied a subset from
* auto-generated files. */
@@ -68,12 +145,26 @@ typedef struct Versal {
#define VERSAL_TIMER_NS_EL1_IRQ 14
#define VERSAL_TIMER_NS_EL2_IRQ 10
+#define VERSAL_CRL_IRQ 10
#define VERSAL_UART0_IRQ_0 18
#define VERSAL_UART1_IRQ_0 19
+#define VERSAL_CANFD0_IRQ_0 20
+#define VERSAL_CANFD1_IRQ_0 21
+#define VERSAL_USB0_IRQ_0 22
#define VERSAL_GEM0_IRQ_0 56
#define VERSAL_GEM0_WAKE_IRQ_0 57
#define VERSAL_GEM1_IRQ_0 58
#define VERSAL_GEM1_WAKE_IRQ_0 59
+#define VERSAL_ADMA_IRQ_0 60
+#define VERSAL_XRAM_IRQ_0 79
+#define VERSAL_CFU_IRQ_0 120
+#define VERSAL_PMC_APB_IRQ 121
+#define VERSAL_OSPI_IRQ 124
+#define VERSAL_SD0_IRQ_0 126
+#define VERSAL_EFUSE_IRQ 139
+#define VERSAL_TRNG_IRQ 141
+#define VERSAL_RTC_ALARM_IRQ 142
+#define VERSAL_RTC_SECONDS_IRQ 143
/* Architecturally reserved IRQs suitable for virtualization. */
#define VERSAL_RSVD_IRQ_FIRST 111
@@ -91,14 +182,32 @@ typedef struct Versal {
#define MM_UART1 0xff010000U
#define MM_UART1_SIZE 0x10000
+#define MM_CANFD0 0xff060000U
+#define MM_CANFD0_SIZE 0x10000
+#define MM_CANFD1 0xff070000U
+#define MM_CANFD1_SIZE 0x10000
+
#define MM_GEM0 0xff0c0000U
#define MM_GEM0_SIZE 0x10000
#define MM_GEM1 0xff0d0000U
#define MM_GEM1_SIZE 0x10000
+#define MM_ADMA_CH0 0xffa80000U
+#define MM_ADMA_CH0_SIZE 0x10000
+
#define MM_OCM 0xfffc0000U
#define MM_OCM_SIZE 0x40000
+#define MM_XRAM 0xfe800000
+#define MM_XRAMC 0xff8e0000
+#define MM_XRAMC_SIZE 0x10000
+
+#define MM_USB2_CTRL_REGS 0xFF9D0000
+#define MM_USB2_CTRL_REGS_SIZE 0x10000
+
+#define MM_USB_0 0xFE200000
+#define MM_USB_0_SIZE 0x10000
+
#define MM_TOP_DDR 0x0
#define MM_TOP_DDR_SIZE 0x80000000U
#define MM_TOP_DDR_2 0x800000000ULL
@@ -119,4 +228,110 @@ typedef struct Versal {
#define MM_IOU_SCNTRS_SIZE 0x10000
#define MM_FPD_CRF 0xfd1a0000U
#define MM_FPD_CRF_SIZE 0x140000
+#define MM_FPD_FPD_APU 0xfd5c0000
+#define MM_FPD_FPD_APU_SIZE 0x100
+
+#define MM_PMC_PMC_IOU_SLCR 0xf1060000
+#define MM_PMC_PMC_IOU_SLCR_SIZE 0x10000
+
+#define MM_PMC_OSPI 0xf1010000
+#define MM_PMC_OSPI_SIZE 0x10000
+
+#define MM_PMC_OSPI_DAC 0xc0000000
+#define MM_PMC_OSPI_DAC_SIZE 0x20000000
+
+#define MM_PMC_OSPI_DMA_DST 0xf1011800
+#define MM_PMC_OSPI_DMA_SRC 0xf1011000
+
+#define MM_PMC_SD0 0xf1040000U
+#define MM_PMC_SD0_SIZE 0x10000
+#define MM_PMC_BBRAM_CTRL 0xf11f0000
+#define MM_PMC_BBRAM_CTRL_SIZE 0x00050
+#define MM_PMC_EFUSE_CTRL 0xf1240000
+#define MM_PMC_EFUSE_CTRL_SIZE 0x00104
+#define MM_PMC_EFUSE_CACHE 0xf1250000
+#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00
+
+#define MM_PMC_CFU_APB 0xf12b0000
+#define MM_PMC_CFU_APB_SIZE 0x10000
+#define MM_PMC_CFU_STREAM 0xf12c0000
+#define MM_PMC_CFU_STREAM_SIZE 0x1000
+#define MM_PMC_CFU_SFR 0xf12c1000
+#define MM_PMC_CFU_SFR_SIZE 0x1000
+#define MM_PMC_CFU_FDRO 0xf12c2000
+#define MM_PMC_CFU_FDRO_SIZE 0x1000
+#define MM_PMC_CFU_STREAM_2 0xf1f80000
+#define MM_PMC_CFU_STREAM_2_SIZE 0x40000
+
+#define MM_PMC_CFRAME0_REG 0xf12d0000
+#define MM_PMC_CFRAME0_REG_SIZE 0x1000
+#define MM_PMC_CFRAME0_FDRI 0xf12d1000
+#define MM_PMC_CFRAME0_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME1_REG 0xf12d2000
+#define MM_PMC_CFRAME1_REG_SIZE 0x1000
+#define MM_PMC_CFRAME1_FDRI 0xf12d3000
+#define MM_PMC_CFRAME1_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME2_REG 0xf12d4000
+#define MM_PMC_CFRAME2_REG_SIZE 0x1000
+#define MM_PMC_CFRAME2_FDRI 0xf12d5000
+#define MM_PMC_CFRAME2_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME3_REG 0xf12d6000
+#define MM_PMC_CFRAME3_REG_SIZE 0x1000
+#define MM_PMC_CFRAME3_FDRI 0xf12d7000
+#define MM_PMC_CFRAME3_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME4_REG 0xf12d8000
+#define MM_PMC_CFRAME4_REG_SIZE 0x1000
+#define MM_PMC_CFRAME4_FDRI 0xf12d9000
+#define MM_PMC_CFRAME4_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME5_REG 0xf12da000
+#define MM_PMC_CFRAME5_REG_SIZE 0x1000
+#define MM_PMC_CFRAME5_FDRI 0xf12db000
+#define MM_PMC_CFRAME5_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME6_REG 0xf12dc000
+#define MM_PMC_CFRAME6_REG_SIZE 0x1000
+#define MM_PMC_CFRAME6_FDRI 0xf12dd000
+#define MM_PMC_CFRAME6_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME7_REG 0xf12de000
+#define MM_PMC_CFRAME7_REG_SIZE 0x1000
+#define MM_PMC_CFRAME7_FDRI 0xf12df000
+#define MM_PMC_CFRAME7_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME8_REG 0xf12e0000
+#define MM_PMC_CFRAME8_REG_SIZE 0x1000
+#define MM_PMC_CFRAME8_FDRI 0xf12e1000
+#define MM_PMC_CFRAME8_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME9_REG 0xf12e2000
+#define MM_PMC_CFRAME9_REG_SIZE 0x1000
+#define MM_PMC_CFRAME9_FDRI 0xf12e3000
+#define MM_PMC_CFRAME9_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME10_REG 0xf12e4000
+#define MM_PMC_CFRAME10_REG_SIZE 0x1000
+#define MM_PMC_CFRAME10_FDRI 0xf12e5000
+#define MM_PMC_CFRAME10_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME11_REG 0xf12e6000
+#define MM_PMC_CFRAME11_REG_SIZE 0x1000
+#define MM_PMC_CFRAME11_FDRI 0xf12e7000
+#define MM_PMC_CFRAME11_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME12_REG 0xf12e8000
+#define MM_PMC_CFRAME12_REG_SIZE 0x1000
+#define MM_PMC_CFRAME12_FDRI 0xf12e9000
+#define MM_PMC_CFRAME12_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME13_REG 0xf12ea000
+#define MM_PMC_CFRAME13_REG_SIZE 0x1000
+#define MM_PMC_CFRAME13_FDRI 0xf12eb000
+#define MM_PMC_CFRAME13_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME14_REG 0xf12ec000
+#define MM_PMC_CFRAME14_REG_SIZE 0x1000
+#define MM_PMC_CFRAME14_FDRI 0xf12ed000
+#define MM_PMC_CFRAME14_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME_BCAST_REG 0xf12ee000
+#define MM_PMC_CFRAME_BCAST_REG_SIZE 0x1000
+#define MM_PMC_CFRAME_BCAST_FDRI 0xf12ef000
+#define MM_PMC_CFRAME_BCAST_FDRI_SIZE 0x1000
+
+#define MM_PMC_CRP 0xf1260000U
+#define MM_PMC_CRP_SIZE 0x10000
+#define MM_PMC_RTC 0xf12a0000
+#define MM_PMC_RTC_SIZE 0x10000
+#define MM_PMC_TRNG 0xf1230000
+#define MM_PMC_TRNG_SIZE 0x10000
#endif
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 591515c760..48f7948092 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -16,35 +16,47 @@
*/
#ifndef XLNX_ZYNQMP_H
+#define XLNX_ZYNQMP_H
-#include "qemu-common.h"
-#include "hw/arm/arm.h"
#include "hw/intc/arm_gic.h"
#include "hw/net/cadence_gem.h"
#include "hw/char/cadence_uart.h"
-#include "hw/ide/pci.h"
-#include "hw/ide/ahci.h"
+#include "hw/net/xlnx-zynqmp-can.h"
+#include "hw/ide/ahci-sysbus.h"
#include "hw/sd/sdhci.h"
#include "hw/ssi/xilinx_spips.h"
#include "hw/dma/xlnx_dpdma.h"
#include "hw/dma/xlnx-zdma.h"
#include "hw/display/xlnx_dp.h"
#include "hw/intc/xlnx-zynqmp-ipi.h"
-#include "hw/timer/xlnx-zynqmp-rtc.h"
+#include "hw/rtc/xlnx-zynqmp-rtc.h"
#include "hw/cpu/cluster.h"
-
-#define TYPE_XLNX_ZYNQMP "xlnx,zynqmp"
-#define XLNX_ZYNQMP(obj) OBJECT_CHECK(XlnxZynqMPState, (obj), \
- TYPE_XLNX_ZYNQMP)
+#include "target/arm/cpu.h"
+#include "qom/object.h"
+#include "net/can_emu.h"
+#include "hw/dma/xlnx_csu_dma.h"
+#include "hw/nvram/xlnx-bbram.h"
+#include "hw/nvram/xlnx-zynqmp-efuse.h"
+#include "hw/or-irq.h"
+#include "hw/misc/xlnx-zynqmp-apu-ctrl.h"
+#include "hw/misc/xlnx-zynqmp-crf.h"
+#include "hw/timer/cadence_ttc.h"
+#include "hw/usb/hcd-dwc3.h"
+
+#define TYPE_XLNX_ZYNQMP "xlnx-zynqmp"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
#define XLNX_ZYNQMP_NUM_APU_CPUS 4
#define XLNX_ZYNQMP_NUM_RPU_CPUS 2
#define XLNX_ZYNQMP_NUM_GEMS 4
#define XLNX_ZYNQMP_NUM_UARTS 2
+#define XLNX_ZYNQMP_NUM_CAN 2
+#define XLNX_ZYNQMP_CAN_REF_CLK (24 * 1000 * 1000)
#define XLNX_ZYNQMP_NUM_SDHCI 2
#define XLNX_ZYNQMP_NUM_SPIS 2
#define XLNX_ZYNQMP_NUM_GDMA_CH 8
#define XLNX_ZYNQMP_NUM_ADMA_CH 8
+#define XLNX_ZYNQMP_NUM_USB 2
#define XLNX_ZYNQMP_NUM_QSPI_BUS 2
#define XLNX_ZYNQMP_NUM_QSPI_BUS_CS 2
@@ -56,7 +68,8 @@
#define XLNX_ZYNQMP_GIC_REGIONS 6
-/* ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
+/*
+ * ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
* and under-decodes the 64k region. This mirrors the 4k regions to every 4k
* aligned address in the 64k region. To implement each GIC region needs a
* number of memory region aliases.
@@ -73,7 +86,14 @@
#define XLNX_ZYNQMP_MAX_RAM_SIZE (XLNX_ZYNQMP_MAX_LOW_RAM_SIZE + \
XLNX_ZYNQMP_MAX_HIGH_RAM_SIZE)
-typedef struct XlnxZynqMPState {
+#define XLNX_ZYNQMP_NUM_TTC 4
+
+/*
+ * Unimplemented mmio regions needed to boot some images.
+ */
+#define XLNX_ZYNQMP_NUM_UNIMP_AREAS 1
+
+struct XlnxZynqMPState {
/*< private >*/
DeviceState parent_obj;
@@ -89,9 +109,15 @@ typedef struct XlnxZynqMPState {
MemoryRegion *ddr_ram;
MemoryRegion ddr_ram_low, ddr_ram_high;
+ XlnxBBRam bbram;
+ XlnxEFuse efuse;
+ XlnxZynqMPEFuse efuse_ctrl;
+
+ MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS];
CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS];
CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS];
+ XlnxZynqMPCANState can[XLNX_ZYNQMP_NUM_CAN];
SysbusAHCIState sata;
SDHCIState sdhci[XLNX_ZYNQMP_NUM_SDHCI];
XilinxSPIPS spi[XLNX_ZYNQMP_NUM_SPIS];
@@ -102,6 +128,12 @@ typedef struct XlnxZynqMPState {
XlnxZynqMPRTC rtc;
XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH];
XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH];
+ XlnxCSUDMA qspi_dma;
+ OrIRQState qspi_irq_orgate;
+ XlnxZynqMPAPUCtrl apu_ctrl;
+ XlnxZynqMPCRF crf;
+ CadenceTTCState ttc[XLNX_ZYNQMP_NUM_TTC];
+ USBDWC3 usb[XLNX_ZYNQMP_NUM_USB];
char *boot_cpu;
ARMCPU *boot_cpu_ptr;
@@ -110,9 +142,9 @@ typedef struct XlnxZynqMPState {
bool secure;
/* Has the ARM Virtualization extensions? */
bool virt;
- /* Has the RPU subsystem? */
- bool has_rpu;
-} XlnxZynqMPState;
-#define XLNX_ZYNQMP_H
+ /* CAN bus. */
+ CanBusState *canbus[XLNX_ZYNQMP_NUM_CAN];
+};
+
#endif
diff --git a/include/hw/audio/asc.h b/include/hw/audio/asc.h
new file mode 100644
index 0000000000..04fac270b6
--- /dev/null
+++ b/include/hw/audio/asc.h
@@ -0,0 +1,85 @@
+/*
+ * QEMU Apple Sound Chip emulation
+ *
+ * Apple Sound Chip (ASC) 344S0063
+ * Enhanced Apple Sound Chip (EASC) 343S1063
+ *
+ * Copyright (c) 2012-2018 Laurent Vivier <laurent@vivier.eu>
+ * Copyright (c) 2022 Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_AUDIO_ASC_H
+#define HW_AUDIO_ASC_H
+
+#include "hw/sysbus.h"
+#include "audio/audio.h"
+
+#define ASC_FREQ 22257
+
+enum {
+ ASC_TYPE_ASC = 0, /* original discrete Apple Sound Chip */
+ ASC_TYPE_EASC = 1 /* discrete Enhanced Apple Sound Chip */
+};
+
+#define ASC_FIFO_OFFSET 0x0
+#define ASC_FIFO_SIZE 0x400
+
+#define ASC_REG_OFFSET 0x800
+#define ASC_REG_SIZE 0x60
+
+#define ASC_EXTREG_OFFSET 0xf00
+#define ASC_EXTREG_SIZE 0x20
+
+typedef struct ASCFIFOState {
+ int index;
+
+ MemoryRegion mem_fifo;
+ uint8_t fifo[ASC_FIFO_SIZE];
+ uint8_t int_status;
+
+ int cnt;
+ int wptr;
+ int rptr;
+
+ MemoryRegion mem_extregs;
+ uint8_t extregs[ASC_EXTREG_SIZE];
+
+ int xa_cnt;
+ uint8_t xa_val;
+ uint8_t xa_flags;
+ int16_t xa_last[2];
+} ASCFIFOState;
+
+struct ASCState {
+ SysBusDevice parent_obj;
+
+ uint8_t type;
+ MemoryRegion asc;
+ MemoryRegion mem_fifo;
+ MemoryRegion mem_regs;
+ MemoryRegion mem_extregs;
+
+ QEMUSoundCard card;
+ SWVoiceOut *voice;
+ uint8_t *mixbuf;
+ int samples;
+ int shift;
+
+ uint8_t *silentbuf;
+
+ /* Time when we were last able to generate samples */
+ int64_t fifo_empty_ns;
+
+ qemu_irq irq;
+
+ ASCFIFOState fifos[2];
+
+ uint8_t regs[ASC_REG_SIZE];
+};
+
+#define TYPE_ASC "apple-sound-chip"
+OBJECT_DECLARE_SIMPLE_TYPE(ASCState, ASC)
+
+#endif
diff --git a/include/hw/audio/pcspk.h b/include/hw/audio/pcspk.h
index 172afbf146..6be75a6b86 100644
--- a/include/hw/audio/pcspk.h
+++ b/include/hw/audio/pcspk.h
@@ -25,23 +25,6 @@
#ifndef HW_PCSPK_H
#define HW_PCSPK_H
-#include "hw/hw.h"
-#include "hw/isa/isa.h"
-
#define TYPE_PC_SPEAKER "isa-pcspk"
-static inline ISADevice *pcspk_init(ISABus *bus, ISADevice *pit)
-{
- DeviceState *dev;
- ISADevice *isadev;
-
- isadev = isa_create(bus, TYPE_PC_SPEAKER);
- dev = DEVICE(isadev);
- qdev_prop_set_uint32(dev, "iobase", 0x61);
- object_property_set_link(OBJECT(dev), OBJECT(pit), "pit", NULL);
- qdev_init_nofail(dev);
-
- return isadev;
-}
-
#endif /* HW_PCSPK_H */
diff --git a/include/hw/audio/soundhw.h b/include/hw/audio/soundhw.h
index 119f7d78d5..474c5ff94e 100644
--- a/include/hw/audio/soundhw.h
+++ b/include/hw/audio/soundhw.h
@@ -1,13 +1,13 @@
-#ifndef HW_AUDIO_H
-#define HW_AUDIO_H
-
-void isa_register_soundhw(const char *name, const char *descr,
- int (*init_isa)(ISABus *bus));
+#ifndef HW_SOUNDHW_H
+#define HW_SOUNDHW_H
void pci_register_soundhw(const char *name, const char *descr,
- int (*init_pci)(PCIBus *bus));
+ int (*init_pci)(PCIBus *bus, const char *audiodev));
+void deprecated_register_soundhw(const char *name, const char *descr,
+ int isa, const char *typename);
void soundhw_init(void);
-void select_soundhw(const char *optarg);
+void show_valid_soundhw(void);
+void select_soundhw(const char *name, const char *audiodev);
#endif
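A sketch, under stated assumptions, of how a PCI sound card could register itself against the updated init_pci callback, which now receives the -audiodev id: the "mycard" device type, its "audiodev" property and the registration function below are hypothetical; pci_new()/pci_realize_and_unref() and qdev_prop_set_string() are the usual qdev helpers.

    #include "hw/pci/pci.h"
    #include "hw/qdev-properties.h"
    #include "qapi/error.h"

    /* Hypothetical PCI device type that exposes an "audiodev" property. */
    static int mycard_init_pci(PCIBus *bus, const char *audiodev)
    {
        PCIDevice *dev = pci_new(-1, "mycard");

        qdev_prop_set_string(DEVICE(dev), "audiodev", audiodev);
        pci_realize_and_unref(dev, bus, &error_fatal);
        return 0;
    }

    /* Typically called from the device's type_init() registration hook. */
    static void mycard_register_soundhw(void)
    {
        pci_register_soundhw("mycard", "Example sound card", mycard_init_pci);
    }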
diff --git a/include/hw/audio/virtio-snd.h b/include/hw/audio/virtio-snd.h
new file mode 100644
index 0000000000..8dafedb276
--- /dev/null
+++ b/include/hw/audio/virtio-snd.h
@@ -0,0 +1,250 @@
+/*
+ * VIRTIO Sound Device conforming to
+ *
+ * "Virtual I/O Device (VIRTIO) Version 1.2
+ * Committee Specification Draft 01
+ * 09 May 2022"
+ *
+ * Copyright (c) 2023 Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
+ * Copyright (C) 2019 OpenSynergy GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_VIRTIO_SOUND_H
+#define QEMU_VIRTIO_SOUND_H
+
+#include "hw/virtio/virtio.h"
+#include "audio/audio.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_snd.h"
+
+#define TYPE_VIRTIO_SND "virtio-sound-device"
+#define VIRTIO_SND(obj) \
+ OBJECT_CHECK(VirtIOSound, (obj), TYPE_VIRTIO_SND)
+
+/* CONFIGURATION SPACE */
+
+typedef struct virtio_snd_config virtio_snd_config;
+
+/* COMMON DEFINITIONS */
+
+/* common header for request/response*/
+typedef struct virtio_snd_hdr virtio_snd_hdr;
+
+/* event notification */
+typedef struct virtio_snd_event virtio_snd_event;
+
+/* common control request to query an item information */
+typedef struct virtio_snd_query_info virtio_snd_query_info;
+
+/* JACK CONTROL MESSAGES */
+
+typedef struct virtio_snd_jack_hdr virtio_snd_jack_hdr;
+
+/* jack information structure */
+typedef struct virtio_snd_jack_info virtio_snd_jack_info;
+
+/* jack remapping control request */
+typedef struct virtio_snd_jack_remap virtio_snd_jack_remap;
+
+/*
+ * PCM CONTROL MESSAGES
+ */
+typedef struct virtio_snd_pcm_hdr virtio_snd_pcm_hdr;
+
+/* PCM stream info structure */
+typedef struct virtio_snd_pcm_info virtio_snd_pcm_info;
+
+/* set PCM stream params */
+typedef struct virtio_snd_pcm_set_params virtio_snd_pcm_set_params;
+
+/* I/O request header */
+typedef struct virtio_snd_pcm_xfer virtio_snd_pcm_xfer;
+
+/* I/O request status */
+typedef struct virtio_snd_pcm_status virtio_snd_pcm_status;
+
+/* device structs */
+
+typedef struct VirtIOSound VirtIOSound;
+
+typedef struct VirtIOSoundPCMStream VirtIOSoundPCMStream;
+
+typedef struct virtio_snd_ctrl_command virtio_snd_ctrl_command;
+
+typedef struct VirtIOSoundPCM VirtIOSoundPCM;
+
+typedef struct VirtIOSoundPCMBuffer VirtIOSoundPCMBuffer;
+
+/*
+ * The VirtIO sound spec reuses layouts and values from the High Definition
+ * Audio spec (virtio/v1.2: 5.14 Sound Device). This struct handles each I/O
+ * message's buffer (virtio/v1.2: 5.14.6.8 PCM I/O Messages).
+ *
+ * In the case of TX (i.e. playback) buffers, we defer reading the raw PCM data
+ * from the virtqueue until QEMU's audio backend calls the output callback.
+ * This is tracked by the `bool populated;` field, which is set to true when
+ * data has been read into our own buffer for consumption.
+ *
+ * VirtIOSoundPCMBuffer has a dynamic size since it includes the raw PCM data
+ * in its allocation. It must be initialized and destroyed as follows:
+ *
+ * size_t size = [[derived from owned VQ element descriptor sizes]];
+ * buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer) + size);
+ * buffer->elem = [[owned VQ element]];
+ *
+ * [..]
+ *
+ * g_free(buffer->elem);
+ * g_free(buffer);
+ */
+struct VirtIOSoundPCMBuffer {
+ QSIMPLEQ_ENTRY(VirtIOSoundPCMBuffer) entry;
+ VirtQueueElement *elem;
+ VirtQueue *vq;
+ size_t size;
+ /*
+ * In TX / Playback, `offset` represents the first unused position inside
+ * `data`. If `offset == size` then there is no unused data left.
+ */
+ uint64_t offset;
+ /* Used for the TX queue for lazy I/O copy from `elem` */
+ bool populated;
+ /*
+ * VirtIOSoundPCMBuffer is an unsized type because it ends with an array of
+ * bytes. The size of `data` is determined from the I/O message's read-only
+ * or write-only size when allocating VirtIOSoundPCMBuffer.
+ */
+ uint8_t data[];
+};
+
+struct VirtIOSoundPCM {
+ VirtIOSound *snd;
+ /*
+ * PCM parameters are a separate field instead of a VirtIOSoundPCMStream
+ * field, because the operation of PCM control requests is first
+ * VIRTIO_SND_R_PCM_SET_PARAMS and then VIRTIO_SND_R_PCM_PREPARE; this
+ * means that sometimes we get parameters without having an allocated
+ * stream yet.
+ */
+ virtio_snd_pcm_set_params *pcm_params;
+ VirtIOSoundPCMStream **streams;
+};
+
+struct VirtIOSoundPCMStream {
+ VirtIOSoundPCM *pcm;
+ virtio_snd_pcm_info info;
+ virtio_snd_pcm_set_params params;
+ uint32_t id;
+ /* channel position values (VIRTIO_SND_CHMAP_XXX) */
+ uint8_t positions[VIRTIO_SND_CHMAP_MAX_SIZE];
+ VirtIOSound *s;
+ bool flushing;
+ audsettings as;
+ union {
+ SWVoiceIn *in;
+ SWVoiceOut *out;
+ } voice;
+ QemuMutex queue_mutex;
+ bool active;
+ QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) queue;
+};
+
+/*
+ * PCM stream state machine.
+ * -------------------------
+ *
+ * 5.14.6.6.1 PCM Command Lifecycle
+ * ================================
+ *
+ * A PCM stream has the following command lifecycle:
+ * - `SET PARAMETERS`
+ * The driver negotiates the stream parameters (format, transport, etc) with
+ * the device.
+ * Possible valid transitions: `SET PARAMETERS`, `PREPARE`.
+ * - `PREPARE`
+ * The device prepares the stream (allocates resources, etc).
+ * Possible valid transitions: `SET PARAMETERS`, `PREPARE`, `START`,
+ * `RELEASE`. Output only: the driver transfers data for pre-buffering.
+ * - `START`
+ * The device starts the stream (unmute, putting into running state, etc).
+ * Possible valid transitions: `STOP`.
+ * The driver transfers data to/from the stream.
+ * - `STOP`
+ * The device stops the stream (mute, putting into non-running state, etc).
+ * Possible valid transitions: `START`, `RELEASE`.
+ * - `RELEASE`
+ * The device releases the stream (frees resources, etc).
+ * Possible valid transitions: `SET PARAMETERS`, `PREPARE`.
+ *
+ * +---------------+ +---------+ +---------+ +-------+ +-------+
+ * | SetParameters | | Prepare | | Release | | Start | | Stop |
+ * +---------------+ +---------+ +---------+ +-------+ +-------+
+ * |- | | | |
+ * || | | | |
+ * |< | | | |
+ * |------------->| | | |
+ * |<-------------| | | |
+ * | |- | | |
+ * | || | | |
+ * | |< | | |
+ * | |--------------------->| |
+ * | |---------->| | |
+ * | | | |-------->|
+ * | | | |<--------|
+ * | | |<-------------------|
+ * |<-------------------------| | |
+ * | |<----------| | |
+ *
+ * CTRL in the VirtIOSound device
+ * ==============================
+ *
+ * The control messages that affect the state of a stream arrive in the
+ * `virtio_snd_handle_ctrl()` queue callback and are of type `struct
+ * virtio_snd_ctrl_command`. They are stored in a queue field in the device
+ * type, `VirtIOSound`. This allows deferring the CTRL request completion if
+ * it's not immediately possible due to locking/state reasons.
+ *
+ * The CTRL message is finally handled in `process_cmd()`.
+ */
+struct VirtIOSound {
+ VirtIODevice parent_obj;
+
+ VirtQueue *queues[VIRTIO_SND_VQ_MAX];
+ uint64_t features;
+ VirtIOSoundPCM *pcm;
+ QEMUSoundCard card;
+ VMChangeStateEntry *vmstate;
+ virtio_snd_config snd_conf;
+ QemuMutex cmdq_mutex;
+ QTAILQ_HEAD(, virtio_snd_ctrl_command) cmdq;
+ bool processing_cmdq;
+ /*
+ * Convenience queue to keep track of invalid tx/rx queue messages inside
+ * the tx/rx callbacks.
+ *
+ * In the callbacks, the first step is to empty the virtqueue and handle
+ * each message; we cannot add an invalid message back to the queue, as we
+ * would re-process it in subsequent loop iterations.
+ *
+ * Instead, we add them to this queue and, after examining every
+ * virtqueue element, we inform the guest about each invalid message.
+ *
+ * This queue must be empty at all times except for inside the tx/rx
+ * callbacks.
+ */
+ QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid;
+};
+
+struct virtio_snd_ctrl_command {
+ VirtQueueElement *elem;
+ VirtQueue *vq;
+ virtio_snd_hdr ctrl;
+ virtio_snd_hdr resp;
+ size_t payload_size;
+ QTAILQ_ENTRY(virtio_snd_ctrl_command) next;
+};
+#endif
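The lifecycle comment above maps directly onto the VIRTIO_SND_R_PCM_* control codes from the included linux/virtio_snd.h header. As a sketch only (the helper below is not part of the device; it is merely one way to encode that transition table):

    /* Sketch: encode the valid PCM command transitions described above. */
    static bool virtio_snd_pcm_transition_valid(uint32_t from, uint32_t to)
    {
        switch (from) {
        case VIRTIO_SND_R_PCM_SET_PARAMS:
        case VIRTIO_SND_R_PCM_RELEASE:
            return to == VIRTIO_SND_R_PCM_SET_PARAMS ||
                   to == VIRTIO_SND_R_PCM_PREPARE;
        case VIRTIO_SND_R_PCM_PREPARE:
            return to == VIRTIO_SND_R_PCM_SET_PARAMS ||
                   to == VIRTIO_SND_R_PCM_PREPARE ||
                   to == VIRTIO_SND_R_PCM_START ||
                   to == VIRTIO_SND_R_PCM_RELEASE;
        case VIRTIO_SND_R_PCM_START:
            return to == VIRTIO_SND_R_PCM_STOP;
        case VIRTIO_SND_R_PCM_STOP:
            return to == VIRTIO_SND_R_PCM_START ||
                   to == VIRTIO_SND_R_PCM_RELEASE;
        default:
            return false;
        }
    }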
diff --git a/include/hw/audio/wm8750.h b/include/hw/audio/wm8750.h
index e12cb886d1..f7bafd5e38 100644
--- a/include/hw/audio/wm8750.h
+++ b/include/hw/audio/wm8750.h
@@ -14,7 +14,6 @@
#ifndef HW_DAC_WM8750_H
#define HW_DAC_WM8750_H
-#include "hw/hw.h"
#define TYPE_WM8750 "wm8750"
#define TYPE_MV88W8618_AUDIO "mv88w8618_audio"
diff --git a/include/hw/block/block.h b/include/hw/block/block.h
index e9f9e2223f..de3946a5f1 100644
--- a/include/hw/block/block.h
+++ b/include/hw/block/block.h
@@ -11,23 +11,27 @@
#ifndef HW_BLOCK_H
#define HW_BLOCK_H
-#include "qemu-common.h"
+#include "exec/hwaddr.h"
#include "qapi/qapi-types-block-core.h"
+#include "hw/qdev-properties-system.h"
/* Configuration */
typedef struct BlockConf {
BlockBackend *blk;
- uint16_t physical_block_size;
- uint16_t logical_block_size;
- uint16_t min_io_size;
+ OnOffAuto backend_defaults;
+ uint32_t physical_block_size;
+ uint32_t logical_block_size;
+ uint32_t min_io_size;
uint32_t opt_io_size;
int32_t bootindex;
uint32_t discard_granularity;
/* geometry, not all devices use this */
uint32_t cyls, heads, secs;
+ uint32_t lcyls, lheads, lsecs;
OnOffAuto wce;
bool share_rw;
+ OnOffAuto account_invalid, account_failed;
BlockdevOnError rerror;
BlockdevOnError werror;
} BlockConf;
@@ -45,24 +49,36 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
return exp;
}
-#define DEFINE_BLOCK_PROPERTIES(_state, _conf) \
- DEFINE_PROP_DRIVE("drive", _state, _conf.blk), \
+#define DEFINE_BLOCK_PROPERTIES_BASE(_state, _conf) \
+ DEFINE_PROP_ON_OFF_AUTO("backend_defaults", _state, \
+ _conf.backend_defaults, ON_OFF_AUTO_AUTO), \
DEFINE_PROP_BLOCKSIZE("logical_block_size", _state, \
_conf.logical_block_size), \
DEFINE_PROP_BLOCKSIZE("physical_block_size", _state, \
_conf.physical_block_size), \
- DEFINE_PROP_UINT16("min_io_size", _state, _conf.min_io_size, 0), \
- DEFINE_PROP_UINT32("opt_io_size", _state, _conf.opt_io_size, 0), \
- DEFINE_PROP_UINT32("discard_granularity", _state, \
- _conf.discard_granularity, -1), \
- DEFINE_PROP_ON_OFF_AUTO("write-cache", _state, _conf.wce, \
- ON_OFF_AUTO_AUTO), \
- DEFINE_PROP_BOOL("share-rw", _state, _conf.share_rw, false)
-
-#define DEFINE_BLOCK_CHS_PROPERTIES(_state, _conf) \
- DEFINE_PROP_UINT32("cyls", _state, _conf.cyls, 0), \
- DEFINE_PROP_UINT32("heads", _state, _conf.heads, 0), \
- DEFINE_PROP_UINT32("secs", _state, _conf.secs, 0)
+ DEFINE_PROP_SIZE32("min_io_size", _state, _conf.min_io_size, 0), \
+ DEFINE_PROP_SIZE32("opt_io_size", _state, _conf.opt_io_size, 0), \
+ DEFINE_PROP_SIZE32("discard_granularity", _state, \
+ _conf.discard_granularity, -1), \
+ DEFINE_PROP_ON_OFF_AUTO("write-cache", _state, _conf.wce, \
+ ON_OFF_AUTO_AUTO), \
+ DEFINE_PROP_BOOL("share-rw", _state, _conf.share_rw, false), \
+ DEFINE_PROP_ON_OFF_AUTO("account-invalid", _state, \
+ _conf.account_invalid, ON_OFF_AUTO_AUTO), \
+ DEFINE_PROP_ON_OFF_AUTO("account-failed", _state, \
+ _conf.account_failed, ON_OFF_AUTO_AUTO)
+
+#define DEFINE_BLOCK_PROPERTIES(_state, _conf) \
+ DEFINE_PROP_DRIVE("drive", _state, _conf.blk), \
+ DEFINE_BLOCK_PROPERTIES_BASE(_state, _conf)
+
+#define DEFINE_BLOCK_CHS_PROPERTIES(_state, _conf) \
+ DEFINE_PROP_UINT32("cyls", _state, _conf.cyls, 0), \
+ DEFINE_PROP_UINT32("heads", _state, _conf.heads, 0), \
+ DEFINE_PROP_UINT32("secs", _state, _conf.secs, 0), \
+ DEFINE_PROP_UINT32("lcyls", _state, _conf.lcyls, 0), \
+ DEFINE_PROP_UINT32("lheads", _state, _conf.lheads, 0), \
+ DEFINE_PROP_UINT32("lsecs", _state, _conf.lsecs, 0)
#define DEFINE_BLOCK_ERROR_PROPERTIES(_state, _conf) \
DEFINE_PROP_BLOCKDEV_ON_ERROR("rerror", _state, _conf.rerror, \
@@ -70,12 +86,17 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
DEFINE_PROP_BLOCKDEV_ON_ERROR("werror", _state, _conf.werror, \
BLOCKDEV_ON_ERROR_AUTO)
+/* Backend access helpers */
+
+bool blk_check_size_and_read_all(BlockBackend *blk, DeviceState *dev,
+ void *buf, hwaddr size, Error **errp);
+
/* Configuration helpers */
bool blkconf_geometry(BlockConf *conf, int *trans,
unsigned cyls_max, unsigned heads_max, unsigned secs_max,
Error **errp);
-void blkconf_blocksizes(BlockConf *conf);
+bool blkconf_blocksizes(BlockConf *conf, Error **errp);
bool blkconf_apply_backend_options(BlockConf *conf, bool readonly,
bool resizable, Error **errp);
diff --git a/include/hw/block/fdc.h b/include/hw/block/fdc.h
index 3b813c7f7d..c367c5efea 100644
--- a/include/hw/block/fdc.h
+++ b/include/hw/block/fdc.h
@@ -1,7 +1,7 @@
#ifndef HW_FDC_H
#define HW_FDC_H
-#include "qemu-common.h"
+#include "exec/hwaddr.h"
#include "qapi/qapi-types-block.h"
/* fdc.c */
@@ -9,14 +9,15 @@
#define TYPE_ISA_FDC "isa-fdc"
-ISADevice *fdctrl_init_isa(ISABus *bus, DriveInfo **fds);
-void fdctrl_init_sysbus(qemu_irq irq, int dma_chann,
- hwaddr mmio_base, DriveInfo **fds);
+void isa_fdc_init_drives(ISADevice *fdc, DriveInfo **fds);
+void fdctrl_init_sysbus(qemu_irq irq, hwaddr mmio_base, DriveInfo **fds);
void sun4m_fdctrl_init(qemu_irq irq, hwaddr io_base,
DriveInfo **fds, qemu_irq *fdc_tc);
+void isa_fdc_set_iobase(ISADevice *fdc, hwaddr iobase);
+void isa_fdc_set_enabled(ISADevice *fdc, bool enabled);
+
FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i);
-void isa_fdc_get_drive_max_chs(FloppyDriveType type,
- uint8_t *maxc, uint8_t *maxh, uint8_t *maxs);
+int cmos_get_fd_drive_type(FloppyDriveType fd0);
#endif
diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h
index 67c3aa329e..2b5ccd92f4 100644
--- a/include/hw/block/flash.h
+++ b/include/hw/block/flash.h
@@ -3,34 +3,46 @@
/* NOR flash devices */
-#include "exec/memory.h"
+#include "exec/hwaddr.h"
+#include "qom/object.h"
-#define TYPE_CFI_PFLASH01 "cfi.pflash01"
-#define TYPE_CFI_PFLASH02 "cfi.pflash02"
+/* pflash_cfi01.c */
-typedef struct pflash_t pflash_t;
+#define TYPE_PFLASH_CFI01 "cfi.pflash01"
+OBJECT_DECLARE_SIMPLE_TYPE(PFlashCFI01, PFLASH_CFI01)
-/* pflash_cfi01.c */
-pflash_t *pflash_cfi01_register(hwaddr base,
- DeviceState *qdev, const char *name,
- hwaddr size,
- BlockBackend *blk,
- uint32_t sector_len, int nb_blocs, int width,
- uint16_t id0, uint16_t id1,
- uint16_t id2, uint16_t id3, int be);
+
+PFlashCFI01 *pflash_cfi01_register(hwaddr base,
+ const char *name,
+ hwaddr size,
+ BlockBackend *blk,
+ uint32_t sector_len,
+ int width,
+ uint16_t id0, uint16_t id1,
+ uint16_t id2, uint16_t id3,
+ int be);
+BlockBackend *pflash_cfi01_get_blk(PFlashCFI01 *fl);
+MemoryRegion *pflash_cfi01_get_memory(PFlashCFI01 *fl);
+void pflash_cfi01_legacy_drive(PFlashCFI01 *dev, DriveInfo *dinfo);
/* pflash_cfi02.c */
-pflash_t *pflash_cfi02_register(hwaddr base,
- DeviceState *qdev, const char *name,
- hwaddr size,
- BlockBackend *blk, uint32_t sector_len,
- int nb_blocs, int nb_mappings, int width,
- uint16_t id0, uint16_t id1,
- uint16_t id2, uint16_t id3,
- uint16_t unlock_addr0, uint16_t unlock_addr1,
- int be);
-
-MemoryRegion *pflash_cfi01_get_memory(pflash_t *fl);
+
+#define TYPE_PFLASH_CFI02 "cfi.pflash02"
+OBJECT_DECLARE_SIMPLE_TYPE(PFlashCFI02, PFLASH_CFI02)
+
+
+PFlashCFI02 *pflash_cfi02_register(hwaddr base,
+ const char *name,
+ hwaddr size,
+ BlockBackend *blk,
+ uint32_t sector_len,
+ int nb_mappings,
+ int width,
+ uint16_t id0, uint16_t id1,
+ uint16_t id2, uint16_t id3,
+ uint16_t unlock_addr0,
+ uint16_t unlock_addr1,
+ int be);
/* nand.c */
DeviceState *nand_init(BlockBackend *blk, int manf_id, int chip_id);
@@ -41,27 +53,33 @@ void nand_setio(DeviceState *dev, uint32_t value);
uint32_t nand_getio(DeviceState *dev);
uint32_t nand_getbuswidth(DeviceState *dev);
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_STMICRO 0x20
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_MICRON 0x2c
/* onenand.c */
void *onenand_raw_otp(DeviceState *onenand_device);
/* ecc.c */
typedef struct {
- uint8_t cp; /* Column parity */
- uint16_t lp[2]; /* Line parity */
+ uint8_t cp; /* Column parity */
+ uint16_t lp[2]; /* Line parity */
uint16_t count;
} ECCState;
uint8_t ecc_digest(ECCState *s, uint8_t sample);
void ecc_reset(ECCState *s);
-extern VMStateDescription vmstate_ecc_state;
+extern const VMStateDescription vmstate_ecc_state;
+
+/* m25p80.c */
+
+#define TYPE_M25P80 "m25p80-generic"
+
+BlockBackend *m25p80_get_blk(DeviceState *dev);
#endif
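A board-side sketch of the reworked pflash_cfi01_register() call (the qdev and nb_blocs parameters are gone; the block count is now derived from size and sector_len). The base address, CFI ids and sector geometry below are made-up examples; blk_by_legacy_dinfo() and the MiB/KiB constants come from sysemu/blockdev.h and qemu/units.h.

    static PFlashCFI01 *my_board_flash_init(DriveInfo *dinfo)
    {
        BlockBackend *blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;

        return pflash_cfi01_register(0x0,                    /* base           */
                                     "my-board.flash0",      /* name           */
                                     64 * MiB,               /* size           */
                                     blk,
                                     256 * KiB,              /* sector_len     */
                                     4,                      /* device width   */
                                     0x89, 0x18, 0x00, 0x00, /* CFI id0..id3   */
                                     0);                     /* not big-endian */
    }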
diff --git a/include/hw/block/swim.h b/include/hw/block/swim.h
new file mode 100644
index 0000000000..5f567e8d59
--- /dev/null
+++ b/include/hw/block/swim.h
@@ -0,0 +1,72 @@
+/*
+ * QEMU Macintosh floppy disk controller emulator (SWIM)
+ *
+ * Copyright (c) 2014-2018 Laurent Vivier <laurent@vivier.eu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef SWIM_H
+#define SWIM_H
+
+#include "hw/block/block.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define SWIM_MAX_FD 2
+
+typedef struct SWIMCtrl SWIMCtrl;
+
+#define TYPE_SWIM_DRIVE "swim-drive"
+OBJECT_DECLARE_SIMPLE_TYPE(SWIMDrive, SWIM_DRIVE)
+
+struct SWIMDrive {
+ DeviceState qdev;
+ int32_t unit;
+ BlockConf conf;
+};
+
+#define TYPE_SWIM_BUS "swim-bus"
+OBJECT_DECLARE_SIMPLE_TYPE(SWIMBus, SWIM_BUS)
+
+struct SWIMBus {
+ BusState bus;
+ struct SWIMCtrl *ctrl;
+};
+
+typedef struct FDrive {
+ SWIMCtrl *swimctrl;
+ BlockBackend *blk;
+ BlockConf *conf;
+} FDrive;
+
+struct SWIMCtrl {
+ MemoryRegion swim;
+ MemoryRegion iwm;
+ MemoryRegion ism;
+ FDrive drives[SWIM_MAX_FD];
+ int mode;
+ /* IWM mode */
+ int iwm_switch;
+ uint8_t iwm_latches;
+ uint8_t iwmregs[8];
+ /* SWIM mode */
+ uint8_t ismregs[16];
+ uint8_t swim_phase;
+ uint8_t swim_mode;
+ uint8_t swim_status;
+ uint8_t pram[16];
+ uint8_t pram_idx;
+ SWIMBus bus;
+};
+
+#define TYPE_SWIM "swim"
+OBJECT_DECLARE_SIMPLE_TYPE(Swim, SWIM)
+
+struct Swim {
+ SysBusDevice parent_obj;
+ SWIMCtrl ctrl;
+};
+#endif
diff --git a/include/hw/boards.h b/include/hw/boards.h
index 02f114085f..69c1ba45cf 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -3,43 +3,13 @@
#ifndef HW_BOARDS_H
#define HW_BOARDS_H
+#include "exec/memory.h"
+#include "sysemu/hostmem.h"
#include "sysemu/blockdev.h"
-#include "sysemu/accel.h"
-#include "hw/qdev.h"
+#include "qapi/qapi-types-machine.h"
+#include "qemu/module.h"
#include "qom/object.h"
-#include "qom/cpu.h"
-
-/**
- * memory_region_allocate_system_memory - Allocate a board's main memory
- * @mr: the #MemoryRegion to be initialized
- * @owner: the object that tracks the region's reference count
- * @name: name of the memory region
- * @ram_size: size of the region in bytes
- *
- * This function allocates the main memory for a board model, and
- * initializes @mr appropriately. It also arranges for the memory
- * to be migrated (by calling vmstate_register_ram_global()).
- *
- * Memory allocated via this function will be backed with the memory
- * backend the user provided using "-mem-path" or "-numa node,memdev=..."
- * if appropriate; this is typically used to cause host huge pages to be
- * used. This function should therefore be called by a board exactly once,
- * for the primary or largest RAM area it implements.
- *
- * For boards where the major RAM is split into two parts in the memory
- * map, you can deal with this by calling memory_region_allocate_system_memory()
- * once to get a MemoryRegion with enough RAM for both parts, and then
- * creating alias MemoryRegions via memory_region_init_alias() which
- * alias into different parts of the RAM MemoryRegion and can be mapped
- * into the memory map in the appropriate places.
- *
- * Smaller pieces of memory (display RAM, static RAMs, etc) don't need
- * to be backed via the -mem-path memory backend and can simply
- * be created via memory_region_init_ram().
- */
-void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
- const char *name,
- uint64_t ram_size);
+#include "hw/core/cpu.h"
#define TYPE_MACHINE_SUFFIX "-machine"
@@ -50,32 +20,94 @@ void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
#define TYPE_MACHINE "machine"
#undef MACHINE /* BSD defines it and QEMU does not use it */
-#define MACHINE(obj) \
- OBJECT_CHECK(MachineState, (obj), TYPE_MACHINE)
-#define MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MachineClass, (obj), TYPE_MACHINE)
-#define MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(MachineClass, (klass), TYPE_MACHINE)
-
-MachineClass *find_default_machine(void);
+OBJECT_DECLARE_TYPE(MachineState, MachineClass, MACHINE)
+
extern MachineState *current_machine;
-void machine_run_board_init(MachineState *machine);
+/**
+ * machine_class_default_cpu_type: Return the machine default CPU type.
+ * @mc: Machine class
+ */
+const char *machine_class_default_cpu_type(MachineClass *mc);
+
+void machine_add_audiodev_property(MachineClass *mc);
+void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp);
bool machine_usb(MachineState *machine);
-bool machine_kernel_irqchip_allowed(MachineState *machine);
-bool machine_kernel_irqchip_required(MachineState *machine);
-bool machine_kernel_irqchip_split(MachineState *machine);
-int machine_kvm_shadow_mem(MachineState *machine);
int machine_phandle_start(MachineState *machine);
bool machine_dump_guest_core(MachineState *machine);
bool machine_mem_merge(MachineState *machine);
+bool machine_require_guest_memfd(MachineState *machine);
HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine);
void machine_set_cpu_numa_node(MachineState *machine,
const CpuInstanceProperties *props,
Error **errp);
+void machine_parse_smp_config(MachineState *ms,
+ const SMPConfiguration *config, Error **errp);
+unsigned int machine_topo_get_cores_per_socket(const MachineState *ms);
+unsigned int machine_topo_get_threads_per_socket(const MachineState *ms);
+void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size);
+/**
+ * machine_class_allow_dynamic_sysbus_dev: Add type to list of valid devices
+ * @mc: Machine class
+ * @type: type to allow (should be a subtype of TYPE_SYS_BUS_DEVICE)
+ *
+ * Add the QOM type @type to the list of device types which are subtypes
+ * of TYPE_SYS_BUS_DEVICE but which are still permitted to be dynamically
+ * created (e.g. by the user on the command line with -device).
+ * By default, if the user tries to create any devices on the command line
+ * that are subtypes of TYPE_SYS_BUS_DEVICE they will get an error message;
+ * for the special cases which are permitted for this machine model, the
+ * machine model class init code must call this function to add them
+ * to the list of specifically permitted devices.
+ */
void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type);
+/**
+ * device_type_is_dynamic_sysbus: Check if type is an allowed sysbus device
+ * type for the machine class.
+ * @mc: Machine class
+ * @type: type to check (should be a subtype of TYPE_SYS_BUS_DEVICE)
+ *
+ * Returns: true if @type is a type in the machine's list of
+ * dynamically pluggable sysbus devices; otherwise false.
+ *
+ * Check if the QOM type @type is in the list of allowed sysbus device
+ * types (see machine_class_allow_dynamic_sysbus_dev()).
+ * Note that if @type has a parent type in the list, it is allowed too.
+ */
+bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type);
+
+/**
+ * device_is_dynamic_sysbus: test whether device is a dynamic sysbus device
+ * @mc: Machine class
+ * @dev: device to check
+ *
+ * Returns: true if @dev is a sysbus device on the machine's list
+ * of dynamically pluggable sysbus devices; otherwise false.
+ *
+ * This function checks whether @dev is a valid dynamic sysbus device,
+ * by first confirming that it is a sysbus device and then checking it
+ * against the list of permitted dynamic sysbus devices which has been
+ * set up by the machine using machine_class_allow_dynamic_sysbus_dev().
+ *
+ * It is valid to call this with something that is not a subclass of
+ * TYPE_SYS_BUS_DEVICE; the function will return false in this case.
+ * This allows hotplug callback functions to be written as:
+ * if (device_is_dynamic_sysbus(mc, dev)) {
+ * handle dynamic sysbus case;
+ * } else if (some other kind of hotplug) {
+ * handle that;
+ * }
+ */
+bool device_is_dynamic_sysbus(MachineClass *mc, DeviceState *dev);
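A sketch of the two call sites implied by the comments above: an allow-list entry added in a machine's class_init and the corresponding check in a hotplug handler. The machine and device type names are hypothetical.

    static void my_machine_class_init(ObjectClass *oc, void *data)
    {
        MachineClass *mc = MACHINE_CLASS(oc);

        /* Permit "-device my-sysbus-dev" on this machine only. */
        machine_class_allow_dynamic_sysbus_dev(mc, "my-sysbus-dev");
    }

    static void my_machine_device_plug(HotplugHandler *hotplug_dev,
                                       DeviceState *dev, Error **errp)
    {
        MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);

        if (device_is_dynamic_sysbus(mc, dev)) {
            /* handle the dynamic sysbus case */
        }
    }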
+
+/*
+ * Checks that backend isn't used, preps it for exclusive usage and
+ * returns migratable MemoryRegion provided by backend.
+ */
+MemoryRegion *machine_consume_memdev(MachineState *machine,
+ HostMemoryBackend *backend);
/**
* CPUArchId:
@@ -85,11 +117,11 @@ void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type);
* @props - CPU object properties, initialized by board
* #vcpus_count - number of threads provided by @cpu object
*/
-typedef struct {
+typedef struct CPUArchId {
uint64_t arch_id;
int64_t vcpus_count;
CpuInstanceProperties props;
- Object *cpu;
+ CPUState *cpu;
const char *type;
} CPUArchId;
@@ -100,16 +132,37 @@ typedef struct {
*/
typedef struct {
int len;
- CPUArchId cpus[0];
+ CPUArchId cpus[];
} CPUArchIdList;
/**
+ * SMPCompatProps:
+ * @prefer_sockets - whether sockets are preferred over cores in smp parsing
+ * @dies_supported - whether dies are supported by the machine
+ * @clusters_supported - whether clusters are supported by the machine
+ * @has_clusters - whether clusters are explicitly specified in the user
+ * provided SMP configuration
+ * @books_supported - whether books are supported by the machine
+ * @drawers_supported - whether drawers are supported by the machine
+ */
+typedef struct {
+ bool prefer_sockets;
+ bool dies_supported;
+ bool clusters_supported;
+ bool has_clusters;
+ bool books_supported;
+ bool drawers_supported;
+} SMPCompatProps;
+
+/**
* MachineClass:
* @deprecation_reason: If set, the machine is marked as deprecated. The
* string should provide some clear information about what to use instead.
* @max_cpus: maximum number of CPUs supported. Default: 1
* @min_cpus: minimum number of CPUs supported. Default: 1
* @default_cpus: number of CPUs instantiated if none are specified. Default: 1
+ * @is_default:
+ * If true, QEMU will use this machine by default if no '-M' option is given.
* @get_hotplug_handler: this function is called during bus-less
* device hotplug. If defined it returns pointer to an instance
* of HotplugHandler object, which handles hotplug operation
@@ -117,7 +170,7 @@ typedef struct {
* any actions to be performed by hotplug handler.
* @cpu_index_to_instance_props:
* used to provide @cpu_index to socket/core/thread number mapping, allowing
- * legacy code to perform maping from cpu_index to topology properties
+ * legacy code to perform mapping from cpu_index to topology properties
* Returns: tuple of socket/core/thread ids given cpu_index belongs to.
* used to provide @cpu_index to socket number mapping, allowing
* a machine to group CPU threads belonging to the same socket/package
@@ -156,6 +209,32 @@ typedef struct {
* should instead use "unimplemented-device" for all memory ranges where
* the guest will attempt to probe for a device that QEMU doesn't
* implement and a stub device is required.
+ * @kvm_type:
+ * Return the type of KVM corresponding to the kvm-type string option or
+ * computed based on other criteria such as the host kernel capabilities.
+ * kvm-type may be NULL if it is not needed.
+ * @numa_mem_supported:
+ * true if the '-numa node,mem' option is supported and false otherwise
+ * @hotplug_allowed:
+ * If the hook is provided, then it'll be called for each device
+ * hotplug to check whether the device hotplug is allowed. Return
+ * true to grant allowance or false to reject the hotplug. When
+ * false is returned, an error must be set to show the reason for
+ * the rejection. If the hook is not provided, all hotplug is
+ * allowed.
+ * @default_ram_id:
+ * Specifies the initial RAM MemoryRegion name to be used for default backend
+ * creation if the user hasn't explicitly specified a backend with the
+ * "memory-backend" property.
+ * It is also used as a way to opt into "-m" option support.
+ * If it's not set by the board, '-m' will be ignored and generic code will
+ * not create a default RAM MemoryRegion.
+ * @fixup_ram_size:
+ * Amends the user-provided RAM size (set with the -m option) using a
+ * machine-specific algorithm. To be used by old machine types for compat
+ * purposes only.
+ * Applies only to the default memory backend, i.e. when no explicit memory
+ * backend was used.
*/
struct MachineClass {
/*< private >*/
@@ -169,9 +248,9 @@ struct MachineClass {
const char *deprecation_reason;
void (*init)(MachineState *state);
- void (*reset)(void);
- void (*hot_add_cpu)(const int64_t id, Error **errp);
- int (*kvm_type)(const char *arg);
+ void (*reset)(MachineState *state, ShutdownCause reason);
+ void (*wakeup)(MachineState *state);
+ int (*kvm_type)(MachineState *machine, const char *arg);
BlockInterfaceType block_default_type;
int units_per_default_bus;
@@ -180,16 +259,16 @@ struct MachineClass {
int default_cpus;
unsigned int no_serial:1,
no_parallel:1,
- use_virtcon:1,
no_floppy:1,
no_cdrom:1,
no_sdcard:1,
pci_allow_0_address:1,
legacy_fw_cfg_order:1;
- int is_default;
+ bool is_default;
const char *default_machine_opts;
const char *default_boot_order;
const char *default_display;
+ const char *default_nic;
GPtrArray *compat_props;
const char *hw_version;
ram_addr_t default_ram_size;
@@ -201,74 +280,134 @@ struct MachineClass {
bool has_hotpluggable_cpus;
bool ignore_memory_transaction_failures;
int numa_mem_align_shift;
- const char **valid_cpu_types;
+ const char * const *valid_cpu_types;
strList *allowed_dynamic_sysbus_devices;
bool auto_enable_numa_with_memhp;
- void (*numa_auto_assign_ram)(MachineClass *mc, NodeInfo *nodes,
- int nb_nodes, ram_addr_t size);
+ bool auto_enable_numa_with_memdev;
bool ignore_boot_device_suffixes;
+ bool smbus_no_migration_support;
+ bool nvdimm_supported;
+ bool numa_mem_supported;
+ bool auto_enable_numa;
+ bool cpu_cluster_has_numa_boundary;
+ SMPCompatProps smp_props;
+ const char *default_ram_id;
HotplugHandler *(*get_hotplug_handler)(MachineState *machine,
DeviceState *dev);
+ bool (*hotplug_allowed)(MachineState *state, DeviceState *dev,
+ Error **errp);
CpuInstanceProperties (*cpu_index_to_instance_props)(MachineState *machine,
unsigned cpu_index);
const CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine);
int64_t (*get_default_cpu_node_id)(const MachineState *ms, int idx);
+ ram_addr_t (*fixup_ram_size)(ram_addr_t size);
};
/**
* DeviceMemoryState:
* @base: address in guest physical address space where the memory
* address space for memory devices starts
- * @mr: address space container for memory devices
+ * @mr: memory region container for memory devices
+ * @as: address space for memory devices
+ * @listener: memory listener used to track used memslots in the address space
+ * @dimm_size: the sum of plugged DIMMs' sizes
+ * @used_region_size: the part of @mr already used by memory devices
+ * @required_memslots: the number of memslots required by memory devices
+ * @used_memslots: the number of memslots currently used by memory devices
+ * @memslot_auto_decision_active: whether any plugged memory device
+ * automatically decided to use more than
+ * one memslot
*/
typedef struct DeviceMemoryState {
hwaddr base;
MemoryRegion mr;
+ AddressSpace as;
+ MemoryListener listener;
+ uint64_t dimm_size;
+ uint64_t used_region_size;
+ unsigned int required_memslots;
+ unsigned int used_memslots;
+ unsigned int memslot_auto_decision_active;
} DeviceMemoryState;
/**
+ * CpuTopology:
+ * @cpus: the number of present logical processors on the machine
+ * @drawers: the number of drawers on the machine
+ * @books: the number of books in one drawer
+ * @sockets: the number of sockets in one book
+ * @dies: the number of dies in one socket
+ * @clusters: the number of clusters in one die
+ * @cores: the number of cores in one cluster
+ * @threads: the number of threads in one core
+ * @max_cpus: the maximum number of logical processors on the machine
+ */
+typedef struct CpuTopology {
+ unsigned int cpus;
+ unsigned int drawers;
+ unsigned int books;
+ unsigned int sockets;
+ unsigned int dies;
+ unsigned int clusters;
+ unsigned int cores;
+ unsigned int threads;
+ unsigned int max_cpus;
+} CpuTopology;
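For reference (a sketch, not a new API): after machine_parse_smp_config() has filled in the defaults, max_cpus is expected to be the product of all the levels listed above.

    static unsigned int smp_product(const CpuTopology *t)
    {
        return t->drawers * t->books * t->sockets * t->dies *
               t->clusters * t->cores * t->threads;
    }
    /* i.e. ms->smp.max_cpus == smp_product(&ms->smp) should hold. */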
+
+/**
* MachineState:
*/
struct MachineState {
/*< private >*/
Object parent_obj;
- Notifier sysbus_notifier;
/*< public >*/
- char *accel;
- bool kernel_irqchip_allowed;
- bool kernel_irqchip_required;
- bool kernel_irqchip_split;
- int kvm_shadow_mem;
+ void *fdt;
char *dtb;
char *dumpdtb;
int phandle_start;
char *dt_compatible;
bool dump_guest_core;
bool mem_merge;
+ bool require_guest_memfd;
bool usb;
bool usb_disabled;
- bool igd_gfx_passthru;
char *firmware;
bool iommu;
bool suppress_vmdesc;
- bool enforce_config_section;
bool enable_graphics;
- char *memory_encryption;
+ ConfidentialGuestSupport *cgs;
+ HostMemoryBackend *memdev;
+ /*
+ * convenience alias to ram_memdev_id backend memory region
+ * or to numa container memory region
+ */
+ MemoryRegion *ram;
DeviceMemoryState *device_memory;
+ /*
+ * Included in MachineState for simplicity, but not supported
+ * unless machine_add_audiodev_property is called. Boards
+ * that have embedded audio devices can call it from the
+ * machine init function and forward the property to the device.
+ */
+ char *audiodev;
+
ram_addr_t ram_size;
ram_addr_t maxram_size;
uint64_t ram_slots;
- const char *boot_order;
+ BootConfiguration boot_config;
char *kernel_filename;
char *kernel_cmdline;
char *initrd_filename;
const char *cpu_type;
AccelState *accelerator;
CPUArchIdList *possible_cpus;
+ CpuTopology smp;
+ struct NVDIMMState *nvdimms_state;
+ struct NumaState *numa_state;
};
#define DEFINE_MACHINE(namestr, machine_initfn) \
@@ -288,6 +427,54 @@ struct MachineState {
} \
type_init(machine_initfn##_register_types)
+extern GlobalProperty hw_compat_9_0[];
+extern const size_t hw_compat_9_0_len;
+
+extern GlobalProperty hw_compat_8_2[];
+extern const size_t hw_compat_8_2_len;
+
+extern GlobalProperty hw_compat_8_1[];
+extern const size_t hw_compat_8_1_len;
+
+extern GlobalProperty hw_compat_8_0[];
+extern const size_t hw_compat_8_0_len;
+
+extern GlobalProperty hw_compat_7_2[];
+extern const size_t hw_compat_7_2_len;
+
+extern GlobalProperty hw_compat_7_1[];
+extern const size_t hw_compat_7_1_len;
+
+extern GlobalProperty hw_compat_7_0[];
+extern const size_t hw_compat_7_0_len;
+
+extern GlobalProperty hw_compat_6_2[];
+extern const size_t hw_compat_6_2_len;
+
+extern GlobalProperty hw_compat_6_1[];
+extern const size_t hw_compat_6_1_len;
+
+extern GlobalProperty hw_compat_6_0[];
+extern const size_t hw_compat_6_0_len;
+
+extern GlobalProperty hw_compat_5_2[];
+extern const size_t hw_compat_5_2_len;
+
+extern GlobalProperty hw_compat_5_1[];
+extern const size_t hw_compat_5_1_len;
+
+extern GlobalProperty hw_compat_5_0[];
+extern const size_t hw_compat_5_0_len;
+
+extern GlobalProperty hw_compat_4_2[];
+extern const size_t hw_compat_4_2_len;
+
+extern GlobalProperty hw_compat_4_1[];
+extern const size_t hw_compat_4_1_len;
+
+extern GlobalProperty hw_compat_4_0[];
+extern const size_t hw_compat_4_0_len;
+
extern GlobalProperty hw_compat_3_1[];
extern const size_t hw_compat_3_1_len;
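A sketch of how these per-release arrays are normally consumed by versioned machine types; the function names are illustrative, and compat_props_add() is the helper used for this purpose elsewhere in the tree.

    static void my_machine_8_2_class_options(MachineClass *mc)
    {
        my_machine_9_0_class_options(mc);   /* inherit the newer defaults first */
        compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
    }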
diff --git a/include/hw/bt.h b/include/hw/bt.h
deleted file mode 100644
index b5e11d4d43..0000000000
--- a/include/hw/bt.h
+++ /dev/null
@@ -1,2178 +0,0 @@
-/*
- * QEMU Bluetooth HCI helpers.
- *
- * Copyright (C) 2007 OpenMoko, Inc.
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * Useful definitions taken from BlueZ project's headers.
- * Copyright (C) 2000-2001 Qualcomm Incorporated
- * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
- * Copyright (C) 2002-2006 Marcel Holtmann <marcel@holtmann.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef HW_BT_H
-#define HW_BT_H
-
-#include "hw/irq.h"
-
-/* BD Address */
-typedef struct {
- uint8_t b[6];
-} QEMU_PACKED bdaddr_t;
-
-#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
-#define BDADDR_ALL (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}})
-#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
-
-/* Copy, swap, convert BD Address */
-static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
-{
- return memcmp(ba1, ba2, sizeof(bdaddr_t));
-}
-static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
-{
- memcpy(dst, src, sizeof(bdaddr_t));
-}
-
-#define BAINIT(orig) { .b = { \
- (orig)->b[0], (orig)->b[1], (orig)->b[2], \
- (orig)->b[3], (orig)->b[4], (orig)->b[5], \
-}, }
-
-/* The twisted structures of a bluetooth environment */
-struct bt_device_s;
-struct bt_scatternet_s;
-struct bt_piconet_s;
-struct bt_link_s;
-
-struct bt_scatternet_s {
- struct bt_device_s *slave;
-};
-
-struct bt_link_s {
- struct bt_device_s *slave, *host;
- uint16_t handle; /* Master (host) side handle */
- uint16_t acl_interval;
- enum {
- acl_active,
- acl_hold,
- acl_sniff,
- acl_parked,
- } acl_mode;
-};
-
-struct bt_device_s {
- int lt_addr;
- bdaddr_t bd_addr;
- int mtu;
- int setup;
- struct bt_scatternet_s *net;
-
- uint8_t key[16];
- int key_present;
- uint8_t class[3];
-
- uint8_t reject_reason;
-
- uint64_t lmp_caps;
- const char *lmp_name;
- void (*lmp_connection_request)(struct bt_link_s *link);
- void (*lmp_connection_complete)(struct bt_link_s *link);
- void (*lmp_disconnect_master)(struct bt_link_s *link);
- void (*lmp_disconnect_slave)(struct bt_link_s *link);
- void (*lmp_acl_data)(struct bt_link_s *link, const uint8_t *data,
- int start, int len);
- void (*lmp_acl_resp)(struct bt_link_s *link, const uint8_t *data,
- int start, int len);
- void (*lmp_mode_change)(struct bt_link_s *link);
-
- void (*handle_destroy)(struct bt_device_s *device);
- struct bt_device_s *next; /* Next in the piconet/scatternet */
-
- int inquiry_scan;
- int page_scan;
-
- uint16_t clkoff; /* Note: Always little-endian */
-};
-
-extern struct HCIInfo null_hci;
-/* bt.c */
-void bt_device_init(struct bt_device_s *dev, struct bt_scatternet_s *net);
-void bt_device_done(struct bt_device_s *dev);
-struct bt_scatternet_s *qemu_find_bt_vlan(int id);
-
-/* bt-hci.c */
-struct HCIInfo *bt_new_hci(struct bt_scatternet_s *net);
-struct HCIInfo *hci_init(const char *str);
-
-/* bt-vhci.c */
-void bt_vhci_init(struct HCIInfo *info);
-
-/* bt-hci-csr.c */
-enum {
- csrhci_pin_reset,
- csrhci_pin_wakeup,
- __csrhci_pins,
-};
-qemu_irq *csrhci_pins_get(Chardev *chr);
-Chardev *uart_hci_init(void);
-
-/* bt-l2cap.c */
-struct bt_l2cap_device_s;
-struct bt_l2cap_conn_params_s;
-struct bt_l2cap_psm_s;
-void bt_l2cap_device_init(struct bt_l2cap_device_s *dev,
- struct bt_scatternet_s *net);
-void bt_l2cap_device_done(struct bt_l2cap_device_s *dev);
-void bt_l2cap_psm_register(struct bt_l2cap_device_s *dev, int psm,
- int min_mtu, int (*new_channel)(struct bt_l2cap_device_s *dev,
- struct bt_l2cap_conn_params_s *params));
-
-struct bt_l2cap_device_s {
- struct bt_device_s device;
- struct bt_l2cap_psm_s *first_psm;
-};
-
-struct bt_l2cap_conn_params_s {
- /* Input */
- uint8_t *(*sdu_out)(struct bt_l2cap_conn_params_s *chan, int len);
- void (*sdu_submit)(struct bt_l2cap_conn_params_s *chan);
- int remote_mtu;
- /* Output */
- void *opaque;
- void (*sdu_in)(void *opaque, const uint8_t *data, int len);
- void (*close)(void *opaque);
-};
-
-enum bt_l2cap_psm_predef {
- BT_PSM_SDP = 0x0001,
- BT_PSM_RFCOMM = 0x0003,
- BT_PSM_TELEPHONY = 0x0005,
- BT_PSM_TCS = 0x0007,
- BT_PSM_BNEP = 0x000f,
- BT_PSM_HID_CTRL = 0x0011,
- BT_PSM_HID_INTR = 0x0013,
- BT_PSM_UPNP = 0x0015,
- BT_PSM_AVCTP = 0x0017,
- BT_PSM_AVDTP = 0x0019,
-};
-
-/* bt-sdp.c */
-void bt_l2cap_sdp_init(struct bt_l2cap_device_s *dev);
-
-/* bt-hid.c */
-struct bt_device_s *bt_keyboard_init(struct bt_scatternet_s *net);
-
-/* Link Management Protocol layer defines */
-
-#define LLID_ACLU_CONT 0x1
-#define LLID_ACLU_START 0x2
-#define LLID_ACLC 0x3
-
-enum lmp_pdu_type {
- LMP_NAME_REQ = 0x0001,
- LMP_NAME_RES = 0x0002,
- LMP_ACCEPTED = 0x0003,
- LMP_NOT_ACCEPTED = 0x0004,
- LMP_CLKOFFSET_REQ = 0x0005,
- LMP_CLKOFFSET_RES = 0x0006,
- LMP_DETACH = 0x0007,
- LMP_IN_RAND = 0x0008,
- LMP_COMB_KEY = 0x0009,
- LMP_UNIT_KEY = 0x000a,
- LMP_AU_RAND = 0x000b,
- LMP_SRES = 0x000c,
- LMP_TEMP_RAND = 0x000d,
- LMP_TEMP_KEY = 0x000e,
- LMP_CRYPT_MODE_REQ = 0x000f,
- LMP_CRYPT_KEY_SIZE_REQ = 0x0010,
- LMP_START_ENCRYPT_REQ = 0x0011,
- LMP_STOP_ENCRYPT_REQ = 0x0012,
- LMP_SWITCH_REQ = 0x0013,
- LMP_HOLD = 0x0014,
- LMP_HOLD_REQ = 0x0015,
- LMP_SNIFF_REQ = 0x0017,
- LMP_UNSNIFF_REQ = 0x0018,
- LMP_LMP_PARK_REQ = 0x0019,
- LMP_SET_BCAST_SCAN_WND = 0x001b,
- LMP_MODIFY_BEACON = 0x001c,
- LMP_UNPARK_BD_ADDR_REQ = 0x001d,
- LMP_UNPARK_PM_ADDR_REQ = 0x001e,
- LMP_INCR_POWER_REQ = 0x001f,
- LMP_DECR_POWER_REQ = 0x0020,
- LMP_MAX_POWER = 0x0021,
- LMP_MIN_POWER = 0x0022,
- LMP_AUTO_RATE = 0x0023,
- LMP_PREFERRED_RATE = 0x0024,
- LMP_VERSION_REQ = 0x0025,
- LMP_VERSION_RES = 0x0026,
- LMP_FEATURES_REQ = 0x0027,
- LMP_FEATURES_RES = 0x0028,
- LMP_QUALITY_OF_SERVICE = 0x0029,
- LMP_QOS_REQ = 0x002a,
- LMP_RM_SCO_LINK_REQ = 0x002b,
- LMP_SCO_LINK_REQ = 0x002c,
- LMP_MAX_SLOT = 0x002d,
- LMP_MAX_SLOT_REQ = 0x002e,
- LMP_TIMING_ACCURACY_REQ = 0x002f,
- LMP_TIMING_ACCURACY_RES = 0x0030,
- LMP_SETUP_COMPLETE = 0x0031,
- LMP_USE_SEMIPERM_KEY = 0x0032,
- LMP_HOST_CONNECTION_REQ = 0x0033,
- LMP_SLOT_OFFSET = 0x0034,
- LMP_PAGE_MODE_REQ = 0x0035,
- LMP_PAGE_SCAN_MODE_REQ = 0x0036,
- LMP_SUPERVISION_TIMEOUT = 0x0037,
- LMP_TEST_ACTIVATE = 0x0038,
- LMP_TEST_CONTROL = 0x0039,
- LMP_CRYPT_KEY_MASK_REQ = 0x003a,
- LMP_CRYPT_KEY_MASK_RES = 0x003b,
- LMP_SET_AFH = 0x003c,
- LMP_ACCEPTED_EXT = 0x7f01,
- LMP_NOT_ACCEPTED_EXT = 0x7f02,
- LMP_FEATURES_REQ_EXT = 0x7f03,
- LMP_FEATURES_RES_EXT = 0x7f04,
- LMP_PACKET_TYPE_TBL_REQ = 0x7f0b,
- LMP_ESCO_LINK_REQ = 0x7f0c,
- LMP_RM_ESCO_LINK_REQ = 0x7f0d,
- LMP_CHANNEL_CLASS_REQ = 0x7f10,
- LMP_CHANNEL_CLASS = 0x7f11,
-};
-
-/* Host Controller Interface layer defines */
-
-enum hci_packet_type {
- HCI_COMMAND_PKT = 0x01,
- HCI_ACLDATA_PKT = 0x02,
- HCI_SCODATA_PKT = 0x03,
- HCI_EVENT_PKT = 0x04,
- HCI_VENDOR_PKT = 0xff,
-};
-
-enum bt_packet_type {
- HCI_2DH1 = 1 << 1,
- HCI_3DH1 = 1 << 2,
- HCI_DM1 = 1 << 3,
- HCI_DH1 = 1 << 4,
- HCI_2DH3 = 1 << 8,
- HCI_3DH3 = 1 << 9,
- HCI_DM3 = 1 << 10,
- HCI_DH3 = 1 << 11,
- HCI_2DH5 = 1 << 12,
- HCI_3DH5 = 1 << 13,
- HCI_DM5 = 1 << 14,
- HCI_DH5 = 1 << 15,
-};
-
-enum sco_packet_type {
- HCI_HV1 = 1 << 5,
- HCI_HV2 = 1 << 6,
- HCI_HV3 = 1 << 7,
-};
-
-enum ev_packet_type {
- HCI_EV3 = 1 << 3,
- HCI_EV4 = 1 << 4,
- HCI_EV5 = 1 << 5,
- HCI_2EV3 = 1 << 6,
- HCI_3EV3 = 1 << 7,
- HCI_2EV5 = 1 << 8,
- HCI_3EV5 = 1 << 9,
-};
-
-enum hci_error_code {
- HCI_SUCCESS = 0x00,
- HCI_UNKNOWN_COMMAND = 0x01,
- HCI_NO_CONNECTION = 0x02,
- HCI_HARDWARE_FAILURE = 0x03,
- HCI_PAGE_TIMEOUT = 0x04,
- HCI_AUTHENTICATION_FAILURE = 0x05,
- HCI_PIN_OR_KEY_MISSING = 0x06,
- HCI_MEMORY_FULL = 0x07,
- HCI_CONNECTION_TIMEOUT = 0x08,
- HCI_MAX_NUMBER_OF_CONNECTIONS = 0x09,
- HCI_MAX_NUMBER_OF_SCO_CONNECTIONS = 0x0a,
- HCI_ACL_CONNECTION_EXISTS = 0x0b,
- HCI_COMMAND_DISALLOWED = 0x0c,
- HCI_REJECTED_LIMITED_RESOURCES = 0x0d,
- HCI_REJECTED_SECURITY = 0x0e,
- HCI_REJECTED_PERSONAL = 0x0f,
- HCI_HOST_TIMEOUT = 0x10,
- HCI_UNSUPPORTED_FEATURE = 0x11,
- HCI_INVALID_PARAMETERS = 0x12,
- HCI_OE_USER_ENDED_CONNECTION = 0x13,
- HCI_OE_LOW_RESOURCES = 0x14,
- HCI_OE_POWER_OFF = 0x15,
- HCI_CONNECTION_TERMINATED = 0x16,
- HCI_REPEATED_ATTEMPTS = 0x17,
- HCI_PAIRING_NOT_ALLOWED = 0x18,
- HCI_UNKNOWN_LMP_PDU = 0x19,
- HCI_UNSUPPORTED_REMOTE_FEATURE = 0x1a,
- HCI_SCO_OFFSET_REJECTED = 0x1b,
- HCI_SCO_INTERVAL_REJECTED = 0x1c,
- HCI_AIR_MODE_REJECTED = 0x1d,
- HCI_INVALID_LMP_PARAMETERS = 0x1e,
- HCI_UNSPECIFIED_ERROR = 0x1f,
- HCI_UNSUPPORTED_LMP_PARAMETER_VALUE = 0x20,
- HCI_ROLE_CHANGE_NOT_ALLOWED = 0x21,
- HCI_LMP_RESPONSE_TIMEOUT = 0x22,
- HCI_LMP_ERROR_TRANSACTION_COLLISION = 0x23,
- HCI_LMP_PDU_NOT_ALLOWED = 0x24,
- HCI_ENCRYPTION_MODE_NOT_ACCEPTED = 0x25,
- HCI_UNIT_LINK_KEY_USED = 0x26,
- HCI_QOS_NOT_SUPPORTED = 0x27,
- HCI_INSTANT_PASSED = 0x28,
- HCI_PAIRING_NOT_SUPPORTED = 0x29,
- HCI_TRANSACTION_COLLISION = 0x2a,
- HCI_QOS_UNACCEPTABLE_PARAMETER = 0x2c,
- HCI_QOS_REJECTED = 0x2d,
- HCI_CLASSIFICATION_NOT_SUPPORTED = 0x2e,
- HCI_INSUFFICIENT_SECURITY = 0x2f,
- HCI_PARAMETER_OUT_OF_RANGE = 0x30,
- HCI_ROLE_SWITCH_PENDING = 0x32,
- HCI_SLOT_VIOLATION = 0x34,
- HCI_ROLE_SWITCH_FAILED = 0x35,
-};
-
-enum acl_flag_bits {
- ACL_CONT = 1 << 0,
- ACL_START = 1 << 1,
- ACL_ACTIVE_BCAST = 1 << 2,
- ACL_PICO_BCAST = 1 << 3,
-};
-
-enum baseband_link_type {
- SCO_LINK = 0x00,
- ACL_LINK = 0x01,
-};
-
-enum lmp_feature_bits0 {
- LMP_3SLOT = 1 << 0,
- LMP_5SLOT = 1 << 1,
- LMP_ENCRYPT = 1 << 2,
- LMP_SOFFSET = 1 << 3,
- LMP_TACCURACY = 1 << 4,
- LMP_RSWITCH = 1 << 5,
- LMP_HOLD_MODE = 1 << 6,
- LMP_SNIFF_MODE = 1 << 7,
-};
-
-enum lmp_feature_bits1 {
- LMP_PARK = 1 << 0,
- LMP_RSSI = 1 << 1,
- LMP_QUALITY = 1 << 2,
- LMP_SCO = 1 << 3,
- LMP_HV2 = 1 << 4,
- LMP_HV3 = 1 << 5,
- LMP_ULAW = 1 << 6,
- LMP_ALAW = 1 << 7,
-};
-
-enum lmp_feature_bits2 {
- LMP_CVSD = 1 << 0,
- LMP_PSCHEME = 1 << 1,
- LMP_PCONTROL = 1 << 2,
- LMP_TRSP_SCO = 1 << 3,
- LMP_BCAST_ENC = 1 << 7,
-};
-
-enum lmp_feature_bits3 {
- LMP_EDR_ACL_2M = 1 << 1,
- LMP_EDR_ACL_3M = 1 << 2,
- LMP_ENH_ISCAN = 1 << 3,
- LMP_ILACE_ISCAN = 1 << 4,
- LMP_ILACE_PSCAN = 1 << 5,
- LMP_RSSI_INQ = 1 << 6,
- LMP_ESCO = 1 << 7,
-};
-
-enum lmp_feature_bits4 {
- LMP_EV4 = 1 << 0,
- LMP_EV5 = 1 << 1,
- LMP_AFH_CAP_SLV = 1 << 3,
- LMP_AFH_CLS_SLV = 1 << 4,
- LMP_EDR_3SLOT = 1 << 7,
-};
-
-enum lmp_feature_bits5 {
- LMP_EDR_5SLOT = 1 << 0,
- LMP_SNIFF_SUBR = 1 << 1,
- LMP_AFH_CAP_MST = 1 << 3,
- LMP_AFH_CLS_MST = 1 << 4,
- LMP_EDR_ESCO_2M = 1 << 5,
- LMP_EDR_ESCO_3M = 1 << 6,
- LMP_EDR_3S_ESCO = 1 << 7,
-};
-
-enum lmp_feature_bits6 {
- LMP_EXT_INQ = 1 << 0,
-};
-
-enum lmp_feature_bits7 {
- LMP_EXT_FEAT = 1 << 7,
-};
-
-enum hci_link_policy {
- HCI_LP_RSWITCH = 1 << 0,
- HCI_LP_HOLD = 1 << 1,
- HCI_LP_SNIFF = 1 << 2,
- HCI_LP_PARK = 1 << 3,
-};
-
-enum hci_link_mode {
- HCI_LM_ACCEPT = 1 << 15,
- HCI_LM_MASTER = 1 << 0,
- HCI_LM_AUTH = 1 << 1,
- HCI_LM_ENCRYPT = 1 << 2,
- HCI_LM_TRUSTED = 1 << 3,
- HCI_LM_RELIABLE = 1 << 4,
- HCI_LM_SECURE = 1 << 5,
-};
-
-/* HCI Commands */
-
-/* Link Control */
-#define OGF_LINK_CTL 0x01
-
-#define OCF_INQUIRY 0x0001
-typedef struct {
- uint8_t lap[3];
- uint8_t length; /* 1.28s units */
- uint8_t num_rsp;
-} QEMU_PACKED inquiry_cp;
-#define INQUIRY_CP_SIZE 5
-
-typedef struct {
- uint8_t status;
- bdaddr_t bdaddr;
-} QEMU_PACKED status_bdaddr_rp;
-#define STATUS_BDADDR_RP_SIZE 7
-
-#define OCF_INQUIRY_CANCEL 0x0002
-
-#define OCF_PERIODIC_INQUIRY 0x0003
-typedef struct {
- uint16_t max_period; /* 1.28s units */
- uint16_t min_period; /* 1.28s units */
- uint8_t lap[3];
- uint8_t length; /* 1.28s units */
- uint8_t num_rsp;
-} QEMU_PACKED periodic_inquiry_cp;
-#define PERIODIC_INQUIRY_CP_SIZE 9
-
-#define OCF_EXIT_PERIODIC_INQUIRY 0x0004
-
-#define OCF_CREATE_CONN 0x0005
-typedef struct {
- bdaddr_t bdaddr;
- uint16_t pkt_type;
- uint8_t pscan_rep_mode;
- uint8_t pscan_mode;
- uint16_t clock_offset;
- uint8_t role_switch;
-} QEMU_PACKED create_conn_cp;
-#define CREATE_CONN_CP_SIZE 13
-
-#define OCF_DISCONNECT 0x0006
-typedef struct {
- uint16_t handle;
- uint8_t reason;
-} QEMU_PACKED disconnect_cp;
-#define DISCONNECT_CP_SIZE 3
-
-#define OCF_ADD_SCO 0x0007
-typedef struct {
- uint16_t handle;
- uint16_t pkt_type;
-} QEMU_PACKED add_sco_cp;
-#define ADD_SCO_CP_SIZE 4
-
-#define OCF_CREATE_CONN_CANCEL 0x0008
-typedef struct {
- bdaddr_t bdaddr;
-} QEMU_PACKED create_conn_cancel_cp;
-#define CREATE_CONN_CANCEL_CP_SIZE 6
-
-typedef struct {
- uint8_t status;
- bdaddr_t bdaddr;
-} QEMU_PACKED create_conn_cancel_rp;
-#define CREATE_CONN_CANCEL_RP_SIZE 7
-
-#define OCF_ACCEPT_CONN_REQ 0x0009
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t role;
-} QEMU_PACKED accept_conn_req_cp;
-#define ACCEPT_CONN_REQ_CP_SIZE 7
-
-#define OCF_REJECT_CONN_REQ 0x000A
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t reason;
-} QEMU_PACKED reject_conn_req_cp;
-#define REJECT_CONN_REQ_CP_SIZE 7
-
-#define OCF_LINK_KEY_REPLY 0x000B
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t link_key[16];
-} QEMU_PACKED link_key_reply_cp;
-#define LINK_KEY_REPLY_CP_SIZE 22
-
-#define OCF_LINK_KEY_NEG_REPLY 0x000C
-
-#define OCF_PIN_CODE_REPLY 0x000D
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t pin_len;
- uint8_t pin_code[16];
-} QEMU_PACKED pin_code_reply_cp;
-#define PIN_CODE_REPLY_CP_SIZE 23
-
-#define OCF_PIN_CODE_NEG_REPLY 0x000E
-
-#define OCF_SET_CONN_PTYPE 0x000F
-typedef struct {
- uint16_t handle;
- uint16_t pkt_type;
-} QEMU_PACKED set_conn_ptype_cp;
-#define SET_CONN_PTYPE_CP_SIZE 4
-
-#define OCF_AUTH_REQUESTED 0x0011
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED auth_requested_cp;
-#define AUTH_REQUESTED_CP_SIZE 2
-
-#define OCF_SET_CONN_ENCRYPT 0x0013
-typedef struct {
- uint16_t handle;
- uint8_t encrypt;
-} QEMU_PACKED set_conn_encrypt_cp;
-#define SET_CONN_ENCRYPT_CP_SIZE 3
-
-#define OCF_CHANGE_CONN_LINK_KEY 0x0015
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED change_conn_link_key_cp;
-#define CHANGE_CONN_LINK_KEY_CP_SIZE 2
-
-#define OCF_MASTER_LINK_KEY 0x0017
-typedef struct {
- uint8_t key_flag;
-} QEMU_PACKED master_link_key_cp;
-#define MASTER_LINK_KEY_CP_SIZE 1
-
-#define OCF_REMOTE_NAME_REQ 0x0019
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t pscan_rep_mode;
- uint8_t pscan_mode;
- uint16_t clock_offset;
-} QEMU_PACKED remote_name_req_cp;
-#define REMOTE_NAME_REQ_CP_SIZE 10
-
-#define OCF_REMOTE_NAME_REQ_CANCEL 0x001A
-typedef struct {
- bdaddr_t bdaddr;
-} QEMU_PACKED remote_name_req_cancel_cp;
-#define REMOTE_NAME_REQ_CANCEL_CP_SIZE 6
-
-typedef struct {
- uint8_t status;
- bdaddr_t bdaddr;
-} QEMU_PACKED remote_name_req_cancel_rp;
-#define REMOTE_NAME_REQ_CANCEL_RP_SIZE 7
-
-#define OCF_READ_REMOTE_FEATURES 0x001B
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED read_remote_features_cp;
-#define READ_REMOTE_FEATURES_CP_SIZE 2
-
-#define OCF_READ_REMOTE_EXT_FEATURES 0x001C
-typedef struct {
- uint16_t handle;
- uint8_t page_num;
-} QEMU_PACKED read_remote_ext_features_cp;
-#define READ_REMOTE_EXT_FEATURES_CP_SIZE 3
-
-#define OCF_READ_REMOTE_VERSION 0x001D
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED read_remote_version_cp;
-#define READ_REMOTE_VERSION_CP_SIZE 2
-
-#define OCF_READ_CLOCK_OFFSET 0x001F
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED read_clock_offset_cp;
-#define READ_CLOCK_OFFSET_CP_SIZE 2
-
-#define OCF_READ_LMP_HANDLE 0x0020
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED read_lmp_handle_cp;
-#define READ_LMP_HANDLE_CP_SIZE 2
-
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t lmp_handle;
- uint32_t reserved;
-} QEMU_PACKED read_lmp_handle_rp;
-#define READ_LMP_HANDLE_RP_SIZE 8
-
-#define OCF_SETUP_SYNC_CONN 0x0028
-typedef struct {
- uint16_t handle;
- uint32_t tx_bandwidth;
- uint32_t rx_bandwidth;
- uint16_t max_latency;
- uint16_t voice_setting;
- uint8_t retrans_effort;
- uint16_t pkt_type;
-} QEMU_PACKED setup_sync_conn_cp;
-#define SETUP_SYNC_CONN_CP_SIZE 17
-
-#define OCF_ACCEPT_SYNC_CONN_REQ 0x0029
-typedef struct {
- bdaddr_t bdaddr;
- uint32_t tx_bandwidth;
- uint32_t rx_bandwidth;
- uint16_t max_latency;
- uint16_t voice_setting;
- uint8_t retrans_effort;
- uint16_t pkt_type;
-} QEMU_PACKED accept_sync_conn_req_cp;
-#define ACCEPT_SYNC_CONN_REQ_CP_SIZE 21
-
-#define OCF_REJECT_SYNC_CONN_REQ 0x002A
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t reason;
-} QEMU_PACKED reject_sync_conn_req_cp;
-#define REJECT_SYNC_CONN_REQ_CP_SIZE 7
-
-/* Link Policy */
-#define OGF_LINK_POLICY 0x02
-
-#define OCF_HOLD_MODE 0x0001
-typedef struct {
- uint16_t handle;
- uint16_t max_interval;
- uint16_t min_interval;
-} QEMU_PACKED hold_mode_cp;
-#define HOLD_MODE_CP_SIZE 6
-
-#define OCF_SNIFF_MODE 0x0003
-typedef struct {
- uint16_t handle;
- uint16_t max_interval;
- uint16_t min_interval;
- uint16_t attempt;
- uint16_t timeout;
-} QEMU_PACKED sniff_mode_cp;
-#define SNIFF_MODE_CP_SIZE 10
-
-#define OCF_EXIT_SNIFF_MODE 0x0004
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED exit_sniff_mode_cp;
-#define EXIT_SNIFF_MODE_CP_SIZE 2
-
-#define OCF_PARK_MODE 0x0005
-typedef struct {
- uint16_t handle;
- uint16_t max_interval;
- uint16_t min_interval;
-} QEMU_PACKED park_mode_cp;
-#define PARK_MODE_CP_SIZE 6
-
-#define OCF_EXIT_PARK_MODE 0x0006
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED exit_park_mode_cp;
-#define EXIT_PARK_MODE_CP_SIZE 2
-
-#define OCF_QOS_SETUP 0x0007
-typedef struct {
- uint8_t service_type; /* 1 = best effort */
- uint32_t token_rate; /* Byte per seconds */
- uint32_t peak_bandwidth; /* Byte per seconds */
- uint32_t latency; /* Microseconds */
- uint32_t delay_variation; /* Microseconds */
-} QEMU_PACKED hci_qos;
-#define HCI_QOS_CP_SIZE 17
-typedef struct {
- uint16_t handle;
- uint8_t flags; /* Reserved */
- hci_qos qos;
-} QEMU_PACKED qos_setup_cp;
-#define QOS_SETUP_CP_SIZE (3 + HCI_QOS_CP_SIZE)
-
-#define OCF_ROLE_DISCOVERY 0x0009
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED role_discovery_cp;
-#define ROLE_DISCOVERY_CP_SIZE 2
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t role;
-} QEMU_PACKED role_discovery_rp;
-#define ROLE_DISCOVERY_RP_SIZE 4
-
-#define OCF_SWITCH_ROLE 0x000B
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t role;
-} QEMU_PACKED switch_role_cp;
-#define SWITCH_ROLE_CP_SIZE 7
-
-#define OCF_READ_LINK_POLICY 0x000C
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED read_link_policy_cp;
-#define READ_LINK_POLICY_CP_SIZE 2
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint16_t policy;
-} QEMU_PACKED read_link_policy_rp;
-#define READ_LINK_POLICY_RP_SIZE 5
-
-#define OCF_WRITE_LINK_POLICY 0x000D
-typedef struct {
- uint16_t handle;
- uint16_t policy;
-} QEMU_PACKED write_link_policy_cp;
-#define WRITE_LINK_POLICY_CP_SIZE 4
-typedef struct {
- uint8_t status;
- uint16_t handle;
-} QEMU_PACKED write_link_policy_rp;
-#define WRITE_LINK_POLICY_RP_SIZE 3
-
-#define OCF_READ_DEFAULT_LINK_POLICY 0x000E
-
-#define OCF_WRITE_DEFAULT_LINK_POLICY 0x000F
-
-#define OCF_FLOW_SPECIFICATION 0x0010
-
-#define OCF_SNIFF_SUBRATE 0x0011
-typedef struct {
- uint16_t handle;
- uint16_t max_remote_latency;
- uint16_t max_local_latency;
- uint16_t min_remote_timeout;
- uint16_t min_local_timeout;
-} QEMU_PACKED sniff_subrate_cp;
-#define SNIFF_SUBRATE_CP_SIZE 10
-
-/* Host Controller and Baseband */
-#define OGF_HOST_CTL 0x03
-
-#define OCF_SET_EVENT_MASK 0x0001
-typedef struct {
- uint8_t mask[8];
-} QEMU_PACKED set_event_mask_cp;
-#define SET_EVENT_MASK_CP_SIZE 8
-
-#define OCF_RESET 0x0003
-
-#define OCF_SET_EVENT_FLT 0x0005
-typedef struct {
- uint8_t flt_type;
- uint8_t cond_type;
- uint8_t condition[0];
-} QEMU_PACKED set_event_flt_cp;
-#define SET_EVENT_FLT_CP_SIZE 2
-
-enum bt_filter_type {
- FLT_CLEAR_ALL = 0x00,
- FLT_INQ_RESULT = 0x01,
- FLT_CONN_SETUP = 0x02,
-};
-enum inq_result_cond_type {
- INQ_RESULT_RETURN_ALL = 0x00,
- INQ_RESULT_RETURN_CLASS = 0x01,
- INQ_RESULT_RETURN_BDADDR = 0x02,
-};
-enum conn_setup_cond_type {
- CONN_SETUP_ALLOW_ALL = 0x00,
- CONN_SETUP_ALLOW_CLASS = 0x01,
- CONN_SETUP_ALLOW_BDADDR = 0x02,
-};
-enum conn_setup_cond {
- CONN_SETUP_AUTO_OFF = 0x01,
- CONN_SETUP_AUTO_ON = 0x02,
-};
-
-#define OCF_FLUSH 0x0008
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED flush_cp;
-#define FLUSH_CP_SIZE 2
-
-typedef struct {
- uint8_t status;
- uint16_t handle;
-} QEMU_PACKED flush_rp;
-#define FLUSH_RP_SIZE 3
-
-#define OCF_READ_PIN_TYPE 0x0009
-typedef struct {
- uint8_t status;
- uint8_t pin_type;
-} QEMU_PACKED read_pin_type_rp;
-#define READ_PIN_TYPE_RP_SIZE 2
-
-#define OCF_WRITE_PIN_TYPE 0x000A
-typedef struct {
- uint8_t pin_type;
-} QEMU_PACKED write_pin_type_cp;
-#define WRITE_PIN_TYPE_CP_SIZE 1
-
-#define OCF_CREATE_NEW_UNIT_KEY 0x000B
-
-#define OCF_READ_STORED_LINK_KEY 0x000D
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t read_all;
-} QEMU_PACKED read_stored_link_key_cp;
-#define READ_STORED_LINK_KEY_CP_SIZE 7
-typedef struct {
- uint8_t status;
- uint16_t max_keys;
- uint16_t num_keys;
-} QEMU_PACKED read_stored_link_key_rp;
-#define READ_STORED_LINK_KEY_RP_SIZE 5
-
-#define OCF_WRITE_STORED_LINK_KEY 0x0011
-typedef struct {
- uint8_t num_keys;
- /* variable length part */
-} QEMU_PACKED write_stored_link_key_cp;
-#define WRITE_STORED_LINK_KEY_CP_SIZE 1
-typedef struct {
- uint8_t status;
- uint8_t num_keys;
-} QEMU_PACKED write_stored_link_key_rp;
-#define READ_WRITE_LINK_KEY_RP_SIZE 2
-
-#define OCF_DELETE_STORED_LINK_KEY 0x0012
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t delete_all;
-} QEMU_PACKED delete_stored_link_key_cp;
-#define DELETE_STORED_LINK_KEY_CP_SIZE 7
-typedef struct {
- uint8_t status;
- uint16_t num_keys;
-} QEMU_PACKED delete_stored_link_key_rp;
-#define DELETE_STORED_LINK_KEY_RP_SIZE 3
-
-#define OCF_CHANGE_LOCAL_NAME 0x0013
-typedef struct {
- char name[248];
-} QEMU_PACKED change_local_name_cp;
-#define CHANGE_LOCAL_NAME_CP_SIZE 248
-
-#define OCF_READ_LOCAL_NAME 0x0014
-typedef struct {
- uint8_t status;
- char name[248];
-} QEMU_PACKED read_local_name_rp;
-#define READ_LOCAL_NAME_RP_SIZE 249
-
-#define OCF_READ_CONN_ACCEPT_TIMEOUT 0x0015
-typedef struct {
- uint8_t status;
- uint16_t timeout;
-} QEMU_PACKED read_conn_accept_timeout_rp;
-#define READ_CONN_ACCEPT_TIMEOUT_RP_SIZE 3
-
-#define OCF_WRITE_CONN_ACCEPT_TIMEOUT 0x0016
-typedef struct {
- uint16_t timeout;
-} QEMU_PACKED write_conn_accept_timeout_cp;
-#define WRITE_CONN_ACCEPT_TIMEOUT_CP_SIZE 2
-
-#define OCF_READ_PAGE_TIMEOUT 0x0017
-typedef struct {
- uint8_t status;
- uint16_t timeout;
-} QEMU_PACKED read_page_timeout_rp;
-#define READ_PAGE_TIMEOUT_RP_SIZE 3
-
-#define OCF_WRITE_PAGE_TIMEOUT 0x0018
-typedef struct {
- uint16_t timeout;
-} QEMU_PACKED write_page_timeout_cp;
-#define WRITE_PAGE_TIMEOUT_CP_SIZE 2
-
-#define OCF_READ_SCAN_ENABLE 0x0019
-typedef struct {
- uint8_t status;
- uint8_t enable;
-} QEMU_PACKED read_scan_enable_rp;
-#define READ_SCAN_ENABLE_RP_SIZE 2
-
-#define OCF_WRITE_SCAN_ENABLE 0x001A
-typedef struct {
- uint8_t scan_enable;
-} QEMU_PACKED write_scan_enable_cp;
-#define WRITE_SCAN_ENABLE_CP_SIZE 1
-
-enum scan_enable_bits {
- SCAN_DISABLED = 0,
- SCAN_INQUIRY = 1 << 0,
- SCAN_PAGE = 1 << 1,
-};
-
-#define OCF_READ_PAGE_ACTIVITY 0x001B
-typedef struct {
- uint8_t status;
- uint16_t interval;
- uint16_t window;
-} QEMU_PACKED read_page_activity_rp;
-#define READ_PAGE_ACTIVITY_RP_SIZE 5
-
-#define OCF_WRITE_PAGE_ACTIVITY 0x001C
-typedef struct {
- uint16_t interval;
- uint16_t window;
-} QEMU_PACKED write_page_activity_cp;
-#define WRITE_PAGE_ACTIVITY_CP_SIZE 4
-
-#define OCF_READ_INQ_ACTIVITY 0x001D
-typedef struct {
- uint8_t status;
- uint16_t interval;
- uint16_t window;
-} QEMU_PACKED read_inq_activity_rp;
-#define READ_INQ_ACTIVITY_RP_SIZE 5
-
-#define OCF_WRITE_INQ_ACTIVITY 0x001E
-typedef struct {
- uint16_t interval;
- uint16_t window;
-} QEMU_PACKED write_inq_activity_cp;
-#define WRITE_INQ_ACTIVITY_CP_SIZE 4
-
-#define OCF_READ_AUTH_ENABLE 0x001F
-
-#define OCF_WRITE_AUTH_ENABLE 0x0020
-
-#define AUTH_DISABLED 0x00
-#define AUTH_ENABLED 0x01
-
-#define OCF_READ_ENCRYPT_MODE 0x0021
-
-#define OCF_WRITE_ENCRYPT_MODE 0x0022
-
-#define ENCRYPT_DISABLED 0x00
-#define ENCRYPT_P2P 0x01
-#define ENCRYPT_BOTH 0x02
-
-#define OCF_READ_CLASS_OF_DEV 0x0023
-typedef struct {
- uint8_t status;
- uint8_t dev_class[3];
-} QEMU_PACKED read_class_of_dev_rp;
-#define READ_CLASS_OF_DEV_RP_SIZE 4
-
-#define OCF_WRITE_CLASS_OF_DEV 0x0024
-typedef struct {
- uint8_t dev_class[3];
-} QEMU_PACKED write_class_of_dev_cp;
-#define WRITE_CLASS_OF_DEV_CP_SIZE 3
-
-#define OCF_READ_VOICE_SETTING 0x0025
-typedef struct {
- uint8_t status;
- uint16_t voice_setting;
-} QEMU_PACKED read_voice_setting_rp;
-#define READ_VOICE_SETTING_RP_SIZE 3
-
-#define OCF_WRITE_VOICE_SETTING 0x0026
-typedef struct {
- uint16_t voice_setting;
-} QEMU_PACKED write_voice_setting_cp;
-#define WRITE_VOICE_SETTING_CP_SIZE 2
-
-#define OCF_READ_AUTOMATIC_FLUSH_TIMEOUT 0x0027
-
-#define OCF_WRITE_AUTOMATIC_FLUSH_TIMEOUT 0x0028
-
-#define OCF_READ_NUM_BROADCAST_RETRANS 0x0029
-
-#define OCF_WRITE_NUM_BROADCAST_RETRANS 0x002A
-
-#define OCF_READ_HOLD_MODE_ACTIVITY 0x002B
-
-#define OCF_WRITE_HOLD_MODE_ACTIVITY 0x002C
-
-#define OCF_READ_TRANSMIT_POWER_LEVEL 0x002D
-typedef struct {
- uint16_t handle;
- uint8_t type;
-} QEMU_PACKED read_transmit_power_level_cp;
-#define READ_TRANSMIT_POWER_LEVEL_CP_SIZE 3
-typedef struct {
- uint8_t status;
- uint16_t handle;
- int8_t level;
-} QEMU_PACKED read_transmit_power_level_rp;
-#define READ_TRANSMIT_POWER_LEVEL_RP_SIZE 4
-
-#define OCF_HOST_BUFFER_SIZE 0x0033
-typedef struct {
- uint16_t acl_mtu;
- uint8_t sco_mtu;
- uint16_t acl_max_pkt;
- uint16_t sco_max_pkt;
-} QEMU_PACKED host_buffer_size_cp;
-#define HOST_BUFFER_SIZE_CP_SIZE 7
-
-#define OCF_HOST_NUMBER_OF_COMPLETED_PACKETS 0x0035
-
-#define OCF_READ_LINK_SUPERVISION_TIMEOUT 0x0036
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint16_t link_sup_to;
-} QEMU_PACKED read_link_supervision_timeout_rp;
-#define READ_LINK_SUPERVISION_TIMEOUT_RP_SIZE 5
-
-#define OCF_WRITE_LINK_SUPERVISION_TIMEOUT 0x0037
-typedef struct {
- uint16_t handle;
- uint16_t link_sup_to;
-} QEMU_PACKED write_link_supervision_timeout_cp;
-#define WRITE_LINK_SUPERVISION_TIMEOUT_CP_SIZE 4
-typedef struct {
- uint8_t status;
- uint16_t handle;
-} QEMU_PACKED write_link_supervision_timeout_rp;
-#define WRITE_LINK_SUPERVISION_TIMEOUT_RP_SIZE 3
-
-#define OCF_READ_NUM_SUPPORTED_IAC 0x0038
-
-#define MAX_IAC_LAP 0x40
-#define OCF_READ_CURRENT_IAC_LAP 0x0039
-typedef struct {
- uint8_t status;
- uint8_t num_current_iac;
- uint8_t lap[MAX_IAC_LAP][3];
-} QEMU_PACKED read_current_iac_lap_rp;
-#define READ_CURRENT_IAC_LAP_RP_SIZE 2+3*MAX_IAC_LAP
-
-#define OCF_WRITE_CURRENT_IAC_LAP 0x003A
-typedef struct {
- uint8_t num_current_iac;
- uint8_t lap[MAX_IAC_LAP][3];
-} QEMU_PACKED write_current_iac_lap_cp;
-#define WRITE_CURRENT_IAC_LAP_CP_SIZE 1+3*MAX_IAC_LAP
-
-#define OCF_READ_PAGE_SCAN_PERIOD_MODE 0x003B
-
-#define OCF_WRITE_PAGE_SCAN_PERIOD_MODE 0x003C
-
-#define OCF_READ_PAGE_SCAN_MODE 0x003D
-
-#define OCF_WRITE_PAGE_SCAN_MODE 0x003E
-
-#define OCF_SET_AFH_CLASSIFICATION 0x003F
-typedef struct {
- uint8_t map[10];
-} QEMU_PACKED set_afh_classification_cp;
-#define SET_AFH_CLASSIFICATION_CP_SIZE 10
-typedef struct {
- uint8_t status;
-} QEMU_PACKED set_afh_classification_rp;
-#define SET_AFH_CLASSIFICATION_RP_SIZE 1
-
-#define OCF_READ_INQUIRY_SCAN_TYPE 0x0042
-typedef struct {
- uint8_t status;
- uint8_t type;
-} QEMU_PACKED read_inquiry_scan_type_rp;
-#define READ_INQUIRY_SCAN_TYPE_RP_SIZE 2
-
-#define OCF_WRITE_INQUIRY_SCAN_TYPE 0x0043
-typedef struct {
- uint8_t type;
-} QEMU_PACKED write_inquiry_scan_type_cp;
-#define WRITE_INQUIRY_SCAN_TYPE_CP_SIZE 1
-typedef struct {
- uint8_t status;
-} QEMU_PACKED write_inquiry_scan_type_rp;
-#define WRITE_INQUIRY_SCAN_TYPE_RP_SIZE 1
-
-#define OCF_READ_INQUIRY_MODE 0x0044
-typedef struct {
- uint8_t status;
- uint8_t mode;
-} QEMU_PACKED read_inquiry_mode_rp;
-#define READ_INQUIRY_MODE_RP_SIZE 2
-
-#define OCF_WRITE_INQUIRY_MODE 0x0045
-typedef struct {
- uint8_t mode;
-} QEMU_PACKED write_inquiry_mode_cp;
-#define WRITE_INQUIRY_MODE_CP_SIZE 1
-typedef struct {
- uint8_t status;
-} QEMU_PACKED write_inquiry_mode_rp;
-#define WRITE_INQUIRY_MODE_RP_SIZE 1
-
-#define OCF_READ_PAGE_SCAN_TYPE 0x0046
-
-#define OCF_WRITE_PAGE_SCAN_TYPE 0x0047
-
-#define OCF_READ_AFH_MODE 0x0048
-typedef struct {
- uint8_t status;
- uint8_t mode;
-} QEMU_PACKED read_afh_mode_rp;
-#define READ_AFH_MODE_RP_SIZE 2
-
-#define OCF_WRITE_AFH_MODE 0x0049
-typedef struct {
- uint8_t mode;
-} QEMU_PACKED write_afh_mode_cp;
-#define WRITE_AFH_MODE_CP_SIZE 1
-typedef struct {
- uint8_t status;
-} QEMU_PACKED write_afh_mode_rp;
-#define WRITE_AFH_MODE_RP_SIZE 1
-
-#define OCF_READ_EXT_INQUIRY_RESPONSE 0x0051
-typedef struct {
- uint8_t status;
- uint8_t fec;
- uint8_t data[240];
-} QEMU_PACKED read_ext_inquiry_response_rp;
-#define READ_EXT_INQUIRY_RESPONSE_RP_SIZE 242
-
-#define OCF_WRITE_EXT_INQUIRY_RESPONSE 0x0052
-typedef struct {
- uint8_t fec;
- uint8_t data[240];
-} QEMU_PACKED write_ext_inquiry_response_cp;
-#define WRITE_EXT_INQUIRY_RESPONSE_CP_SIZE 241
-typedef struct {
- uint8_t status;
-} QEMU_PACKED write_ext_inquiry_response_rp;
-#define WRITE_EXT_INQUIRY_RESPONSE_RP_SIZE 1
-
-/* Informational Parameters */
-#define OGF_INFO_PARAM 0x04
-
-#define OCF_READ_LOCAL_VERSION 0x0001
-typedef struct {
- uint8_t status;
- uint8_t hci_ver;
- uint16_t hci_rev;
- uint8_t lmp_ver;
- uint16_t manufacturer;
- uint16_t lmp_subver;
-} QEMU_PACKED read_local_version_rp;
-#define READ_LOCAL_VERSION_RP_SIZE 9
-
-#define OCF_READ_LOCAL_COMMANDS 0x0002
-typedef struct {
- uint8_t status;
- uint8_t commands[64];
-} QEMU_PACKED read_local_commands_rp;
-#define READ_LOCAL_COMMANDS_RP_SIZE 65
-
-#define OCF_READ_LOCAL_FEATURES 0x0003
-typedef struct {
- uint8_t status;
- uint8_t features[8];
-} QEMU_PACKED read_local_features_rp;
-#define READ_LOCAL_FEATURES_RP_SIZE 9
-
-#define OCF_READ_LOCAL_EXT_FEATURES 0x0004
-typedef struct {
- uint8_t page_num;
-} QEMU_PACKED read_local_ext_features_cp;
-#define READ_LOCAL_EXT_FEATURES_CP_SIZE 1
-typedef struct {
- uint8_t status;
- uint8_t page_num;
- uint8_t max_page_num;
- uint8_t features[8];
-} QEMU_PACKED read_local_ext_features_rp;
-#define READ_LOCAL_EXT_FEATURES_RP_SIZE 11
-
-#define OCF_READ_BUFFER_SIZE 0x0005
-typedef struct {
- uint8_t status;
- uint16_t acl_mtu;
- uint8_t sco_mtu;
- uint16_t acl_max_pkt;
- uint16_t sco_max_pkt;
-} QEMU_PACKED read_buffer_size_rp;
-#define READ_BUFFER_SIZE_RP_SIZE 8
-
-#define OCF_READ_COUNTRY_CODE 0x0007
-typedef struct {
- uint8_t status;
- uint8_t country_code;
-} QEMU_PACKED read_country_code_rp;
-#define READ_COUNTRY_CODE_RP_SIZE 2
-
-#define OCF_READ_BD_ADDR 0x0009
-typedef struct {
- uint8_t status;
- bdaddr_t bdaddr;
-} QEMU_PACKED read_bd_addr_rp;
-#define READ_BD_ADDR_RP_SIZE 7
-
-/* Status params */
-#define OGF_STATUS_PARAM 0x05
-
-#define OCF_READ_FAILED_CONTACT_COUNTER 0x0001
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t counter;
-} QEMU_PACKED read_failed_contact_counter_rp;
-#define READ_FAILED_CONTACT_COUNTER_RP_SIZE 4
-
-#define OCF_RESET_FAILED_CONTACT_COUNTER 0x0002
-typedef struct {
- uint8_t status;
- uint16_t handle;
-} QEMU_PACKED reset_failed_contact_counter_rp;
-#define RESET_FAILED_CONTACT_COUNTER_RP_SIZE 3
-
-#define OCF_READ_LINK_QUALITY 0x0003
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED read_link_quality_cp;
-#define READ_LINK_QUALITY_CP_SIZE 2
-
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t link_quality;
-} QEMU_PACKED read_link_quality_rp;
-#define READ_LINK_QUALITY_RP_SIZE 4
-
-#define OCF_READ_RSSI 0x0005
-typedef struct {
- uint8_t status;
- uint16_t handle;
- int8_t rssi;
-} QEMU_PACKED read_rssi_rp;
-#define READ_RSSI_RP_SIZE 4
-
-#define OCF_READ_AFH_MAP 0x0006
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t mode;
- uint8_t map[10];
-} QEMU_PACKED read_afh_map_rp;
-#define READ_AFH_MAP_RP_SIZE 14
-
-#define OCF_READ_CLOCK 0x0007
-typedef struct {
- uint16_t handle;
- uint8_t which_clock;
-} QEMU_PACKED read_clock_cp;
-#define READ_CLOCK_CP_SIZE 3
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint32_t clock;
- uint16_t accuracy;
-} QEMU_PACKED read_clock_rp;
-#define READ_CLOCK_RP_SIZE 9
-
-/* Testing commands */
-#define OGF_TESTING_CMD 0x3e
-
-/* Vendor specific commands */
-#define OGF_VENDOR_CMD 0x3f
-
-/* HCI Events */
-
-#define EVT_INQUIRY_COMPLETE 0x01
-
-#define EVT_INQUIRY_RESULT 0x02
-typedef struct {
- uint8_t num_responses;
- bdaddr_t bdaddr;
- uint8_t pscan_rep_mode;
- uint8_t pscan_period_mode;
- uint8_t pscan_mode;
- uint8_t dev_class[3];
- uint16_t clock_offset;
-} QEMU_PACKED inquiry_info;
-#define INQUIRY_INFO_SIZE 15
-
-#define EVT_CONN_COMPLETE 0x03
-typedef struct {
- uint8_t status;
- uint16_t handle;
- bdaddr_t bdaddr;
- uint8_t link_type;
- uint8_t encr_mode;
-} QEMU_PACKED evt_conn_complete;
-#define EVT_CONN_COMPLETE_SIZE 11
-
-#define EVT_CONN_REQUEST 0x04
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t dev_class[3];
- uint8_t link_type;
-} QEMU_PACKED evt_conn_request;
-#define EVT_CONN_REQUEST_SIZE 10
-
-#define EVT_DISCONN_COMPLETE 0x05
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t reason;
-} QEMU_PACKED evt_disconn_complete;
-#define EVT_DISCONN_COMPLETE_SIZE 4
-
-#define EVT_AUTH_COMPLETE 0x06
-typedef struct {
- uint8_t status;
- uint16_t handle;
-} QEMU_PACKED evt_auth_complete;
-#define EVT_AUTH_COMPLETE_SIZE 3
-
-#define EVT_REMOTE_NAME_REQ_COMPLETE 0x07
-typedef struct {
- uint8_t status;
- bdaddr_t bdaddr;
- char name[248];
-} QEMU_PACKED evt_remote_name_req_complete;
-#define EVT_REMOTE_NAME_REQ_COMPLETE_SIZE 255
-
-#define EVT_ENCRYPT_CHANGE 0x08
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t encrypt;
-} QEMU_PACKED evt_encrypt_change;
-#define EVT_ENCRYPT_CHANGE_SIZE 4
-
-#define EVT_CHANGE_CONN_LINK_KEY_COMPLETE 0x09
-typedef struct {
- uint8_t status;
- uint16_t handle;
-} QEMU_PACKED evt_change_conn_link_key_complete;
-#define EVT_CHANGE_CONN_LINK_KEY_COMPLETE_SIZE 3
-
-#define EVT_MASTER_LINK_KEY_COMPLETE 0x0A
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t key_flag;
-} QEMU_PACKED evt_master_link_key_complete;
-#define EVT_MASTER_LINK_KEY_COMPLETE_SIZE 4
-
-#define EVT_READ_REMOTE_FEATURES_COMPLETE 0x0B
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t features[8];
-} QEMU_PACKED evt_read_remote_features_complete;
-#define EVT_READ_REMOTE_FEATURES_COMPLETE_SIZE 11
-
-#define EVT_READ_REMOTE_VERSION_COMPLETE 0x0C
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t lmp_ver;
- uint16_t manufacturer;
- uint16_t lmp_subver;
-} QEMU_PACKED evt_read_remote_version_complete;
-#define EVT_READ_REMOTE_VERSION_COMPLETE_SIZE 8
-
-#define EVT_QOS_SETUP_COMPLETE 0x0D
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t flags; /* Reserved */
- hci_qos qos;
-} QEMU_PACKED evt_qos_setup_complete;
-#define EVT_QOS_SETUP_COMPLETE_SIZE (4 + HCI_QOS_CP_SIZE)
-
-#define EVT_CMD_COMPLETE 0x0E
-typedef struct {
- uint8_t ncmd;
- uint16_t opcode;
-} QEMU_PACKED evt_cmd_complete;
-#define EVT_CMD_COMPLETE_SIZE 3
-
-#define EVT_CMD_STATUS 0x0F
-typedef struct {
- uint8_t status;
- uint8_t ncmd;
- uint16_t opcode;
-} QEMU_PACKED evt_cmd_status;
-#define EVT_CMD_STATUS_SIZE 4
-
-#define EVT_HARDWARE_ERROR 0x10
-typedef struct {
- uint8_t code;
-} QEMU_PACKED evt_hardware_error;
-#define EVT_HARDWARE_ERROR_SIZE 1
-
-#define EVT_FLUSH_OCCURRED 0x11
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED evt_flush_occurred;
-#define EVT_FLUSH_OCCURRED_SIZE 2
-
-#define EVT_ROLE_CHANGE 0x12
-typedef struct {
- uint8_t status;
- bdaddr_t bdaddr;
- uint8_t role;
-} QEMU_PACKED evt_role_change;
-#define EVT_ROLE_CHANGE_SIZE 8
-
-#define EVT_NUM_COMP_PKTS 0x13
-typedef struct {
- uint8_t num_hndl;
- struct {
- uint16_t handle;
- uint16_t num_packets;
- } connection[0];
-} QEMU_PACKED evt_num_comp_pkts;
-#define EVT_NUM_COMP_PKTS_SIZE(num_hndl) (1 + 4 * (num_hndl))
-
-#define EVT_MODE_CHANGE 0x14
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t mode;
- uint16_t interval;
-} QEMU_PACKED evt_mode_change;
-#define EVT_MODE_CHANGE_SIZE 6
-
-#define EVT_RETURN_LINK_KEYS 0x15
-typedef struct {
- uint8_t num_keys;
- /* variable length part */
-} QEMU_PACKED evt_return_link_keys;
-#define EVT_RETURN_LINK_KEYS_SIZE 1
-
-#define EVT_PIN_CODE_REQ 0x16
-typedef struct {
- bdaddr_t bdaddr;
-} QEMU_PACKED evt_pin_code_req;
-#define EVT_PIN_CODE_REQ_SIZE 6
-
-#define EVT_LINK_KEY_REQ 0x17
-typedef struct {
- bdaddr_t bdaddr;
-} QEMU_PACKED evt_link_key_req;
-#define EVT_LINK_KEY_REQ_SIZE 6
-
-#define EVT_LINK_KEY_NOTIFY 0x18
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t link_key[16];
- uint8_t key_type;
-} QEMU_PACKED evt_link_key_notify;
-#define EVT_LINK_KEY_NOTIFY_SIZE 23
-
-#define EVT_LOOPBACK_COMMAND 0x19
-
-#define EVT_DATA_BUFFER_OVERFLOW 0x1A
-typedef struct {
- uint8_t link_type;
-} QEMU_PACKED evt_data_buffer_overflow;
-#define EVT_DATA_BUFFER_OVERFLOW_SIZE 1
-
-#define EVT_MAX_SLOTS_CHANGE 0x1B
-typedef struct {
- uint16_t handle;
- uint8_t max_slots;
-} QEMU_PACKED evt_max_slots_change;
-#define EVT_MAX_SLOTS_CHANGE_SIZE 3
-
-#define EVT_READ_CLOCK_OFFSET_COMPLETE 0x1C
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint16_t clock_offset;
-} QEMU_PACKED evt_read_clock_offset_complete;
-#define EVT_READ_CLOCK_OFFSET_COMPLETE_SIZE 5
-
-#define EVT_CONN_PTYPE_CHANGED 0x1D
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint16_t ptype;
-} QEMU_PACKED evt_conn_ptype_changed;
-#define EVT_CONN_PTYPE_CHANGED_SIZE 5
-
-#define EVT_QOS_VIOLATION 0x1E
-typedef struct {
- uint16_t handle;
-} QEMU_PACKED evt_qos_violation;
-#define EVT_QOS_VIOLATION_SIZE 2
-
-#define EVT_PSCAN_REP_MODE_CHANGE 0x20
-typedef struct {
- bdaddr_t bdaddr;
- uint8_t pscan_rep_mode;
-} QEMU_PACKED evt_pscan_rep_mode_change;
-#define EVT_PSCAN_REP_MODE_CHANGE_SIZE 7
-
-#define EVT_FLOW_SPEC_COMPLETE 0x21
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t flags;
- uint8_t direction;
- hci_qos qos;
-} QEMU_PACKED evt_flow_spec_complete;
-#define EVT_FLOW_SPEC_COMPLETE_SIZE (5 + HCI_QOS_CP_SIZE)
-
-#define EVT_INQUIRY_RESULT_WITH_RSSI 0x22
-typedef struct {
- uint8_t num_responses;
- bdaddr_t bdaddr;
- uint8_t pscan_rep_mode;
- uint8_t pscan_period_mode;
- uint8_t dev_class[3];
- uint16_t clock_offset;
- int8_t rssi;
-} QEMU_PACKED inquiry_info_with_rssi;
-#define INQUIRY_INFO_WITH_RSSI_SIZE 15
-typedef struct {
- uint8_t num_responses;
- bdaddr_t bdaddr;
- uint8_t pscan_rep_mode;
- uint8_t pscan_period_mode;
- uint8_t pscan_mode;
- uint8_t dev_class[3];
- uint16_t clock_offset;
- int8_t rssi;
-} QEMU_PACKED inquiry_info_with_rssi_and_pscan_mode;
-#define INQUIRY_INFO_WITH_RSSI_AND_PSCAN_MODE_SIZE 16
-
-#define EVT_READ_REMOTE_EXT_FEATURES_COMPLETE 0x23
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t page_num;
- uint8_t max_page_num;
- uint8_t features[8];
-} QEMU_PACKED evt_read_remote_ext_features_complete;
-#define EVT_READ_REMOTE_EXT_FEATURES_COMPLETE_SIZE 13
-
-#define EVT_SYNC_CONN_COMPLETE 0x2C
-typedef struct {
- uint8_t status;
- uint16_t handle;
- bdaddr_t bdaddr;
- uint8_t link_type;
- uint8_t trans_interval;
- uint8_t retrans_window;
- uint16_t rx_pkt_len;
- uint16_t tx_pkt_len;
- uint8_t air_mode;
-} QEMU_PACKED evt_sync_conn_complete;
-#define EVT_SYNC_CONN_COMPLETE_SIZE 17
-
-#define EVT_SYNC_CONN_CHANGED 0x2D
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint8_t trans_interval;
- uint8_t retrans_window;
- uint16_t rx_pkt_len;
- uint16_t tx_pkt_len;
-} QEMU_PACKED evt_sync_conn_changed;
-#define EVT_SYNC_CONN_CHANGED_SIZE 9
-
-#define EVT_SNIFF_SUBRATE 0x2E
-typedef struct {
- uint8_t status;
- uint16_t handle;
- uint16_t max_remote_latency;
- uint16_t max_local_latency;
- uint16_t min_remote_timeout;
- uint16_t min_local_timeout;
-} QEMU_PACKED evt_sniff_subrate;
-#define EVT_SNIFF_SUBRATE_SIZE 11
-
-#define EVT_TESTING 0xFE
-
-#define EVT_VENDOR 0xFF
-
-/* Command opcode pack/unpack */
-#define cmd_opcode_pack(ogf, ocf) (uint16_t)((ocf & 0x03ff)|(ogf << 10))
-#define cmd_opcode_ogf(op) (op >> 10)
-#define cmd_opcode_ocf(op) (op & 0x03ff)
-
-/* ACL handle and flags pack/unpack */
-#define acl_handle_pack(h, f) (uint16_t)(((h) & 0x0fff)|((f) << 12))
-#define acl_handle(h) ((h) & 0x0fff)
-#define acl_flags(h) ((h) >> 12)
-
-/* HCI Packet structures */
-#define HCI_COMMAND_HDR_SIZE 3
-#define HCI_EVENT_HDR_SIZE 2
-#define HCI_ACL_HDR_SIZE 4
-#define HCI_SCO_HDR_SIZE 3
-
-struct hci_command_hdr {
- uint16_t opcode; /* OCF & OGF */
- uint8_t plen;
-} QEMU_PACKED;
-
-struct hci_event_hdr {
- uint8_t evt;
- uint8_t plen;
-} QEMU_PACKED;
-
-struct hci_acl_hdr {
- uint16_t handle; /* Handle & Flags(PB, BC) */
- uint16_t dlen;
-} QEMU_PACKED;
-
-struct hci_sco_hdr {
- uint16_t handle;
- uint8_t dlen;
-} QEMU_PACKED;
-
-/* L2CAP layer defines */
-
-enum bt_l2cap_lm_bits {
- L2CAP_LM_MASTER = 1 << 0,
- L2CAP_LM_AUTH = 1 << 1,
- L2CAP_LM_ENCRYPT = 1 << 2,
- L2CAP_LM_TRUSTED = 1 << 3,
- L2CAP_LM_RELIABLE = 1 << 4,
- L2CAP_LM_SECURE = 1 << 5,
-};
-
-enum bt_l2cap_cid_predef {
- L2CAP_CID_INVALID = 0x0000,
- L2CAP_CID_SIGNALLING= 0x0001,
- L2CAP_CID_GROUP = 0x0002,
- L2CAP_CID_ALLOC = 0x0040,
-};
-
-/* L2CAP command codes */
-enum bt_l2cap_cmd {
- L2CAP_COMMAND_REJ = 1,
- L2CAP_CONN_REQ,
- L2CAP_CONN_RSP,
- L2CAP_CONF_REQ,
- L2CAP_CONF_RSP,
- L2CAP_DISCONN_REQ,
- L2CAP_DISCONN_RSP,
- L2CAP_ECHO_REQ,
- L2CAP_ECHO_RSP,
- L2CAP_INFO_REQ,
- L2CAP_INFO_RSP,
-};
-
-enum bt_l2cap_sar_bits {
- L2CAP_SAR_NO_SEG = 0,
- L2CAP_SAR_START,
- L2CAP_SAR_END,
- L2CAP_SAR_CONT,
-};
-
-/* L2CAP structures */
-typedef struct {
- uint16_t len;
- uint16_t cid;
- uint8_t data[0];
-} QEMU_PACKED l2cap_hdr;
-#define L2CAP_HDR_SIZE 4
-
-typedef struct {
- uint8_t code;
- uint8_t ident;
- uint16_t len;
-} QEMU_PACKED l2cap_cmd_hdr;
-#define L2CAP_CMD_HDR_SIZE 4
-
-typedef struct {
- uint16_t reason;
-} QEMU_PACKED l2cap_cmd_rej;
-#define L2CAP_CMD_REJ_SIZE 2
-
-typedef struct {
- uint16_t dcid;
- uint16_t scid;
-} QEMU_PACKED l2cap_cmd_rej_cid;
-#define L2CAP_CMD_REJ_CID_SIZE 4
-
-/* reject reason */
-enum bt_l2cap_rej_reason {
- L2CAP_REJ_CMD_NOT_UNDERSTOOD = 0,
- L2CAP_REJ_SIG_TOOBIG,
- L2CAP_REJ_CID_INVAL,
-};
-
-typedef struct {
- uint16_t psm;
- uint16_t scid;
-} QEMU_PACKED l2cap_conn_req;
-#define L2CAP_CONN_REQ_SIZE 4
-
-typedef struct {
- uint16_t dcid;
- uint16_t scid;
- uint16_t result;
- uint16_t status;
-} QEMU_PACKED l2cap_conn_rsp;
-#define L2CAP_CONN_RSP_SIZE 8
-
-/* connect result */
-enum bt_l2cap_conn_res {
- L2CAP_CR_SUCCESS = 0,
- L2CAP_CR_PEND,
- L2CAP_CR_BAD_PSM,
- L2CAP_CR_SEC_BLOCK,
- L2CAP_CR_NO_MEM,
-};
-
-/* connect status */
-enum bt_l2cap_conn_stat {
- L2CAP_CS_NO_INFO = 0,
- L2CAP_CS_AUTHEN_PEND,
- L2CAP_CS_AUTHOR_PEND,
-};
-
-typedef struct {
- uint16_t dcid;
- uint16_t flags;
- uint8_t data[0];
-} QEMU_PACKED l2cap_conf_req;
-#define L2CAP_CONF_REQ_SIZE(datalen) (4 + (datalen))
-
-typedef struct {
- uint16_t scid;
- uint16_t flags;
- uint16_t result;
- uint8_t data[0];
-} QEMU_PACKED l2cap_conf_rsp;
-#define L2CAP_CONF_RSP_SIZE(datalen) (6 + datalen)
-
-enum bt_l2cap_conf_res {
- L2CAP_CONF_SUCCESS = 0,
- L2CAP_CONF_UNACCEPT,
- L2CAP_CONF_REJECT,
- L2CAP_CONF_UNKNOWN,
-};
-
-typedef struct {
- uint8_t type;
- uint8_t len;
- uint8_t val[0];
-} QEMU_PACKED l2cap_conf_opt;
-#define L2CAP_CONF_OPT_SIZE 2
-
-enum bt_l2cap_conf_val {
- L2CAP_CONF_MTU = 1,
- L2CAP_CONF_FLUSH_TO,
- L2CAP_CONF_QOS,
- L2CAP_CONF_RFC,
- L2CAP_CONF_RFC_MODE = L2CAP_CONF_RFC,
-};
-
-typedef struct {
- uint8_t flags;
- uint8_t service_type;
- uint32_t token_rate;
- uint32_t token_bucket_size;
- uint32_t peak_bandwidth;
- uint32_t latency;
- uint32_t delay_variation;
-} QEMU_PACKED l2cap_conf_opt_qos;
-#define L2CAP_CONF_OPT_QOS_SIZE 22
-
-enum bt_l2cap_conf_opt_qos_st {
- L2CAP_CONF_QOS_NO_TRAFFIC = 0x00,
- L2CAP_CONF_QOS_BEST_EFFORT,
- L2CAP_CONF_QOS_GUARANTEED,
-};
-
-#define L2CAP_CONF_QOS_WILDCARD 0xffffffff
-
-enum bt_l2cap_mode {
- L2CAP_MODE_BASIC = 0,
- L2CAP_MODE_RETRANS = 1,
- L2CAP_MODE_FLOWCTL = 2,
-};
-
-typedef struct {
- uint16_t dcid;
- uint16_t scid;
-} QEMU_PACKED l2cap_disconn_req;
-#define L2CAP_DISCONN_REQ_SIZE 4
-
-typedef struct {
- uint16_t dcid;
- uint16_t scid;
-} QEMU_PACKED l2cap_disconn_rsp;
-#define L2CAP_DISCONN_RSP_SIZE 4
-
-typedef struct {
- uint16_t type;
-} QEMU_PACKED l2cap_info_req;
-#define L2CAP_INFO_REQ_SIZE 2
-
-typedef struct {
- uint16_t type;
- uint16_t result;
- uint8_t data[0];
-} QEMU_PACKED l2cap_info_rsp;
-#define L2CAP_INFO_RSP_SIZE 4
-
-/* info type */
-enum bt_l2cap_info_type {
- L2CAP_IT_CL_MTU = 1,
- L2CAP_IT_FEAT_MASK,
-};
-
-/* info result */
-enum bt_l2cap_info_result {
- L2CAP_IR_SUCCESS = 0,
- L2CAP_IR_NOTSUPP,
-};
-
-/* Service Discovery Protocol defines */
-/* Note that all multibyte values in lower layer protocols (above in this file)
- * are little-endian while SDP is big-endian. */
-
-/* Protocol UUIDs */
-enum sdp_proto_uuid {
- SDP_UUID = 0x0001,
- UDP_UUID = 0x0002,
- RFCOMM_UUID = 0x0003,
- TCP_UUID = 0x0004,
- TCS_BIN_UUID = 0x0005,
- TCS_AT_UUID = 0x0006,
- OBEX_UUID = 0x0008,
- IP_UUID = 0x0009,
- FTP_UUID = 0x000a,
- HTTP_UUID = 0x000c,
- WSP_UUID = 0x000e,
- BNEP_UUID = 0x000f,
- UPNP_UUID = 0x0010,
- HIDP_UUID = 0x0011,
- HCRP_CTRL_UUID = 0x0012,
- HCRP_DATA_UUID = 0x0014,
- HCRP_NOTE_UUID = 0x0016,
- AVCTP_UUID = 0x0017,
- AVDTP_UUID = 0x0019,
- CMTP_UUID = 0x001b,
- UDI_UUID = 0x001d,
- MCAP_CTRL_UUID = 0x001e,
- MCAP_DATA_UUID = 0x001f,
- L2CAP_UUID = 0x0100,
-};
-
-/*
- * Service class identifiers of standard services and service groups
- */
-enum service_class_id {
- SDP_SERVER_SVCLASS_ID = 0x1000,
- BROWSE_GRP_DESC_SVCLASS_ID = 0x1001,
- PUBLIC_BROWSE_GROUP = 0x1002,
- SERIAL_PORT_SVCLASS_ID = 0x1101,
- LAN_ACCESS_SVCLASS_ID = 0x1102,
- DIALUP_NET_SVCLASS_ID = 0x1103,
- IRMC_SYNC_SVCLASS_ID = 0x1104,
- OBEX_OBJPUSH_SVCLASS_ID = 0x1105,
- OBEX_FILETRANS_SVCLASS_ID = 0x1106,
- IRMC_SYNC_CMD_SVCLASS_ID = 0x1107,
- HEADSET_SVCLASS_ID = 0x1108,
- CORDLESS_TELEPHONY_SVCLASS_ID = 0x1109,
- AUDIO_SOURCE_SVCLASS_ID = 0x110a,
- AUDIO_SINK_SVCLASS_ID = 0x110b,
- AV_REMOTE_TARGET_SVCLASS_ID = 0x110c,
- ADVANCED_AUDIO_SVCLASS_ID = 0x110d,
- AV_REMOTE_SVCLASS_ID = 0x110e,
- VIDEO_CONF_SVCLASS_ID = 0x110f,
- INTERCOM_SVCLASS_ID = 0x1110,
- FAX_SVCLASS_ID = 0x1111,
- HEADSET_AGW_SVCLASS_ID = 0x1112,
- WAP_SVCLASS_ID = 0x1113,
- WAP_CLIENT_SVCLASS_ID = 0x1114,
- PANU_SVCLASS_ID = 0x1115,
- NAP_SVCLASS_ID = 0x1116,
- GN_SVCLASS_ID = 0x1117,
- DIRECT_PRINTING_SVCLASS_ID = 0x1118,
- REFERENCE_PRINTING_SVCLASS_ID = 0x1119,
- IMAGING_SVCLASS_ID = 0x111a,
- IMAGING_RESPONDER_SVCLASS_ID = 0x111b,
- IMAGING_ARCHIVE_SVCLASS_ID = 0x111c,
- IMAGING_REFOBJS_SVCLASS_ID = 0x111d,
- HANDSFREE_SVCLASS_ID = 0x111e,
- HANDSFREE_AGW_SVCLASS_ID = 0x111f,
- DIRECT_PRT_REFOBJS_SVCLASS_ID = 0x1120,
- REFLECTED_UI_SVCLASS_ID = 0x1121,
- BASIC_PRINTING_SVCLASS_ID = 0x1122,
- PRINTING_STATUS_SVCLASS_ID = 0x1123,
- HID_SVCLASS_ID = 0x1124,
- HCR_SVCLASS_ID = 0x1125,
- HCR_PRINT_SVCLASS_ID = 0x1126,
- HCR_SCAN_SVCLASS_ID = 0x1127,
- CIP_SVCLASS_ID = 0x1128,
- VIDEO_CONF_GW_SVCLASS_ID = 0x1129,
- UDI_MT_SVCLASS_ID = 0x112a,
- UDI_TA_SVCLASS_ID = 0x112b,
- AV_SVCLASS_ID = 0x112c,
- SAP_SVCLASS_ID = 0x112d,
- PBAP_PCE_SVCLASS_ID = 0x112e,
- PBAP_PSE_SVCLASS_ID = 0x112f,
- PBAP_SVCLASS_ID = 0x1130,
- PNP_INFO_SVCLASS_ID = 0x1200,
- GENERIC_NETWORKING_SVCLASS_ID = 0x1201,
- GENERIC_FILETRANS_SVCLASS_ID = 0x1202,
- GENERIC_AUDIO_SVCLASS_ID = 0x1203,
- GENERIC_TELEPHONY_SVCLASS_ID = 0x1204,
- UPNP_SVCLASS_ID = 0x1205,
- UPNP_IP_SVCLASS_ID = 0x1206,
- UPNP_PAN_SVCLASS_ID = 0x1300,
- UPNP_LAP_SVCLASS_ID = 0x1301,
- UPNP_L2CAP_SVCLASS_ID = 0x1302,
- VIDEO_SOURCE_SVCLASS_ID = 0x1303,
- VIDEO_SINK_SVCLASS_ID = 0x1304,
- VIDEO_DISTRIBUTION_SVCLASS_ID = 0x1305,
- MDP_SVCLASS_ID = 0x1400,
- MDP_SOURCE_SVCLASS_ID = 0x1401,
- MDP_SINK_SVCLASS_ID = 0x1402,
- APPLE_AGENT_SVCLASS_ID = 0x2112,
-};
-
-/*
- * Standard profile descriptor identifiers; note these
- * may be identical to some of the service classes defined above
- */
-#define SDP_SERVER_PROFILE_ID SDP_SERVER_SVCLASS_ID
-#define BROWSE_GRP_DESC_PROFILE_ID BROWSE_GRP_DESC_SVCLASS_ID
-#define SERIAL_PORT_PROFILE_ID SERIAL_PORT_SVCLASS_ID
-#define LAN_ACCESS_PROFILE_ID LAN_ACCESS_SVCLASS_ID
-#define DIALUP_NET_PROFILE_ID DIALUP_NET_SVCLASS_ID
-#define IRMC_SYNC_PROFILE_ID IRMC_SYNC_SVCLASS_ID
-#define OBEX_OBJPUSH_PROFILE_ID OBEX_OBJPUSH_SVCLASS_ID
-#define OBEX_FILETRANS_PROFILE_ID OBEX_FILETRANS_SVCLASS_ID
-#define IRMC_SYNC_CMD_PROFILE_ID IRMC_SYNC_CMD_SVCLASS_ID
-#define HEADSET_PROFILE_ID HEADSET_SVCLASS_ID
-#define CORDLESS_TELEPHONY_PROFILE_ID CORDLESS_TELEPHONY_SVCLASS_ID
-#define AUDIO_SOURCE_PROFILE_ID AUDIO_SOURCE_SVCLASS_ID
-#define AUDIO_SINK_PROFILE_ID AUDIO_SINK_SVCLASS_ID
-#define AV_REMOTE_TARGET_PROFILE_ID AV_REMOTE_TARGET_SVCLASS_ID
-#define ADVANCED_AUDIO_PROFILE_ID ADVANCED_AUDIO_SVCLASS_ID
-#define AV_REMOTE_PROFILE_ID AV_REMOTE_SVCLASS_ID
-#define VIDEO_CONF_PROFILE_ID VIDEO_CONF_SVCLASS_ID
-#define INTERCOM_PROFILE_ID INTERCOM_SVCLASS_ID
-#define FAX_PROFILE_ID FAX_SVCLASS_ID
-#define HEADSET_AGW_PROFILE_ID HEADSET_AGW_SVCLASS_ID
-#define WAP_PROFILE_ID WAP_SVCLASS_ID
-#define WAP_CLIENT_PROFILE_ID WAP_CLIENT_SVCLASS_ID
-#define PANU_PROFILE_ID PANU_SVCLASS_ID
-#define NAP_PROFILE_ID NAP_SVCLASS_ID
-#define GN_PROFILE_ID GN_SVCLASS_ID
-#define DIRECT_PRINTING_PROFILE_ID DIRECT_PRINTING_SVCLASS_ID
-#define REFERENCE_PRINTING_PROFILE_ID REFERENCE_PRINTING_SVCLASS_ID
-#define IMAGING_PROFILE_ID IMAGING_SVCLASS_ID
-#define IMAGING_RESPONDER_PROFILE_ID IMAGING_RESPONDER_SVCLASS_ID
-#define IMAGING_ARCHIVE_PROFILE_ID IMAGING_ARCHIVE_SVCLASS_ID
-#define IMAGING_REFOBJS_PROFILE_ID IMAGING_REFOBJS_SVCLASS_ID
-#define HANDSFREE_PROFILE_ID HANDSFREE_SVCLASS_ID
-#define HANDSFREE_AGW_PROFILE_ID HANDSFREE_AGW_SVCLASS_ID
-#define DIRECT_PRT_REFOBJS_PROFILE_ID DIRECT_PRT_REFOBJS_SVCLASS_ID
-#define REFLECTED_UI_PROFILE_ID REFLECTED_UI_SVCLASS_ID
-#define BASIC_PRINTING_PROFILE_ID BASIC_PRINTING_SVCLASS_ID
-#define PRINTING_STATUS_PROFILE_ID PRINTING_STATUS_SVCLASS_ID
-#define HID_PROFILE_ID HID_SVCLASS_ID
-#define HCR_PROFILE_ID HCR_SCAN_SVCLASS_ID
-#define HCR_PRINT_PROFILE_ID HCR_PRINT_SVCLASS_ID
-#define HCR_SCAN_PROFILE_ID HCR_SCAN_SVCLASS_ID
-#define CIP_PROFILE_ID CIP_SVCLASS_ID
-#define VIDEO_CONF_GW_PROFILE_ID VIDEO_CONF_GW_SVCLASS_ID
-#define UDI_MT_PROFILE_ID UDI_MT_SVCLASS_ID
-#define UDI_TA_PROFILE_ID UDI_TA_SVCLASS_ID
-#define AV_PROFILE_ID AV_SVCLASS_ID
-#define SAP_PROFILE_ID SAP_SVCLASS_ID
-#define PBAP_PCE_PROFILE_ID PBAP_PCE_SVCLASS_ID
-#define PBAP_PSE_PROFILE_ID PBAP_PSE_SVCLASS_ID
-#define PBAP_PROFILE_ID PBAP_SVCLASS_ID
-#define PNP_INFO_PROFILE_ID PNP_INFO_SVCLASS_ID
-#define GENERIC_NETWORKING_PROFILE_ID GENERIC_NETWORKING_SVCLASS_ID
-#define GENERIC_FILETRANS_PROFILE_ID GENERIC_FILETRANS_SVCLASS_ID
-#define GENERIC_AUDIO_PROFILE_ID GENERIC_AUDIO_SVCLASS_ID
-#define GENERIC_TELEPHONY_PROFILE_ID GENERIC_TELEPHONY_SVCLASS_ID
-#define UPNP_PROFILE_ID UPNP_SVCLASS_ID
-#define UPNP_IP_PROFILE_ID UPNP_IP_SVCLASS_ID
-#define UPNP_PAN_PROFILE_ID UPNP_PAN_SVCLASS_ID
-#define UPNP_LAP_PROFILE_ID UPNP_LAP_SVCLASS_ID
-#define UPNP_L2CAP_PROFILE_ID UPNP_L2CAP_SVCLASS_ID
-#define VIDEO_SOURCE_PROFILE_ID VIDEO_SOURCE_SVCLASS_ID
-#define VIDEO_SINK_PROFILE_ID VIDEO_SINK_SVCLASS_ID
-#define VIDEO_DISTRIBUTION_PROFILE_ID VIDEO_DISTRIBUTION_SVCLASS_ID
-#define MDP_PROFILE_ID MDP_SVCLASS_ID
-#define MDP_SOURCE_PROFILE_ID MDP_SROUCE_SVCLASS_ID
-#define MDP_SINK_PROFILE_ID MDP_SINK_SVCLASS_ID
-#define APPLE_AGENT_PROFILE_ID APPLE_AGENT_SVCLASS_ID
-
-/* Data Representation */
-enum bt_sdp_data_type {
- SDP_DTYPE_NIL = 0 << 3,
- SDP_DTYPE_UINT = 1 << 3,
- SDP_DTYPE_SINT = 2 << 3,
- SDP_DTYPE_UUID = 3 << 3,
- SDP_DTYPE_STRING = 4 << 3,
- SDP_DTYPE_BOOL = 5 << 3,
- SDP_DTYPE_SEQ = 6 << 3,
- SDP_DTYPE_ALT = 7 << 3,
- SDP_DTYPE_URL = 8 << 3,
-};
-
-enum bt_sdp_data_size {
- SDP_DSIZE_1 = 0,
- SDP_DSIZE_2,
- SDP_DSIZE_4,
- SDP_DSIZE_8,
- SDP_DSIZE_16,
- SDP_DSIZE_NEXT1,
- SDP_DSIZE_NEXT2,
- SDP_DSIZE_NEXT4,
- SDP_DSIZE_MASK = SDP_DSIZE_NEXT4,
-};
-
-enum bt_sdp_cmd {
- SDP_ERROR_RSP = 0x01,
- SDP_SVC_SEARCH_REQ = 0x02,
- SDP_SVC_SEARCH_RSP = 0x03,
- SDP_SVC_ATTR_REQ = 0x04,
- SDP_SVC_ATTR_RSP = 0x05,
- SDP_SVC_SEARCH_ATTR_REQ = 0x06,
- SDP_SVC_SEARCH_ATTR_RSP = 0x07,
-};
-
-enum bt_sdp_errorcode {
- SDP_INVALID_VERSION = 0x0001,
- SDP_INVALID_RECORD_HANDLE = 0x0002,
- SDP_INVALID_SYNTAX = 0x0003,
- SDP_INVALID_PDU_SIZE = 0x0004,
- SDP_INVALID_CSTATE = 0x0005,
-};
-
-/*
- * String identifiers are based on the SDP spec stating that
- * "base attribute id of the primary (universal) language must be 0x0100"
- *
- * Other languages should have their own offset; e.g.:
- * #define XXXLangBase yyyy
- * #define AttrServiceName_XXX 0x0000+XXXLangBase
- */
-#define SDP_PRIMARY_LANG_BASE 0x0100
-
-enum bt_sdp_attribute_id {
- SDP_ATTR_RECORD_HANDLE = 0x0000,
- SDP_ATTR_SVCLASS_ID_LIST = 0x0001,
- SDP_ATTR_RECORD_STATE = 0x0002,
- SDP_ATTR_SERVICE_ID = 0x0003,
- SDP_ATTR_PROTO_DESC_LIST = 0x0004,
- SDP_ATTR_BROWSE_GRP_LIST = 0x0005,
- SDP_ATTR_LANG_BASE_ATTR_ID_LIST = 0x0006,
- SDP_ATTR_SVCINFO_TTL = 0x0007,
- SDP_ATTR_SERVICE_AVAILABILITY = 0x0008,
- SDP_ATTR_PFILE_DESC_LIST = 0x0009,
- SDP_ATTR_DOC_URL = 0x000a,
- SDP_ATTR_CLNT_EXEC_URL = 0x000b,
- SDP_ATTR_ICON_URL = 0x000c,
- SDP_ATTR_ADD_PROTO_DESC_LIST = 0x000d,
-
- SDP_ATTR_SVCNAME_PRIMARY = SDP_PRIMARY_LANG_BASE + 0,
- SDP_ATTR_SVCDESC_PRIMARY = SDP_PRIMARY_LANG_BASE + 1,
- SDP_ATTR_SVCPROV_PRIMARY = SDP_PRIMARY_LANG_BASE + 2,
-
- SDP_ATTR_GROUP_ID = 0x0200,
- SDP_ATTR_IP_SUBNET = 0x0200,
-
- /* SDP */
- SDP_ATTR_VERSION_NUM_LIST = 0x0200,
- SDP_ATTR_SVCDB_STATE = 0x0201,
-
- SDP_ATTR_SERVICE_VERSION = 0x0300,
- SDP_ATTR_EXTERNAL_NETWORK = 0x0301,
- SDP_ATTR_SUPPORTED_DATA_STORES_LIST = 0x0301,
- SDP_ATTR_FAX_CLASS1_SUPPORT = 0x0302,
- SDP_ATTR_REMOTE_AUDIO_VOLUME_CONTROL = 0x0302,
- SDP_ATTR_FAX_CLASS20_SUPPORT = 0x0303,
- SDP_ATTR_SUPPORTED_FORMATS_LIST = 0x0303,
- SDP_ATTR_FAX_CLASS2_SUPPORT = 0x0304,
- SDP_ATTR_AUDIO_FEEDBACK_SUPPORT = 0x0305,
- SDP_ATTR_NETWORK_ADDRESS = 0x0306,
- SDP_ATTR_WAP_GATEWAY = 0x0307,
- SDP_ATTR_HOMEPAGE_URL = 0x0308,
- SDP_ATTR_WAP_STACK_TYPE = 0x0309,
- SDP_ATTR_SECURITY_DESC = 0x030a,
- SDP_ATTR_NET_ACCESS_TYPE = 0x030b,
- SDP_ATTR_MAX_NET_ACCESSRATE = 0x030c,
- SDP_ATTR_IP4_SUBNET = 0x030d,
- SDP_ATTR_IP6_SUBNET = 0x030e,
- SDP_ATTR_SUPPORTED_CAPABILITIES = 0x0310,
- SDP_ATTR_SUPPORTED_FEATURES = 0x0311,
- SDP_ATTR_SUPPORTED_FUNCTIONS = 0x0312,
- SDP_ATTR_TOTAL_IMAGING_DATA_CAPACITY = 0x0313,
- SDP_ATTR_SUPPORTED_REPOSITORIES = 0x0314,
-
- /* PnP Information */
- SDP_ATTR_SPECIFICATION_ID = 0x0200,
- SDP_ATTR_VENDOR_ID = 0x0201,
- SDP_ATTR_PRODUCT_ID = 0x0202,
- SDP_ATTR_VERSION = 0x0203,
- SDP_ATTR_PRIMARY_RECORD = 0x0204,
- SDP_ATTR_VENDOR_ID_SOURCE = 0x0205,
-
- /* BT HID */
- SDP_ATTR_DEVICE_RELEASE_NUMBER = 0x0200,
- SDP_ATTR_PARSER_VERSION = 0x0201,
- SDP_ATTR_DEVICE_SUBCLASS = 0x0202,
- SDP_ATTR_COUNTRY_CODE = 0x0203,
- SDP_ATTR_VIRTUAL_CABLE = 0x0204,
- SDP_ATTR_RECONNECT_INITIATE = 0x0205,
- SDP_ATTR_DESCRIPTOR_LIST = 0x0206,
- SDP_ATTR_LANG_ID_BASE_LIST = 0x0207,
- SDP_ATTR_SDP_DISABLE = 0x0208,
- SDP_ATTR_BATTERY_POWER = 0x0209,
- SDP_ATTR_REMOTE_WAKEUP = 0x020a,
- SDP_ATTR_PROFILE_VERSION = 0x020b,
- SDP_ATTR_SUPERVISION_TIMEOUT = 0x020c,
- SDP_ATTR_NORMALLY_CONNECTABLE = 0x020d,
- SDP_ATTR_BOOT_DEVICE = 0x020e,
-};
-
-#endif
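
Aside (not part of the diff): the command-opcode and ACL-handle packing macros near the end of the removed bt.h are self-contained bit manipulation, so their behaviour can be checked outside QEMU. A minimal standalone sketch, with the macro bodies copied verbatim from the header above and the HCI Inquiry command (OGF_LINK_CTL = 0x01, OCF_INQUIRY = 0x0001) used as the example input:

/*
 * Standalone illustration only -- not part of this commit.
 * Macro bodies are copied verbatim from the removed include/hw/bt.h.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define cmd_opcode_pack(ogf, ocf) (uint16_t)((ocf & 0x03ff)|(ogf << 10))
#define cmd_opcode_ogf(op)  (op >> 10)
#define cmd_opcode_ocf(op)  (op & 0x03ff)

#define acl_handle_pack(h, f) (uint16_t)(((h) & 0x0fff)|((f) << 12))
#define acl_handle(h)   ((h) & 0x0fff)
#define acl_flags(h)    ((h) >> 12)

int main(void)
{
    uint16_t op = cmd_opcode_pack(0x01, 0x0001);   /* OGF_LINK_CTL, OCF_INQUIRY */
    assert(op == 0x0401);                          /* 6-bit OGF | 10-bit OCF */
    assert(cmd_opcode_ogf(op) == 0x01 && cmd_opcode_ocf(op) == 0x0001);

    uint16_t h = acl_handle_pack(0x002a, 0x2);     /* flags = ACL_START */
    assert(acl_handle(h) == 0x002a && acl_flags(h) == 0x2);

    printf("opcode=0x%04x handle+flags=0x%04x\n", op, h);
    return 0;
}

The packed value 0x0401 is the opcode the Bluetooth specification assigns to Inquiry, which is exactly what the 10-bit OCF / 6-bit OGF split encodes.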
diff --git a/include/hw/char/avr_usart.h b/include/hw/char/avr_usart.h
new file mode 100644
index 0000000000..0cc599e9b1
--- /dev/null
+++ b/include/hw/char/avr_usart.h
@@ -0,0 +1,92 @@
+/*
+ * AVR USART
+ *
+ * Copyright (c) 2018 University of Kent
+ * Author: Sarah Harris
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef HW_CHAR_AVR_USART_H
+#define HW_CHAR_AVR_USART_H
+
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+#include "qom/object.h"
+
+/* Offsets of registers. */
+#define USART_DR 0x06
+#define USART_CSRA 0x00
+#define USART_CSRB 0x01
+#define USART_CSRC 0x02
+#define USART_BRRH 0x05
+#define USART_BRRL 0x04
+
+/* Relevant bits in registers. */
+#define USART_CSRA_RXC (1 << 7)
+#define USART_CSRA_TXC (1 << 6)
+#define USART_CSRA_DRE (1 << 5)
+#define USART_CSRA_MPCM (1 << 0)
+
+#define USART_CSRB_RXCIE (1 << 7)
+#define USART_CSRB_TXCIE (1 << 6)
+#define USART_CSRB_DREIE (1 << 5)
+#define USART_CSRB_RXEN (1 << 4)
+#define USART_CSRB_TXEN (1 << 3)
+#define USART_CSRB_CSZ2 (1 << 2)
+#define USART_CSRB_RXB8 (1 << 1)
+#define USART_CSRB_TXB8 (1 << 0)
+
+#define USART_CSRC_MSEL1 (1 << 7)
+#define USART_CSRC_MSEL0 (1 << 6)
+#define USART_CSRC_PM1 (1 << 5)
+#define USART_CSRC_PM0 (1 << 4)
+#define USART_CSRC_CSZ1 (1 << 2)
+#define USART_CSRC_CSZ0 (1 << 1)
+
+#define TYPE_AVR_USART "avr-usart"
+OBJECT_DECLARE_SIMPLE_TYPE(AVRUsartState, AVR_USART)
+
+struct AVRUsartState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+
+ CharBackend chr;
+
+ bool enabled;
+
+ uint8_t data;
+ bool data_valid;
+ uint8_t char_mask;
+ /* Control and Status Registers */
+ uint8_t csra;
+ uint8_t csrb;
+ uint8_t csrc;
+ /* Baud Rate Registers (low/high byte) */
+ uint8_t brrh;
+ uint8_t brrl;
+
+ /* Receive Complete */
+ qemu_irq rxc_irq;
+ /* Transmit Complete */
+ qemu_irq txc_irq;
+ /* Data Register Empty */
+ qemu_irq dre_irq;
+};
+
+#endif /* HW_CHAR_AVR_USART_H */
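
Aside (not part of the diff): the USART_CSRA_* masks above mirror the ATmega status-register layout, so interpreting a status byte is plain bit testing. A minimal standalone sketch with the relevant masks copied from the new header; decode_csra() is a hypothetical helper, not something the header provides:

/*
 * Standalone illustration only -- not part of this commit.
 * Bit masks copied from include/hw/char/avr_usart.h above.
 */
#include <stdint.h>
#include <stdio.h>

#define USART_CSRA_RXC (1 << 7)   /* receive complete */
#define USART_CSRA_TXC (1 << 6)   /* transmit complete */
#define USART_CSRA_DRE (1 << 5)   /* data register empty */

static void decode_csra(uint8_t csra)
{
    printf("RXC=%d TXC=%d DRE=%d\n",
           !!(csra & USART_CSRA_RXC),
           !!(csra & USART_CSRA_TXC),
           !!(csra & USART_CSRA_DRE));
}

int main(void)
{
    decode_csra(USART_CSRA_DRE);                   /* idle: ready to transmit */
    decode_csra(USART_CSRA_RXC | USART_CSRA_DRE);  /* a received byte is waiting */
    return 0;
}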
diff --git a/include/hw/char/bcm2835_aux.h b/include/hw/char/bcm2835_aux.h
index cdbf7e3e37..9e081793a0 100644
--- a/include/hw/char/bcm2835_aux.h
+++ b/include/hw/char/bcm2835_aux.h
@@ -2,7 +2,8 @@
* Rasperry Pi 2 emulation and refactoring Copyright (c) 2015, Microsoft
* Written by Andrew Baumann
*
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_AUX_H
@@ -10,13 +11,14 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define TYPE_BCM2835_AUX "bcm2835-aux"
-#define BCM2835_AUX(obj) OBJECT_CHECK(BCM2835AuxState, (obj), TYPE_BCM2835_AUX)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835AuxState, BCM2835_AUX)
#define BCM2835_AUX_RX_FIFO_LEN 8
-typedef struct {
+struct BCM2835AuxState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -28,6 +30,6 @@ typedef struct {
uint8_t read_fifo[BCM2835_AUX_RX_FIFO_LEN];
uint8_t read_pos, read_count;
uint8_t ier, iir;
-} BCM2835AuxState;
+};
#endif
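
Aside (not part of the diff): replacing the OBJECT_CHECK() cast macro with OBJECT_DECLARE_SIMPLE_TYPE() keeps the caller-visible names unchanged -- device code still uses the BCM2835_AUX() checked downcast and the BCM2835AuxState struct. A hedged sketch of such a caller inside the QEMU tree; bcm2835_aux_clear_rx() is hypothetical, the fields are the ones declared above:

/* Sketch only -- not part of this commit. */
#include "qemu/osdep.h"
#include "hw/char/bcm2835_aux.h"

static void bcm2835_aux_clear_rx(Object *obj)
{
    BCM2835AuxState *s = BCM2835_AUX(obj);   /* checked QOM downcast */

    s->read_pos = 0;                         /* fields of BCM2835AuxState */
    s->read_count = 0;
}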
diff --git a/include/hw/char/cadence_uart.h b/include/hw/char/cadence_uart.h
index 118e3f10de..e7f7cd8468 100644
--- a/include/hw/char/cadence_uart.h
+++ b/include/hw/char/cadence_uart.h
@@ -17,10 +17,14 @@
*/
#ifndef CADENCE_UART_H
+#define CADENCE_UART_H
+#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qapi/error.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define CADENCE_UART_RX_FIFO_SIZE 16
#define CADENCE_UART_TX_FIFO_SIZE 16
@@ -28,10 +32,9 @@
#define CADENCE_UART_R_MAX (0x48/4)
#define TYPE_CADENCE_UART "cadence_uart"
-#define CADENCE_UART(obj) OBJECT_CHECK(CadenceUARTState, (obj), \
- TYPE_CADENCE_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(CadenceUARTState, CADENCE_UART)
-typedef struct {
+struct CadenceUARTState {
/*< private >*/
SysBusDevice parent_obj;
@@ -47,24 +50,7 @@ typedef struct {
CharBackend chr;
qemu_irq irq;
QEMUTimer *fifo_trigger_handle;
-} CadenceUARTState;
-
-static inline DeviceState *cadence_uart_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_create(NULL, TYPE_CADENCE_UART);
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_init_nofail(dev);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
+ Clock *refclk;
+};
- return dev;
-}
-
-#define CADENCE_UART_H
#endif
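
Aside (not part of the diff): with the inline cadence_uart_create() helper removed above, boards wire the UART up explicitly instead. A sketch of the equivalent steps, assuming the qdev_new()/sysbus_realize_and_unref() API present in the QEMU tree of this period; board_wire_cadence_uart() is a hypothetical stand-in for board code:

/*
 * Sketch only -- not part of this commit.  Roughly the explicit wiring
 * that replaces the removed cadence_uart_create() convenience helper.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/char/cadence_uart.h"

static DeviceState *board_wire_cadence_uart(hwaddr addr, qemu_irq irq,
                                            Chardev *chr)
{
    DeviceState *dev = qdev_new(TYPE_CADENCE_UART);

    qdev_prop_set_chr(dev, "chardev", chr);                  /* attach backend */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);           /* map registers */
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);         /* wire interrupt */
    return dev;
}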
diff --git a/include/hw/char/cmsdk-apb-uart.h b/include/hw/char/cmsdk-apb-uart.h
index c41fba9a27..7de8f8d1b9 100644
--- a/include/hw/char/cmsdk-apb-uart.h
+++ b/include/hw/char/cmsdk-apb-uart.h
@@ -14,12 +14,12 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_UART "cmsdk-apb-uart"
-#define CMSDK_APB_UART(obj) OBJECT_CHECK(CMSDKAPBUART, (obj), \
- TYPE_CMSDK_APB_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBUART, CMSDK_APB_UART)
-typedef struct {
+struct CMSDKAPBUART {
/*< private >*/
SysBusDevice parent_obj;
@@ -41,38 +41,6 @@ typedef struct {
/* This UART has no FIFO, only a 1-character buffer for each of Tx and Rx */
uint8_t txbuf;
uint8_t rxbuf;
-} CMSDKAPBUART;
-
-/**
- * cmsdk_apb_uart_create - convenience function to create TYPE_CMSDK_APB_UART
- * @addr: location in system memory to map registers
- * @chr: Chardev backend to connect UART to, or NULL if no backend
- * @pclk_frq: frequency in Hz of the PCLK clock (used for calculating baud rate)
- */
-static inline DeviceState *cmsdk_apb_uart_create(hwaddr addr,
- qemu_irq txint,
- qemu_irq rxint,
- qemu_irq txovrint,
- qemu_irq rxovrint,
- qemu_irq uartint,
- Chardev *chr,
- uint32_t pclk_frq)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_create(NULL, TYPE_CMSDK_APB_UART);
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_prop_set_uint32(dev, "pclk-frq", pclk_frq);
- qdev_init_nofail(dev);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, txint);
- sysbus_connect_irq(s, 1, rxint);
- sysbus_connect_irq(s, 2, txovrint);
- sysbus_connect_irq(s, 3, rxovrint);
- sysbus_connect_irq(s, 4, uartint);
- return dev;
-}
+};
#endif
diff --git a/include/hw/char/digic-uart.h b/include/hw/char/digic-uart.h
index de9a3e3551..f710a1a099 100644
--- a/include/hw/char/digic-uart.h
+++ b/include/hw/char/digic-uart.h
@@ -20,10 +20,10 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define TYPE_DIGIC_UART "digic-uart"
-#define DIGIC_UART(obj) \
- OBJECT_CHECK(DigicUartState, (obj), TYPE_DIGIC_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(DigicUartState, DIGIC_UART)
enum {
R_TX = 0x00,
@@ -32,7 +32,7 @@ enum {
R_MAX
};
-typedef struct DigicUartState {
+struct DigicUartState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -42,6 +42,6 @@ typedef struct DigicUartState {
uint32_t reg_rx;
uint32_t reg_st;
-} DigicUartState;
+};
#endif /* HW_CHAR_DIGIC_UART_H */
diff --git a/include/hw/char/escc.h b/include/hw/char/escc.h
index 42aca83611..5669a5b811 100644
--- a/include/hw/char/escc.h
+++ b/include/hw/char/escc.h
@@ -3,13 +3,15 @@
#include "chardev/char-fe.h"
#include "chardev/char-serial.h"
+#include "hw/sysbus.h"
#include "ui/input.h"
+#include "qom/object.h"
/* escc.c */
#define TYPE_ESCC "escc"
#define ESCC_SIZE 4
-#define ESCC(obj) OBJECT_CHECK(ESCCState, (obj), TYPE_ESCC)
+OBJECT_DECLARE_SIMPLE_TYPE(ESCCState, ESCC)
typedef enum {
escc_chn_a, escc_chn_b,
@@ -43,16 +45,18 @@ typedef struct ESCCChannelState {
ESCCChnType type;
uint8_t rx, tx;
QemuInputHandlerState *hs;
+ char *sunkbd_layout;
} ESCCChannelState;
-typedef struct ESCCState {
+struct ESCCState {
SysBusDevice parent_obj;
struct ESCCChannelState chn[2];
uint32_t it_shift;
+ bool bit_swap;
MemoryRegion mmio;
uint32_t disabled;
uint32_t frequency;
-} ESCCState;
+};
#endif
diff --git a/include/hw/char/goldfish_tty.h b/include/hw/char/goldfish_tty.h
new file mode 100644
index 0000000000..d59733e5ae
--- /dev/null
+++ b/include/hw/char/goldfish_tty.h
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Goldfish TTY
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#ifndef HW_CHAR_GOLDFISH_TTY_H
+#define HW_CHAR_GOLDFISH_TTY_H
+
+#include "qemu/fifo8.h"
+#include "chardev/char-fe.h"
+#include "hw/sysbus.h"
+
+#define TYPE_GOLDFISH_TTY "goldfish_tty"
+OBJECT_DECLARE_SIMPLE_TYPE(GoldfishTTYState, GOLDFISH_TTY)
+
+#define GOLFISH_TTY_BUFFER_SIZE 128
+
+struct GoldfishTTYState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+ CharBackend chr;
+
+ uint32_t data_len;
+ uint64_t data_ptr;
+ bool int_enabled;
+
+ Fifo8 rx_fifo;
+};
+
+#endif
diff --git a/include/hw/char/grlib_uart.h b/include/hw/char/grlib_uart.h
new file mode 100644
index 0000000000..7496f8fd5e
--- /dev/null
+++ b/include/hw/char/grlib_uart.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU GRLIB UART
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2024 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef GRLIB_UART_H
+#define GRLIB_UART_H
+
+#define TYPE_GRLIB_APB_UART "grlib-apbuart"
+
+#endif
diff --git a/include/hw/char/ibex_uart.h b/include/hw/char/ibex_uart.h
new file mode 100644
index 0000000000..9deadf223b
--- /dev/null
+++ b/include/hw/char/ibex_uart.h
@@ -0,0 +1,73 @@
+/*
+ * QEMU lowRISC Ibex UART device
+ *
+ * Copyright (c) 2020 Western Digital
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_IBEX_UART_H
+#define HW_IBEX_UART_H
+
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+
+#define IBEX_UART_TX_FIFO_SIZE 16
+#define IBEX_UART_CLOCK 50000000 /* 50MHz clock */
+
+#define TYPE_IBEX_UART "ibex-uart"
+OBJECT_DECLARE_SIMPLE_TYPE(IbexUartState, IBEX_UART)
+
+struct IbexUartState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+
+ uint8_t tx_fifo[IBEX_UART_TX_FIFO_SIZE];
+ uint32_t tx_level;
+
+ uint32_t rx_level;
+
+ QEMUTimer *fifo_trigger_handle;
+ uint64_t char_tx_time;
+
+ uint32_t uart_intr_state;
+ uint32_t uart_intr_enable;
+ uint32_t uart_ctrl;
+ uint32_t uart_status;
+ uint32_t uart_rdata;
+ uint32_t uart_fifo_ctrl;
+ uint32_t uart_fifo_status;
+ uint32_t uart_ovrd;
+ uint32_t uart_val;
+ uint32_t uart_timeout_ctrl;
+
+ Clock *f_clk;
+
+ CharBackend chr;
+ qemu_irq tx_watermark;
+ qemu_irq rx_watermark;
+ qemu_irq tx_empty;
+ qemu_irq rx_overflow;
+};
+#endif /* HW_IBEX_UART_H */
diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h
index c8b74284f8..65f0e97c76 100644
--- a/include/hw/char/imx_serial.h
+++ b/include/hw/char/imx_serial.h
@@ -20,12 +20,17 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
+#include "qemu/fifo32.h"
#define TYPE_IMX_SERIAL "imx.serial"
-#define IMX_SERIAL(obj) OBJECT_CHECK(IMXSerialState, (obj), TYPE_IMX_SERIAL)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXSerialState, IMX_SERIAL)
+
+#define FIFO_SIZE 32
#define URXD_CHARRDY (1<<15) /* character read is valid */
#define URXD_ERR (1<<14) /* Character has error */
+#define URXD_OVRRUN (1<<13) /* 32nd character in RX FIFO */
#define URXD_FRMERR (1<<12) /* Character has frame error */
#define URXD_BRK (1<<11) /* Break received */
@@ -64,25 +69,40 @@
#define UCR1_TXMPTYEN (1<<6) /* Tx Empty Interrupt Enable */
#define UCR1_UARTEN (1<<0) /* UART Enable */
+#define UCR2_ATEN (1<<3) /* Ageing Timer Enable */
#define UCR2_TXEN (1<<2) /* Transmitter enable */
#define UCR2_RXEN (1<<1) /* Receiver enable */
#define UCR2_SRST (1<<0) /* Reset complete */
#define UCR4_DREN BIT(0) /* Receive Data Ready interrupt enable */
+#define UCR4_OREN BIT(1) /* Overrun interrupt enable */
#define UCR4_TCEN BIT(3) /* TX complete interrupt enable */
+#define UCR4_WKEN BIT(7) /* WAKE interrupt enable */
#define UTS1_TXEMPTY (1<<6)
#define UTS1_RXEMPTY (1<<5)
#define UTS1_TXFULL (1<<4)
#define UTS1_RXFULL (1<<3)
-typedef struct IMXSerialState {
+#define TL_MASK 0x3f
+
+ /* Bit time in nanoseconds assuming maximum baud rate of 115200 */
+#define BIT_TIME_NS 8681
+
+/* Assume 8 bits per character */
+#define NUM_BITS 8
+
+/* Ageing timer triggers after 8 characters */
+#define AGE_DURATION_NS (8 * NUM_BITS * BIT_TIME_NS)
+
+struct IMXSerialState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
- int32_t readbuff;
+ QEMUTimer ageing_timer;
+ Fifo32 rx_fifo;
uint32_t usr1;
uint32_t usr2;
@@ -103,6 +123,6 @@ typedef struct IMXSerialState {
qemu_irq irq;
CharBackend chr;
-} IMXSerialState;
+};
#endif
diff --git a/include/hw/char/lm32_juart.h b/include/hw/char/lm32_juart.h
deleted file mode 100644
index e7c6fb5a3b..0000000000
--- a/include/hw/char/lm32_juart.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef QEMU_HW_CHAR_LM32_JUART_H
-#define QEMU_HW_CHAR_LM32_JUART_H
-
-#include "hw/qdev.h"
-
-#define TYPE_LM32_JUART "lm32-juart"
-
-uint32_t lm32_juart_get_jtx(DeviceState *d);
-uint32_t lm32_juart_get_jrx(DeviceState *d);
-void lm32_juart_set_jtx(DeviceState *d, uint32_t jtx);
-void lm32_juart_set_jrx(DeviceState *d, uint32_t jrx);
-
-#endif /* QEMU_HW_CHAR_LM32_JUART_H */
diff --git a/include/hw/char/mchp_pfsoc_mmuart.h b/include/hw/char/mchp_pfsoc_mmuart.h
new file mode 100644
index 0000000000..b0e14ca355
--- /dev/null
+++ b/include/hw/char/mchp_pfsoc_mmuart.h
@@ -0,0 +1,68 @@
+/*
+ * Microchip PolarFire SoC MMUART emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_MCHP_PFSOC_MMUART_H
+#define HW_MCHP_PFSOC_MMUART_H
+
+#include "hw/sysbus.h"
+#include "hw/char/serial.h"
+
+#define MCHP_PFSOC_MMUART_REG_COUNT 13
+
+#define TYPE_MCHP_PFSOC_UART "mchp.pfsoc.uart"
+OBJECT_DECLARE_SIMPLE_TYPE(MchpPfSoCMMUartState, MCHP_PFSOC_UART)
+
+typedef struct MchpPfSoCMMUartState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion container;
+ MemoryRegion iomem;
+
+ SerialMM serial_mm;
+
+ uint32_t reg[MCHP_PFSOC_MMUART_REG_COUNT];
+} MchpPfSoCMMUartState;
+
+/**
+ * mchp_pfsoc_mmuart_create - Create a Microchip PolarFire SoC MMUART
+ *
+ * This is a helper routine for board code to create an MMUART device that
+ * is compatible with the Microchip PolarFire SoC.
+ *
+ * @sysmem: system memory region to map
+ * @base: base address of the MMUART registers
+ * @irq: IRQ number of the MMUART device
+ * @chr: character device to associate to
+ *
+ * @return: a pointer to the device specific control structure
+ */
+MchpPfSoCMMUartState *mchp_pfsoc_mmuart_create(MemoryRegion *sysmem,
+ hwaddr base, qemu_irq irq, Chardev *chr);
+
+#endif /* HW_MCHP_PFSOC_MMUART_H */
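
A minimal usage sketch for the helper declared above, as board code might call it; the base address and the qemu_irq variable (plic_uart_irq) are placeholders, not values taken from this patch:

    MchpPfSoCMMUartState *uart;

    uart = mchp_pfsoc_mmuart_create(get_system_memory(),
                                    0x20100000,       /* placeholder base */
                                    plic_uart_irq,    /* placeholder IRQ */
                                    serial_hd(0));
    (void)uart;
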
diff --git a/include/hw/char/nrf51_uart.h b/include/hw/char/nrf51_uart.h
index eb1c15b490..561b6383c4 100644
--- a/include/hw/char/nrf51_uart.h
+++ b/include/hw/char/nrf51_uart.h
@@ -14,12 +14,13 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
#include "hw/registerfields.h"
+#include "qom/object.h"
#define UART_FIFO_LENGTH 6
#define UART_SIZE 0x1000
#define TYPE_NRF51_UART "nrf51_soc.uart"
-#define NRF51_UART(obj) OBJECT_CHECK(NRF51UARTState, (obj), TYPE_NRF51_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51UARTState, NRF51_UART)
REG32(UART_STARTRX, 0x000)
REG32(UART_STOPRX, 0x004)
@@ -54,7 +55,7 @@ REG32(UART_TXD, 0x51C)
REG32(UART_BAUDRATE, 0x524)
REG32(UART_CONFIG, 0x56C)
-typedef struct NRF51UARTState {
+struct NRF51UARTState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -72,6 +73,6 @@ typedef struct NRF51UARTState {
bool tx_started;
bool pending_tx_byte;
bool enabled;
-} NRF51UARTState;
+};
#endif
diff --git a/include/hw/char/parallel-isa.h b/include/hw/char/parallel-isa.h
new file mode 100644
index 0000000000..5284b2ffec
--- /dev/null
+++ b/include/hw/char/parallel-isa.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU ISA Parallel PORT emulation
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2007 Marko Kohtala
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef HW_PARALLEL_ISA_H
+#define HW_PARALLEL_ISA_H
+
+#include "parallel.h"
+
+#include "exec/ioport.h"
+#include "hw/isa/isa.h"
+#include "qom/object.h"
+
+#define TYPE_ISA_PARALLEL "isa-parallel"
+OBJECT_DECLARE_SIMPLE_TYPE(ISAParallelState, ISA_PARALLEL)
+
+struct ISAParallelState {
+ ISADevice parent_obj;
+
+ uint32_t index;
+ uint32_t iobase;
+ uint32_t isairq;
+ ParallelState state;
+ PortioList portio_list;
+};
+
+void isa_parallel_set_iobase(ISADevice *parallel, hwaddr iobase);
+void isa_parallel_set_enabled(ISADevice *parallel, bool enabled);
+
+#endif /* HW_PARALLEL_ISA_H */
diff --git a/include/hw/char/parallel.h b/include/hw/char/parallel.h
index d6dd62fb9f..cfb97cc7cc 100644
--- a/include/hw/char/parallel.h
+++ b/include/hw/char/parallel.h
@@ -3,8 +3,26 @@
#include "exec/memory.h"
#include "hw/isa/isa.h"
+#include "hw/irq.h"
+#include "chardev/char-fe.h"
#include "chardev/char.h"
+typedef struct ParallelState {
+ MemoryRegion iomem;
+ uint8_t dataw;
+ uint8_t datar;
+ uint8_t status;
+ uint8_t control;
+ qemu_irq irq;
+ int irq_pending;
+ CharBackend chr;
+ int hw_driver;
+ int epp_timeout;
+ uint32_t last_read_offset; /* For debugging */
+ /* Memory-mapped interface */
+ int it_shift;
+} ParallelState;
+
void parallel_hds_isa_init(ISABus *bus, int n);
bool parallel_mm_init(MemoryRegion *address_space,
diff --git a/include/hw/char/pl011.h b/include/hw/char/pl011.h
index 83649324b6..d853802132 100644
--- a/include/hw/char/pl011.h
+++ b/include/hw/char/pl011.h
@@ -15,38 +15,46 @@
#ifndef HW_PL011_H
#define HW_PL011_H
-static inline DeviceState *pl011_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_create(NULL, "pl011");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_init_nofail(dev);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
-
-static inline DeviceState *pl011_luminary_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_create(NULL, "pl011_luminary");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_init_nofail(dev);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+#include "qom/object.h"
+
+#define TYPE_PL011 "pl011"
+OBJECT_DECLARE_SIMPLE_TYPE(PL011State, PL011)
+
+/* This shares the same struct (and cast macro) as the base pl011 device */
+#define TYPE_PL011_LUMINARY "pl011_luminary"
+
+/* Depth of UART FIFO in bytes, when FIFO mode is enabled (else depth == 1) */
+#define PL011_FIFO_DEPTH 16
+
+struct PL011State {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ uint32_t readbuff;
+ uint32_t flags;
+ uint32_t lcr;
+ uint32_t rsr;
+ uint32_t cr;
+ uint32_t dmacr;
+ uint32_t int_enabled;
+ uint32_t int_level;
+ uint32_t read_fifo[PL011_FIFO_DEPTH];
+ uint32_t ilpr;
+ uint32_t ibrd;
+ uint32_t fbrd;
+ uint32_t ifl;
+ int read_pos;
+ int read_count;
+ int read_trigger;
+ CharBackend chr;
+ qemu_irq irq[6];
+ Clock *clk;
+ bool migrate_clk;
+ const unsigned char *id;
+};
+
+DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr);
#endif
diff --git a/include/hw/char/renesas_sci.h b/include/hw/char/renesas_sci.h
new file mode 100644
index 0000000000..a4764e3eee
--- /dev/null
+++ b/include/hw/char/renesas_sci.h
@@ -0,0 +1,54 @@
+/*
+ * Renesas Serial Communication Interface
+ *
+ * Copyright (c) 2018 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_CHAR_RENESAS_SCI_H
+#define HW_CHAR_RENESAS_SCI_H
+
+#include "chardev/char-fe.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_RENESAS_SCI "renesas-sci"
+typedef struct RSCIState RSCIState;
+DECLARE_INSTANCE_CHECKER(RSCIState, RSCI,
+ TYPE_RENESAS_SCI)
+
+enum {
+ ERI = 0,
+ RXI = 1,
+ TXI = 2,
+ TEI = 3,
+ SCI_NR_IRQ = 4
+};
+
+struct RSCIState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion memory;
+ QEMUTimer timer;
+ CharBackend chr;
+ qemu_irq irq[SCI_NR_IRQ];
+
+ uint8_t smr;
+ uint8_t brr;
+ uint8_t scr;
+ uint8_t tdr;
+ uint8_t ssr;
+ uint8_t rdr;
+ uint8_t scmr;
+ uint8_t semr;
+
+ uint8_t read_ssr;
+ int64_t trtime;
+ int64_t rx_next;
+ uint64_t input_freq;
+};
+
+#endif
diff --git a/include/hw/riscv/riscv_htif.h b/include/hw/char/riscv_htif.h
index fb5f88129e..df493fdf6b 100644
--- a/include/hw/riscv/riscv_htif.h
+++ b/include/hw/char/riscv_htif.h
@@ -20,12 +20,9 @@
#ifndef HW_RISCV_HTIF_H
#define HW_RISCV_HTIF_H
-#include "hw/hw.h"
#include "chardev/char.h"
#include "chardev/char-fe.h"
-#include "sysemu/sysemu.h"
#include "exec/memory.h"
-#include "target/riscv/cpu.h"
#define TYPE_HTIF_UART "riscv.htif.uart"
@@ -33,29 +30,25 @@ typedef struct HTIFState {
int allow_tohost;
int fromhost_inprogress;
+ uint64_t tohost;
+ uint64_t fromhost;
hwaddr tohost_offset;
hwaddr fromhost_offset;
- uint64_t tohost_size;
- uint64_t fromhost_size;
MemoryRegion mmio;
- MemoryRegion *address_space;
- MemoryRegion *main_mem;
- void *main_mem_ram_ptr;
- CPURISCVState *env;
CharBackend chr;
uint64_t pending_read;
} HTIFState;
-extern const VMStateDescription vmstate_htif;
-extern const MemoryRegionOps htif_io_ops;
+extern const char *sig_file;
+extern uint8_t line_size;
/* HTIF symbol callback */
void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value,
uint64_t st_size);
/* legacy pre qom */
-HTIFState *htif_mm_init(MemoryRegion *address_space, MemoryRegion *main_mem,
- CPURISCVState *env, Chardev *chr);
+HTIFState *htif_mm_init(MemoryRegion *address_space, Chardev *chr,
+ uint64_t nonelf_base, bool custom_base);
#endif
diff --git a/include/hw/char/serial.h b/include/hw/char/serial.h
index 0acfbbc382..6e14099ee7 100644
--- a/include/hw/char/serial.h
+++ b/include/hw/char/serial.h
@@ -26,16 +26,18 @@
#ifndef HW_SERIAL_H
#define HW_SERIAL_H
-#include "hw/hw.h"
-#include "sysemu/sysemu.h"
#include "chardev/char-fe.h"
#include "exec/memory.h"
#include "qemu/fifo8.h"
#include "chardev/char.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
#define UART_FIFO_LENGTH 16 /* 16550A Fifo Length */
struct SerialState {
+ DeviceState parent;
+
uint16_t divider;
uint8_t rbr; /* receive register */
uint8_t thr; /* transmit holding register */
@@ -56,11 +58,10 @@ struct SerialState {
qemu_irq irq;
CharBackend chr;
int last_break_enable;
- int it_shift;
- int baudbase;
+ uint32_t baudbase;
uint32_t tsr_retry;
guint watch_tag;
- uint32_t wakeup;
+ bool wakeup;
/* Time when the last byte was successfully sent out of the tsr */
uint64_t last_xmit_ts;
@@ -78,21 +79,32 @@ struct SerialState {
QEMUTimer *modem_status_poll;
MemoryRegion io;
};
+typedef struct SerialState SerialState;
+
+struct SerialMM {
+ SysBusDevice parent;
+
+ SerialState serial;
+
+ uint8_t regshift;
+ uint8_t endianness;
+};
extern const VMStateDescription vmstate_serial;
extern const MemoryRegionOps serial_io_ops;
-void serial_realize_core(SerialState *s, Error **errp);
-void serial_exit_core(SerialState *s);
void serial_set_frequency(SerialState *s, uint32_t frequency);
-/* legacy pre qom */
-SerialState *serial_init(int base, qemu_irq irq, int baudbase,
- Chardev *chr, MemoryRegion *system_io);
-SerialState *serial_mm_init(MemoryRegion *address_space,
- hwaddr base, int it_shift,
- qemu_irq irq, int baudbase,
- Chardev *chr, enum device_endian end);
+#define TYPE_SERIAL "serial"
+OBJECT_DECLARE_SIMPLE_TYPE(SerialState, SERIAL)
+
+#define TYPE_SERIAL_MM "serial-mm"
+OBJECT_DECLARE_SIMPLE_TYPE(SerialMM, SERIAL_MM)
+
+SerialMM *serial_mm_init(MemoryRegion *address_space,
+ hwaddr base, int regshift,
+ qemu_irq irq, int baudbase,
+ Chardev *chr, enum device_endian end);
/* serial-isa.c */
@@ -100,5 +112,7 @@ SerialState *serial_mm_init(MemoryRegion *address_space,
#define TYPE_ISA_SERIAL "isa-serial"
void serial_hds_isa_init(ISABus *bus, int from, int to);
+void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase);
+void isa_serial_set_enabled(ISADevice *serial, bool enabled);
#endif
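
With serial_mm_init() now returning a SerialMM and taking a register shift plus endianness, a call from board code would look roughly like the sketch below; the base address, IRQ variable (uart_irq) and backend index are placeholders:

    SerialMM *smm;

    /* 16550-style UART with registers 4 bytes apart (regshift = 2),
     * little-endian accesses, fed from the first -serial backend.
     */
    smm = serial_mm_init(get_system_memory(), 0x10000000, 2,
                         uart_irq, 115200, serial_hd(0),
                         DEVICE_LITTLE_ENDIAN);
    (void)smm;
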
diff --git a/include/hw/char/shakti_uart.h b/include/hw/char/shakti_uart.h
new file mode 100644
index 0000000000..526c408233
--- /dev/null
+++ b/include/hw/char/shakti_uart.h
@@ -0,0 +1,74 @@
+/*
+ * SHAKTI UART
+ *
+ * Copyright (c) 2021 Vijai Kumar K <vijai@behindbytes.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SHAKTI_UART_H
+#define HW_SHAKTI_UART_H
+
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+
+#define SHAKTI_UART_BAUD 0x00
+#define SHAKTI_UART_TX 0x04
+#define SHAKTI_UART_RX 0x08
+#define SHAKTI_UART_STATUS 0x0C
+#define SHAKTI_UART_DELAY 0x10
+#define SHAKTI_UART_CONTROL 0x14
+#define SHAKTI_UART_INT_EN 0x18
+#define SHAKTI_UART_IQ_CYCLES 0x1C
+#define SHAKTI_UART_RX_THRES 0x20
+
+#define SHAKTI_UART_STATUS_TX_EMPTY (1 << 0)
+#define SHAKTI_UART_STATUS_TX_FULL (1 << 1)
+#define SHAKTI_UART_STATUS_RX_NOT_EMPTY (1 << 2)
+#define SHAKTI_UART_STATUS_RX_FULL (1 << 3)
+/* 9600 8N1 is the default setting */
+/* Reg value = (50000000 Hz) / (16 * 9600) */
+#define SHAKTI_UART_BAUD_DEFAULT 0x0145
+#define SHAKTI_UART_CONTROL_DEFAULT 0x0100
+
+#define TYPE_SHAKTI_UART "shakti-uart"
+#define SHAKTI_UART(obj) \
+ OBJECT_CHECK(ShaktiUartState, (obj), TYPE_SHAKTI_UART)
+
+typedef struct {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+
+ uint32_t uart_baud;
+ uint32_t uart_tx;
+ uint32_t uart_rx;
+ uint32_t uart_status;
+ uint32_t uart_delay;
+ uint32_t uart_control;
+ uint32_t uart_interrupt;
+ uint32_t uart_iq_cycles;
+ uint32_t uart_rx_threshold;
+
+ CharBackend chr;
+} ShaktiUartState;
+
+#endif /* HW_SHAKTI_UART_H */
diff --git a/include/hw/riscv/sifive_uart.h b/include/hw/char/sifive_uart.h
index c8dc1c57fd..7f6c79f8bd 100644
--- a/include/hw/riscv/sifive_uart.h
+++ b/include/hw/char/sifive_uart.h
@@ -20,6 +20,11 @@
#ifndef HW_SIFIVE_UART_H
#define HW_SIFIVE_UART_H
+#include "chardev/char-fe.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
enum {
SIFIVE_UART_TXFIFO = 0,
SIFIVE_UART_RXFIFO = 4,
@@ -45,13 +50,12 @@ enum {
#define SIFIVE_UART_GET_TXCNT(txctrl) ((txctrl >> 16) & 0x7)
#define SIFIVE_UART_GET_RXCNT(rxctrl) ((rxctrl >> 16) & 0x7)
+#define SIFIVE_UART_RX_FIFO_SIZE 8
#define TYPE_SIFIVE_UART "riscv.sifive.uart"
+OBJECT_DECLARE_SIMPLE_TYPE(SiFiveUARTState, SIFIVE_UART)
-#define SIFIVE_UART(obj) \
- OBJECT_CHECK(SiFiveUARTState, (obj), TYPE_SIFIVE_UART)
-
-typedef struct SiFiveUARTState {
+struct SiFiveUARTState {
/*< private >*/
SysBusDevice parent_obj;
@@ -59,14 +63,14 @@ typedef struct SiFiveUARTState {
qemu_irq irq;
MemoryRegion mmio;
CharBackend chr;
- uint8_t rx_fifo[8];
- unsigned int rx_fifo_len;
+ uint8_t rx_fifo[SIFIVE_UART_RX_FIFO_SIZE];
+ uint8_t rx_fifo_len;
uint32_t ie;
uint32_t ip;
uint32_t txctrl;
uint32_t rxctrl;
uint32_t div;
-} SiFiveUARTState;
+};
SiFiveUARTState *sifive_uart_create(MemoryRegion *address_space, hwaddr base,
Chardev *chr, qemu_irq irq);
diff --git a/include/hw/char/stm32f2xx_usart.h b/include/hw/char/stm32f2xx_usart.h
index 84c4029777..fdfa7424a7 100644
--- a/include/hw/char/stm32f2xx_usart.h
+++ b/include/hw/char/stm32f2xx_usart.h
@@ -27,7 +27,7 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
-#include "hw/hw.h"
+#include "qom/object.h"
#define USART_SR 0x00
#define USART_DR 0x04
@@ -48,16 +48,17 @@
#define USART_SR_TC (1 << 6)
#define USART_SR_RXNE (1 << 5)
-#define USART_CR1_UE (1 << 13)
-#define USART_CR1_RXNEIE (1 << 5)
-#define USART_CR1_TE (1 << 3)
-#define USART_CR1_RE (1 << 2)
+#define USART_CR1_UE (1 << 13)
+#define USART_CR1_TXEIE (1 << 7)
+#define USART_CR1_TCEIE (1 << 6)
+#define USART_CR1_RXNEIE (1 << 5)
+#define USART_CR1_TE (1 << 3)
+#define USART_CR1_RE (1 << 2)
#define TYPE_STM32F2XX_USART "stm32f2xx-usart"
-#define STM32F2XX_USART(obj) \
- OBJECT_CHECK(STM32F2XXUsartState, (obj), TYPE_STM32F2XX_USART)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXUsartState, STM32F2XX_USART)
-typedef struct {
+struct STM32F2XXUsartState {
/* <private> */
SysBusDevice parent_obj;
@@ -74,5 +75,5 @@ typedef struct {
CharBackend chr;
qemu_irq irq;
-} STM32F2XXUsartState;
+};
#endif /* HW_STM32F2XX_USART_H */
diff --git a/include/hw/char/stm32l4x5_usart.h b/include/hw/char/stm32l4x5_usart.h
new file mode 100644
index 0000000000..dd3866682a
--- /dev/null
+++ b/include/hw/char/stm32l4x5_usart.h
@@ -0,0 +1,67 @@
+/*
+ * STM32L4X5 USART (Universal Synchronous Asynchronous Receiver Transmitter)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * The STM32L4X5 USART is heavily inspired by the stm32f2xx_usart
+ * by Alistair Francis.
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ */
+
+#ifndef HW_STM32L4X5_USART_H
+#define HW_STM32L4X5_USART_H
+
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_USART_BASE "stm32l4x5-usart-base"
+#define TYPE_STM32L4X5_USART "stm32l4x5-usart"
+#define TYPE_STM32L4X5_UART "stm32l4x5-uart"
+#define TYPE_STM32L4X5_LPUART "stm32l4x5-lpuart"
+OBJECT_DECLARE_TYPE(Stm32l4x5UsartBaseState, Stm32l4x5UsartBaseClass,
+ STM32L4X5_USART_BASE)
+
+typedef enum {
+ STM32L4x5_USART,
+ STM32L4x5_UART,
+ STM32L4x5_LPUART,
+} Stm32l4x5UsartType;
+
+struct Stm32l4x5UsartBaseState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t cr1;
+ uint32_t cr2;
+ uint32_t cr3;
+ uint32_t brr;
+ uint32_t gtpr;
+ uint32_t rtor;
+ /* rqr is write-only */
+ uint32_t isr;
+ /* icr is a clear register */
+ uint32_t rdr;
+ uint32_t tdr;
+
+ Clock *clk;
+ CharBackend chr;
+ qemu_irq irq;
+ guint watch_tag;
+};
+
+struct Stm32l4x5UsartBaseClass {
+ SysBusDeviceClass parent_class;
+
+ Stm32l4x5UsartType type;
+};
+
+#endif /* HW_STM32L4X5_USART_H */
diff --git a/include/hw/char/xilinx_uartlite.h b/include/hw/char/xilinx_uartlite.h
index 634086b657..36d4e8444d 100644
--- a/include/hw/char/xilinx_uartlite.h
+++ b/include/hw/char/xilinx_uartlite.h
@@ -15,21 +15,9 @@
#ifndef XILINX_UARTLITE_H
#define XILINX_UARTLITE_H
-static inline DeviceState *xilinx_uartlite_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
+#include "qom/object.h"
- dev = qdev_create(NULL, "xlnx.xps-uartlite");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_init_nofail(dev);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
+#define TYPE_XILINX_UARTLITE "xlnx.xps-uartlite"
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxUARTLite, XILINX_UARTLITE)
#endif
diff --git a/include/hw/clock.h b/include/hw/clock.h
new file mode 100644
index 0000000000..eb58599131
--- /dev/null
+++ b/include/hw/clock.h
@@ -0,0 +1,381 @@
+/*
+ * Hardware Clocks
+ *
+ * Copyright GreenSocs 2016-2020
+ *
+ * Authors:
+ * Frederic Konrad
+ * Damien Hedde
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HW_CLOCK_H
+#define QEMU_HW_CLOCK_H
+
+#include "qom/object.h"
+#include "qemu/queue.h"
+#include "qemu/host-utils.h"
+#include "qemu/bitops.h"
+
+#define TYPE_CLOCK "clock"
+OBJECT_DECLARE_SIMPLE_TYPE(Clock, CLOCK)
+
+/*
+ * Argument to ClockCallback functions indicating why the callback
+ * has been called. A mask of these values logically ORed together
+ * is used to specify which events are interesting when the callback
+ * is registered, so these values must all be different bit values.
+ */
+typedef enum ClockEvent {
+ ClockUpdate = 1, /* Clock period has just updated */
+ ClockPreUpdate = 2, /* Clock period is about to update */
+} ClockEvent;
+
+typedef void ClockCallback(void *opaque, ClockEvent event);
+
+/*
+ * A clock stores a value representing the clock's period in 2^-32 ns units.
+ * It can represent:
+ * + periods from 2^-32 ns up to 4 seconds
+ * + frequencies from ~0.25 Hz up to 2e10 GHz
+ * Resolution of the frequency representation decreases with frequency:
+ * + at 100 MHz, resolution is ~2 mHz
+ * + at 1 GHz, resolution is ~0.2 Hz
+ * + at 10 GHz, resolution is ~20 Hz
+ */
+#define CLOCK_PERIOD_1SEC (1000000000llu << 32)
+
+/*
+ * macro helpers to convert to hertz / nanosecond
+ */
+#define CLOCK_PERIOD_FROM_NS(ns) ((ns) * (CLOCK_PERIOD_1SEC / 1000000000llu))
+#define CLOCK_PERIOD_FROM_HZ(hz) (((hz) != 0) ? CLOCK_PERIOD_1SEC / (hz) : 0u)
+#define CLOCK_PERIOD_TO_HZ(per) (((per) != 0) ? CLOCK_PERIOD_1SEC / (per) : 0u)
+
+/**
+ * Clock:
+ * @parent_obj: parent class
+ * @period: unsigned integer representing the period of the clock
+ * @canonical_path: clock path string cache (used for tracing purposes)
+ * @callback: called when clock changes
+ * @callback_opaque: argument for @callback
+ * @callback_events: mask of events when callback should be called
+ * @source: source (or parent in clock tree) of the clock
+ * @children: list of clocks connected to this one (it is their source)
+ * @sibling: structure used to form a clock list
+ */
+
+
+struct Clock {
+ /*< private >*/
+ Object parent_obj;
+
+ /* all fields are private and should not be modified directly */
+
+ /* fields */
+ uint64_t period;
+ char *canonical_path;
+ ClockCallback *callback;
+ void *callback_opaque;
+ unsigned int callback_events;
+
+ /* Ratio of the parent clock to run the child clocks at */
+ uint32_t multiplier;
+ uint32_t divider;
+
+ /* Clocks are organized in a clock tree */
+ Clock *source;
+ QLIST_HEAD(, Clock) children;
+ QLIST_ENTRY(Clock) sibling;
+};
+
+/*
+ * vmstate description entry to be added in device vmsd.
+ */
+extern const VMStateDescription vmstate_clock;
+#define VMSTATE_CLOCK(field, state) \
+ VMSTATE_CLOCK_V(field, state, 0)
+#define VMSTATE_CLOCK_V(field, state, version) \
+ VMSTATE_STRUCT_POINTER_V(field, state, version, vmstate_clock, Clock)
+#define VMSTATE_ARRAY_CLOCK(field, state, num) \
+ VMSTATE_ARRAY_CLOCK_V(field, state, num, 0)
+#define VMSTATE_ARRAY_CLOCK_V(field, state, num, version) \
+ VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(field, state, num, version, \
+ vmstate_clock, Clock)
+
+/**
+ * clock_setup_canonical_path:
+ * @clk: clock
+ *
+ * compute the canonical path of the clock (used by log messages)
+ */
+void clock_setup_canonical_path(Clock *clk);
+
+/**
+ * clock_new:
+ * @parent: the clock parent
+ * @name: the clock object name
+ *
+ * Helper function to create a new clock and parent it to @parent. There is no
+ * need to call clock_setup_canonical_path on the returned clock as it is done
+ * by this function.
+ *
+ * @return the newly created clock
+ */
+Clock *clock_new(Object *parent, const char *name);
+
+/**
+ * clock_set_callback:
+ * @clk: the clock to register the callback into
+ * @cb: the callback function
+ * @opaque: the argument to the callback
+ * @events: the events the callback should be called for
+ * (logical OR of ClockEvent enum values)
+ *
+ * Register a callback called on every clock update.
+ * Note that a clock has only one callback: you cannot register
+ * different callback functions for different events.
+ */
+void clock_set_callback(Clock *clk, ClockCallback *cb,
+ void *opaque, unsigned int events);
+
+/**
+ * clock_clear_callback:
+ * @clk: the clock to delete the callback from
+ *
+ * Unregister the callback registered with clock_set_callback.
+ */
+void clock_clear_callback(Clock *clk);
+
+/**
+ * clock_set_source:
+ * @clk: the clock.
+ * @src: the source clock
+ *
+ * Set up @src as the clock source of @clk. The current @src period
+ * value is also copied to @clk and its subtree but no callback is
+ * called.
+ * Further @src updates will be propagated to @clk and its subtree.
+ */
+void clock_set_source(Clock *clk, Clock *src);
+
+/**
+ * clock_has_source:
+ * @clk: the clock
+ *
+ * Returns true if the clock has a source clock connected to it.
+ * This is useful for devices which have input clocks which must
+ * be connected by the board/SoC code which creates them. The
+ * device code can use this to check in its realize method that
+ * the clock has been connected.
+ */
+static inline bool clock_has_source(const Clock *clk)
+{
+ return clk->source != NULL;
+}
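
As the comment above suggests, a device with a mandatory input clock would typically verify the wiring in its realize method; a sketch with invented device and field names (MyUartState, MY_UART, s->refclk):

    static void my_uart_realize(DeviceState *dev, Error **errp)
    {
        MyUartState *s = MY_UART(dev);     /* hypothetical device type */

        if (!clock_has_source(s->refclk)) {
            error_setg(errp, "my-uart: refclk must be wired up by the SoC");
            return;
        }
        /* ... remaining realize work ... */
    }
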
+
+/**
+ * clock_set:
+ * @clk: the clock to initialize.
+ * @value: the clock's value, 0 means unclocked
+ *
+ * Set the local cached period value of @clk to @value.
+ *
+ * @return: true if the clock is changed.
+ */
+bool clock_set(Clock *clk, uint64_t value);
+
+static inline bool clock_set_hz(Clock *clk, unsigned hz)
+{
+ return clock_set(clk, CLOCK_PERIOD_FROM_HZ(hz));
+}
+
+static inline bool clock_set_ns(Clock *clk, unsigned ns)
+{
+ return clock_set(clk, CLOCK_PERIOD_FROM_NS(ns));
+}
+
+/**
+ * clock_propagate:
+ * @clk: the clock
+ *
+ * Propagate the clock period that has been previously configured using
+ * @clock_set(). This will recursively update all connected clocks.
+ * It is an error to call this function on a clock which has a source.
+ * Note: this function must not be called during device initialization
+ * or migration.
+ */
+void clock_propagate(Clock *clk);
+
+/**
+ * clock_update:
+ * @clk: the clock to update.
+ * @value: the new clock's value, 0 means unclocked
+ *
+ * Update the @clk to the new @value. All connected clocks will be informed
+ * of this update. This is equivalent to calling @clock_set() then
+ * @clock_propagate().
+ */
+static inline void clock_update(Clock *clk, uint64_t value)
+{
+ if (clock_set(clk, value)) {
+ clock_propagate(clk);
+ }
+}
+
+static inline void clock_update_hz(Clock *clk, unsigned hz)
+{
+ clock_update(clk, CLOCK_PERIOD_FROM_HZ(hz));
+}
+
+static inline void clock_update_ns(Clock *clk, unsigned ns)
+{
+ clock_update(clk, CLOCK_PERIOD_FROM_NS(ns));
+}
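
A clock-source device (for example a clock controller model) would typically call one of these update helpers when the guest reprograms a rate register; a short sketch with invented register and field names (s->input_hz, s->divider_reg, s->out_clk):

    /* Guest wrote a new divider: recompute and publish the output rate. */
    if (s->divider_reg != 0) {
        clock_update_hz(s->out_clk, s->input_hz / s->divider_reg);
    }
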
+
+/**
+ * clock_get:
+ * @clk: the clock whose period to fetch
+ *
+ * @return: the current period.
+ */
+static inline uint64_t clock_get(const Clock *clk)
+{
+ return clk->period;
+}
+
+static inline unsigned clock_get_hz(Clock *clk)
+{
+ return CLOCK_PERIOD_TO_HZ(clock_get(clk));
+}
+
+/**
+ * clock_ticks_to_ns:
+ * @clk: the clock to query
+ * @ticks: number of ticks
+ *
+ * Returns the length of time in nanoseconds for this clock
+ * to tick @ticks times. Because a clock can have a period
+ * which is not a whole number of nanoseconds, it is important
+ * to use this function when calculating things like timer
+ * expiry deadlines, rather than attempting to obtain a "period
+ * in nanoseconds" value and then multiplying that by a number
+ * of ticks.
+ *
+ * The result could in theory be too large to fit in a 64-bit
+ * value if the number of ticks and the clock period are both
+ * large; to avoid overflow the result will be saturated to INT64_MAX
+ * (because this is the largest valid input to the QEMUTimer APIs).
+ * Since INT64_MAX nanoseconds is almost 300 years, anything with
+ * an expiry later than that is in the "will never happen" category
+ * and callers can reasonably not special-case the saturated result.
+ */
+static inline uint64_t clock_ticks_to_ns(const Clock *clk, uint64_t ticks)
+{
+ uint64_t ns_low, ns_high;
+
+ /*
+ * clk->period is the period in units of 2^-32 ns, so
+ * (clk->period * ticks) is the required length of time in those
+ * units, and we can convert to nanoseconds by multiplying by
+ * 2^32, which is the same as shifting the 128-bit multiplication
+ * result right by 32.
+ */
+ mulu64(&ns_low, &ns_high, clk->period, ticks);
+ if (ns_high & MAKE_64BIT_MASK(31, 33)) {
+ return INT64_MAX;
+ }
+ return ns_low >> 32 | ns_high << 32;
+}
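
For example, a UART model that raises an interrupt once a frame has been shifted out would derive the timer deadline from the clock itself rather than from a cached nanosecond period; a sketch assuming the device has s->clk and s->timer fields:

    /* Fire after 10 bit-times (start + 8 data + stop) on s->clk. */
    uint64_t frame_ns = clock_ticks_to_ns(s->clk, 10);

    timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + frame_ns);
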
+
+/**
+ * clock_ns_to_ticks:
+ * @clk: the clock to query
+ * @ns: duration in nanoseconds
+ *
+ * Returns the number of ticks this clock would make in the given
+ * number of nanoseconds. Because a clock can have a period which
+ * is not a whole number of nanoseconds, it is important to use this
+ * function rather than attempting to obtain a "period in nanoseconds"
+ * value and then dividing the duration by that value.
+ *
+ * If the clock is stopped (ie it has period zero), returns 0.
+ *
+ * For some inputs the result could overflow a 64-bit value (because
+ * the clock's period is short and the duration is long). In these
+ * cases we truncate the result to a 64-bit value. This is on the
+ * assumption that generally the result is going to be used to report
+ * a 32-bit or 64-bit guest register value, so wrapping either cannot
+ * happen or is the desired behaviour.
+ */
+static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
+{
+ /*
+ * ticks = duration_in_ns / period_in_ns
+ * = ns / (period / 2^32)
+ * = (ns * 2^32) / period
+ * The hi, lo inputs to divu128() are (ns << 32) as a 128 bit value.
+ */
+ uint64_t lo = ns << 32;
+ uint64_t hi = ns >> 32;
+ if (clk->period == 0) {
+ return 0;
+ }
+
+ divu128(&lo, &hi, clk->period);
+ return lo;
+}
+
+/**
+ * clock_is_enabled:
+ * @clk: a clock
+ *
+ * @return: true if the clock is running.
+ */
+static inline bool clock_is_enabled(const Clock *clk)
+{
+ return clock_get(clk) != 0;
+}
+
+/**
+ * clock_display_freq: return human-readable representation of clock frequency
+ * @clk: clock
+ *
+ * Return a string which has a human-readable representation of the
+ * clock's frequency, e.g. "33.3 MHz". This is intended for debug
+ * and display purposes.
+ *
+ * The caller is responsible for freeing the string with g_free().
+ */
+char *clock_display_freq(Clock *clk);
+
+/**
+ * clock_set_mul_div: set multiplier/divider for child clocks
+ * @clk: clock
+ * @multiplier: multiplier value
+ * @divider: divider value
+ *
+ * @return: true if the clock is changed.
+ *
+ * By default, a Clock's children will all run with the same period
+ * as their parent. This function allows you to adjust the multiplier
+ * and divider used to derive the child clock frequency.
+ * For example, setting a multiplier of 2 and a divider of 3
+ * will run child clocks with a period 2/3 of the parent clock,
+ * so if the parent clock is an 8MHz clock the children will
+ * be 12MHz.
+ *
+ * Setting the multiplier to 0 will stop the child clocks.
+ * Setting the divider to 0 is a programming error (diagnosed with
+ * an assertion failure).
+ * Setting a multiplier value that results in the child period
+ * overflowing is not diagnosed.
+ *
+ * Note that this function does not call clock_propagate(); the
+ * caller should do that if necessary.
+ */
+bool clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider);
+
+#endif /* QEMU_HW_CLOCK_H */
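
Tying the last helper back to the numbers in its comment, a clock controller that owns a root output clock could scale its children as described; a sketch with an invented field name (s->pll_clk), assuming that clock has no source so that propagation is legal:

    /* Children run with 2/3 of the parent period: 8 MHz in -> 12 MHz out. */
    if (clock_set_mul_div(s->pll_clk, 2, 3)) {
        clock_propagate(s->pll_clk);
    }
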
diff --git a/include/hw/core/accel-cpu.h b/include/hw/core/accel-cpu.h
new file mode 100644
index 0000000000..24dad45ab9
--- /dev/null
+++ b/include/hw/core/accel-cpu.h
@@ -0,0 +1,38 @@
+/*
+ * Accelerator interface, specializes CPUClass
+ * This header is used only by target-specific code.
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_CPU_H
+#define ACCEL_CPU_H
+
+/*
+ * This header is used to define new accelerator- and target-specific
+ * CPU subclasses.
+ * It uses CPU_RESOLVING_TYPE, so this is clearly target-specific.
+ *
+ * Do not use it for any purpose other than implementing such new
+ * subclasses in target/, or the accel implementation itself in accel/.
+ */
+
+#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE
+#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU)
+typedef struct AccelCPUClass AccelCPUClass;
+DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU)
+
+typedef struct AccelCPUClass {
+ /*< private >*/
+ ObjectClass parent_class;
+ /*< public >*/
+
+ void (*cpu_class_init)(CPUClass *cc);
+ void (*cpu_instance_init)(CPUState *cpu);
+ bool (*cpu_target_realize)(CPUState *cpu, Error **errp);
+} AccelCPUClass;
+
+#endif /* ACCEL_CPU_H */
diff --git a/include/qom/cpu.h b/include/hw/core/cpu.h
index 16bbed1ae0..ec14f74ce5 100644
--- a/include/qom/cpu.h
+++ b/include/hw/core/cpu.h
@@ -21,32 +21,22 @@
#define QEMU_CPU_H
#include "hw/qdev-core.h"
-#include "disas/bfd.h"
+#include "disas/dis-asm.h"
#include "exec/hwaddr.h"
+#include "exec/vaddr.h"
#include "exec/memattrs.h"
+#include "exec/tlb-common.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
-#include "qemu/fprintf-fn.h"
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
+#include "qom/object.h"
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
void *opaque);
/**
- * vaddr:
- * Type wide enough to contain any #target_ulong virtual address.
- */
-typedef uint64_t vaddr;
-#define VADDR_PRId PRId64
-#define VADDR_PRIu PRIu64
-#define VADDR_PRIo PRIo64
-#define VADDR_PRIx PRIx64
-#define VADDR_PRIX PRIX64
-#define VADDR_MAX UINT64_MAX
-
-/**
* SECTION:cpu
* @section_id: QEMU-cpu
* @title: CPU Class
@@ -61,92 +51,96 @@ typedef uint64_t vaddr;
*/
#define CPU(obj) ((CPUState *)(obj))
-#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
-#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
+/*
+ * The class checkers bring in CPU_GET_CLASS() which is potentially
+ * expensive given the eventual call to
+ * object_class_dynamic_cast_assert(). Because of this the CPUState
+ * has a cached value for the class in cs->cc which is set up in
+ * cpu_exec_realizefn() for use in hot code paths.
+ */
+typedef struct CPUClass CPUClass;
+DECLARE_CLASS_CHECKERS(CPUClass, CPU,
+ TYPE_CPU)
+
+/**
+ * OBJECT_DECLARE_CPU_TYPE:
+ * @CpuInstanceType: instance struct name
+ * @CpuClassType: class struct name
+ * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
+ *
+ * This macro is typically used in "cpu-qom.h" header file, and will:
+ *
+ * - create the typedefs for the CPU object and class structs
+ * - register the type for use with g_autoptr
+ * - provide three standard type cast functions
+ *
+ * The object struct and class struct need to be declared manually.
+ */
+#define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
+ typedef struct ArchCPU CpuInstanceType; \
+ OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
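
A representative use of this macro from a target's cpu-qom.h, shown here as a typical pattern rather than as part of this patch:

    #define TYPE_RISCV_CPU "riscv-cpu"

    OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)

    /* struct ArchCPU and struct RISCVCPUClass are still declared by hand,
     * as noted above.
     */
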
typedef enum MMUAccessType {
MMU_DATA_LOAD = 0,
MMU_DATA_STORE = 1,
MMU_INST_FETCH = 2
+#define MMU_ACCESS_COUNT 3
} MMUAccessType;
typedef struct CPUWatchpoint CPUWatchpoint;
-typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
- bool is_write, bool is_exec, int opaque,
- unsigned size);
+/* see accel-cpu.h */
+struct AccelCPUClass;
-struct TranslationBlock;
+/* see sysemu-cpu-ops.h */
+struct SysemuCPUOps;
/**
* CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an
- * instantiatable CPU type.
+ * instantiatable CPU type.
* @parse_features: Callback to parse command line arguments.
- * @reset: Callback to reset the #CPUState to its initial state.
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
* @has_work: Callback for checking if there is work to do.
- * @do_interrupt: Callback for interrupt handling.
- * @do_unassigned_access: Callback for unassigned access handling.
- * (this is deprecated: new targets should use do_transaction_failed instead)
- * @do_unaligned_access: Callback for unaligned access handling, if
- * the target defines #ALIGNED_ONLY.
- * @do_transaction_failed: Callback for handling failed memory transactions
- * (ie bus faults or external aborts; not MMU faults)
- * @virtio_is_big_endian: Callback to return %true if a CPU which supports
- * runtime configurable endianness is currently big-endian. Non-configurable
- * CPUs can use the default implementation of this method. This method should
- * not be used by any callers other than the pre-1.0 virtio devices.
+ * @mmu_index: Callback for choosing softmmu mmu index;
+ * may be used internally by memory_rw_debug without TCG.
* @memory_rw_debug: Callback for GDB memory access.
* @dump_state: Callback for dumping state.
- * @dump_statistics: Callback for dumping statistics.
+ * @query_cpu_fast:
+ * Fill in target specific information for the "query-cpus-fast"
+ * QAPI call.
* @get_arch_id: Callback for getting architecture-dependent CPU ID.
- * @get_paging_enabled: Callback for inquiring whether paging is enabled.
- * @get_memory_mapping: Callback for obtaining the memory mappings.
- * @set_pc: Callback for setting the Program Counter register.
- * @synchronize_from_tb: Callback for synchronizing state from a TCG
- * #TranslationBlock.
- * @handle_mmu_fault: Callback for handling an MMU fault.
- * @get_phys_page_debug: Callback for obtaining a physical address.
- * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
- * associated memory transaction attributes to use for the access.
- * CPUs which use memory transaction attributes should implement this
- * instead of get_phys_page_debug.
- * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
- * a memory access with the specified memory transaction attributes.
+ * @set_pc: Callback for setting the Program Counter register. This
+ * should have the semantics used by the target architecture when
+ * setting the PC from a source such as an ELF file entry point;
+ * for example on Arm it will also set the Thumb mode bit based
+ * on the least significant bit of the new PC value.
+ * If the target behaviour here is anything other than "set
+ * the PC register to the value passed in" then the target must
+ * also implement the synchronize_from_tb hook.
+ * @get_pc: Callback for getting the Program Counter register.
+ * As above, with the semantics of the target architecture.
* @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register.
- * @debug_check_watchpoint: Callback: return true if the architectural
- * watchpoint whose address has matched should really fire.
- * @debug_excp_handler: Callback for handling debug exceptions.
- * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
- * 64-bit VM coredump.
- * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
- * note to a 32-bit VM coredump.
- * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
- * 32-bit VM coredump.
- * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
- * note to a 32-bit VM coredump.
- * @vmsd: State description for migration.
- * @gdb_num_core_regs: Number of core registers accessible to GDB.
+ * @gdb_adjust_breakpoint: Callback for adjusting the address of a
+ * breakpoint. Used by AVR to handle a gdb mis-feature with
+ * its Harvard architecture split code and data.
+ * @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer
+ * from @gdb_core_xml_file.
* @gdb_core_xml_file: File name for core registers GDB XML description.
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
* before the insn which triggers a watchpoint rather than after it.
* @gdb_arch_name: Optional callback that returns the architecture name known
* to GDB. The caller must free the returned string with g_free.
- * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
- * gdb stub. Returns a pointer to the XML contents for the specified XML file
- * or NULL if the CPU doesn't have a dynamically generated content for it.
- * @cpu_exec_enter: Callback for cpu_exec preparation.
- * @cpu_exec_exit: Callback for cpu_exec cleanup.
- * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
* @disas_set_info: Setup architecture specific components of disassembly info
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
+ * @deprecation_note: If this CPUClass is deprecated, this field provides
+ * related information.
*
* Represents a CPU family or model.
*/
-typedef struct CPUClass {
+struct CPUClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
@@ -154,80 +148,210 @@ typedef struct CPUClass {
ObjectClass *(*class_by_name)(const char *cpu_model);
void (*parse_features)(const char *typename, char *str, Error **errp);
- void (*reset)(CPUState *cpu);
- int reset_dump_flags;
bool (*has_work)(CPUState *cpu);
- void (*do_interrupt)(CPUState *cpu);
- CPUUnassignedAccess do_unassigned_access;
- void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr);
- void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
- unsigned size, MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr);
- bool (*virtio_is_big_endian)(CPUState *cpu);
+ int (*mmu_index)(CPUState *cpu, bool ifetch);
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
uint8_t *buf, int len, bool is_write);
- void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
- int flags);
- GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
- void (*dump_statistics)(CPUState *cpu, FILE *f,
- fprintf_function cpu_fprintf, int flags);
+ void (*dump_state)(CPUState *cpu, FILE *, int flags);
+ void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
int64_t (*get_arch_id)(CPUState *cpu);
- bool (*get_paging_enabled)(const CPUState *cpu);
- void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
- Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
- void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
- int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
- int mmu_index);
- hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
- hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
- MemTxAttrs *attrs);
- int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
- int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
+ vaddr (*get_pc)(CPUState *cpu);
+ int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
- bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
- void (*debug_excp_handler)(CPUState *cpu);
-
- int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
- int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
- int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
- int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
-
- const struct VMStateDescription *vmsd;
+ vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
+
const char *gdb_core_xml_file;
- gchar * (*gdb_arch_name)(CPUState *cpu);
- const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
- void (*cpu_exec_enter)(CPUState *cpu);
- void (*cpu_exec_exit)(CPUState *cpu);
- bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+ const gchar * (*gdb_arch_name)(CPUState *cpu);
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
- vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
- void (*tcg_initialize)(void);
- /* Keep non-pointer data at the end to minimize holes. */
+ const char *deprecation_note;
+ struct AccelCPUClass *accel_cpu;
+
+ /* when system emulation is not available, this pointer is NULL */
+ const struct SysemuCPUOps *sysemu_ops;
+
+ /* when TCG is not available, this pointer is NULL */
+ const TCGCPUOps *tcg_ops;
+
+ /*
+ * if not NULL, this is called in order for the CPUClass to initialize
+ * class data that depends on the accelerator, see accel/accel-common.c.
+ */
+ void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
+
+ /*
+ * Keep non-pointer data at the end to minimize holes.
+ */
+ int reset_dump_flags;
int gdb_num_core_regs;
bool gdb_stop_before_watchpoint;
-} CPUClass;
+};
-#ifdef HOST_WORDS_BIGENDIAN
-typedef struct icount_decr_u16 {
- uint16_t high;
- uint16_t low;
-} icount_decr_u16;
+/*
+ * Fix the number of mmu modes to 16, which is also the maximum
+ * supported by the softmmu tlb api.
+ */
+#define NB_MMU_MODES 16
+
+/* Use a fully associative victim tlb of 8 entries. */
+#define CPU_VTLB_SIZE 8
+
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry. This is
+ * also why we don't want to combine the two structs.
+ */
+typedef struct CPUTLBEntryFull {
+ /*
+ * @xlat_section contains:
+ * - in the lower TARGET_PAGE_BITS, a physical section number
+ * - with the lower TARGET_PAGE_BITS masked off, an offset which
+ * must be added to the virtual address to obtain:
+ * + the ram_addr_t of the target RAM (if the physical section
+ * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+ * + the offset within the target MemoryRegion (otherwise)
+ */
+ hwaddr xlat_section;
+
+ /*
+ * @phys_addr contains the physical address in the address space
+ * given by cpu_asidx_from_attrs(cpu, @attrs).
+ */
+ hwaddr phys_addr;
+
+ /* @attrs contains the memory transaction attributes for the page. */
+ MemTxAttrs attrs;
+
+ /* @prot contains the complete protections for the page. */
+ uint8_t prot;
+
+ /* @lg_page_size contains the log2 of the page size. */
+ uint8_t lg_page_size;
+
+ /* Additional tlb flags requested by tlb_fill. */
+ uint8_t tlb_fill_flags;
+
+ /*
+ * Additional tlb flags for use by the slow path. If non-zero,
+ * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+ */
+ uint8_t slow_flags[MMU_ACCESS_COUNT];
+
+ /*
+ * Allow target-specific additions to this structure.
+ * This may be used to cache items from the guest cpu
+ * page tables for later use by the implementation.
+ */
+ union {
+ /*
+ * Cache the attrs and shareability fields from the page table entry.
+ *
+ * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+ * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+ * The shareability and guarded fields are as in the SH and GP fields,
+ * respectively, of the VMSAv8-64 PTEs.
+ */
+ struct {
+ uint8_t pte_attrs;
+ uint8_t shareability;
+ bool guarded;
+ } arm;
+ } extra;
+} CPUTLBEntryFull;
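As a brief illustration of the @xlat_section layout described above (the helper names are invented for this sketch and are not part of the tree):

static inline unsigned xlat_section_number(const CPUTLBEntryFull *full)
{
    /* the low TARGET_PAGE_BITS hold the physical section number */
    return full->xlat_section & ~TARGET_PAGE_MASK;
}

static inline hwaddr xlat_section_offset(const CPUTLBEntryFull *full, vaddr addr)
{
    /* the remaining bits are an offset to add to the virtual address */
    return (full->xlat_section & TARGET_PAGE_MASK) + addr;
}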
+
+/*
+ * Data elements that are per MMU mode, minus the bits accessed by
+ * the TCG fast path.
+ */
+typedef struct CPUTLBDesc {
+ /*
+ * Describe a region covering all of the large pages allocated
+ * into the tlb. When any page within this region is flushed,
+ * we must flush the entire tlb. The region is matched if
+ * (addr & large_page_mask) == large_page_addr.
+ */
+ vaddr large_page_addr;
+ vaddr large_page_mask;
+ /* host time (in ns) at the beginning of the time window */
+ int64_t window_begin_ns;
+ /* maximum number of entries observed in the window */
+ size_t window_max_entries;
+ size_t n_used_entries;
+ /* The next index to use in the tlb victim table. */
+ size_t vindex;
+ /* The tlb victim table, in two parts. */
+ CPUTLBEntry vtable[CPU_VTLB_SIZE];
+ CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
+ CPUTLBEntryFull *fulltlb;
+} CPUTLBDesc;
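A minimal sketch of the large-page matching rule spelled out above; the helper name is hypothetical:

static inline bool flush_must_drop_whole_tlb(const CPUTLBDesc *desc, vaddr addr)
{
    /* any address inside the recorded large-page region forces a full flush */
    return (addr & desc->large_page_mask) == desc->large_page_addr;
}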
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+ /* Serialize updates to f.table and d.vtable, and others as noted. */
+ QemuSpin lock;
+ /*
+ * Within dirty, for each bit N, modifications have been made to
+ * mmu_idx N since the last time that mmu_idx was flushed.
+ * Protected by tlb_c.lock.
+ */
+ uint16_t dirty;
+ /*
+ * Statistics. These are not lock protected, but are read and
+ * written atomically. This allows the monitor to print a snapshot
+ * of the stats without interfering with the cpu.
+ */
+ size_t full_flush_count;
+ size_t part_flush_count;
+ size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The entire softmmu tlb, for all MMU modes.
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Since this is placed within CPUNegativeOffsetState, the smallest
+ * negative offsets are at the end of the struct.
+ */
+typedef struct CPUTLB {
+#ifdef CONFIG_TCG
+ CPUTLBCommon c;
+ CPUTLBDesc d[NB_MMU_MODES];
+ CPUTLBDescFast f[NB_MMU_MODES];
+#endif
+} CPUTLB;
+
+/*
+ * Low 16 bits: number of cycles left, used only in icount mode.
+ * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
+ * for this CPU and return to its top level loop (even in non-icount mode).
+ * This allows a single read-compare-cbranch-write sequence to test
+ * for both decrementer underflow and exceptions.
+ */
+typedef union IcountDecr {
+ uint32_t u32;
+ struct {
+#if HOST_BIG_ENDIAN
+ uint16_t high;
+ uint16_t low;
#else
-typedef struct icount_decr_u16 {
- uint16_t low;
- uint16_t high;
-} icount_decr_u16;
+ uint16_t low;
+ uint16_t high;
#endif
+ } u16;
+} IcountDecr;
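A self-contained illustration of the single-comparison trick described above, using a stand-in union and assuming a little-endian host (the #else layout); it is not taken from the tree:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    union {
        uint32_t u32;
        struct { uint16_t low; uint16_t high; } u16;   /* little-endian layout */
    } d;

    d.u32 = 100;                                 /* 100 units of budget left */
    printf("%d\n", (int32_t)d.u32 < 0);          /* 0: keep executing */

    d.u16.high = (uint16_t)-1;                   /* asynchronous exit request */
    printf("%d\n", (int32_t)d.u32 < 0);          /* 1: leave the TB chain */
    return 0;
}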
+
+/*
+ * Elements of CPUState most efficiently accessed from CPUArchState,
+ * via small negative offsets.
+ */
+typedef struct CPUNegativeOffsetState {
+ CPUTLB tlb;
+ IcountDecr icount_decr;
+ bool can_do_io;
+} CPUNegativeOffsetState;
typedef struct CPUBreakpoint {
vaddr pc;
@@ -247,11 +371,6 @@ struct CPUWatchpoint {
struct KVMState;
struct kvm_run;
-struct hax_vcpu_state;
-
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
-
/* work queue */
/* The union type allows passing of 64 bit target pointers on 32 bit
@@ -275,13 +394,20 @@ typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
struct qemu_work_item;
#define CPU_UNSET_NUMA_NODE_ID -1
-#define CPU_TRACE_DSTATE_MAX_EVENTS 32
/**
* CPUState:
* @cpu_index: CPU index (informative).
+ * @cluster_index: Identifies which cluster this CPU is in.
+ * For boards which don't define clusters or for "loose" CPUs not assigned
+ * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
+ * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
+ * QOM parent.
+ * Under TCG this value is propagated to @tcg_cflags.
+ * See TCG CF_CLUSTER_MASK in TranslationBlock::cflags.
+ * @tcg_cflags: Pre-computed cflags for this cpu.
* @nr_cores: Number of cores within this CPU package.
- * @nr_threads: Number of threads within this CPU.
+ * @nr_threads: Number of threads within this CPU core.
* @running: #true if CPU is currently running (lockless).
* @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
* valid under cpu_list_lock.
@@ -294,42 +420,42 @@ struct qemu_work_item;
* @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
* @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
- * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
- * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
- * CPU and return to its top level loop (even in non-icount mode).
- * This allows a single read-compare-cbranch-write sequence to test
- * for both decrementer underflow and exceptions.
- * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
- * requires that IO only be performed on the last instruction of a TB
- * so that interrupts take effect immediately.
+ * @neg.can_do_io: True if memory-mapped IO is allowed.
* @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
* AddressSpaces this CPU has)
* @num_ases: number of CPUAddressSpaces in @cpu_ases
* @as: Pointer to the first AddressSpace, for the convenience of targets which
* only have a single AddressSpace
- * @env_ptr: Pointer to subclass-specific CPUArchState field.
* @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets.
- * @next_cpu: Next CPU sharing TB cache.
+ * @node: QTAILQ of CPUs sharing TB cache.
* @opaque: User data.
* @mem_io_pc: Host Program Counter at which the memory was accessed.
- * @mem_io_vaddr: Target virtual address at which the memory was accessed.
+ * @accel: Pointer to accelerator specific state.
* @kvm_fd: vCPU file descriptor for KVM.
- * @work_mutex: Lock to prevent multiple access to queued_work_*.
- * @queued_work_first: First asynchronous work pending.
- * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
- * to @trace_dstate).
- * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
+ * @work_mutex: Lock to prevent multiple access to @work_list.
+ * @work_list: List of pending asynchronous work.
+ * @plugin_mem_cbs: active plugin memory callbacks
+ * @plugin_state: per-CPU plugin state
* @ignore_memory_transaction_failures: Cached copy of the MachineState
* flag of the same name: allows the board to suppress calling of the
* CPU do_transaction_failed hook function.
+ * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
+ * ring is enabled.
+ * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
+ * dirty ring structure.
*
* State of one CPU core or thread.
+ *
+ * Align, in order to match possible alignment required by CPUArchState,
+ * and eliminate a hole between CPUState and CPUArchState within ArchCPU.
*/
struct CPUState {
/*< private >*/
DeviceState parent_obj;
+ /* cache to avoid expensive CPU_GET_CLASS */
+ CPUClass *cc;
/*< public >*/
int nr_cores;
@@ -337,7 +463,7 @@ struct CPUState {
struct QemuThread *thread;
#ifdef _WIN32
- HANDLE hThread;
+ QemuSemaphore sem;
#endif
int thread_id;
bool running, has_waiter;
@@ -346,31 +472,34 @@ struct CPUState {
bool created;
bool stop;
bool stopped;
+
+ /* Should CPU start in powered-off state? */
+ bool start_powered_off;
+
bool unplug;
bool crash_occurred;
bool exit_request;
+ int exclusive_context_count;
uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
int singlestep_enabled;
int64_t icount_budget;
int64_t icount_extra;
+ uint64_t random_seed;
sigjmp_buf jmp_env;
QemuMutex work_mutex;
- struct qemu_work_item *queued_work_first, *queued_work_last;
+ QSIMPLEQ_HEAD(, qemu_work_item) work_list;
CPUAddressSpace *cpu_ases;
int num_ases;
AddressSpace *as;
MemoryRegion *memory;
- void *env_ptr; /* CPUArchState */
+ CPUJumpCache *tb_jmp_cache;
- /* Accessed in parallel; all accesses must be atomic */
- struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
-
- struct GDBRegisterState *gdb_regs;
+ GArray *gdb_regs;
int gdb_num_regs;
int gdb_num_g_regs;
QTAILQ_ENTRY(CPUState) node;
@@ -387,29 +516,37 @@ struct CPUState {
* we store some rarely used information in the CPU context.
*/
uintptr_t mem_io_pc;
- vaddr mem_io_vaddr;
- /*
- * This is only needed for the legacy cpu_unassigned_access() hook;
- * when all targets using it have been converted to use
- * cpu_transaction_failed() instead it can be removed.
- */
- MMUAccessType mem_io_access_type;
+ /* Only used in KVM */
int kvm_fd;
struct KVMState *kvm_state;
struct kvm_run *kvm_run;
+ struct kvm_dirty_gfn *kvm_dirty_gfns;
+ uint32_t kvm_fetch_index;
+ uint64_t dirty_pages;
+ int kvm_vcpu_stats_fd;
- /* Used for events with 'vcpu' and *without* the 'disabled' properties */
- DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
- DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);
+ /* Used by accel-block: CPU is executing an ioctl() */
+ QemuLockCnt in_ioctl_lock;
+
+#ifdef CONFIG_PLUGIN
+ /*
+ * The callback pointer stays in the main CPUState as it is
+ * accessed via TCG (see gen_empty_mem_helper).
+ */
+ GArray *plugin_mem_cbs;
+ CPUPluginState *plugin_state;
+#endif
/* TODO Move common fields from CPUArchState here. */
int cpu_index;
+ int cluster_index;
+ uint32_t tcg_cflags;
uint32_t halted;
- uint32_t can_do_io;
int32_t exception_index;
- /* shared by kvm, hax and hvf */
+ AccelCPUState *accel;
+ /* shared by kvm and hvf */
bool vcpu_dirty;
/* Used to keep track of an outstanding cpu throttle thread for migration
@@ -417,45 +554,48 @@ struct CPUState {
*/
bool throttle_thread_scheduled;
- bool ignore_memory_transaction_failures;
-
- /* Note that this is accessed at the start of every TB via a negative
- offset from AREG0. Leave this field at the end so as to make the
- (absolute value) offset as small as possible. This reduces code
- size, especially for hosts without large memory offsets. */
- union {
- uint32_t u32;
- icount_decr_u16 u16;
- } icount_decr;
+ /*
+ * Sleep throttle_us_per_full microseconds once dirty ring is full
+ * if dirty page rate limit is enabled.
+ */
+ int64_t throttle_us_per_full;
- struct hax_vcpu_state *hax_vcpu;
+ bool ignore_memory_transaction_failures;
- int hvf_fd;
+ /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
+ bool prctl_unalign_sigbus;
/* track IOMMUs whose translations we've cached in the TCG TLB */
GArray *iommu_notifiers;
+
+ /*
+ * MUST BE LAST in order to minimize the displacement to CPUArchState.
+ */
+ char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
+ CPUNegativeOffsetState neg;
};
+/* Validate placement of CPUNegativeOffsetState. */
+QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
+ sizeof(CPUState) - sizeof(CPUNegativeOffsetState));
+
+static inline CPUArchState *cpu_env(CPUState *cpu)
+{
+ /* We validate that CPUArchState follows CPUState in cpu-all.h. */
+ return (CPUArchState *)(cpu + 1);
+}
+
typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
-extern CPUTailQ cpus;
+extern CPUTailQ cpus_queue;
-#define first_cpu QTAILQ_FIRST_RCU(&cpus)
+#define first_cpu QTAILQ_FIRST_RCU(&cpus_queue)
#define CPU_NEXT(cpu) QTAILQ_NEXT_RCU(cpu, node)
-#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
+#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus_queue, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
- QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)
+ QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus_queue, node, next_cpu)
extern __thread CPUState *current_cpu;
-static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
-{
- unsigned int i;
-
- for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
- atomic_set(&cpu->tb_jmp_cache[i], NULL);
- }
-}
-
/**
* qemu_tcg_mttcg_enabled:
* Check whether we are running MultiThread TCG or not.
@@ -478,10 +618,14 @@ bool cpu_paging_enabled(const CPUState *cpu);
* @cpu: The CPU whose memory mappings are to be obtained.
* @list: Where to write the memory mappings to.
* @errp: Pointer for reporting an #Error.
+ *
+ * Returns: %true on success, %false otherwise.
*/
-void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp);
+#if !defined(CONFIG_USER_ONLY)
+
/**
* cpu_write_elf64_note:
* @f: pointer to a function that writes memory to a file
@@ -531,41 +675,30 @@ int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
*/
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
+#endif /* !CONFIG_USER_ONLY */
+
/**
* CPUDumpFlags:
* @CPU_DUMP_CODE:
* @CPU_DUMP_FPU: dump FPU register state, not just integer
* @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
+ * @CPU_DUMP_VPU: dump VPU registers
*/
enum CPUDumpFlags {
CPU_DUMP_CODE = 0x00010000,
CPU_DUMP_FPU = 0x00020000,
CPU_DUMP_CCOP = 0x00040000,
+ CPU_DUMP_VPU = 0x00080000,
};
/**
* cpu_dump_state:
* @cpu: The CPU whose state is to be dumped.
- * @f: File to dump to.
- * @cpu_fprintf: Function to dump with.
- * @flags: Flags what to dump.
+ * @f: If non-null, dump to this stream, else to current print sink.
*
* Dumps CPU state.
*/
-void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
- int flags);
-
-/**
- * cpu_dump_statistics:
- * @cpu: The CPU whose state is to be dumped.
- * @f: File to dump to.
- * @cpu_fprintf: Function to dump with.
- * @flags: Flags what to dump.
- *
- * Dumps CPU statistics.
- */
-void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
- int flags);
+void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
#ifndef CONFIG_USER_ONLY
/**
@@ -581,18 +714,8 @@ void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
-static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
- MemTxAttrs *attrs)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->get_phys_page_attrs_debug) {
- return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
- }
- /* Fallback for CPUs which don't implement the _attrs_ hook */
- *attrs = MEMTXATTRS_UNSPECIFIED;
- return cc->get_phys_page_debug(cpu, addr);
-}
+hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
/**
* cpu_get_phys_page_debug:
@@ -604,12 +727,7 @@ static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
-static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
-{
- MemTxAttrs attrs = {};
-
- return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
-}
+hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
/** cpu_asidx_from_attrs:
* @cpu: CPU
@@ -618,18 +736,18 @@ static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
* Returns the address space index specifying the CPU AddressSpace
* to use for a memory access with the given transaction attributes.
*/
-static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- int ret = 0;
+int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
- if (cc->asidx_from_attrs) {
- ret = cc->asidx_from_attrs(cpu, attrs);
- assert(ret < cpu->num_ases && ret >= 0);
- }
- return ret;
-}
-#endif
+/**
+ * cpu_virtio_is_big_endian:
+ * @cpu: CPU
+ *
+ * Returns %true if a CPU which supports runtime configurable endianness
+ * is currently big-endian.
+ */
+bool cpu_virtio_is_big_endian(CPUState *cpu);
+
+#endif /* CONFIG_USER_ONLY */
/**
* cpu_list_add:
@@ -654,13 +772,27 @@ void cpu_reset(CPUState *cpu);
* @typename: The CPU base type.
* @cpu_model: The model string without any parameters.
*
- * Looks up a CPU #ObjectClass matching name @cpu_model.
+ * Looks up a concrete CPU #ObjectClass matching name @cpu_model.
*
- * Returns: A #CPUClass or %NULL if not matching class is found.
+ * Returns: A concrete #CPUClass or %NULL if no matching class is found
+ * or if the matching class is abstract.
*/
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
/**
+ * cpu_model_from_type:
+ * @typename: The CPU type name
+ *
+ * Extract the CPU model name from the CPU type name. The
+ * CPU type name is either the combination of the CPU model
+ * name and a suffix, or the same as the CPU model name.
+ *
+ * Returns: CPU model name, or NULL if the CPU class doesn't exist.
+ * The user should g_free() the string once no longer needed.
+ */
+char *cpu_model_from_type(const char *typename);
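A small usage sketch, assuming an Arm CPU whose type name has the usual "<model>-arm-cpu" form; the expected result is only what the description above implies:

char *model = cpu_model_from_type("cortex-a57-arm-cpu");
if (model) {
    /* expected to be the model part, e.g. "cortex-a57" */
    g_free(model);
}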
+
+/**
* cpu_create:
* @typename: The CPU type.
*
@@ -671,15 +803,15 @@ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
CPUState *cpu_create(const char *typename);
/**
- * parse_cpu_model:
- * @cpu_model: The model string including optional parameters.
+ * parse_cpu_option:
+ * @cpu_option: The -cpu option including optional parameters.
*
* Processes optional parameters and registers them as global properties.
*
* Returns: type of CPU to create or prints error and terminates process
* if an error occurred.
*/
-const char *parse_cpu_model(const char *cpu_model);
+const char *parse_cpu_option(const char *cpu_option);
/**
* cpu_has_work:
@@ -773,6 +905,18 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
/**
+ * cpu_in_exclusive_context()
+ * @cpu: The vCPU to check
+ *
+ * Returns true if @cpu is in an exclusive context, for example running
+ * something which has previously been queued via async_safe_run_on_cpu().
+ */
+static inline bool cpu_in_exclusive_context(const CPUState *cpu)
+{
+ return cpu->exclusive_context_count;
+}
+
+/**
* qemu_get_cpu:
* @index: The CPUState@cpu_index value of the CPU to obtain.
*
@@ -803,107 +947,15 @@ bool cpu_exists(int64_t id);
CPUState *cpu_by_arch_id(int64_t id);
/**
- * cpu_throttle_set:
- * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
- *
- * Throttles all vcpus by forcing them to sleep for the given percentage of
- * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
- * (example: 10ms sleep for every 30ms awake).
- *
- * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
- * Once the throttling starts, it will remain in effect until cpu_throttle_stop
- * is called.
- */
-void cpu_throttle_set(int new_throttle_pct);
-
-/**
- * cpu_throttle_stop:
- *
- * Stops the vcpu throttling started by cpu_throttle_set.
- */
-void cpu_throttle_stop(void);
-
-/**
- * cpu_throttle_active:
- *
- * Returns: %true if the vcpus are currently being throttled, %false otherwise.
- */
-bool cpu_throttle_active(void);
-
-/**
- * cpu_throttle_get_percentage:
- *
- * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
- *
- * Returns: The throttle percentage in range 1 to 99.
- */
-int cpu_throttle_get_percentage(void);
-
-#ifndef CONFIG_USER_ONLY
-
-typedef void (*CPUInterruptHandler)(CPUState *, int);
-
-extern CPUInterruptHandler cpu_interrupt_handler;
-
-/**
* cpu_interrupt:
* @cpu: The CPU to set an interrupt on.
* @mask: The interrupts to set.
*
* Invokes the interrupt handler.
*/
-static inline void cpu_interrupt(CPUState *cpu, int mask)
-{
- cpu_interrupt_handler(cpu, mask);
-}
-
-#else /* USER_ONLY */
void cpu_interrupt(CPUState *cpu, int mask);
-#endif /* USER_ONLY */
-
-#ifdef NEED_CPU_H
-
-#ifdef CONFIG_SOFTMMU
-static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
- bool is_write, bool is_exec,
- int opaque, unsigned size)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->do_unassigned_access) {
- cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
- }
-}
-
-static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
-}
-
-static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
- vaddr addr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response,
- uintptr_t retaddr)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
- cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
- mmu_idx, attrs, response, retaddr);
- }
-}
-#endif
-
-#endif /* NEED_CPU_H */
-
/**
* cpu_set_pc:
* @cpu: The CPU to set the program counter for.
@@ -944,14 +996,6 @@ void cpu_exit(CPUState *cpu);
void cpu_resume(CPUState *cpu);
/**
- * cpu_remove:
- * @cpu: The CPU to remove.
- *
- * Requests the CPU to be removed.
- */
-void cpu_remove(CPUState *cpu);
-
- /**
* cpu_remove_sync:
* @cpu: The CPU to remove.
*
@@ -1032,9 +1076,10 @@ void cpu_single_step(CPUState *cpu, int enabled);
#define BP_GDB 0x10
#define BP_CPU 0x20
#define BP_ANY (BP_GDB | BP_CPU)
-#define BP_WATCHPOINT_HIT_READ 0x40
-#define BP_WATCHPOINT_HIT_WRITE 0x80
-#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
+#define BP_HIT_SHIFT 6
+#define BP_WATCHPOINT_HIT_READ (BP_MEM_READ << BP_HIT_SHIFT)
+#define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)
+#define BP_WATCHPOINT_HIT (BP_MEM_ACCESS << BP_HIT_SHIFT)
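With this encoding, turning the BP_MEM_* bits that triggered a watchpoint into the corresponding *_HIT bits is a plain shift; a hypothetical helper to show the relationship:

static inline int bp_hit_flags(int bp_mem_flags)
{
    /* e.g. BP_MEM_READ -> BP_WATCHPOINT_HIT_READ */
    return (bp_mem_flags & BP_MEM_ACCESS) << BP_HIT_SHIFT;
}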
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint);
@@ -1057,12 +1102,52 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
return false;
}
+#if defined(CONFIG_USER_ONLY)
+static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, CPUWatchpoint **watchpoint)
+{
+ return -ENOSYS;
+}
+
+static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
+ vaddr len, int flags)
+{
+ return -ENOSYS;
+}
+
+static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
+ CPUWatchpoint *wp)
+{
+}
+
+static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
+{
+}
+#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
+#endif
+
+/**
+ * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
+ * @cs: CPUState pointer
+ *
+ * The memory callbacks are installed if a plugin has instrumented an
+ * instruction for memory. This can be useful to know if you want to
+ * force a slow path for a series of memory accesses.
+ */
+static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
+{
+#ifdef CONFIG_PLUGIN
+ return !!cpu->plugin_mem_cbs;
+#else
+ return false;
+#endif
+}
/**
* cpu_get_address_space:
@@ -1074,31 +1159,34 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
*/
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
-void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
-extern Property cpu_common_props[];
+G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
+ G_GNUC_PRINTF(2, 3);
+
+/* $(top_srcdir)/cpu.c */
+void cpu_class_init_props(DeviceClass *dc);
void cpu_exec_initfn(CPUState *cpu);
-void cpu_exec_realizefn(CPUState *cpu, Error **errp);
+bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);
+void cpu_exec_reset_hold(CPUState *cpu);
/**
* target_words_bigendian:
* Returns true if the (default) endianness of the target is big endian,
* false otherwise. Note that in target-specific code, you can use
- * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
+ * TARGET_BIG_ENDIAN directly instead. On the other hand, common
* code should normally never need to know about the endianness of the
* target, so please do *not* use this function unless you know very well
* what you are doing!
*/
bool target_words_bigendian(void);
+const char *target_name(void);
+
#ifdef NEED_CPU_H
-#ifdef CONFIG_SOFTMMU
-extern const struct VMStateDescription vmstate_cpu_common;
-#else
-#define vmstate_cpu_common vmstate_dummy
-#endif
+#ifndef CONFIG_USER_ONLY
+
+extern const VMStateDescription vmstate_cpu_common;
#define VMSTATE_CPU() { \
.name = "parent_obj", \
@@ -1107,9 +1195,11 @@ extern const struct VMStateDescription vmstate_cpu_common;
.flags = VMS_STRUCT, \
.offset = 0, \
}
+#endif /* !CONFIG_USER_ONLY */
#endif /* NEED_CPU_H */
#define UNASSIGNED_CPU_INDEX -1
+#define UNASSIGNED_CLUSTER_INDEX -1
#endif
diff --git a/include/hw/core/generic-loader.h b/include/hw/core/generic-loader.h
index dd27c42ab0..19d87b39c8 100644
--- a/include/hw/core/generic-loader.h
+++ b/include/hw/core/generic-loader.h
@@ -19,8 +19,10 @@
#define GENERIC_LOADER_H
#include "elf.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct GenericLoaderState {
+struct GenericLoaderState {
/* <private> */
DeviceState parent_obj;
@@ -37,10 +39,9 @@ typedef struct GenericLoaderState {
bool force_raw;
bool data_be;
bool set_pc;
-} GenericLoaderState;
+};
#define TYPE_GENERIC_LOADER "loader"
-#define GENERIC_LOADER(obj) OBJECT_CHECK(GenericLoaderState, (obj), \
- TYPE_GENERIC_LOADER)
+OBJECT_DECLARE_SIMPLE_TYPE(GenericLoaderState, GENERIC_LOADER)
#endif
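For readers less familiar with the QOM macro: OBJECT_DECLARE_SIMPLE_TYPE(GenericLoaderState, GENERIC_LOADER) supplies the struct typedef and the GENERIC_LOADER() checked-cast macro that the removed OBJECT_CHECK-based definition provided by hand, so call sites stay unchanged. A sketch (the callback name is invented):

static void example_cb(DeviceState *dev)
{
    GenericLoaderState *s = GENERIC_LOADER(dev);   /* checked downcast, as before */

    (void)s;
}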
diff --git a/include/hw/core/resetcontainer.h b/include/hw/core/resetcontainer.h
new file mode 100644
index 0000000000..23db0c7a88
--- /dev/null
+++ b/include/hw/core/resetcontainer.h
@@ -0,0 +1,48 @@
+/*
+ * Reset container
+ *
+ * Copyright (c) 2024 Linaro, Ltd
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_RESETCONTAINER_H
+#define HW_RESETCONTAINER_H
+
+/*
+ * The "reset container" is an object which implements the Resettable
+ * interface. It contains a list of arbitrary other objects which also
+ * implement Resettable. Resetting the reset container resets all the
+ * objects in it.
+ */
+
+#include "qom/object.h"
+
+#define TYPE_RESETTABLE_CONTAINER "resettable-container"
+OBJECT_DECLARE_TYPE(ResettableContainer, ResettableContainerClass, RESETTABLE_CONTAINER)
+
+/**
+ * resettable_container_add: Add a resettable object to the container
+ * @rc: container
+ * @obj: object to add to the container
+ *
+ * Add @obj to the ResettableContainer @rc. @obj must implement the
+ * Resettable interface.
+ *
+ * When @rc is reset, it will reset every object that has been added
+ * to it, in the order they were added.
+ */
+void resettable_container_add(ResettableContainer *rc, Object *obj);
+
+/**
+ * resettable_container_remove: Remove an object from the container
+ * @rc: container
+ * @obj: object to remove from the container
+ *
+ * Remove @obj from the ResettableContainer @rc. @obj must have been
+ * previously added to this container.
+ */
+void resettable_container_remove(ResettableContainer *rc, Object *obj);
+
+#endif
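A minimal usage sketch, assuming the Resettable helpers from include/hw/resettable.h (resettable_reset() with RESET_TYPE_COLD); dev_a and dev_b stand for any devices implementing Resettable:

ResettableContainer *rc = RESETTABLE_CONTAINER(object_new(TYPE_RESETTABLE_CONTAINER));

resettable_container_add(rc, OBJECT(dev_a));
resettable_container_add(rc, OBJECT(dev_b));

/* resets dev_a, then dev_b, in insertion order */
resettable_reset(OBJECT(rc), RESET_TYPE_COLD);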
diff --git a/include/hw/core/split-irq.h b/include/hw/core/split-irq.h
index bb87157c5a..ff8852f407 100644
--- a/include/hw/core/split-irq.h
+++ b/include/hw/core/split-irq.h
@@ -35,7 +35,6 @@
#ifndef HW_SPLIT_IRQ_H
#define HW_SPLIT_IRQ_H
-#include "hw/irq.h"
#include "hw/sysbus.h"
#include "qom/object.h"
@@ -43,9 +42,8 @@
#define MAX_SPLIT_LINES 16
-typedef struct SplitIRQ SplitIRQ;
-#define SPLIT_IRQ(obj) OBJECT_CHECK(SplitIRQ, (obj), TYPE_SPLIT_IRQ)
+OBJECT_DECLARE_SIMPLE_TYPE(SplitIRQ, SPLIT_IRQ)
struct SplitIRQ {
DeviceState parent_obj;
diff --git a/include/hw/arm/sysbus-fdt.h b/include/hw/core/sysbus-fdt.h
index 340c382cdd..340c382cdd 100644
--- a/include/hw/arm/sysbus-fdt.h
+++ b/include/hw/core/sysbus-fdt.h
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
new file mode 100644
index 0000000000..24d003fe04
--- /dev/null
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -0,0 +1,92 @@
+/*
+ * CPU operations specific to system emulation
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSEMU_CPU_OPS_H
+#define SYSEMU_CPU_OPS_H
+
+#include "hw/core/cpu.h"
+
+/*
+ * struct SysemuCPUOps: System operations specific to a CPU class
+ */
+typedef struct SysemuCPUOps {
+ /**
+ * @get_memory_mapping: Callback for obtaining the memory mappings.
+ */
+ bool (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
+ Error **errp);
+ /**
+ * @get_paging_enabled: Callback for inquiring whether paging is enabled.
+ */
+ bool (*get_paging_enabled)(const CPUState *cpu);
+ /**
+ * @get_phys_page_debug: Callback for obtaining a physical address.
+ */
+ hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
+ /**
+ * @get_phys_page_attrs_debug: Callback for obtaining a physical address
+ * and the associated memory transaction attributes to use for the
+ * access.
+ * CPUs which use memory transaction attributes should implement this
+ * instead of get_phys_page_debug.
+ */
+ hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
+ /**
+ * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
+ * a memory access with the specified memory transaction attributes.
+ */
+ int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
+ /**
+ * @get_crash_info: Callback for reporting guest crash information in
+ * GUEST_PANICKED events.
+ */
+ GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
+ /**
+ * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
+ * 32-bit VM coredump.
+ */
+ int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, DumpState *s);
+ /**
+ * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
+ * 64-bit VM coredump.
+ */
+ int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, DumpState *s);
+ /**
+ * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
+ * note to a 32-bit VM coredump.
+ */
+ int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
+ DumpState *s);
+ /**
+ * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
+ * note to a 64-bit VM coredump.
+ */
+ int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
+ DumpState *s);
+ /**
+ * @virtio_is_big_endian: Callback to return %true if a CPU which supports
+ * runtime configurable endianness is currently big-endian.
+ * Non-configurable CPUs can use the default implementation of this method.
+ * This method should not be used by any callers other than the pre-1.0
+ * virtio devices.
+ */
+ bool (*virtio_is_big_endian)(CPUState *cpu);
+
+ /**
+ * @legacy_vmsd: Legacy state for migration.
+ * Do not use in new targets, use #DeviceClass::vmsd instead.
+ */
+ const VMStateDescription *legacy_vmsd;
+
+} SysemuCPUOps;
+
+#endif /* SYSEMU_CPU_OPS_H */
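As a sketch of how a target is expected to hook this up (the "foo" target and its callbacks are invented for illustration): the target defines a const SysemuCPUOps and points CPUClass::sysemu_ops at it from its class_init.

static const SysemuCPUOps foo_sysemu_ops = {
    .get_phys_page_debug = foo_cpu_get_phys_page_debug,
    .legacy_vmsd = &vmstate_foo_cpu,
};

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->sysemu_ops = &foo_sysemu_ops;
}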
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
new file mode 100644
index 0000000000..bf8ff8e3ee
--- /dev/null
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -0,0 +1,224 @@
+/*
+ * TCG CPU-specific operations
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TCG_CPU_OPS_H
+#define TCG_CPU_OPS_H
+
+#include "hw/core/cpu.h"
+
+struct TCGCPUOps {
+ /**
+ * @initialize: Initialize TCG state
+ *
+ * Called when the first CPU is realized.
+ */
+ void (*initialize)(void);
+ /**
+ * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
+ *
+ * This is called when we abandon execution of a TB before starting it,
+ * and must set all parts of the CPU state which the previous TB in the
+ * chain may not have updated.
+ * By default, when this is NULL, a call is made to @set_pc(tb->pc).
+ *
+ * If more state needs to be restored, the target must implement a
+ * function to restore all the state, and register it here.
+ */
+ void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
+ /**
+ * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
+ *
+ * This is called when we unwind state in the middle of a TB,
+ * usually before raising an exception. Set all parts of the CPU
+ * state which are tracked insn-by-insn in the target-specific
+ * arguments to start_insn, passed as @data.
+ */
+ void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
+ const uint64_t *data);
+
+ /** @cpu_exec_enter: Callback for cpu_exec preparation */
+ void (*cpu_exec_enter)(CPUState *cpu);
+ /** @cpu_exec_exit: Callback for cpu_exec cleanup */
+ void (*cpu_exec_exit)(CPUState *cpu);
+ /** @debug_excp_handler: Callback for handling debug exceptions */
+ void (*debug_excp_handler)(CPUState *cpu);
+
+#ifdef NEED_CPU_H
+#ifdef CONFIG_USER_ONLY
+ /**
+ * @fake_user_interrupt: Callback for 'fake exception' handling.
+ *
+ * Simulate a 'fake exception' which will be handled outside the
+ * cpu execution loop (hack for x86 user mode).
+ */
+ void (*fake_user_interrupt)(CPUState *cpu);
+
+ /**
+ * record_sigsegv:
+ * @cpu: cpu context
+ * @addr: faulting guest address
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGSEGV with si_code set for @maperr,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers and the siginfo_t,
+ * then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
+ * so that a "normal" cpu exception can be raised. In this case,
+ * the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigsegv)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+ /**
+ * record_sigbus:
+ * @cpu: cpu context
+ * @addr: misaligned guest address
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGBUS with si_code BUS_ADRALN,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers, and the siginfo_t,
+ * then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu do_unaligned_access code,
+ * @ra is provided so that a "normal" cpu exception can be raised.
+ * In this case, the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigbus)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra);
+#else
+ /** @do_interrupt: Callback for interrupt handling. */
+ void (*do_interrupt)(CPUState *cpu);
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+ /** @cpu_exec_halt: Callback for handling halt in cpu_exec */
+ void (*cpu_exec_halt)(CPUState *cpu);
+ /**
+ * @tlb_fill: Handle a softmmu tlb miss
+ *
+ * If the access is valid, call tlb_set_page and return true;
+ * if the access is invalid and probe is true, return false;
+ * otherwise raise an exception and do not return.
+ */
+ bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+ /**
+ * @do_transaction_failed: Callback for handling failed memory transactions
+ * (ie bus faults or external aborts; not MMU faults)
+ */
+ void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+ /**
+ * @do_unaligned_access: Callback for unaligned access handling
+ * The callback must exit via raising an exception.
+ */
+ G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+ /**
+ * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
+ */
+ vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
+
+ /**
+ * @debug_check_watchpoint: return true if the architectural
+ * watchpoint whose address has matched should really fire, used by ARM
+ * and RISC-V
+ */
+ bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
+
+ /**
+ * @debug_check_breakpoint: return true if the architectural
+ * breakpoint whose PC has matched should really fire.
+ */
+ bool (*debug_check_breakpoint)(CPUState *cpu);
+
+ /**
+ * @io_recompile_replay_branch: Callback for cpu_io_recompile.
+ *
+ * The cpu has been stopped, and cpu_restore_state_from_tb has been
+ * called. If the faulting instruction is in a delay slot, and the
+ * target architecture requires re-execution of the branch, then
+ * adjust the cpu state as required and return true.
+ */
+ bool (*io_recompile_replay_branch)(CPUState *cpu,
+ const TranslationBlock *tb);
+ /**
+ * @need_replay_interrupt: Return %true if @interrupt_request
+ * needs to be recorded for replay purposes.
+ */
+ bool (*need_replay_interrupt)(int interrupt_request);
+#endif /* !CONFIG_USER_ONLY */
+#endif /* NEED_CPU_H */
+
+};
+
+#if defined(CONFIG_USER_ONLY)
+
+static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs atr, int fl, uintptr_t ra)
+{
+}
+
+static inline int cpu_watchpoint_address_matches(CPUState *cpu,
+ vaddr addr, vaddr len)
+{
+ return 0;
+}
+
+#else
+
+/**
+ * cpu_check_watchpoint:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ * @attrs: memory access attributes
+ * @flags: watchpoint access type
+ * @ra: unwind return address
+ *
+ * Check for a watchpoint hit in [addr, addr+len) of the type
+ * specified by @flags. Exit via exception with a hit.
+ */
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs attrs, int flags, uintptr_t ra);
+
+/**
+ * cpu_watchpoint_address_matches:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ *
+ * Return the watchpoint flags that apply to [addr, addr+len).
+ * If no watchpoint is registered for the range, the result is 0.
+ */
+int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
+
+#endif
+
+#endif /* TCG_CPU_OPS_H */
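A skeleton of the @tlb_fill contract described above, for an invented "foo" target; foo_translate_address() and foo_raise_mmu_exception() (assumed noreturn) stand in for the target's page-table walk and fault path:

static bool foo_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    hwaddr paddr;
    int prot;

    if (foo_translate_address(cs, addr, access_type, mmu_idx, &paddr, &prot)) {
        /* valid: install the mapping and report success */
        tlb_set_page(cs, addr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        /* invalid, but the caller only wanted to know */
        return false;
    }
    /* invalid: raise the architectural fault; does not return */
    foo_raise_mmu_exception(cs, addr, access_type, retaddr);
}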
diff --git a/include/hw/cpu/a15mpcore.h b/include/hw/cpu/a15mpcore.h
index b423533d20..75d39e5458 100644
--- a/include/hw/cpu/a15mpcore.h
+++ b/include/hw/cpu/a15mpcore.h
@@ -22,14 +22,14 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gic.h"
+#include "qom/object.h"
/* A15MP private memory region. */
#define TYPE_A15MPCORE_PRIV "a15mpcore_priv"
-#define A15MPCORE_PRIV(obj) \
- OBJECT_CHECK(A15MPPrivState, (obj), TYPE_A15MPCORE_PRIV)
+OBJECT_DECLARE_SIMPLE_TYPE(A15MPPrivState, A15MPCORE_PRIV)
-typedef struct A15MPPrivState {
+struct A15MPPrivState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -39,6 +39,6 @@ typedef struct A15MPPrivState {
MemoryRegion container;
GICState gic;
-} A15MPPrivState;
+};
#endif
diff --git a/include/hw/cpu/a9mpcore.h b/include/hw/cpu/a9mpcore.h
index 5d67ca22c4..e0396ab6af 100644
--- a/include/hw/cpu/a9mpcore.h
+++ b/include/hw/cpu/a9mpcore.h
@@ -15,12 +15,12 @@
#include "hw/misc/a9scu.h"
#include "hw/timer/arm_mptimer.h"
#include "hw/timer/a9gtimer.h"
+#include "qom/object.h"
#define TYPE_A9MPCORE_PRIV "a9mpcore_priv"
-#define A9MPCORE_PRIV(obj) \
- OBJECT_CHECK(A9MPPrivState, (obj), TYPE_A9MPCORE_PRIV)
+OBJECT_DECLARE_SIMPLE_TYPE(A9MPPrivState, A9MPCORE_PRIV)
-typedef struct A9MPPrivState {
+struct A9MPPrivState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -34,6 +34,6 @@ typedef struct A9MPPrivState {
A9GTimerState gtimer;
ARMMPTimerState mptimer;
ARMMPTimerState wdt;
-} A9MPPrivState;
+};
#endif
diff --git a/include/hw/cpu/arm11mpcore.h b/include/hw/cpu/arm11mpcore.h
index 6196109ca2..2cac8c1232 100644
--- a/include/hw/cpu/arm11mpcore.h
+++ b/include/hw/cpu/arm11mpcore.h
@@ -14,12 +14,12 @@
#include "hw/misc/arm11scu.h"
#include "hw/intc/arm_gic.h"
#include "hw/timer/arm_mptimer.h"
+#include "qom/object.h"
#define TYPE_ARM11MPCORE_PRIV "arm11mpcore_priv"
-#define ARM11MPCORE_PRIV(obj) \
- OBJECT_CHECK(ARM11MPCorePriveState, (obj), TYPE_ARM11MPCORE_PRIV)
+OBJECT_DECLARE_SIMPLE_TYPE(ARM11MPCorePriveState, ARM11MPCORE_PRIV)
-typedef struct ARM11MPCorePriveState {
+struct ARM11MPCorePriveState {
SysBusDevice parent_obj;
uint32_t num_cpu;
@@ -30,6 +30,6 @@ typedef struct ARM11MPCorePriveState {
GICState gic;
ARMMPTimerState mptimer;
ARMMPTimerState wdtimer;
-} ARM11MPCorePriveState;
+};
#endif
diff --git a/include/hw/cpu/cluster.h b/include/hw/cpu/cluster.h
index 7381823243..53fbf36af5 100644
--- a/include/hw/cpu/cluster.h
+++ b/include/hw/cpu/cluster.h
@@ -20,8 +20,8 @@
#ifndef HW_CPU_CLUSTER_H
#define HW_CPU_CLUSTER_H
-#include "qemu/osdep.h"
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
/*
* CPU Cluster type
@@ -34,11 +34,34 @@
* Arm big.LITTLE system) they should be in different clusters. If the CPUs do
* not have the same view of memory (for example the main CPU and a management
* controller processor) they should be in different clusters.
+ *
+ * A cluster is created by creating an object of TYPE_CPU_CLUSTER, and then
+ * adding the CPUs to it as QOM child objects (e.g. using the
+ * object_initialize_child() or object_property_add_child() functions).
+ * The CPUs may be either direct children of the cluster object, or indirect
+ * children (e.g. children of children of the cluster object).
+ *
+ * All CPUs must be added as children before the cluster is realized.
+ * (Regrettably QOM provides no way to prevent adding children to a realized
+ * object and no way for the parent to be notified when a new child is added
+ * to it, so this restriction is not checked for, but the system will not
+ * behave correctly if it is not adhered to. The cluster will assert that
+ * it contains at least one CPU, which should catch most inadvertent
+ * violations of this constraint.)
+ *
+ * A CPU which is not put into any cluster will be considered implicitly
+ * to be in a cluster with all the other "loose" CPUs, so all CPUs that are
+ * not assigned to clusters must be identical.
*/
#define TYPE_CPU_CLUSTER "cpu-cluster"
-#define CPU_CLUSTER(obj) \
- OBJECT_CHECK(CPUClusterState, (obj), TYPE_CPU_CLUSTER)
+OBJECT_DECLARE_SIMPLE_TYPE(CPUClusterState, CPU_CLUSTER)
+
+/*
+ * This limit is imposed by TCG, which puts the cluster ID into an
+ * 8 bit field (and uses all-1s for the default "not in any cluster").
+ */
+#define MAX_CLUSTERS 255
/**
* CPUClusterState:
@@ -47,12 +70,12 @@
*
* State of a CPU cluster.
*/
-typedef struct CPUClusterState {
+struct CPUClusterState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
uint32_t cluster_id;
-} CPUClusterState;
+};
#endif
diff --git a/include/hw/cpu/core.h b/include/hw/cpu/core.h
index b7470644d8..98ab91647e 100644
--- a/include/hw/cpu/core.h
+++ b/include/hw/cpu/core.h
@@ -9,21 +9,21 @@
#ifndef HW_CPU_CORE_H
#define HW_CPU_CORE_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
#define TYPE_CPU_CORE "cpu-core"
-#define CPU_CORE(obj) \
- OBJECT_CHECK(CPUCore, (obj), TYPE_CPU_CORE)
+OBJECT_DECLARE_SIMPLE_TYPE(CPUCore, CPU_CORE)
-typedef struct CPUCore {
+struct CPUCore {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
int core_id;
int nr_threads;
-} CPUCore;
+};
/* Note: topology field names need to be kept in sync with
* 'CpuInstanceProperties' */
diff --git a/include/hw/cris/etraxfs.h b/include/hw/cris/etraxfs.h
index 8da965addb..012c4e9974 100644
--- a/include/hw/cris/etraxfs.h
+++ b/include/hw/cris/etraxfs.h
@@ -27,24 +27,13 @@
#include "net/net.h"
#include "hw/cris/etraxfs_dma.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "qapi/error.h"
-/* Instantiate an ETRAXFS Ethernet MAC. */
-static inline DeviceState *
-etraxfs_eth_init(NICInfo *nd, hwaddr base, int phyaddr,
- void *dma_out, void *dma_in)
-{
- DeviceState *dev;
- qemu_check_nic_model(nd, "fseth");
-
- dev = qdev_create(NULL, "etraxfs-eth");
- qdev_set_nic_properties(dev, nd);
- qdev_prop_set_uint32(dev, "phyaddr", phyaddr);
- qdev_prop_set_ptr(dev, "dma_out", dma_out);
- qdev_prop_set_ptr(dev, "dma_in", dma_in);
- qdev_init_nofail(dev);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
- return dev;
-}
+DeviceState *etraxfs_eth_init(hwaddr base, int phyaddr,
+ struct etraxfs_dma_client *dma_out,
+ struct etraxfs_dma_client *dma_in);
static inline DeviceState *etraxfs_ser_create(hwaddr addr,
qemu_irq irq,
@@ -53,10 +42,10 @@ static inline DeviceState *etraxfs_ser_create(hwaddr addr,
DeviceState *dev;
SysBusDevice *s;
- dev = qdev_create(NULL, "etraxfs,serial");
+ dev = qdev_new("etraxfs-serial");
s = SYS_BUS_DEVICE(dev);
qdev_prop_set_chr(dev, "chardev", chr);
- qdev_init_nofail(dev);
+ sysbus_realize_and_unref(s, &error_fatal);
sysbus_mmio_map(s, 0, addr);
sysbus_connect_irq(s, 0, irq);
return dev;
diff --git a/include/hw/cris/etraxfs_dma.h b/include/hw/cris/etraxfs_dma.h
index f6f33e0980..095d76b956 100644
--- a/include/hw/cris/etraxfs_dma.h
+++ b/include/hw/cris/etraxfs_dma.h
@@ -1,6 +1,8 @@
#ifndef HW_ETRAXFS_DMA_H
#define HW_ETRAXFS_DMA_H
+#include "exec/hwaddr.h"
+
struct dma_context_metadata {
/* data descriptor md */
uint16_t metadata;
diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h
new file mode 100644
index 0000000000..75e47b6864
--- /dev/null
+++ b/include/hw/cxl/cxl.h
@@ -0,0 +1,70 @@
+/*
+ * QEMU CXL Support
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_H
+#define CXL_H
+
+
+#include "qapi/qapi-types-machine.h"
+#include "qapi/qapi-visit-machine.h"
+#include "hw/pci/pci_host.h"
+#include "cxl_pci.h"
+#include "cxl_component.h"
+#include "cxl_device.h"
+
+#define CXL_CACHE_LINE_SIZE 64
+#define CXL_COMPONENT_REG_BAR_IDX 0
+#define CXL_DEVICE_REG_BAR_IDX 2
+
+#define CXL_WINDOW_MAX 10
+
+typedef struct PXBCXLDev PXBCXLDev;
+
+typedef struct CXLFixedWindow {
+ uint64_t size;
+ char **targets;
+ PXBCXLDev *target_hbs[16];
+ uint8_t num_targets;
+ uint8_t enc_int_ways;
+ uint8_t enc_int_gran;
+ /* Todo: XOR based interleaving */
+ MemoryRegion mr;
+ hwaddr base;
+} CXLFixedWindow;
+
+typedef struct CXLState {
+ bool is_enabled;
+ MemoryRegion host_mr;
+ unsigned int next_mr_idx;
+ GList *fixed_windows;
+ CXLFixedMemoryWindowOptionsList *cfmw_list;
+} CXLState;
+
+struct CXLHost {
+ PCIHostState parent_obj;
+
+ CXLComponentState cxl_cstate;
+ bool passthrough;
+};
+
+#define TYPE_PXB_CXL_HOST "pxb-cxl-host"
+OBJECT_DECLARE_SIMPLE_TYPE(CXLHost, PXB_CXL_HOST)
+
+#define TYPE_CXL_USP "cxl-upstream"
+
+typedef struct CXLUpstreamPort CXLUpstreamPort;
+DECLARE_INSTANCE_CHECKER(CXLUpstreamPort, CXL_USP, TYPE_CXL_USP)
+CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp);
+
+#define TYPE_CXL_DSP "cxl-downstream"
+
+typedef struct CXLDownstreamPort CXLDownstreamPort;
+DECLARE_INSTANCE_CHECKER(CXLDownstreamPort, CXL_DSP, TYPE_CXL_DSP)
+
+#endif
diff --git a/include/hw/cxl/cxl_cdat.h b/include/hw/cxl/cxl_cdat.h
new file mode 100644
index 0000000000..17a09066dc
--- /dev/null
+++ b/include/hw/cxl/cxl_cdat.h
@@ -0,0 +1,172 @@
+/*
+ * CXL CDAT Structure
+ *
+ * Copyright (C) 2021 Avery Design Systems, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_CDAT_H
+#define CXL_CDAT_H
+
+#include "hw/cxl/cxl_pci.h"
+#include "hw/pci/pcie_doe.h"
+
+/*
+ * Reference:
+ * Coherent Device Attribute Table (CDAT) Specification, Rev. 1.03, July 2022
+ * Compute Express Link (CXL) Specification, Rev. 3.1, Aug. 2023
+ */
+/* Table Access DOE - CXL r3.1 8.1.11 */
+#define CXL_DOE_TABLE_ACCESS 2
+#define CXL_DOE_PROTOCOL_CDAT ((CXL_DOE_TABLE_ACCESS << 16) | CXL_VENDOR_ID)
+
+/* Read Entry - CXL r3.1 8.1.11.1 */
+#define CXL_DOE_TAB_TYPE_CDAT 0
+#define CXL_DOE_TAB_ENT_MAX 0xFFFF
+
+/* Read Entry Request - CXL r3.1 8.1.11.1 Table 8-13 */
+#define CXL_DOE_TAB_REQ 0
+typedef struct CDATReq {
+ DOEHeader header;
+ uint8_t req_code;
+ uint8_t table_type;
+ uint16_t entry_handle;
+} QEMU_PACKED CDATReq;
+
+/* Read Entry Response - CXL r3.1 8.1.11.1 Table 8-14 */
+#define CXL_DOE_TAB_RSP 0
+typedef struct CDATRsp {
+ DOEHeader header;
+ uint8_t rsp_code;
+ uint8_t table_type;
+ uint16_t entry_handle;
+} QEMU_PACKED CDATRsp;
+
+/* CDAT Table Format - CDAT Table 1 */
+#define CXL_CDAT_REV 2
+typedef struct CDATTableHeader {
+ uint32_t length;
+ uint8_t revision;
+ uint8_t checksum;
+ uint8_t reserved[6];
+ uint32_t sequence;
+} QEMU_PACKED CDATTableHeader;
+
+/* CDAT Structure Types - CDAT Table 2 */
+typedef enum {
+ CDAT_TYPE_DSMAS = 0,
+ CDAT_TYPE_DSLBIS = 1,
+ CDAT_TYPE_DSMSCIS = 2,
+ CDAT_TYPE_DSIS = 3,
+ CDAT_TYPE_DSEMTS = 4,
+ CDAT_TYPE_SSLBIS = 5,
+} CDATType;
+
+typedef struct CDATSubHeader {
+ uint8_t type;
+ uint8_t reserved;
+ uint16_t length;
+} CDATSubHeader;
+
+/* Device Scoped Memory Affinity Structure - CDAT Table 3 */
+typedef struct CDATDsmas {
+ CDATSubHeader header;
+ uint8_t DSMADhandle;
+ uint8_t flags;
+#define CDAT_DSMAS_FLAG_NV (1 << 2)
+#define CDAT_DSMAS_FLAG_SHAREABLE (1 << 3)
+#define CDAT_DSMAS_FLAG_HW_COHERENT (1 << 4)
+#define CDAT_DSMAS_FLAG_DYNAMIC_CAP (1 << 5)
+ uint16_t reserved;
+ uint64_t DPA_base;
+ uint64_t DPA_length;
+} CDATDsmas;
+QEMU_BUILD_BUG_ON(sizeof(CDATDsmas) != 24);
+
+/* Device Scoped Latency and Bandwidth Information Structure - CDAT Table 5 */
+typedef struct CDATDslbis {
+ CDATSubHeader header;
+ uint8_t handle;
+ /* Definitions of these fields refer directly to HMAT fields */
+ uint8_t flags;
+ uint8_t data_type;
+ uint8_t reserved;
+ uint64_t entry_base_unit;
+ uint16_t entry[3];
+ uint16_t reserved2;
+} CDATDslbis;
+QEMU_BUILD_BUG_ON(sizeof(CDATDslbis) != 24);
+
+/* Device Scoped Memory Side Cache Information Structure - CDAT Table 6 */
+typedef struct CDATDsmscis {
+ CDATSubHeader header;
+ uint8_t DSMAS_handle;
+ uint8_t reserved[3];
+ uint64_t memory_side_cache_size;
+ uint32_t cache_attributes;
+} QEMU_PACKED CDATDsmscis;
+
+/* Device Scoped Initiator Structure - CDAT Table 7 */
+typedef struct CDATDsis {
+ CDATSubHeader header;
+ uint8_t flags;
+ uint8_t handle;
+ uint16_t reserved;
+} QEMU_PACKED CDATDsis;
+
+/* Device Scoped EFI Memory Type Structure - CDAT Table 8 */
+typedef struct CDATDsemts {
+ CDATSubHeader header;
+ uint8_t DSMAS_handle;
+ uint8_t EFI_memory_type_attr;
+ uint16_t reserved;
+ uint64_t DPA_offset;
+ uint64_t DPA_length;
+} CDATDsemts;
+QEMU_BUILD_BUG_ON(sizeof(CDATDsemts) != 24);
+
+/* Switch Scoped Latency and Bandwidth Information Structure - CDAT Table 9 */
+typedef struct CDATSslbisHeader {
+ CDATSubHeader header;
+ uint8_t data_type;
+ uint8_t reserved[3];
+ uint64_t entry_base_unit;
+} CDATSslbisHeader;
+QEMU_BUILD_BUG_ON(sizeof(CDATSslbisHeader) != 16);
+
+#define CDAT_PORT_ID_USP 0x100
+/* Switch Scoped Latency and Bandwidth Entry - CDAT Table 10 */
+typedef struct CDATSslbe {
+ uint16_t port_x_id;
+ uint16_t port_y_id;
+ uint16_t latency_bandwidth;
+ uint16_t reserved;
+} CDATSslbe;
+QEMU_BUILD_BUG_ON(sizeof(CDATSslbe) != 8);
+
+typedef struct CDATSslbis {
+ CDATSslbisHeader sslbis_header;
+ CDATSslbe sslbe[];
+} CDATSslbis;
+
+typedef struct CDATEntry {
+ void *base;
+ uint32_t length;
+} CDATEntry;
+
+typedef struct CDATObject {
+ CDATEntry *entry;
+ int entry_len;
+
+ int (*build_cdat_table)(CDATSubHeader ***cdat_table, void *priv);
+ void (*free_cdat_table)(CDATSubHeader **cdat_table, int num, void *priv);
+ bool to_update;
+ void *private;
+ char *filename;
+ uint8_t *buf;
+ struct CDATSubHeader **built_buf;
+ int built_buf_len;
+} CDATObject;
+#endif /* CXL_CDAT_H */
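A sketch of a device-side Table Access response built from the defines above; the DOEHeader field names and the DWORD-granularity length are taken from pcie_doe.h and the DOE spec as assumptions here, and entry_len/handle are illustrative locals:

CDATRsp rsp = {
    .header = {
        .vendor_id = CXL_VENDOR_ID,              /* from cxl_pci.h */
        .data_obj_type = CXL_DOE_TABLE_ACCESS,
        .length = DIV_ROUND_UP(sizeof(rsp) + entry_len, sizeof(uint32_t)),
    },
    .rsp_code = CXL_DOE_TAB_RSP,
    .table_type = CXL_DOE_TAB_TYPE_CDAT,
    .entry_handle = handle,
};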
diff --git a/include/hw/cxl/cxl_component.h b/include/hw/cxl/cxl_component.h
new file mode 100644
index 0000000000..5012fab6f7
--- /dev/null
+++ b/include/hw/cxl/cxl_component.h
@@ -0,0 +1,280 @@
+/*
+ * QEMU CXL Component
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_COMPONENT_H
+#define CXL_COMPONENT_H
+
+/* CXL r3.1 Section 8.2.4: CXL.cache and CXL.mem Registers */
+#define CXL2_COMPONENT_IO_REGION_SIZE 0x1000
+#define CXL2_COMPONENT_CM_REGION_SIZE 0x1000
+#define CXL2_COMPONENT_BLOCK_SIZE 0x10000
+
+#include "qemu/range.h"
+#include "hw/cxl/cxl_cdat.h"
+#include "hw/register.h"
+#include "qapi/error.h"
+
+enum reg_type {
+ CXL2_DEVICE,
+ CXL2_TYPE3_DEVICE,
+ CXL2_LOGICAL_DEVICE,
+ CXL2_ROOT_PORT,
+ CXL2_RC,
+ CXL2_UPSTREAM_PORT,
+ CXL2_DOWNSTREAM_PORT,
+ CXL3_SWITCH_MAILBOX_CCI,
+};
+
+/*
+ * Capability registers are defined at the top of the CXL.cache/mem region and
+ * are packed. For our purposes we will always define the caps in the same
+ * order.
+ * See CXL r3.1 Table 8-22: CXL_CAPABILITY_ID Assignment for details.
+ */
+
+/* CXL r3.1 Section 8.2.4.1: CXL Capability Header Register */
+#define CXL_CAPABILITY_VERSION 1
+REG32(CXL_CAPABILITY_HEADER, 0)
+ FIELD(CXL_CAPABILITY_HEADER, ID, 0, 16)
+ FIELD(CXL_CAPABILITY_HEADER, VERSION, 16, 4)
+ FIELD(CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 20, 4)
+ FIELD(CXL_CAPABILITY_HEADER, ARRAY_SIZE, 24, 8)
+
+#define CXLx_CAPABILITY_HEADER(type, offset) \
+ REG32(CXL_##type##_CAPABILITY_HEADER, offset) \
+ FIELD(CXL_##type##_CAPABILITY_HEADER, ID, 0, 16) \
+ FIELD(CXL_##type##_CAPABILITY_HEADER, VERSION, 16, 4) \
+ FIELD(CXL_##type##_CAPABILITY_HEADER, PTR, 20, 12)
+CXLx_CAPABILITY_HEADER(RAS, 0x4)
+CXLx_CAPABILITY_HEADER(LINK, 0x8)
+CXLx_CAPABILITY_HEADER(HDM, 0xc)
+CXLx_CAPABILITY_HEADER(EXTSEC, 0x10)
+CXLx_CAPABILITY_HEADER(SNOOP, 0x14)
+
+/*
+ * Capability structures contain the actual registers that the CXL component
+ * implements. Some of these are specific to certain types of components, but
+ * this implementation leaves enough space regardless.
+ */
+
+/* CXL r3.1 Section 8.2.4.17: CXL RAS Capability Structure */
+#define CXL_RAS_CAPABILITY_VERSION 3
+/* Give ample space for caps before this */
+#define CXL_RAS_REGISTERS_OFFSET 0x80
+#define CXL_RAS_REGISTERS_SIZE 0x58
+REG32(CXL_RAS_UNC_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET)
+#define CXL_RAS_UNC_ERR_CACHE_DATA_PARITY 0
+#define CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY 1
+#define CXL_RAS_UNC_ERR_CACHE_BE_PARITY 2
+#define CXL_RAS_UNC_ERR_CACHE_DATA_ECC 3
+#define CXL_RAS_UNC_ERR_MEM_DATA_PARITY 4
+#define CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY 5
+#define CXL_RAS_UNC_ERR_MEM_BE_PARITY 6
+#define CXL_RAS_UNC_ERR_MEM_DATA_ECC 7
+#define CXL_RAS_UNC_ERR_REINIT_THRESHOLD 8
+#define CXL_RAS_UNC_ERR_RSVD_ENCODING 9
+#define CXL_RAS_UNC_ERR_POISON_RECEIVED 10
+#define CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW 11
+#define CXL_RAS_UNC_ERR_INTERNAL 14
+#define CXL_RAS_UNC_ERR_CXL_IDE_TX 15
+#define CXL_RAS_UNC_ERR_CXL_IDE_RX 16
+#define CXL_RAS_UNC_ERR_CXL_UNUSED 63 /* Magic value */
+REG32(CXL_RAS_UNC_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x4)
+REG32(CXL_RAS_UNC_ERR_SEVERITY, CXL_RAS_REGISTERS_OFFSET + 0x8)
+REG32(CXL_RAS_COR_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET + 0xc)
+#define CXL_RAS_COR_ERR_CACHE_DATA_ECC 0
+#define CXL_RAS_COR_ERR_MEM_DATA_ECC 1
+#define CXL_RAS_COR_ERR_CRC_THRESHOLD 2
+#define CXL_RAS_COR_ERR_RETRY_THRESHOLD 3
+#define CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED 4
+#define CXL_RAS_COR_ERR_MEM_POISON_RECEIVED 5
+#define CXL_RAS_COR_ERR_PHYSICAL 6
+REG32(CXL_RAS_COR_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x10)
+REG32(CXL_RAS_ERR_CAP_CTRL, CXL_RAS_REGISTERS_OFFSET + 0x14)
+ FIELD(CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER, 0, 6)
+ FIELD(CXL_RAS_ERR_CAP_CTRL, MULTIPLE_HEADER_RECORDING_CAP, 9, 1)
+ FIELD(CXL_RAS_ERR_POISON_ENABLED, POISON_ENABLED, 13, 1)
+REG32(CXL_RAS_ERR_HEADER0, CXL_RAS_REGISTERS_OFFSET + 0x18)
+#define CXL_RAS_ERR_HEADER_NUM 32
+/* Offset 0x18 - 0x58 reserved for RAS logs */
+
+/* CXL r3.1 Section 8.2.4.18: CXL Security Capability Structure */
+#define CXL_SEC_REGISTERS_OFFSET \
+ (CXL_RAS_REGISTERS_OFFSET + CXL_RAS_REGISTERS_SIZE)
+#define CXL_SEC_REGISTERS_SIZE 0 /* We don't implement 1.1 downstream ports */
+
+/* CXL r3.1 Section 8.2.4.19: CXL Link Capability Structure */
+#define CXL_LINK_CAPABILITY_VERSION 2
+#define CXL_LINK_REGISTERS_OFFSET \
+ (CXL_SEC_REGISTERS_OFFSET + CXL_SEC_REGISTERS_SIZE)
+#define CXL_LINK_REGISTERS_SIZE 0x50
+
+/* CXL r3.1 Section 8.2.4.20: CXL HDM Decoder Capability Structure */
+#define HDM_DECODE_MAX 10 /* Maximum decoders for Devices */
+#define CXL_HDM_CAPABILITY_VERSION 3
+#define CXL_HDM_REGISTERS_OFFSET \
+ (CXL_LINK_REGISTERS_OFFSET + CXL_LINK_REGISTERS_SIZE)
+#define CXL_HDM_REGISTERS_SIZE (0x10 + 0x20 * HDM_DECODE_MAX)
+#define HDM_DECODER_INIT(n) \
+ REG32(CXL_HDM_DECODER##n##_BASE_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x10) \
+ FIELD(CXL_HDM_DECODER##n##_BASE_LO, L, 28, 4) \
+ REG32(CXL_HDM_DECODER##n##_BASE_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x14) \
+ REG32(CXL_HDM_DECODER##n##_SIZE_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x18) \
+ REG32(CXL_HDM_DECODER##n##_SIZE_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x1C) \
+ REG32(CXL_HDM_DECODER##n##_CTRL, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x20) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, IG, 0, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, IW, 4, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, LOCK_ON_COMMIT, 8, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, COMMIT, 9, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, COMMITTED, 10, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, ERR, 11, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, TYPE, 12, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, BI, 13, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, UIO, 14, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, UIG, 16, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, UIW, 20, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, ISP, 24, 4) \
+ REG32(CXL_HDM_DECODER##n##_TARGET_LIST_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x24) \
+ REG32(CXL_HDM_DECODER##n##_TARGET_LIST_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x28) \
+ REG32(CXL_HDM_DECODER##n##_DPA_SKIP_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x24) \
+ REG32(CXL_HDM_DECODER##n##_DPA_SKIP_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x28)
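+
+/*
+ * Note: TARGET_LIST and DPA_SKIP above intentionally share offsets 0x24/0x28.
+ * The target list applies to host bridge and switch upstream port decoders,
+ * while device decoders use the same registers for the DPA skip value.
+ */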
+
+REG32(CXL_HDM_DECODER_CAPABILITY, CXL_HDM_REGISTERS_OFFSET)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT, 0, 4)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 4, 4)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 8, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 9, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 10, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 11, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, 16_WAY, 12, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, UIO, 13, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, UIO_DECODER_COUNT, 16, 4)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, MEMDATA_NXM_CAP, 20, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, SUPPORTED_COHERENCY_MODEL, 21, 2)
+REG32(CXL_HDM_DECODER_GLOBAL_CONTROL, CXL_HDM_REGISTERS_OFFSET + 4)
+ FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, POISON_ON_ERR_EN, 0, 1)
+ FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, HDM_DECODER_ENABLE, 1, 1)
+
+/* Support 4 decoders at all levels of topology */
+#define CXL_HDM_DECODER_COUNT 4
+
+HDM_DECODER_INIT(0);
+HDM_DECODER_INIT(1);
+HDM_DECODER_INIT(2);
+HDM_DECODER_INIT(3);
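+
+/*
+ * Illustrative sketch (assumed local array name, not part of this header):
+ * each HDM_DECODER_INIT(n) expansion provides R_CXL_HDM_DECODERn_* indices
+ * and FIELD macros, so a decoder commit path might look roughly like:
+ *
+ *   uint32_t ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
+ *   if (FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, COMMIT)) {
+ *       ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
+ *       cache_mem[R_CXL_HDM_DECODER0_CTRL] = ctrl;
+ *   }
+ */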
+
+/*
+ * CXL r3.1 Section 8.2.4.21: CXL Extended Security Capability Structure
+ * (Root complex only)
+ */
+#define EXTSEC_ENTRY_MAX 256
+#define CXL_EXTSEC_CAP_VERSION 2
+#define CXL_EXTSEC_REGISTERS_OFFSET \
+ (CXL_HDM_REGISTERS_OFFSET + CXL_HDM_REGISTERS_SIZE)
+#define CXL_EXTSEC_REGISTERS_SIZE (8 * EXTSEC_ENTRY_MAX + 4)
+
+/* CXL r3.1 Section 8.2.4.22: CXL IDE Capability Structure */
+#define CXL_IDE_CAP_VERSION 2
+#define CXL_IDE_REGISTERS_OFFSET \
+ (CXL_EXTSEC_REGISTERS_OFFSET + CXL_EXTSEC_REGISTERS_SIZE)
+#define CXL_IDE_REGISTERS_SIZE 0x24
+
+/* CXL r3.1 Section 8.2.4.23 - CXL Snoop Filter Capability Structure */
+#define CXL_SNOOP_CAP_VERSION 1
+#define CXL_SNOOP_REGISTERS_OFFSET \
+ (CXL_IDE_REGISTERS_OFFSET + CXL_IDE_REGISTERS_SIZE)
+#define CXL_SNOOP_REGISTERS_SIZE 0x8
+
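+/* All of the above capability structures must fit in the 4 KiB cache/mem (CM) region */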
+QEMU_BUILD_BUG_MSG((CXL_SNOOP_REGISTERS_OFFSET +
+ CXL_SNOOP_REGISTERS_SIZE) >= 0x1000,
+ "No space for registers");
+
+typedef struct component_registers {
+ /*
+ * Main memory region to be registered with QEMU core.
+ */
+ MemoryRegion component_registers;
+
+ /*
+ * CXL r3.1 Table 8-21: CXL Subsystem Component Register Ranges
+ * 0x0000 - 0x0fff CXL.io registers
+ * 0x1000 - 0x1fff CXL.cache and CXL.mem
+ * 0x2000 - 0xdfff Implementation specific
+ * 0xe000 - 0xe3ff CXL ARB/MUX registers
+ * 0xe400 - 0xffff RSVD
+ */
+ uint32_t io_registers[CXL2_COMPONENT_IO_REGION_SIZE >> 2];
+ MemoryRegion io;
+
+ uint32_t cache_mem_registers[CXL2_COMPONENT_CM_REGION_SIZE >> 2];
+ uint32_t cache_mem_regs_write_mask[CXL2_COMPONENT_CM_REGION_SIZE >> 2];
+ MemoryRegion cache_mem;
+
+ MemoryRegion impl_specific;
+ MemoryRegion arb_mux;
+ MemoryRegion rsvd;
+
+ /* special_ops is used for any component that needs specific handling */
+ MemoryRegionOps *special_ops;
+} ComponentRegisters;
+
+/*
+ * A CXL component represents all entities in a CXL hierarchy. This includes
+ * host bridges, root ports, upstream/downstream switch ports, and devices.
+ */
+typedef struct cxl_component {
+ ComponentRegisters crb;
+ union {
+ struct {
+ Range dvsecs[CXL20_MAX_DVSEC];
+ uint16_t dvsec_offset;
+ struct PCIDevice *pdev;
+ };
+ };
+
+ CDATObject cdat;
+} CXLComponentState;
+
+void cxl_component_register_block_init(Object *obj,
+ CXLComponentState *cxl_cstate,
+ const char *type);
+void cxl_component_register_init_common(uint32_t *reg_state,
+ uint32_t *write_msk,
+ enum reg_type type);
+
+void cxl_component_create_dvsec(CXLComponentState *cxl_cstate,
+ enum reg_type cxl_dev_type, uint16_t length,
+ uint16_t type, uint8_t rev, uint8_t *body);
+
+int cxl_decoder_count_enc(int count);
+int cxl_decoder_count_dec(int enc_cnt);
+
+uint8_t cxl_interleave_ways_enc(int iw, Error **errp);
+int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp);
+uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp);
+
+hwaddr cxl_decode_ig(int ig);
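+/* e.g. an encoded interleave granularity of 0 decodes to 256 bytes */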
+
+CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb);
+bool cxl_get_hb_passthrough(PCIHostState *hb);
+
+void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp);
+void cxl_doe_cdat_release(CXLComponentState *cxl_cstate);
+void cxl_doe_cdat_update(CXLComponentState *cxl_cstate, Error **errp);
+
+#endif
diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
new file mode 100644
index 0000000000..279b276bda
--- /dev/null
+++ b/include/hw/cxl/cxl_device.h
@@ -0,0 +1,506 @@
+/*
+ * QEMU CXL Devices
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_DEVICE_H
+#define CXL_DEVICE_H
+
+#include "hw/cxl/cxl_component.h"
+#include "hw/pci/pci_device.h"
+#include "hw/register.h"
+#include "hw/cxl/cxl_events.h"
+
+/*
+ * The following is how a CXL device's Memory Device registers are laid out.
+ * The only requirement from the spec is that the capabilities array and the
+ * capability headers start at offset 0 and are contiguously packed. The headers
+ * themselves provide offsets to the register fields. For this emulation, the
+ * actual registers will start at offset 0x80 (m == 0x80). No secondary
+ * mailbox is implemented which means that the offset of the start of the
+ * mailbox payload (n) is given by
+ * n = m + sizeof(mailbox registers) + sizeof(device registers).
+ *
+ * +---------------------------------+
+ * | |
+ * | Memory Device Registers |
+ * | |
+ * n + PAYLOAD_SIZE_MAX -----------------------------------
+ * ^ | |
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | | Mailbox Payload |
+ * | | |
+ * | | |
+ * | | |
+ * n -----------------------------------
+ * ^ | Mailbox Registers |
+ * | | |
+ * | -----------------------------------
+ * | | |
+ * | | Device Registers |
+ * | | |
+ * m ---------------------------------->
+ * ^ | Memory Device Capability Header|
+ * | -----------------------------------
+ * | | Mailbox Capability Header |
+ * | -----------------------------------
+ * | | Device Capability Header |
+ * | -----------------------------------
+ * | | Device Cap Array Register |
+ * 0 +---------------------------------+
+ *
+ */
+
+/* CXL r3.1 Figure 8-12: CXL Device Registers */
+#define CXL_DEVICE_CAP_HDR1_OFFSET 0x10
+/* CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register */
+#define CXL_DEVICE_CAP_REG_SIZE 0x10
+
+/*
+ * CXL r3.1 Section 8.2.8.2.1: CXL Device Capabilities +
+ * CXL r3.1 Section 8.2.8.5: Memory Device Capabilities
+ */
+#define CXL_DEVICE_CAPS_MAX 4
+#define CXL_CAPS_SIZE \
+ (CXL_DEVICE_CAP_REG_SIZE * (CXL_DEVICE_CAPS_MAX + 1)) /* +1 for header */
+
+#define CXL_DEVICE_STATUS_REGISTERS_OFFSET 0x80 /* Read comment above */
+/*
+ * CXL r3.1 Section 8.2.8.3: Device Status Registers
+ * The Event Status register is the only Device Status Register in CXL r3.1.
+ */
+#define CXL_DEVICE_STATUS_REGISTERS_LENGTH 0x8
+
+#define CXL_MAILBOX_REGISTERS_OFFSET \
+ (CXL_DEVICE_STATUS_REGISTERS_OFFSET + CXL_DEVICE_STATUS_REGISTERS_LENGTH)
+/* CXL r3.1 Figure 8-13: Mailbox Registers */
+#define CXL_MAILBOX_REGISTERS_SIZE 0x20
+#define CXL_MAILBOX_PAYLOAD_SHIFT 11
+#define CXL_MAILBOX_MAX_PAYLOAD_SIZE (1 << CXL_MAILBOX_PAYLOAD_SHIFT)
+#define CXL_MAILBOX_REGISTERS_LENGTH \
+ (CXL_MAILBOX_REGISTERS_SIZE + CXL_MAILBOX_MAX_PAYLOAD_SIZE)
+
+#define CXL_MEMORY_DEVICE_REGISTERS_OFFSET \
+ (CXL_MAILBOX_REGISTERS_OFFSET + CXL_MAILBOX_REGISTERS_LENGTH)
+#define CXL_MEMORY_DEVICE_REGISTERS_LENGTH 0x8
+
+#define CXL_MMIO_SIZE \
+ (CXL_DEVICE_CAP_REG_SIZE + CXL_DEVICE_STATUS_REGISTERS_LENGTH + \
+ CXL_MAILBOX_REGISTERS_LENGTH + CXL_MEMORY_DEVICE_REGISTERS_LENGTH)
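+
+/*
+ * Worked example of the layout described above (illustrative only): with
+ * m == 0x80, the mailbox registers start at 0x80 + 0x8 = 0x88 and the
+ * mailbox payload (n) starts at 0x88 + 0x20 = 0xa8, i.e.
+ *
+ *   n = CXL_MAILBOX_REGISTERS_OFFSET + CXL_MAILBOX_REGISTERS_SIZE;
+ *
+ * followed by a 2 KiB (1 << CXL_MAILBOX_PAYLOAD_SHIFT) payload area.
+ */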
+
+/* CXL r3.1 Table 8-34: Command Return Codes */
+typedef enum {
+ CXL_MBOX_SUCCESS = 0x0,
+ CXL_MBOX_BG_STARTED = 0x1,
+ CXL_MBOX_INVALID_INPUT = 0x2,
+ CXL_MBOX_UNSUPPORTED = 0x3,
+ CXL_MBOX_INTERNAL_ERROR = 0x4,
+ CXL_MBOX_RETRY_REQUIRED = 0x5,
+ CXL_MBOX_BUSY = 0x6,
+ CXL_MBOX_MEDIA_DISABLED = 0x7,
+ CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8,
+ CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9,
+ CXL_MBOX_FW_AUTH_FAILED = 0xa,
+ CXL_MBOX_FW_INVALID_SLOT = 0xb,
+ CXL_MBOX_FW_ROLLEDBACK = 0xc,
+ CXL_MBOX_FW_REST_REQD = 0xd,
+ CXL_MBOX_INVALID_HANDLE = 0xe,
+ CXL_MBOX_INVALID_PA = 0xf,
+ CXL_MBOX_INJECT_POISON_LIMIT = 0x10,
+ CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11,
+ CXL_MBOX_ABORTED = 0x12,
+ CXL_MBOX_INVALID_SECURITY_STATE = 0x13,
+ CXL_MBOX_INCORRECT_PASSPHRASE = 0x14,
+ CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
+ CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
+ CXL_MBOX_INVALID_LOG = 0x17,
+ CXL_MBOX_INTERRUPTED = 0x18,
+ CXL_MBOX_UNSUPPORTED_FEATURE_VERSION = 0x19,
+ CXL_MBOX_UNSUPPORTED_FEATURE_SELECTION_VALUE = 0x1a,
+ CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS = 0x1b,
+ CXL_MBOX_FEATURE_TRANSFER_OUT_OF_ORDER = 0x1c,
+ CXL_MBOX_RESOURCES_EXHAUSTED = 0x1d,
+ CXL_MBOX_INVALID_EXTENT_LIST = 0x1e,
+ CXL_MBOX_TRANSFER_OUT_OF_ORDER = 0x1f,
+ CXL_MBOX_REQUEST_ABORT_NOTSUP = 0x20,
+ CXL_MBOX_MAX = 0x20
+} CXLRetCode;
+
+typedef struct CXLCCI CXLCCI;
+typedef struct cxl_device_state CXLDeviceState;
+struct cxl_cmd;
+typedef CXLRetCode (*opcode_handler)(const struct cxl_cmd *cmd,
+ uint8_t *payload_in, size_t len_in,
+ uint8_t *payload_out, size_t *len_out,
+ CXLCCI *cci);
+struct cxl_cmd {
+ const char *name;
+ opcode_handler handler;
+ ssize_t in;
+ uint16_t effect; /* Reported in CEL */
+};
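+
+/*
+ * Illustrative sketch of how a command set table can be built from these
+ * types (the handler name and effect flag below are hypothetical, not part
+ * of this header):
+ *
+ *   static const struct cxl_cmd cmd_set[256][256] = {
+ *       [0x44][0x00] = { "SANITIZE", cmd_sanitize_overwrite, 0,
+ *                        IMMEDIATE_DATA_CHANGE },
+ *   };
+ */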
+
+typedef struct CXLEvent {
+ CXLEventRecordRaw data;
+ QSIMPLEQ_ENTRY(CXLEvent) node;
+} CXLEvent;
+
+typedef struct CXLEventLog {
+ uint16_t next_handle;
+ uint16_t overflow_err_count;
+ uint64_t first_overflow_timestamp;
+ uint64_t last_overflow_timestamp;
+ bool irq_enabled;
+ int irq_vec;
+ QemuMutex lock;
+ QSIMPLEQ_HEAD(, CXLEvent) events;
+} CXLEventLog;
+
+typedef struct CXLCCI {
+ const struct cxl_cmd (*cxl_cmd_set)[256];
+ struct cel_log {
+ uint16_t opcode;
+ uint16_t effect;
+ } cel_log[1 << 16];
+ size_t cel_size;
+
+ /* background command handling (times in ms) */
+ struct {
+ uint16_t opcode;
+ uint16_t complete_pct;
+ uint16_t ret_code; /* Current value of retcode */
+ uint64_t starttime;
+ /* set by each bg cmd, cleared by the bg_timer when complete */
+ uint64_t runtime;
+ QEMUTimer *timer;
+ } bg;
+ size_t payload_max;
+ /* Pointer to device hosting the CCI */
+ DeviceState *d;
+ /* Pointer to the device hosting the protocol conversion */
+ DeviceState *intf;
+} CXLCCI;
+
+typedef struct cxl_device_state {
+ MemoryRegion device_registers;
+
+ /* CXL r3.1 Section 8.2.8.3: Device Status Registers */
+ struct {
+ MemoryRegion device;
+ union {
+ uint8_t dev_reg_state[CXL_DEVICE_STATUS_REGISTERS_LENGTH];
+ uint16_t dev_reg_state16[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 2];
+ uint32_t dev_reg_state32[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 4];
+ uint64_t dev_reg_state64[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 8];
+ };
+ uint64_t event_status;
+ };
+ MemoryRegion memory_device;
+ struct {
+ MemoryRegion caps;
+ union {
+ uint32_t caps_reg_state32[CXL_CAPS_SIZE / 4];
+ uint64_t caps_reg_state64[CXL_CAPS_SIZE / 8];
+ };
+ };
+
+ /* CXL r3.1 Section 8.2.8.4: Mailbox Registers */
+ struct {
+ MemoryRegion mailbox;
+ uint16_t payload_size;
+ uint8_t mbox_msi_n;
+ union {
+ uint8_t mbox_reg_state[CXL_MAILBOX_REGISTERS_LENGTH];
+ uint16_t mbox_reg_state16[CXL_MAILBOX_REGISTERS_LENGTH / 2];
+ uint32_t mbox_reg_state32[CXL_MAILBOX_REGISTERS_LENGTH / 4];
+ uint64_t mbox_reg_state64[CXL_MAILBOX_REGISTERS_LENGTH / 8];
+ };
+ };
+
+ /* Stash the memory device status value */
+ uint64_t memdev_status;
+
+ struct {
+ bool set;
+ uint64_t last_set;
+ uint64_t host_set;
+ } timestamp;
+
+ /* memory region size, HDM */
+ uint64_t mem_size;
+ uint64_t pmem_size;
+ uint64_t vmem_size;
+
+ const struct cxl_cmd (*cxl_cmd_set)[256];
+ CXLEventLog event_logs[CXL_EVENT_TYPE_MAX];
+} CXLDeviceState;
+
+/* Initialize the register block for a device */
+void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev,
+ CXLCCI *cci);
+
+typedef struct CXLType3Dev CXLType3Dev;
+typedef struct CSWMBCCIDev CSWMBCCIDev;
+/* Set up default values for the register block */
+void cxl_device_register_init_t3(CXLType3Dev *ct3d);
+void cxl_device_register_init_swcci(CSWMBCCIDev *sw);
+
+/*
+ * CXL r3.1 Section 8.2.8.1: CXL Device Capabilities Array Register
+ * Documented as a 128 bit register, but 64 bit accesses and the second
+ * 64 bits are currently reserved.
+ */
+REG64(CXL_DEV_CAP_ARRAY, 0)
+ FIELD(CXL_DEV_CAP_ARRAY, CAP_ID, 0, 16)
+ FIELD(CXL_DEV_CAP_ARRAY, CAP_VERSION, 16, 8)
+ FIELD(CXL_DEV_CAP_ARRAY, CAP_COUNT, 32, 16)
+
+void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
+ bool available);
+
+/*
+ * Helper macro to initialize capability headers for CXL devices.
+ *
+ * In CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register, this is
+ * listed as a 128b register, but in CXL r3.1 Section 8.2.8: CXL Device Register
+ * Interface, it says:
+ * > No registers defined in Section 8.2.8 are larger than 64-bits wide so that
+ * > is the maximum access size allowed for these registers. If this rule is not
+ * > followed, the behavior is undefined.
+ *
+ * > To illustrate how the fields fit together, the layouts ... are shown as
+ * > wider than a 64 bit register. Implementations are expected to use any size
+ * > accesses for this information up to 64 bits without loss of functionality
+ *
+ * Here we've chosen to make it 4 dwords.
+ */
+#define CXL_DEVICE_CAPABILITY_HEADER_REGISTER(n, offset) \
+ REG32(CXL_DEV_##n##_CAP_HDR0, offset) \
+ FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_ID, 0, 16) \
+ FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_VERSION, 16, 8) \
+ REG32(CXL_DEV_##n##_CAP_HDR1, offset + 4) \
+ FIELD(CXL_DEV_##n##_CAP_HDR1, CAP_OFFSET, 0, 32) \
+ REG32(CXL_DEV_##n##_CAP_HDR2, offset + 8) \
+ FIELD(CXL_DEV_##n##_CAP_HDR2, CAP_LENGTH, 0, 32)
+
+CXL_DEVICE_CAPABILITY_HEADER_REGISTER(DEVICE_STATUS, CXL_DEVICE_CAP_HDR1_OFFSET)
+CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MAILBOX, CXL_DEVICE_CAP_HDR1_OFFSET + \
+ CXL_DEVICE_CAP_REG_SIZE)
+CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE,
+ CXL_DEVICE_CAP_HDR1_OFFSET +
+ CXL_DEVICE_CAP_REG_SIZE * 2)
+
+void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max);
+void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
+ DeviceState *d, size_t payload_max);
+void cxl_init_cci(CXLCCI *cci, size_t payload_max);
+int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
+ size_t len_in, uint8_t *pl_in,
+ size_t *len_out, uint8_t *pl_out,
+ bool *bg_started);
+void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
+ DeviceState *intf,
+ size_t payload_max);
+
+void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d,
+ DeviceState *intf, size_t payload_max);
+
+#define cxl_device_cap_init(dstate, reg, cap_id, ver) \
+ do { \
+ uint32_t *cap_hdrs = dstate->caps_reg_state32; \
+ int which = R_CXL_DEV_##reg##_CAP_HDR0; \
+ cap_hdrs[which] = \
+ FIELD_DP32(cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, \
+ CAP_ID, cap_id); \
+ cap_hdrs[which] = FIELD_DP32( \
+ cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, CAP_VERSION, ver); \
+ cap_hdrs[which + 1] = \
+ FIELD_DP32(cap_hdrs[which + 1], CXL_DEV_##reg##_CAP_HDR1, \
+ CAP_OFFSET, CXL_##reg##_REGISTERS_OFFSET); \
+ cap_hdrs[which + 2] = \
+ FIELD_DP32(cap_hdrs[which + 2], CXL_DEV_##reg##_CAP_HDR2, \
+ CAP_LENGTH, CXL_##reg##_REGISTERS_LENGTH); \
+ } while (0)
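+
+/*
+ * Illustrative use of cxl_device_cap_init() (a sketch, not a requirement of
+ * this header): a device init path could populate the mailbox capability
+ * header with, e.g.,
+ *
+ *   cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
+ *
+ * which writes the capability ID, version, CXL_MAILBOX_REGISTERS_OFFSET and
+ * CXL_MAILBOX_REGISTERS_LENGTH into caps_reg_state32.
+ */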
+
+/* CXL r3.2 Section 8.2.8.3.1: Event Status Register */
+#define CXL_DEVICE_STATUS_VERSION 2
+REG64(CXL_DEV_EVENT_STATUS, 0)
+ FIELD(CXL_DEV_EVENT_STATUS, EVENT_STATUS, 0, 32)
+
+#define CXL_DEV_MAILBOX_VERSION 1
+/* CXL r3.1 Section 8.2.8.4.3: Mailbox Capabilities Register */
+REG32(CXL_DEV_MAILBOX_CAP, 0)
+ FIELD(CXL_DEV_MAILBOX_CAP, PAYLOAD_SIZE, 0, 5)
+ FIELD(CXL_DEV_MAILBOX_CAP, INT_CAP, 5, 1)
+ FIELD(CXL_DEV_MAILBOX_CAP, BG_INT_CAP, 6, 1)
+ FIELD(CXL_DEV_MAILBOX_CAP, MSI_N, 7, 4)
+ FIELD(CXL_DEV_MAILBOX_CAP, MBOX_READY_TIME, 11, 8)
+ FIELD(CXL_DEV_MAILBOX_CAP, TYPE, 19, 4)
+
+/* CXL r3.1 Section 8.2.8.4.4: Mailbox Control Register */
+REG32(CXL_DEV_MAILBOX_CTRL, 4)
+ FIELD(CXL_DEV_MAILBOX_CTRL, DOORBELL, 0, 1)
+ FIELD(CXL_DEV_MAILBOX_CTRL, INT_EN, 1, 1)
+ FIELD(CXL_DEV_MAILBOX_CTRL, BG_INT_EN, 2, 1)
+
+/* CXL r3.1 Section 8.2.8.4.5: Command Register */
+REG64(CXL_DEV_MAILBOX_CMD, 8)
+ FIELD(CXL_DEV_MAILBOX_CMD, COMMAND, 0, 8)
+ FIELD(CXL_DEV_MAILBOX_CMD, COMMAND_SET, 8, 8)
+ FIELD(CXL_DEV_MAILBOX_CMD, LENGTH, 16, 20)
+
+/* CXL r3.1 Section 8.2.8.4.6: Mailbox Status Register */
+REG64(CXL_DEV_MAILBOX_STS, 0x10)
+ FIELD(CXL_DEV_MAILBOX_STS, BG_OP, 0, 1)
+ FIELD(CXL_DEV_MAILBOX_STS, ERRNO, 32, 16)
+ FIELD(CXL_DEV_MAILBOX_STS, VENDOR_ERRNO, 48, 16)
+
+/* CXL r3.1 Section 8.2.8.4.7: Background Command Status Register */
+REG64(CXL_DEV_BG_CMD_STS, 0x18)
+ FIELD(CXL_DEV_BG_CMD_STS, OP, 0, 16)
+ FIELD(CXL_DEV_BG_CMD_STS, PERCENTAGE_COMP, 16, 7)
+ FIELD(CXL_DEV_BG_CMD_STS, RET_CODE, 32, 16)
+ FIELD(CXL_DEV_BG_CMD_STS, VENDOR_RET_CODE, 48, 16)
+
+/* CXL r3.1 Section 8.2.8.4.8: Command Payload Registers */
+REG32(CXL_DEV_CMD_PAYLOAD, 0x20)
+
+/* CXL r3.1 Section 8.2.8.4.1: Memory Device Status Registers */
+#define CXL_MEM_DEV_STATUS_VERSION 1
+REG64(CXL_MEM_DEV_STS, 0)
+ FIELD(CXL_MEM_DEV_STS, FATAL, 0, 1)
+ FIELD(CXL_MEM_DEV_STS, FW_HALT, 1, 1)
+ FIELD(CXL_MEM_DEV_STS, MEDIA_STATUS, 2, 2)
+ FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1)
+ FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3)
+
+static inline void __toggle_media(CXLDeviceState *cxl_dstate, int val)
+{
+ uint64_t dev_status_reg;
+
+ dev_status_reg = cxl_dstate->memdev_status;
+ dev_status_reg = FIELD_DP64(dev_status_reg, CXL_MEM_DEV_STS, MEDIA_STATUS,
+ val);
+ cxl_dstate->memdev_status = dev_status_reg;
+}
+#define cxl_dev_disable_media(cxlds) \
+ do { __toggle_media((cxlds), 0x3); } while (0)
+#define cxl_dev_enable_media(cxlds) \
+ do { __toggle_media((cxlds), 0x1); } while (0)
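+
+/*
+ * Illustrative example (not part of this interface): when media becomes
+ * usable again, e.g. once a sanitize operation completes, a device model
+ * would typically call
+ *
+ *   cxl_dev_enable_media(&ct3d->cxl_dstate);
+ *
+ * setting MEDIA_STATUS to 0x1 (ready); cxl_dev_disable_media() sets 0x3
+ * (disabled).
+ */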
+
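+/* Opcode 0x4400 is Sanitize (command set 0x44, command 0x00) */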
+static inline bool sanitize_running(CXLCCI *cci)
+{
+ return !!cci->bg.runtime && cci->bg.opcode == 0x4400;
+}
+
+typedef struct CXLError {
+ QTAILQ_ENTRY(CXLError) node;
+ int type; /* Error code as per FE definition */
+ uint32_t header[CXL_RAS_ERR_HEADER_NUM];
+} CXLError;
+
+typedef QTAILQ_HEAD(, CXLError) CXLErrorList;
+
+typedef struct CXLPoison {
+ uint64_t start, length;
+ uint8_t type;
+#define CXL_POISON_TYPE_EXTERNAL 0x1
+#define CXL_POISON_TYPE_INTERNAL 0x2
+#define CXL_POISON_TYPE_INJECTED 0x3
+ QLIST_ENTRY(CXLPoison) node;
+} CXLPoison;
+
+typedef QLIST_HEAD(, CXLPoison) CXLPoisonList;
+#define CXL_POISON_LIST_LIMIT 256
+
+struct CXLType3Dev {
+ /* Private */
+ PCIDevice parent_obj;
+
+ /* Properties */
+ HostMemoryBackend *hostmem; /* deprecated */
+ HostMemoryBackend *hostvmem;
+ HostMemoryBackend *hostpmem;
+ HostMemoryBackend *lsa;
+ uint64_t sn;
+
+ /* State */
+ AddressSpace hostvmem_as;
+ AddressSpace hostpmem_as;
+ CXLComponentState cxl_cstate;
+ CXLDeviceState cxl_dstate;
+ CXLCCI cci; /* Primary PCI mailbox CCI */
+ /* Always initialized as there is no way to know if a VDM might show up */
+ CXLCCI vdm_fm_owned_ld_mctp_cci;
+ CXLCCI ld0_cci;
+
+ /* DOE */
+ DOECap doe_cdat;
+
+ /* Error injection */
+ CXLErrorList error_list;
+
+ /* Poison Injection - cache */
+ CXLPoisonList poison_list;
+ unsigned int poison_list_cnt;
+ bool poison_list_overflowed;
+ uint64_t poison_list_overflow_ts;
+};
+
+#define TYPE_CXL_TYPE3 "cxl-type3"
+OBJECT_DECLARE_TYPE(CXLType3Dev, CXLType3Class, CXL_TYPE3)
+
+struct CXLType3Class {
+ /* Private */
+ PCIDeviceClass parent_class;
+
+ /* public */
+ uint64_t (*get_lsa_size)(CXLType3Dev *ct3d);
+
+ uint64_t (*get_lsa)(CXLType3Dev *ct3d, void *buf, uint64_t size,
+ uint64_t offset);
+ void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size,
+ uint64_t offset);
+ bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset,
+ uint8_t *data);
+};
+
+struct CSWMBCCIDev {
+ PCIDevice parent_obj;
+ PCIDevice *target;
+ CXLComponentState cxl_cstate;
+ CXLDeviceState cxl_dstate;
+ CXLCCI *cci;
+};
+
+#define TYPE_CXL_SWITCH_MAILBOX_CCI "cxl-switch-mailbox-cci"
+OBJECT_DECLARE_TYPE(CSWMBCCIDev, CSWMBCCIClass, CXL_SWITCH_MAILBOX_CCI)
+
+MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs);
+MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
+ unsigned size, MemTxAttrs attrs);
+
+uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds);
+
+void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num);
+bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
+ CXLEventRecordRaw *event);
+CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
+ uint8_t log_type, int max_recs,
+ size_t *len);
+CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
+ CXLClearEventPayload *pl);
+
+void cxl_event_irq_assert(CXLType3Dev *ct3d);
+
+void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d);
+
+#endif
diff --git a/include/hw/cxl/cxl_events.h b/include/hw/cxl/cxl_events.h
new file mode 100644
index 0000000000..5170b8dbf8
--- /dev/null
+++ b/include/hw/cxl/cxl_events.h
@@ -0,0 +1,169 @@
+/*
+ * QEMU CXL Events
+ *
+ * Copyright (c) 2022 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_EVENTS_H
+#define CXL_EVENTS_H
+
+#include "qemu/uuid.h"
+
+/*
+ * CXL r3.1 section 8.2.9.2.2: Get Event Records (Opcode 0100h); Table 8-52
+ *
+ * Define these as the bit positions in the Event Status register for ease of
+ * setting the status.
+ */
+typedef enum CXLEventLogType {
+ CXL_EVENT_TYPE_INFO = 0,
+ CXL_EVENT_TYPE_WARN = 1,
+ CXL_EVENT_TYPE_FAIL = 2,
+ CXL_EVENT_TYPE_FATAL = 3,
+ CXL_EVENT_TYPE_DYNAMIC_CAP = 4,
+ CXL_EVENT_TYPE_MAX
+} CXLEventLogType;
+
+/*
+ * Common Event Record Format
+ * CXL r3.1 section 8.2.9.2.1: Event Records; Table 8-43
+ */
+#define CXL_EVENT_REC_HDR_RES_LEN 0xf
+typedef struct CXLEventRecordHdr {
+ QemuUUID id;
+ uint8_t length;
+ uint8_t flags[3];
+ uint16_t handle;
+ uint16_t related_handle;
+ uint64_t timestamp;
+ uint8_t maint_op_class;
+ uint8_t reserved[CXL_EVENT_REC_HDR_RES_LEN];
+} QEMU_PACKED CXLEventRecordHdr;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+typedef struct CXLEventRecordRaw {
+ CXLEventRecordHdr hdr;
+ uint8_t data[CXL_EVENT_RECORD_DATA_LENGTH];
+} QEMU_PACKED CXLEventRecordRaw;
+#define CXL_EVENT_RECORD_SIZE (sizeof(CXLEventRecordRaw))
+
+/*
+ * Get Event Records output payload
+ * CXL r3.1 section 8.2.9.2.2; Table 8-53
+ */
+#define CXL_GET_EVENT_FLAG_OVERFLOW BIT(0)
+#define CXL_GET_EVENT_FLAG_MORE_RECORDS BIT(1)
+typedef struct CXLGetEventPayload {
+ uint8_t flags;
+ uint8_t reserved1;
+ uint16_t overflow_err_count;
+ uint64_t first_overflow_timestamp;
+ uint64_t last_overflow_timestamp;
+ uint16_t record_count;
+ uint8_t reserved2[0xa];
+ CXLEventRecordRaw records[];
+} QEMU_PACKED CXLGetEventPayload;
+#define CXL_EVENT_PAYLOAD_HDR_SIZE (sizeof(CXLGetEventPayload))
+
+/*
+ * Clear Event Records input payload
+ * CXL r3.1 section 8.2.9.2.3; Table 8-54
+ */
+typedef struct CXLClearEventPayload {
+ uint8_t event_log; /* CXLEventLogType */
+ uint8_t clear_flags;
+ uint8_t nr_recs;
+ uint8_t reserved[3];
+ uint16_t handle[];
+} CXLClearEventPayload;
+
+/*
+ * Event Interrupt Policy
+ *
+ * CXL r3.1 section 8.2.9.2.4; Table 8-55
+ */
+typedef enum CXLEventIntMode {
+ CXL_INT_NONE = 0x00,
+ CXL_INT_MSI_MSIX = 0x01,
+ CXL_INT_FW = 0x02,
+ CXL_INT_RES = 0x03,
+} CXLEventIntMode;
+#define CXL_EVENT_INT_MODE_MASK 0x3
+#define CXL_EVENT_INT_SETTING(vector) \
+ ((((uint8_t)vector & 0xf) << 4) | CXL_INT_MSI_MSIX)
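+/*
+ * For example, CXL_EVENT_INT_SETTING(2) evaluates to 0x21: MSI/MSI-X message
+ * number 2 in bits 7:4 with CXL_INT_MSI_MSIX in bits 1:0.
+ */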
+typedef struct CXLEventInterruptPolicy {
+ uint8_t info_settings;
+ uint8_t warn_settings;
+ uint8_t failure_settings;
+ uint8_t fatal_settings;
+ uint8_t dyn_cap_settings;
+} QEMU_PACKED CXLEventInterruptPolicy;
+/* DCD is optional but other fields are not */
+#define CXL_EVENT_INT_SETTING_MIN_LEN 4
+
+/*
+ * General Media Event Record
+ * CXL r3.1 Section 8.2.9.2.1.1; Table 8-45
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+#define CXL_EVENT_GEN_MED_RES_SIZE 0x2e
+typedef struct CXLEventGenMedia {
+ CXLEventRecordHdr hdr;
+ uint64_t phys_addr;
+ uint8_t descriptor;
+ uint8_t type;
+ uint8_t transaction_type;
+ uint16_t validity_flags;
+ uint8_t channel;
+ uint8_t rank;
+ uint8_t device[3];
+ uint8_t component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ uint8_t reserved[CXL_EVENT_GEN_MED_RES_SIZE];
+} QEMU_PACKED CXLEventGenMedia;
+
+/*
+ * DRAM Event Record
+ * CXL r3.1 Section 8.2.9.2.1.2: Table 8-46
+ * All fields little endian.
+ */
+typedef struct CXLEventDram {
+ CXLEventRecordHdr hdr;
+ uint64_t phys_addr;
+ uint8_t descriptor;
+ uint8_t type;
+ uint8_t transaction_type;
+ uint16_t validity_flags;
+ uint8_t channel;
+ uint8_t rank;
+ uint8_t nibble_mask[3];
+ uint8_t bank_group;
+ uint8_t bank;
+ uint8_t row[3];
+ uint16_t column;
+ uint64_t correction_mask[4];
+ uint8_t reserved[0x17];
+} QEMU_PACKED CXLEventDram;
+
+/*
+ * Memory Module Event Record
+ * CXL r3.1 Section 8.2.9.2.1.3: Table 8-47
+ * All fields little endian.
+ */
+typedef struct CXLEventMemoryModule {
+ CXLEventRecordHdr hdr;
+ uint8_t type;
+ uint8_t health_status;
+ uint8_t media_status;
+ uint8_t additional_status;
+ uint8_t life_used;
+ int16_t temperature;
+ uint32_t dirty_shutdown_count;
+ uint32_t corrected_volatile_error_count;
+ uint32_t corrected_persistent_error_count;
+ uint8_t reserved[0x3d];
+} QEMU_PACKED CXLEventMemoryModule;
+
+#endif /* CXL_EVENTS_H */
diff --git a/include/hw/cxl/cxl_host.h b/include/hw/cxl/cxl_host.h
new file mode 100644
index 0000000000..c9bc9c7c50
--- /dev/null
+++ b/include/hw/cxl/cxl_host.h
@@ -0,0 +1,22 @@
+/*
+ * QEMU CXL Host Setup
+ *
+ * Copyright (c) 2022 Huawei
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "hw/cxl/cxl.h"
+#include "hw/boards.h"
+
+#ifndef CXL_HOST_H
+#define CXL_HOST_H
+
+void cxl_machine_init(Object *obj, CXLState *state);
+void cxl_fmws_link_targets(CXLState *stat, Error **errp);
+void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp);
+
+extern const MemoryRegionOps cfmws_ops;
+
+#endif
diff --git a/include/hw/cxl/cxl_pci.h b/include/hw/cxl/cxl_pci.h
new file mode 100644
index 0000000000..d0855ed78b
--- /dev/null
+++ b/include/hw/cxl/cxl_pci.h
@@ -0,0 +1,194 @@
+/*
+ * QEMU CXL PCI interfaces
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_PCI_H
+#define CXL_PCI_H
+
+
+#define CXL_VENDOR_ID 0x1e98
+
+#define PCIE_DVSEC_HEADER1_OFFSET 0x4 /* Offset from start of extended cap */
+#define PCIE_DVSEC_ID_OFFSET 0x8
+
+#define PCIE_CXL_DEVICE_DVSEC_LENGTH 0x3C
+#define PCIE_CXL31_DEVICE_DVSEC_REVID 3
+
+#define EXTENSIONS_PORT_DVSEC_LENGTH 0x28
+#define EXTENSIONS_PORT_DVSEC_REVID 0
+
+#define GPF_PORT_DVSEC_LENGTH 0x10
+#define GPF_PORT_DVSEC_REVID 0
+
+#define GPF_DEVICE_DVSEC_LENGTH 0x10
+#define GPF_DEVICE_DVSEC_REVID 0
+
+#define PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH 0x20
+#define PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID 2
+
+#define REG_LOC_DVSEC_LENGTH 0x24
+#define REG_LOC_DVSEC_REVID 0
+
+enum {
+ PCIE_CXL_DEVICE_DVSEC = 0,
+ NON_CXL_FUNCTION_MAP_DVSEC = 2,
+ EXTENSIONS_PORT_DVSEC = 3,
+ GPF_PORT_DVSEC = 4,
+ GPF_DEVICE_DVSEC = 5,
+ PCIE_FLEXBUS_PORT_DVSEC = 7,
+ REG_LOC_DVSEC = 8,
+ MLD_DVSEC = 9,
+ CXL20_MAX_DVSEC
+};
+
+typedef struct DVSECHeader {
+ uint32_t cap_hdr;
+ uint32_t dv_hdr1;
+ uint16_t dv_hdr2;
+} QEMU_PACKED DVSECHeader;
+QEMU_BUILD_BUG_ON(sizeof(DVSECHeader) != 10);
+
+/*
+ * CXL r3.1 Table 8-2: CXL DVSEC ID Assignment
+ * Devices must implement certain DVSEC IDs, and can [optionally]
+ * implement others.
+ * (x) - IDs in Table 8-2.
+ *
+ * CXL RCD (D1): 0, [2], [5], 7, [8], A - Not emulated yet
+ * CXL RCD USP (UP1): 7, [8] - Not emulated yet
+ * CXL RCH DSP (DP1): 7, [8]
+ * CXL SLD (D2): 0, [2], 5, 7, 8, [A]
+ * CXL LD (LD): 0, [2], 5, 7, 8
+ * CXL RP (R): 3, 4, 7, 8
+ * CXL Switch USP (USP): [2], 7, 8
+ * CXL Switch DSP (DSP): 3, 4, 7, 8
+ * FM-Owned LD (FMLD): 0, [2], 7, 8, 9
+ */
+
+/*
+ * CXL r3.1 Section 8.1.3: PCIe DVSEC for Devices
+ * DVSEC ID: 0, Revision: 3
+ */
+typedef struct CXLDVSECDevice {
+ DVSECHeader hdr;
+ uint16_t cap;
+ uint16_t ctrl;
+ uint16_t status;
+ uint16_t ctrl2;
+ uint16_t status2;
+ uint16_t lock;
+ uint16_t cap2;
+ uint32_t range1_size_hi;
+ uint32_t range1_size_lo;
+ uint32_t range1_base_hi;
+ uint32_t range1_base_lo;
+ uint32_t range2_size_hi;
+ uint32_t range2_size_lo;
+ uint32_t range2_base_hi;
+ uint32_t range2_base_lo;
+ uint16_t cap3;
+ uint16_t resv;
+} QEMU_PACKED CXLDVSECDevice;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDevice) != PCIE_CXL_DEVICE_DVSEC_LENGTH);
+
+/*
+ * CXL r3.1 Section 8.1.5: CXL Extensions DVSEC for Ports
+ * DVSEC ID: 3, Revision: 0
+ */
+typedef struct CXLDVSECPortExt {
+ DVSECHeader hdr;
+ uint16_t status;
+ uint16_t control;
+ uint8_t alt_bus_base;
+ uint8_t alt_bus_limit;
+ uint16_t alt_memory_base;
+ uint16_t alt_memory_limit;
+ uint16_t alt_prefetch_base;
+ uint16_t alt_prefetch_limit;
+ uint32_t alt_prefetch_base_high;
+ uint32_t alt_prefetch_limit_high;
+ uint32_t rcrb_base;
+ uint32_t rcrb_base_high;
+} CXLDVSECPortExt;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortExt) != 0x28);
+
+#define PORT_CONTROL_OFFSET 0xc
+#define PORT_CONTROL_UNMASK_SBR 1
+#define PORT_CONTROL_ALT_MEMID_EN 4
+
+/*
+ * CXL r3.1 Section 8.1.6: GPF DVSEC for CXL Port
+ * DVSEC ID: 4, Revision: 0
+ */
+typedef struct CXLDVSECPortGPF {
+ DVSECHeader hdr;
+ uint16_t rsvd;
+ uint16_t phase1_ctrl;
+ uint16_t phase2_ctrl;
+} CXLDVSECPortGPF;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortGPF) != 0x10);
+
+/*
+ * CXL r3.1 Section 8.1.7: GPF DVSEC for CXL Device
+ * DVSEC ID: 5, Revision 0
+ */
+typedef struct CXLDVSECDeviceGPF {
+ DVSECHeader hdr;
+ uint16_t phase2_duration;
+ uint32_t phase2_power;
+} CXLDVSECDeviceGPF;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDeviceGPF) != 0x10);
+
+/*
+ * CXL r3.1 Section 8.1.8: PCIe DVSEC for Flex Bus Port
+ * CXL r3.1 Section 8.2.1.3: Flex Bus Port DVSEC
+ * DVSEC ID: 7, Revision 2
+ */
+typedef struct CXLDVSECPortFlexBus {
+ DVSECHeader hdr;
+ uint16_t cap;
+ uint16_t ctrl;
+ uint16_t status;
+ uint32_t rcvd_mod_ts_data_phase1;
+ uint32_t cap2;
+ uint32_t ctrl2;
+ uint32_t status2;
+} CXLDVSECPortFlexBus;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortFlexBus) != 0x20);
+
+/*
+ * CXL r3.1 Section 8.1.9: Register Locator DVSEC
+ * DVSEC ID: 8, Revision 0
+ */
+typedef struct CXLDVSECRegisterLocator {
+ DVSECHeader hdr;
+ uint16_t rsvd;
+ uint32_t reg0_base_lo;
+ uint32_t reg0_base_hi;
+ uint32_t reg1_base_lo;
+ uint32_t reg1_base_hi;
+ uint32_t reg2_base_lo;
+ uint32_t reg2_base_hi;
+} CXLDVSECRegisterLocator;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECRegisterLocator) != 0x24);
+
+/* BAR Equivalence Indicator */
+#define BEI_BAR_10H 0
+#define BEI_BAR_14H 1
+#define BEI_BAR_18H 2
+#define BEI_BAR_1cH 3
+#define BEI_BAR_20H 4
+#define BEI_BAR_24H 5
+
+/* Register Block Identifier */
+#define RBI_EMPTY 0
+#define RBI_COMPONENT_REG (1 << 8)
+#define RBI_BAR_VIRT_ACL (2 << 8)
+#define RBI_CXL_DEVICE_REG (3 << 8)
+
+#endif
diff --git a/include/hw/devices.h b/include/hw/devices.h
deleted file mode 100644
index 0e27feb0c2..0000000000
--- a/include/hw/devices.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef QEMU_DEVICES_H
-#define QEMU_DEVICES_H
-
-/* Devices that have nowhere better to go. */
-
-#include "hw/hw.h"
-
-/* smc91c111.c */
-void smc91c111_init(NICInfo *, uint32_t, qemu_irq);
-
-/* lan9118.c */
-void lan9118_init(NICInfo *, uint32_t, qemu_irq);
-
-/* tsc210x.c */
-uWireSlave *tsc2102_init(qemu_irq pint);
-uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav);
-I2SCodec *tsc210x_codec(uWireSlave *chip);
-uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len);
-void tsc210x_set_transform(uWireSlave *chip,
- MouseTransformInfo *info);
-void tsc210x_key_event(uWireSlave *chip, int key, int down);
-
-/* tsc2005.c */
-void *tsc2005_init(qemu_irq pintdav);
-uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len);
-void tsc2005_set_transform(void *opaque, MouseTransformInfo *info);
-
-/* stellaris_input.c */
-void stellaris_gamepad_init(int n, qemu_irq *irq, const int *keycode);
-
-/* blizzard.c */
-void *s1d13745_init(qemu_irq gpio_int);
-void s1d13745_write(void *opaque, int dc, uint16_t value);
-void s1d13745_write_block(void *opaque, int dc,
- void *buf, size_t len, int pitch);
-uint16_t s1d13745_read(void *opaque, int dc);
-
-/* cbus.c */
-typedef struct {
- qemu_irq clk;
- qemu_irq dat;
- qemu_irq sel;
-} CBus;
-CBus *cbus_init(qemu_irq dat_out);
-void cbus_attach(CBus *bus, void *slave_opaque);
-
-void *retu_init(qemu_irq irq, int vilma);
-void *tahvo_init(qemu_irq irq, int betty);
-
-void retu_key_event(void *retu, int state);
-
-/* tc6393xb.c */
-typedef struct TC6393xbState TC6393xbState;
-#define TC6393XB_RAM 0x110000 /* amount of ram for Video and USB */
-TC6393xbState *tc6393xb_init(struct MemoryRegion *sysmem,
- uint32_t base, qemu_irq irq);
-void tc6393xb_gpio_out_set(TC6393xbState *s, int line,
- qemu_irq handler);
-qemu_irq *tc6393xb_gpio_in_get(TC6393xbState *s);
-qemu_irq tc6393xb_l3v_get(TC6393xbState *s);
-
-#endif
diff --git a/include/hw/display/bcm2835_fb.h b/include/hw/display/bcm2835_fb.h
index 228988ba05..49541bf08f 100644
--- a/include/hw/display/bcm2835_fb.h
+++ b/include/hw/display/bcm2835_fb.h
@@ -5,7 +5,8 @@
 * Raspberry Pi 2 emulation and refactoring Copyright (c) 2015, Microsoft
* Written by Andrew Baumann
*
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_FB_H
@@ -13,9 +14,12 @@
#include "hw/sysbus.h"
#include "ui/console.h"
+#include "qom/object.h"
+
+#define UPPER_RAM_BASE 0x40000000
#define TYPE_BCM2835_FB "bcm2835-fb"
-#define BCM2835_FB(obj) OBJECT_CHECK(BCM2835FBState, (obj), TYPE_BCM2835_FB)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835FBState, BCM2835_FB)
/*
* Configuration information about the fb which the guest can program
@@ -31,7 +35,7 @@ typedef struct {
uint32_t alpha;
} BCM2835FBConfig;
-typedef struct {
+struct BCM2835FBState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -48,7 +52,7 @@ typedef struct {
BCM2835FBConfig config;
BCM2835FBConfig initial_config;
-} BCM2835FBState;
+};
void bcm2835_fb_reconfigure(BCM2835FBState *s, BCM2835FBConfig *newconfig);
diff --git a/include/hw/display/blizzard.h b/include/hw/display/blizzard.h
new file mode 100644
index 0000000000..5b33018835
--- /dev/null
+++ b/include/hw/display/blizzard.h
@@ -0,0 +1,21 @@
+/*
+ * Epson S1D13744/S1D13745 (Blizzard/Hailstorm/Tornado) LCD/TV controller.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_DISPLAY_BLIZZARD_H
+#define HW_DISPLAY_BLIZZARD_H
+
+
+void *s1d13745_init(qemu_irq gpio_int);
+void s1d13745_write(void *opaque, int dc, uint16_t value);
+void s1d13745_write_block(void *opaque, int dc,
+ void *buf, size_t len, int pitch);
+uint16_t s1d13745_read(void *opaque, int dc);
+
+#endif
diff --git a/include/hw/display/dpcd.h b/include/hw/display/dpcd.h
index 6880ee36a3..a4e37abf6f 100644
--- a/include/hw/display/dpcd.h
+++ b/include/hw/display/dpcd.h
@@ -24,11 +24,11 @@
#ifndef DPCD_H
#define DPCD_H
+#include "qom/object.h"
-typedef struct DPCDState DPCDState;
#define TYPE_DPCD "dpcd"
-#define DPCD(obj) OBJECT_CHECK(DPCDState, (obj), TYPE_DPCD)
+OBJECT_DECLARE_SIMPLE_TYPE(DPCDState, DPCD)
/* DCPD Revision. */
#define DPCD_REVISION 0x00
diff --git a/include/hw/display/edid.h b/include/hw/display/edid.h
index bacf170889..520f8ec202 100644
--- a/include/hw/display/edid.h
+++ b/include/hw/display/edid.h
@@ -1,17 +1,17 @@
#ifndef EDID_H
#define EDID_H
-#include "hw/hw.h"
-
typedef struct qemu_edid_info {
const char *vendor; /* http://www.uefi.org/pnp_id_list */
const char *name;
const char *serial;
- uint32_t dpi;
+ uint16_t width_mm;
+ uint16_t height_mm;
uint32_t prefx;
uint32_t prefy;
uint32_t maxx;
uint32_t maxy;
+ uint32_t refresh_rate;
} qemu_edid_info;
void qemu_edid_generate(uint8_t *edid, size_t size,
@@ -20,8 +20,13 @@ size_t qemu_edid_size(uint8_t *edid);
void qemu_edid_region_io(MemoryRegion *region, Object *owner,
uint8_t *edid, size_t size);
-#define DEFINE_EDID_PROPERTIES(_state, _edid_info) \
- DEFINE_PROP_UINT32("xres", _state, _edid_info.prefx, 0), \
- DEFINE_PROP_UINT32("yres", _state, _edid_info.prefy, 0)
+uint32_t qemu_edid_dpi_to_mm(uint32_t dpi, uint32_t res);
+
+#define DEFINE_EDID_PROPERTIES(_state, _edid_info) \
+ DEFINE_PROP_UINT32("xres", _state, _edid_info.prefx, 0), \
+ DEFINE_PROP_UINT32("yres", _state, _edid_info.prefy, 0), \
+ DEFINE_PROP_UINT32("xmax", _state, _edid_info.maxx, 0), \
+ DEFINE_PROP_UINT32("ymax", _state, _edid_info.maxy, 0), \
+ DEFINE_PROP_UINT32("refresh_rate", _state, _edid_info.refresh_rate, 0)
#endif /* EDID_H */
diff --git a/include/hw/i2c/i2c-ddc.h b/include/hw/display/i2c-ddc.h
index c29443c5af..94b5880587 100644
--- a/include/hw/i2c/i2c-ddc.h
+++ b/include/hw/display/i2c-ddc.h
@@ -20,6 +20,8 @@
#define I2C_DDC_H
#include "hw/display/edid.h"
+#include "hw/i2c/i2c.h"
+#include "qom/object.h"
/* A simple I2C slave which just returns the contents of its EDID blob. */
struct I2CDDCState {
@@ -32,9 +34,8 @@ struct I2CDDCState {
uint8_t edid_blob[128];
};
-typedef struct I2CDDCState I2CDDCState;
#define TYPE_I2CDDC "i2c-ddc"
-#define I2CDDC(obj) OBJECT_CHECK(I2CDDCState, (obj), TYPE_I2CDDC)
+OBJECT_DECLARE_SIMPLE_TYPE(I2CDDCState, I2CDDC)
#endif /* I2C_DDC_H */
diff --git a/include/hw/display/macfb.h b/include/hw/display/macfb.h
new file mode 100644
index 0000000000..27cebefc9e
--- /dev/null
+++ b/include/hw/display/macfb.h
@@ -0,0 +1,101 @@
+/*
+ * QEMU Motorola 680x0 Macintosh Video Card Emulation
+ * Copyright (c) 2012-2018 Laurent Vivier
+ *
+ * some parts from QEMU G364 framebuffer Emulator.
+ * Copyright (c) 2007-2011 Herve Poussineau
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MACFB_H
+#define MACFB_H
+
+#include "exec/memory.h"
+#include "hw/irq.h"
+#include "hw/nubus/nubus.h"
+#include "hw/sysbus.h"
+#include "ui/console.h"
+#include "qemu/timer.h"
+
+typedef enum {
+ MACFB_DISPLAY_APPLE_21_COLOR = 0,
+ MACFB_DISPLAY_APPLE_PORTRAIT = 1,
+ MACFB_DISPLAY_APPLE_12_RGB = 2,
+ MACFB_DISPLAY_APPLE_2PAGE_MONO = 3,
+ MACFB_DISPLAY_NTSC_UNDERSCAN = 4,
+ MACFB_DISPLAY_NTSC_OVERSCAN = 5,
+ MACFB_DISPLAY_APPLE_12_MONO = 6,
+ MACFB_DISPLAY_APPLE_13_RGB = 7,
+ MACFB_DISPLAY_16_COLOR = 8,
+ MACFB_DISPLAY_PAL1_UNDERSCAN = 9,
+ MACFB_DISPLAY_PAL1_OVERSCAN = 10,
+ MACFB_DISPLAY_PAL2_UNDERSCAN = 11,
+ MACFB_DISPLAY_PAL2_OVERSCAN = 12,
+ MACFB_DISPLAY_VGA = 13,
+ MACFB_DISPLAY_SVGA = 14,
+} MacfbDisplayType;
+
+typedef struct MacFbMode {
+ uint8_t type;
+ uint8_t depth;
+ uint32_t mode_ctrl1;
+ uint32_t mode_ctrl2;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t offset;
+} MacFbMode;
+
+#define MACFB_CTRL_TOPADDR 0x200
+#define MACFB_NUM_REGS (MACFB_CTRL_TOPADDR / sizeof(uint32_t))
+
+typedef struct MacfbState {
+ MemoryRegion mem_vram;
+ MemoryRegion mem_ctrl;
+ QemuConsole *con;
+
+ uint8_t *vram;
+ uint32_t vram_bit_mask;
+ uint32_t palette_current;
+ uint8_t color_palette[256 * 3];
+ uint32_t width, height; /* in pixels */
+ uint8_t depth;
+ uint8_t type;
+
+ uint32_t regs[MACFB_NUM_REGS];
+ MacFbMode *mode;
+
+ QEMUTimer *vbl_timer;
+ qemu_irq irq;
+} MacfbState;
+
+#define TYPE_MACFB "sysbus-macfb"
+OBJECT_DECLARE_SIMPLE_TYPE(MacfbSysBusState, MACFB)
+
+struct MacfbSysBusState {
+ SysBusDevice busdev;
+
+ MacfbState macfb;
+};
+
+#define TYPE_NUBUS_MACFB "nubus-macfb"
+OBJECT_DECLARE_TYPE(MacfbNubusState, MacfbNubusDeviceClass, NUBUS_MACFB)
+
+struct MacfbNubusDeviceClass {
+ DeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+};
+
+
+struct MacfbNubusState {
+ NubusDevice busdev;
+
+ MacfbState macfb;
+};
+
+#endif
diff --git a/include/hw/display/ramfb.h b/include/hw/display/ramfb.h
index b33a2c467b..a7e0019144 100644
--- a/include/hw/display/ramfb.h
+++ b/include/hw/display/ramfb.h
@@ -1,11 +1,15 @@
#ifndef RAMFB_H
#define RAMFB_H
+#include "migration/vmstate.h"
+
/* ramfb.c */
typedef struct RAMFBState RAMFBState;
void ramfb_display_update(QemuConsole *con, RAMFBState *s);
RAMFBState *ramfb_setup(Error **errp);
+extern const VMStateDescription ramfb_vmstate;
+
/* ramfb-standalone.c */
#define TYPE_RAMFB_DEVICE "ramfb"
diff --git a/include/hw/display/tc6393xb.h b/include/hw/display/tc6393xb.h
new file mode 100644
index 0000000000..f9263bf98a
--- /dev/null
+++ b/include/hw/display/tc6393xb.h
@@ -0,0 +1,21 @@
+/*
+ * Toshiba TC6393XB I/O Controller.
+ * Found in Sharp Zaurus SL-6000 (tosa) or some
+ * Toshiba e-Series PDAs.
+ *
+ * Copyright (c) 2007 Hervé Poussineau
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_DISPLAY_TC6393XB_H
+#define HW_DISPLAY_TC6393XB_H
+
+typedef struct TC6393xbState TC6393xbState;
+
+TC6393xbState *tc6393xb_init(struct MemoryRegion *sysmem,
+ uint32_t base, qemu_irq irq);
+qemu_irq tc6393xb_l3v_get(TC6393xbState *s);
+
+#endif
diff --git a/include/hw/display/vga.h b/include/hw/display/vga.h
index 0401a3a292..a79aa2909b 100644
--- a/include/hw/display/vga.h
+++ b/include/hw/display/vga.h
@@ -9,7 +9,11 @@
#ifndef QEMU_HW_DISPLAY_VGA_H
#define QEMU_HW_DISPLAY_VGA_H
-#include "exec/memory.h"
+/*
+ * modules can reference this symbol to avoid being loaded
+ * into system emulators without vga support
+ */
+extern bool have_vga;
enum vga_retrace_method {
VGA_RETRACE_DUMB,
@@ -18,8 +22,6 @@ enum vga_retrace_method {
extern enum vga_retrace_method vga_retrace_method;
-int isa_vga_mm_init(hwaddr vram_base,
- hwaddr ctrl_base, int it_shift,
- MemoryRegion *address_space);
+#define TYPE_VGA_MMIO "vga-mmio"
#endif
diff --git a/include/hw/display/xlnx_dp.h b/include/hw/display/xlnx_dp.h
index 26b759cd44..e86a87f235 100644
--- a/include/hw/display/xlnx_dp.h
+++ b/include/hw/display/xlnx_dp.h
@@ -19,37 +19,43 @@
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
- *
*/
+#ifndef XLNX_DP_H
+#define XLNX_DP_H
+
#include "hw/sysbus.h"
#include "ui/console.h"
#include "hw/misc/auxbus.h"
#include "hw/i2c/i2c.h"
#include "hw/display/dpcd.h"
-#include "hw/i2c/i2c-ddc.h"
+#include "hw/display/i2c-ddc.h"
#include "qemu/fifo8.h"
#include "qemu/units.h"
#include "hw/dma/xlnx_dpdma.h"
#include "audio/audio.h"
-
-#ifndef XLNX_DP_H
-#define XLNX_DP_H
+#include "qom/object.h"
+#include "hw/ptimer.h"
#define AUD_CHBUF_MAX_DEPTH (32 * KiB)
#define MAX_QEMU_BUFFER_SIZE (4 * KiB)
-#define DP_CORE_REG_ARRAY_SIZE (0x3AF >> 2)
+#define DP_CORE_REG_OFFSET (0x0000)
+#define DP_CORE_REG_ARRAY_SIZE (0x3B0 >> 2)
+#define DP_AVBUF_REG_OFFSET (0xB000)
#define DP_AVBUF_REG_ARRAY_SIZE (0x238 >> 2)
-#define DP_VBLEND_REG_ARRAY_SIZE (0x1DF >> 2)
+#define DP_VBLEND_REG_OFFSET (0xA000)
+#define DP_VBLEND_REG_ARRAY_SIZE (0x1E0 >> 2)
+#define DP_AUDIO_REG_OFFSET (0xC000)
#define DP_AUDIO_REG_ARRAY_SIZE (0x50 >> 2)
+#define DP_CONTAINER_SIZE (0xC050)
struct PixmanPlane {
pixman_format_code_t format;
DisplaySurface *surface;
};
-typedef struct XlnxDPState {
+struct XlnxDPState {
/*< private >*/
SysBusDevice parent_obj;
@@ -102,9 +108,11 @@ typedef struct XlnxDPState {
*/
DPCDState *dpcd;
I2CDDCState *edid;
-} XlnxDPState;
+
+ ptimer_state *vblank;
+};
#define TYPE_XLNX_DP "xlnx.v-dp"
-#define XLNX_DP(obj) OBJECT_CHECK(XlnxDPState, (obj), TYPE_XLNX_DP)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxDPState, XLNX_DP)
-#endif /* !XLNX_DP_H */
+#endif
diff --git a/include/hw/dma/bcm2835_dma.h b/include/hw/dma/bcm2835_dma.h
index 60138f4d31..1d26b1d8d0 100644
--- a/include/hw/dma/bcm2835_dma.h
+++ b/include/hw/dma/bcm2835_dma.h
@@ -1,13 +1,15 @@
/*
* Raspberry Pi emulation (c) 2012 Gregory Estrade
- * This code is licensed under the GNU GPLv2 and later.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_DMA_H
#define BCM2835_DMA_H
-#include "qemu-common.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
typedef struct {
uint32_t cs;
@@ -24,12 +26,11 @@ typedef struct {
} BCM2835DMAChan;
#define TYPE_BCM2835_DMA "bcm2835-dma"
-#define BCM2835_DMA(obj) \
- OBJECT_CHECK(BCM2835DMAState, (obj), TYPE_BCM2835_DMA)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835DMAState, BCM2835_DMA)
#define BCM2835_DMA_NCHANS 16
-typedef struct {
+struct BCM2835DMAState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -41,6 +42,6 @@ typedef struct {
BCM2835DMAChan chan[BCM2835_DMA_NCHANS];
uint32_t int_status;
uint32_t enable;
-} BCM2835DMAState;
+};
#endif
diff --git a/include/hw/dma/i8257.h b/include/hw/dma/i8257.h
index 2cab50bb6c..4342e4a91e 100644
--- a/include/hw/dma/i8257.h
+++ b/include/hw/dma/i8257.h
@@ -1,11 +1,12 @@
#ifndef HW_I8257_H
#define HW_I8257_H
-#include "hw/hw.h"
#include "hw/isa/isa.h"
#include "exec/ioport.h"
+#include "qom/object.h"
#define TYPE_I8257 "i8257"
+OBJECT_DECLARE_SIMPLE_TYPE(I8257State, I8257)
typedef struct I8257Regs {
int now[2];
@@ -19,7 +20,7 @@ typedef struct I8257Regs {
void *opaque;
} I8257Regs;
-typedef struct I8257State {
+struct I8257State {
/* <private> */
ISADevice parent_obj;
@@ -42,8 +43,8 @@ typedef struct I8257State {
int running;
PortioList portio_page;
PortioList portio_pageh;
-} I8257State;
+};
-void i8257_dma_init(ISABus *bus, bool high_page_enable);
+void i8257_dma_init(Object *parent, ISABus *bus, bool high_page_enable);
#endif
diff --git a/include/hw/dma/pl080.h b/include/hw/dma/pl080.h
index 9d4b3df143..3c9659e438 100644
--- a/include/hw/dma/pl080.h
+++ b/include/hw/dma/pl080.h
@@ -10,11 +10,12 @@
* (at your option) any later version.
*/
-/* This is a model of the Arm PrimeCell PL080/PL081 DMA controller:
+/*
+ * This is a model of the Arm PrimeCell PL080/PL081 DMA controller:
* The PL080 TRM is:
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0196g/DDI0196.pdf
+ * https://developer.arm.com/documentation/ddi0196/latest
* and the PL081 TRM is:
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0218e/DDI0218.pdf
+ * https://developer.arm.com/documentation/ddi0218/latest
*
* QEMU interface:
* + sysbus IRQ 0: DMACINTR combined interrupt line
@@ -29,6 +30,7 @@
#define HW_DMA_PL080_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define PL080_MAX_CHANNELS 8
@@ -42,9 +44,9 @@ typedef struct {
#define TYPE_PL080 "pl080"
#define TYPE_PL081 "pl081"
-#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080)
+OBJECT_DECLARE_SIMPLE_TYPE(PL080State, PL080)
-typedef struct PL080State {
+struct PL080State {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -66,6 +68,6 @@ typedef struct PL080State {
MemoryRegion *downstream;
AddressSpace downstream_as;
-} PL080State;
+};
#endif
diff --git a/include/hw/dma/sifive_pdma.h b/include/hw/dma/sifive_pdma.h
new file mode 100644
index 0000000000..8c6cfa7f32
--- /dev/null
+++ b/include/hw/dma/sifive_pdma.h
@@ -0,0 +1,59 @@
+/*
+ * SiFive Platform DMA emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SIFIVE_PDMA_H
+#define SIFIVE_PDMA_H
+
+#include "hw/sysbus.h"
+
+struct sifive_pdma_chan {
+ uint32_t control;
+ uint32_t next_config;
+ uint64_t next_bytes;
+ uint64_t next_dst;
+ uint64_t next_src;
+ uint32_t exec_config;
+ uint64_t exec_bytes;
+ uint64_t exec_dst;
+ uint64_t exec_src;
+ int state;
+};
+
+#define SIFIVE_PDMA_CHANS 4
+#define SIFIVE_PDMA_IRQS (SIFIVE_PDMA_CHANS * 2)
+#define SIFIVE_PDMA_REG_SIZE 0x100000
+#define SIFIVE_PDMA_CHAN_NO(reg) ((reg & (SIFIVE_PDMA_REG_SIZE - 1)) >> 12)
+
+typedef struct SiFivePDMAState {
+ SysBusDevice parent;
+ MemoryRegion iomem;
+ qemu_irq irq[SIFIVE_PDMA_IRQS];
+
+ struct sifive_pdma_chan chan[SIFIVE_PDMA_CHANS];
+} SiFivePDMAState;
+
+#define TYPE_SIFIVE_PDMA "sifive.pdma"
+
+#define SIFIVE_PDMA(obj) \
+ OBJECT_CHECK(SiFivePDMAState, (obj), TYPE_SIFIVE_PDMA)
+
+#endif /* SIFIVE_PDMA_H */
diff --git a/include/hw/dma/xlnx-zdma.h b/include/hw/dma/xlnx-zdma.h
index 0b240b4c3c..efc75217d5 100644
--- a/include/hw/dma/xlnx-zdma.h
+++ b/include/hw/dma/xlnx-zdma.h
@@ -32,6 +32,7 @@
#include "hw/sysbus.h"
#include "hw/register.h"
#include "sysemu/dma.h"
+#include "qom/object.h"
#define ZDMA_R_MAX (0x204 / 4)
@@ -50,12 +51,12 @@ typedef union {
uint32_t words[4];
} XlnxZDMADescr;
-typedef struct XlnxZDMA {
+struct XlnxZDMA {
SysBusDevice parent_obj;
MemoryRegion iomem;
MemTxAttrs attr;
MemoryRegion *dma_mr;
- AddressSpace *dma_as;
+ AddressSpace dma_as;
qemu_irq irq_zdma_ch_imr;
struct {
@@ -74,11 +75,10 @@ typedef struct XlnxZDMA {
/* We don't model the common bufs. Must be at least 16 bytes
to model write only mode. */
uint8_t buf[2048];
-} XlnxZDMA;
+};
#define TYPE_XLNX_ZDMA "xlnx.zdma"
-#define XLNX_ZDMA(obj) \
- OBJECT_CHECK(XlnxZDMA, (obj), TYPE_XLNX_ZDMA)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZDMA, XLNX_ZDMA)
#endif /* XLNX_ZDMA_H */
diff --git a/include/hw/dma/xlnx-zynq-devcfg.h b/include/hw/dma/xlnx-zynq-devcfg.h
index 9f5119a89a..e4cf085d70 100644
--- a/include/hw/dma/xlnx-zynq-devcfg.h
+++ b/include/hw/dma/xlnx-zynq-devcfg.h
@@ -25,14 +25,15 @@
*/
#ifndef XLNX_ZYNQ_DEVCFG_H
+#define XLNX_ZYNQ_DEVCFG_H
#include "hw/register.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_XLNX_ZYNQ_DEVCFG "xlnx.ps7-dev-cfg"
-#define XLNX_ZYNQ_DEVCFG(obj) \
- OBJECT_CHECK(XlnxZynqDevcfg, (obj), TYPE_XLNX_ZYNQ_DEVCFG)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG)
#define XLNX_ZYNQ_DEVCFG_R_MAX (0x100 / 4)
@@ -45,7 +46,7 @@ typedef struct XlnxZynqDevcfgDMACmd {
uint32_t dest_len;
} XlnxZynqDevcfgDMACmd;
-typedef struct XlnxZynqDevcfg {
+struct XlnxZynqDevcfg {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -56,7 +57,6 @@ typedef struct XlnxZynqDevcfg {
uint32_t regs[XLNX_ZYNQ_DEVCFG_R_MAX];
RegisterInfo regs_info[XLNX_ZYNQ_DEVCFG_R_MAX];
-} XlnxZynqDevcfg;
+};
-#define XLNX_ZYNQ_DEVCFG_H
#endif
diff --git a/include/hw/dma/xlnx_csu_dma.h b/include/hw/dma/xlnx_csu_dma.h
new file mode 100644
index 0000000000..922ab80eb6
--- /dev/null
+++ b/include/hw/dma/xlnx_csu_dma.h
@@ -0,0 +1,72 @@
+/*
+ * Xilinx Platform CSU Stream DMA emulation
+ *
+ * This implementation is based on
+ * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XLNX_CSU_DMA_H
+#define XLNX_CSU_DMA_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/ptimer.h"
+#include "hw/stream.h"
+
+#define TYPE_XLNX_CSU_DMA "xlnx.csu_dma"
+
+#define XLNX_CSU_DMA_R_MAX (0x2c / 4)
+
+typedef struct XlnxCSUDMA {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ MemTxAttrs attr;
+ MemoryRegion *dma_mr;
+ AddressSpace dma_as;
+ qemu_irq irq;
+ StreamSink *tx_dev; /* Used as generic StreamSink */
+ ptimer_state *src_timer;
+
+ uint16_t width;
+ bool is_dst;
+ bool r_size_last_word;
+
+ StreamCanPushNotifyFn notify;
+ void *notify_opaque;
+
+ uint32_t regs[XLNX_CSU_DMA_R_MAX];
+ RegisterInfo regs_info[XLNX_CSU_DMA_R_MAX];
+} XlnxCSUDMA;
+
+OBJECT_DECLARE_TYPE(XlnxCSUDMA, XlnxCSUDMAClass, XLNX_CSU_DMA)
+
+struct XlnxCSUDMAClass {
+ SysBusDeviceClass parent_class;
+
+ /*
+ * read: Start a read transfer on a Xilinx CSU DMA engine
+ *
+ * @s: the Xilinx CSU DMA engine to start the transfer on
+ * @addr: the address to read
+ * @len: the number of bytes to read at 'addr'
+ *
+ * @return a MemTxResult indicating whether the operation succeeded ('len'
+ * bytes were read) or failed.
+ */
+ MemTxResult (*read)(XlnxCSUDMA *s, hwaddr addr, uint32_t len);
+};
+
+#endif
diff --git a/include/hw/dma/xlnx_dpdma.h b/include/hw/dma/xlnx_dpdma.h
index 7a304a5bb4..40537a848b 100644
--- a/include/hw/dma/xlnx_dpdma.h
+++ b/include/hw/dma/xlnx_dpdma.h
@@ -28,6 +28,7 @@
#include "hw/sysbus.h"
#include "ui/console.h"
#include "sysemu/dma.h"
+#include "qom/object.h"
#define XLNX_DPDMA_REG_ARRAY_SIZE (0x1000 >> 2)
@@ -42,10 +43,9 @@ struct XlnxDPDMAState {
qemu_irq irq;
};
-typedef struct XlnxDPDMAState XlnxDPDMAState;
#define TYPE_XLNX_DPDMA "xlnx.dpdma"
-#define XLNX_DPDMA(obj) OBJECT_CHECK(XlnxDPDMAState, (obj), TYPE_XLNX_DPDMA)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxDPDMAState, XLNX_DPDMA)
/*
* xlnx_dpdma_start_operation: Start the operation on the specified channel. The
diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h
index e2cb675195..9c35d1b9da 100644
--- a/include/hw/elf_ops.h
+++ b/include/hw/elf_ops.h
@@ -1,30 +1,30 @@
static void glue(bswap_ehdr, SZ)(struct elfhdr *ehdr)
{
- bswap16s(&ehdr->e_type); /* Object file type */
- bswap16s(&ehdr->e_machine); /* Architecture */
- bswap32s(&ehdr->e_version); /* Object file version */
- bswapSZs(&ehdr->e_entry); /* Entry point virtual address */
- bswapSZs(&ehdr->e_phoff); /* Program header table file offset */
- bswapSZs(&ehdr->e_shoff); /* Section header table file offset */
- bswap32s(&ehdr->e_flags); /* Processor-specific flags */
- bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
- bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
- bswap16s(&ehdr->e_phnum); /* Program header table entry count */
- bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
- bswap16s(&ehdr->e_shnum); /* Section header table entry count */
- bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
+ bswap16s(&ehdr->e_type); /* Object file type */
+ bswap16s(&ehdr->e_machine); /* Architecture */
+ bswap32s(&ehdr->e_version); /* Object file version */
+ bswapSZs(&ehdr->e_entry); /* Entry point virtual address */
+ bswapSZs(&ehdr->e_phoff); /* Program header table file offset */
+ bswapSZs(&ehdr->e_shoff); /* Section header table file offset */
+ bswap32s(&ehdr->e_flags); /* Processor-specific flags */
+ bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
+ bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
+ bswap16s(&ehdr->e_phnum); /* Program header table entry count */
+ bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
+ bswap16s(&ehdr->e_shnum); /* Section header table entry count */
+ bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
}
static void glue(bswap_phdr, SZ)(struct elf_phdr *phdr)
{
- bswap32s(&phdr->p_type); /* Segment type */
- bswapSZs(&phdr->p_offset); /* Segment file offset */
- bswapSZs(&phdr->p_vaddr); /* Segment virtual address */
- bswapSZs(&phdr->p_paddr); /* Segment physical address */
- bswapSZs(&phdr->p_filesz); /* Segment size in file */
- bswapSZs(&phdr->p_memsz); /* Segment size in memory */
- bswap32s(&phdr->p_flags); /* Segment flags */
- bswapSZs(&phdr->p_align); /* Segment alignment */
+ bswap32s(&phdr->p_type); /* Segment type */
+ bswapSZs(&phdr->p_offset); /* Segment file offset */
+ bswapSZs(&phdr->p_vaddr); /* Segment virtual address */
+ bswapSZs(&phdr->p_paddr); /* Segment physical address */
+ bswapSZs(&phdr->p_filesz); /* Segment size in file */
+ bswapSZs(&phdr->p_memsz); /* Segment size in memory */
+ bswap32s(&phdr->p_flags); /* Segment flags */
+ bswapSZs(&phdr->p_align); /* Segment alignment */
}
static void glue(bswap_shdr, SZ)(struct elf_shdr *shdr)
@@ -104,19 +104,21 @@ static int glue(symcmp, SZ)(const void *s0, const void *s1)
: ((sym0->st_value > sym1->st_value) ? 1 : 0);
}
-static int glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
- int clear_lsb, symbol_fn_t sym_cb)
+static void glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
+ int clear_lsb, symbol_fn_t sym_cb)
{
- struct elf_shdr *symtab, *strtab, *shdr_table = NULL;
- struct elf_sym *syms = NULL;
+ struct elf_shdr *symtab, *strtab;
+ g_autofree struct elf_shdr *shdr_table = NULL;
+ g_autofree struct elf_sym *syms = NULL;
+ g_autofree char *str = NULL;
struct syminfo *s;
int nsyms, i;
- char *str = NULL;
shdr_table = load_at(fd, ehdr->e_shoff,
sizeof(struct elf_shdr) * ehdr->e_shnum);
- if (!shdr_table)
- return -1;
+ if (!shdr_table) {
+ return;
+ }
if (must_swab) {
for (i = 0; i < ehdr->e_shnum; i++) {
@@ -125,23 +127,25 @@ static int glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
}
symtab = glue(find_section, SZ)(shdr_table, ehdr->e_shnum, SHT_SYMTAB);
- if (!symtab)
- goto fail;
+ if (!symtab) {
+ return;
+ }
syms = load_at(fd, symtab->sh_offset, symtab->sh_size);
- if (!syms)
- goto fail;
+ if (!syms) {
+ return;
+ }
nsyms = symtab->sh_size / sizeof(struct elf_sym);
/* String table */
if (symtab->sh_link >= ehdr->e_shnum) {
- goto fail;
+ return;
}
strtab = &shdr_table[symtab->sh_link];
str = load_at(fd, strtab->sh_offset, strtab->sh_size);
if (!str) {
- goto fail;
+ return;
}
i = 0;
@@ -170,8 +174,13 @@ static int glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
}
i++;
}
- syms = g_realloc(syms, nsyms * sizeof(*syms));
+ /* check we have symbols left */
+ if (nsyms == 0) {
+ return;
+ }
+
+ syms = g_realloc(syms, nsyms * sizeof(*syms));
qsort(syms, nsyms, sizeof(*syms), glue(symcmp, SZ));
for (i = 0; i < nsyms - 1; i++) {
if (syms[i].st_size == 0) {
@@ -182,18 +191,11 @@ static int glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
/* Commit */
s = g_malloc0(sizeof(*s));
s->lookup_symbol = glue(lookup_symbol, SZ);
- glue(s->disas_symtab.elf, SZ) = syms;
+ glue(s->disas_symtab.elf, SZ) = g_steal_pointer(&syms);
s->disas_num_syms = nsyms;
- s->disas_strtab = str;
+ s->disas_strtab = g_steal_pointer(&str);
s->next = syminfos;
syminfos = s;
- g_free(shdr_table);
- return 0;
- fail:
- g_free(syms);
- g_free(str);
- g_free(shdr_table);
- return -1;
}
static int glue(elf_reloc, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
@@ -265,23 +267,71 @@ fail:
return ret;
}
-static int glue(load_elf, SZ)(const char *name, int fd,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque,
- int must_swab, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr,
- int elf_machine, int clear_lsb, int data_swab,
- AddressSpace *as, bool load_rom,
- symbol_fn_t sym_cb)
+/*
+ * Given 'nhdr', a pointer to a range of ELF Notes, search through them
+ * for a note matching type 'elf_note_type' and return a pointer to
+ * the matching ELF note.
+ */
+static struct elf_note *glue(get_elf_note_type, SZ)(struct elf_note *nhdr,
+ elf_word note_size,
+ elf_word phdr_align,
+ elf_word elf_note_type)
+{
+ elf_word nhdr_size = sizeof(struct elf_note);
+ elf_word elf_note_entry_offset = 0;
+ elf_word note_type;
+ elf_word nhdr_namesz;
+ elf_word nhdr_descsz;
+
+ if (nhdr == NULL) {
+ return NULL;
+ }
+
+ note_type = nhdr->n_type;
+ while (note_type != elf_note_type) {
+ nhdr_namesz = nhdr->n_namesz;
+ nhdr_descsz = nhdr->n_descsz;
+
+ elf_note_entry_offset = nhdr_size +
+ QEMU_ALIGN_UP(nhdr_namesz, phdr_align) +
+ QEMU_ALIGN_UP(nhdr_descsz, phdr_align);
+
+ /*
+ * If the offset calculated in this iteration exceeds the
+ * supplied size, we are done and no matching note was found.
+ */
+ if (elf_note_entry_offset > note_size) {
+ return NULL;
+ }
+
+ /* skip to the next ELF Note entry */
+ nhdr = (void *)nhdr + elf_note_entry_offset;
+ note_type = nhdr->n_type;
+ }
+
+ return nhdr;
+}
+
+static ssize_t glue(load_elf, SZ)(const char *name, int fd,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque,
+ int must_swab, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr,
+ uint32_t *pflags, int elf_machine,
+ int clear_lsb, int data_swab,
+ AddressSpace *as, bool load_rom,
+ symbol_fn_t sym_cb)
{
struct elfhdr ehdr;
struct elf_phdr *phdr = NULL, *ph;
- int size, i, total_size;
- elf_word mem_size, file_size;
+ int size, i;
+ ssize_t total_size;
+ elf_word mem_size, file_size, data_offset;
uint64_t addr, low = (uint64_t)-1, high = 0;
+ GMappedFile *mapped_file = NULL;
uint8_t *data = NULL;
- char label[128];
- int ret = ELF_LOAD_FAILED;
+ ssize_t ret = ELF_LOAD_FAILED;
if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
goto fail;
@@ -319,14 +369,6 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
}
break;
- case EM_MOXIE:
- if (ehdr.e_machine != EM_MOXIE) {
- if (ehdr.e_machine != EM_MOXIE_OLD) {
- ret = ELF_LOAD_WRONG_ARCH;
- goto fail;
- }
- }
- break;
case EM_MIPS:
case EM_NANOMIPS:
if ((ehdr.e_machine != EM_MIPS) &&
@@ -342,8 +384,12 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
}
- if (pentry)
- *pentry = (uint64_t)(elf_sword)ehdr.e_entry;
+ if (pflags) {
+ *pflags = ehdr.e_flags;
+ }
+ if (pentry) {
+ *pentry = ehdr.e_entry;
+ }
glue(load_symbols, SZ)(&ehdr, fd, must_swab, clear_lsb, sym_cb);
@@ -363,20 +409,32 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
}
+ /*
+ * Since we want to be able to modify the mapped buffer, we set the
+ * 'writable' parameter to 'true'. Modifications to the buffer are not
+ * written back to the file.
+ */
+ mapped_file = g_mapped_file_new_from_fd(fd, true, NULL);
+ if (!mapped_file) {
+ goto fail;
+ }
+
total_size = 0;
for(i = 0; i < ehdr.e_phnum; i++) {
ph = &phdr[i];
if (ph->p_type == PT_LOAD) {
mem_size = ph->p_memsz; /* Size of the ROM */
file_size = ph->p_filesz; /* Size of the allocated data */
- data = g_malloc0(file_size);
- if (ph->p_filesz > 0) {
- if (lseek(fd, ph->p_offset, SEEK_SET) < 0) {
- goto fail;
- }
- if (read(fd, data, file_size) != file_size) {
+ data_offset = ph->p_offset; /* Offset where the data is located */
+
+ if (file_size > 0) {
+ if (g_mapped_file_get_length(mapped_file) <
+ file_size + data_offset) {
goto fail;
}
+
+ data = (uint8_t *)g_mapped_file_get_contents(mapped_file);
+ data += data_offset;
}
/* The ELF spec is somewhat vague about the purpose of the
@@ -426,6 +484,11 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
}
+ if (mem_size > SSIZE_MAX - total_size) {
+ ret = ELF_LOAD_TOO_BIG;
+ goto fail;
+ }
+
/* address_offset is hack for kernel images that are
linked at the wrong physical address. */
if (translate_fn) {
@@ -437,7 +500,7 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
if (data_swab) {
- int j;
+ elf_word j;
for (j = 0; j < file_size; j += (1 << data_swab)) {
uint8_t *dp = data + j;
switch (data_swab) {
@@ -467,25 +530,45 @@ static int glue(load_elf, SZ)(const char *name, int fd,
*pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr;
}
- if (mem_size == 0) {
- /* Some ELF files really do have segments of zero size;
- * just ignore them rather than trying to create empty
- * ROM blobs, because the zero-length blob can falsely
- * trigger the overlapping-ROM-blobs check.
- */
- g_free(data);
- } else {
+ /* Some ELF files really do have segments of zero size;
+ * just ignore them rather than trying to create empty
+ * ROM blobs, because the zero-length blob can falsely
+ * trigger the overlapping-ROM-blobs check.
+ */
+ if (mem_size != 0) {
if (load_rom) {
- snprintf(label, sizeof(label), "phdr #%d: %s", i, name);
-
- /* rom_add_elf_program() seize the ownership of 'data' */
- rom_add_elf_program(label, data, file_size, mem_size,
- addr, as);
+ g_autofree char *label =
+ g_strdup_printf("%s ELF program header segment %d",
+ name, i);
+
+ /*
+ * rom_add_elf_program() takes its own reference to
+ * 'mapped_file'.
+ */
+ rom_add_elf_program(label, mapped_file, data, file_size,
+ mem_size, addr, as);
} else {
- address_space_write(as ? as : &address_space_memory,
- addr, MEMTXATTRS_UNSPECIFIED,
- data, file_size);
- g_free(data);
+ MemTxResult res;
+
+ res = address_space_write(as ? as : &address_space_memory,
+ addr, MEMTXATTRS_UNSPECIFIED,
+ data, file_size);
+ if (res != MEMTX_OK) {
+ goto fail;
+ }
+ /*
+ * Zero out the space that is not copied
+ * from the file
+ */
+ if (file_size < mem_size) {
+ res = address_space_set(as ? as : &address_space_memory,
+ addr + file_size, 0,
+ mem_size - file_size,
+ MEMTXATTRS_UNSPECIFIED);
+ if (res != MEMTX_OK) {
+ goto fail;
+ }
+ }
}
}
@@ -496,16 +579,49 @@ static int glue(load_elf, SZ)(const char *name, int fd,
high = addr + mem_size;
data = NULL;
+
+ } else if (ph->p_type == PT_NOTE && elf_note_fn) {
+ struct elf_note *nhdr = NULL;
+
+ file_size = ph->p_filesz; /* Size of the range of ELF notes */
+ data_offset = ph->p_offset; /* Offset where the notes are located */
+
+ if (file_size > 0) {
+ if (g_mapped_file_get_length(mapped_file) <
+ file_size + data_offset) {
+ goto fail;
+ }
+
+ data = (uint8_t *)g_mapped_file_get_contents(mapped_file);
+ data += data_offset;
+ }
+
+ /*
+ * Search the ELF notes to find one with a type matching the
+ * value passed in via 'translate_opaque'
+ */
+ nhdr = (struct elf_note *)data;
+ assert(translate_opaque != NULL);
+ nhdr = glue(get_elf_note_type, SZ)(nhdr, file_size, ph->p_align,
+ *(uint64_t *)translate_opaque);
+ if (nhdr != NULL) {
+ elf_note_fn((void *)nhdr, (void *)&ph->p_align, SZ == 64);
+ }
+ data = NULL;
}
}
- g_free(phdr);
- if (lowaddr)
- *lowaddr = (uint64_t)(elf_sword)low;
- if (highaddr)
- *highaddr = (uint64_t)(elf_sword)high;
- return total_size;
+
+ if (lowaddr) {
+ *lowaddr = low;
+ }
+ if (highaddr) {
+ *highaddr = high;
+ }
+ ret = total_size;
fail:
- g_free(data);
+ if (mapped_file) {
+ g_mapped_file_unref(mapped_file);
+ }
g_free(phdr);
return ret;
}
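
The note-walking stride in the new get_elf_note_type() helper pads both the name and descriptor sizes to the segment alignment before stepping to the next header. A standalone sketch of that arithmetic with hypothetical values (ALIGN_UP mirrors QEMU_ALIGN_UP):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(n, a) (((n) + (a) - 1) / (a) * (a))  /* same result as QEMU_ALIGN_UP */

    int main(void)
    {
        uint64_t nhdr_size = 12;  /* n_namesz, n_descsz, n_type: three 32-bit words */
        uint64_t namesz = 5;      /* e.g. "QEMU" plus the terminating NUL */
        uint64_t descsz = 4;
        uint64_t align = 4;       /* p_align of the PT_NOTE segment */

        /* Offset from the current note header to the next one, as in the loop */
        uint64_t next = nhdr_size + ALIGN_UP(namesz, align) + ALIGN_UP(descsz, align);
        printf("next note at +%" PRIu64 " bytes\n", next);  /* 12 + 8 + 4 = 24 */
        return 0;
    }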
diff --git a/include/hw/empty_slot.h b/include/hw/empty_slot.h
deleted file mode 100644
index 123a9f8989..0000000000
--- a/include/hw/empty_slot.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef HW_EMPTY_SLOT_H
-#define HW_EMPTY_SLOT_H
-
-/* empty_slot.c */
-void empty_slot_init(hwaddr addr, uint64_t slot_size);
-
-#endif
diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h
index eeb5a4d7b6..8d3fb2fb3b 100644
--- a/include/hw/firmware/smbios.h
+++ b/include/hw/firmware/smbios.h
@@ -1,6 +1,9 @@
#ifndef QEMU_SMBIOS_H
#define QEMU_SMBIOS_H
+#include "qapi/qapi-types-machine.h"
+#include "qemu/bitmap.h"
+
/*
* SMBIOS Support
*
@@ -14,8 +17,28 @@
*
*/
+extern uint8_t *usr_blobs;
+extern GArray *usr_blobs_sizes;
+
+typedef struct {
+ const char *vendor, *version, *date;
+ bool have_major_minor, uefi;
+ uint8_t major, minor;
+} smbios_type0_t;
+extern smbios_type0_t smbios_type0;
+
+typedef struct {
+ const char *manufacturer, *product, *version, *serial, *sku, *family;
+ /* uuid is in qemu_uuid */
+} smbios_type1_t;
+extern smbios_type1_t smbios_type1;
#define SMBIOS_MAX_TYPE 127
+extern DECLARE_BITMAP(smbios_have_binfile_bitmap, SMBIOS_MAX_TYPE + 1);
+extern DECLARE_BITMAP(smbios_have_fields_bitmap, SMBIOS_MAX_TYPE + 1);
+
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
/* memory area description, used by type 19 table */
struct smbios_phys_mem_area {
@@ -23,14 +46,6 @@ struct smbios_phys_mem_area {
uint64_t length;
};
-/*
- * SMBIOS spec defined tables
- */
-typedef enum SmbiosEntryPointType {
- SMBIOS_ENTRY_POINT_21,
- SMBIOS_ENTRY_POINT_30,
-} SmbiosEntryPointType;
-
/* SMBIOS Entry Point
* There are two types of entry points defined in the SMBIOS specification
* (see below). BIOS must place the entry point(s) at a 16-byte-aligned
@@ -162,6 +177,7 @@ struct smbios_type_3 {
uint8_t height;
uint8_t number_of_power_cords;
uint8_t contained_element_count;
+ uint8_t contained_element_record_length;
uint8_t sku_number_str;
/* contained elements follow */
} QEMU_PACKED;
@@ -192,6 +208,43 @@ struct smbios_type_4 {
uint8_t thread_count;
uint16_t processor_characteristics;
uint16_t processor_family2;
+ /* SMBIOS spec 3.0.0, Table 21 */
+ uint16_t core_count2;
+ uint16_t core_enabled2;
+ uint16_t thread_count2;
+} QEMU_PACKED;
+
+typedef enum smbios_type_4_len_ver {
+ SMBIOS_TYPE_4_LEN_V28 = offsetofend(struct smbios_type_4,
+ processor_family2),
+ SMBIOS_TYPE_4_LEN_V30 = offsetofend(struct smbios_type_4, thread_count2),
+} smbios_type_4_len_ver;
+
+/* SMBIOS type 8 - Port Connector Information */
+struct smbios_type_8 {
+ struct smbios_structure_header header;
+ uint8_t internal_reference_str;
+ uint8_t internal_connector_type;
+ uint8_t external_reference_str;
+ uint8_t external_connector_type;
+ uint8_t port_type;
+} QEMU_PACKED;
+
+/* SMBIOS type 9 - System Slots (v2.1+) */
+struct smbios_type_9 {
+ struct smbios_structure_header header;
+ uint8_t slot_designation;
+ uint8_t slot_type;
+ uint8_t slot_data_bus_width;
+ uint8_t current_usage;
+ uint8_t slot_length;
+ uint16_t slot_id;
+ uint8_t slot_characteristics1;
+ uint8_t slot_characteristics2;
+ /* SMBIOS spec v2.6+ */
+ uint16_t segment_group_number;
+ uint8_t bus_number;
+ uint8_t device_number;
} QEMU_PACKED;
/* SMBIOS type 11 - OEM strings */
@@ -257,19 +310,36 @@ struct smbios_type_32 {
uint8_t boot_status;
} QEMU_PACKED;
+/* SMBIOS type 41 - Onboard Devices Extended Information */
+struct smbios_type_41 {
+ struct smbios_structure_header header;
+ uint8_t reference_designation_str;
+ uint8_t device_type;
+ uint8_t device_type_instance;
+ uint16_t segment_group_number;
+ uint8_t bus_number;
+ uint8_t device_number;
+} QEMU_PACKED;
+
/* SMBIOS type 127 -- End-of-table */
struct smbios_type_127 {
struct smbios_structure_header header;
} QEMU_PACKED;
+bool smbios_validate_table(SmbiosEntryPointType ep_type, Error **errp);
+void smbios_add_usr_blob_size(size_t size);
void smbios_entry_add(QemuOpts *opts, Error **errp);
void smbios_set_cpuid(uint32_t version, uint32_t features);
void smbios_set_defaults(const char *manufacturer, const char *product,
- const char *version, bool legacy_mode,
- bool uuid_encoded, SmbiosEntryPointType ep_type);
-uint8_t *smbios_get_table_legacy(size_t *length);
-void smbios_get_tables(const struct smbios_phys_mem_area *mem_array,
+ const char *version,
+ bool uuid_encoded);
+void smbios_set_default_processor_family(uint16_t processor_family);
+uint8_t *smbios_get_table_legacy(size_t *length, Error **errp);
+void smbios_get_tables(MachineState *ms,
+ SmbiosEntryPointType ep_type,
+ const struct smbios_phys_mem_area *mem_array,
const unsigned int mem_array_size,
uint8_t **tables, size_t *tables_len,
- uint8_t **anchor, size_t *anchor_len);
+ uint8_t **anchor, size_t *anchor_len,
+ Error **errp);
#endif /* QEMU_SMBIOS_H */
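
The new offsetofend() macro lets a length constant be named after the last member it covers, which is how the two type-4 lengths above are derived. A small illustrative helper (the function name is hypothetical and assumes this header is included):

    /*
     * offsetofend(T, m) == offsetof(T, m) + sizeof_field(T, m), so:
     *   SMBIOS_TYPE_4_LEN_V28 - table length up to and including processor_family2
     *   SMBIOS_TYPE_4_LEN_V30 - V28 plus core_count2, core_enabled2 and
     *                           thread_count2 (three more uint16_t fields)
     */
    static inline size_t smbios_type4_len_for(bool smbios_30)
    {
        return smbios_30 ? SMBIOS_TYPE_4_LEN_V30 : SMBIOS_TYPE_4_LEN_V28;
    }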
diff --git a/include/hw/fsi/aspeed_apb2opb.h b/include/hw/fsi/aspeed_apb2opb.h
new file mode 100644
index 0000000000..f6a2387abf
--- /dev/null
+++ b/include/hw/fsi/aspeed_apb2opb.h
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * ASPEED APB2OPB Bridge
+ * IBM On-Chip Peripheral Bus
+ */
+#ifndef FSI_ASPEED_APB2OPB_H
+#define FSI_ASPEED_APB2OPB_H
+
+#include "exec/memory.h"
+#include "hw/fsi/fsi-master.h"
+#include "hw/sysbus.h"
+
+#define TYPE_FSI_OPB "fsi.opb"
+
+#define TYPE_OP_BUS "opb"
+OBJECT_DECLARE_SIMPLE_TYPE(OPBus, OP_BUS)
+
+typedef struct OPBus {
+ BusState bus;
+
+ MemoryRegion mr;
+ AddressSpace as;
+} OPBus;
+
+#define TYPE_ASPEED_APB2OPB "aspeed.apb2opb"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedAPB2OPBState, ASPEED_APB2OPB)
+
+#define ASPEED_APB2OPB_NR_REGS ((0xe8 >> 2) + 1)
+
+#define ASPEED_FSI_NUM 2
+
+typedef struct AspeedAPB2OPBState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ uint32_t regs[ASPEED_APB2OPB_NR_REGS];
+ qemu_irq irq;
+
+ OPBus opb[ASPEED_FSI_NUM];
+ FSIMasterState fsi[ASPEED_FSI_NUM];
+} AspeedAPB2OPBState;
+
+#endif /* FSI_ASPEED_APB2OPB_H */
diff --git a/include/hw/fsi/cfam.h b/include/hw/fsi/cfam.h
new file mode 100644
index 0000000000..7abc3b287b
--- /dev/null
+++ b/include/hw/fsi/cfam.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Common FRU Access Macro
+ */
+#ifndef FSI_CFAM_H
+#define FSI_CFAM_H
+
+#include "exec/memory.h"
+
+#include "hw/fsi/fsi.h"
+#include "hw/fsi/lbus.h"
+
+#define TYPE_FSI_CFAM "cfam"
+#define FSI_CFAM(obj) OBJECT_CHECK(FSICFAMState, (obj), TYPE_FSI_CFAM)
+
+/* P9-ism */
+#define CFAM_CONFIG_NR_REGS 0x28
+
+typedef struct FSICFAMState {
+ /* < private > */
+ FSISlaveState parent;
+
+ /* CFAM config address space */
+ MemoryRegion config_iomem;
+
+ MemoryRegion mr;
+
+ FSILBus lbus;
+ FSIScratchPad scratchpad;
+} FSICFAMState;
+
+#endif /* FSI_CFAM_H */
diff --git a/include/hw/fsi/fsi-master.h b/include/hw/fsi/fsi-master.h
new file mode 100644
index 0000000000..68e5f56db2
--- /dev/null
+++ b/include/hw/fsi/fsi-master.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Flexible Service Interface Master
+ */
+#ifndef FSI_FSI_MASTER_H
+#define FSI_FSI_MASTER_H
+
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+#include "hw/fsi/fsi.h"
+#include "hw/fsi/cfam.h"
+
+#define TYPE_FSI_MASTER "fsi.master"
+OBJECT_DECLARE_SIMPLE_TYPE(FSIMasterState, FSI_MASTER)
+
+#define FSI_MASTER_NR_REGS ((0x2e0 >> 2) + 1)
+
+typedef struct FSIMasterState {
+ DeviceState parent;
+ MemoryRegion iomem;
+ MemoryRegion opb2fsi;
+
+ FSIBus bus;
+
+ uint32_t regs[FSI_MASTER_NR_REGS];
+ FSICFAMState cfam;
+} FSIMasterState;
+
+
+#endif /* FSI_FSI_MASTER_H */
diff --git a/include/hw/fsi/fsi.h b/include/hw/fsi/fsi.h
new file mode 100644
index 0000000000..e00f6ef078
--- /dev/null
+++ b/include/hw/fsi/fsi.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Flexible Service Interface
+ */
+#ifndef FSI_FSI_H
+#define FSI_FSI_H
+
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+#include "hw/fsi/lbus.h"
+#include "qemu/bitops.h"
+
+/* Bitwise operations at the word level. */
+#define BE_GENMASK(hb, lb) MAKE_64BIT_MASK((lb), ((hb) - (lb) + 1))
+
+#define TYPE_FSI_BUS "fsi.bus"
+OBJECT_DECLARE_SIMPLE_TYPE(FSIBus, FSI_BUS)
+
+typedef struct FSIBus {
+ BusState bus;
+} FSIBus;
+
+#define TYPE_FSI_SLAVE "fsi.slave"
+OBJECT_DECLARE_SIMPLE_TYPE(FSISlaveState, FSI_SLAVE)
+
+#define FSI_SLAVE_CONTROL_NR_REGS ((0x40 >> 2) + 1)
+
+typedef struct FSISlaveState {
+ DeviceState parent;
+
+ MemoryRegion iomem;
+ uint32_t regs[FSI_SLAVE_CONTROL_NR_REGS];
+} FSISlaveState;
+
+#endif /* FSI_FSI_H */
diff --git a/include/hw/fsi/lbus.h b/include/hw/fsi/lbus.h
new file mode 100644
index 0000000000..558268c013
--- /dev/null
+++ b/include/hw/fsi/lbus.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Local bus and connected device structures.
+ */
+#ifndef FSI_LBUS_H
+#define FSI_LBUS_H
+
+#include "hw/qdev-core.h"
+#include "qemu/units.h"
+#include "exec/memory.h"
+
+#define TYPE_FSI_LBUS_DEVICE "fsi.lbus.device"
+OBJECT_DECLARE_SIMPLE_TYPE(FSILBusDevice, FSI_LBUS_DEVICE)
+
+typedef struct FSILBusDevice {
+ DeviceState parent;
+
+ MemoryRegion iomem;
+} FSILBusDevice;
+
+#define TYPE_FSI_LBUS "fsi.lbus"
+OBJECT_DECLARE_SIMPLE_TYPE(FSILBus, FSI_LBUS)
+
+typedef struct FSILBus {
+ BusState bus;
+
+ MemoryRegion mr;
+} FSILBus;
+
+#define TYPE_FSI_SCRATCHPAD "fsi.scratchpad"
+#define SCRATCHPAD(obj) OBJECT_CHECK(FSIScratchPad, (obj), TYPE_FSI_SCRATCHPAD)
+
+#define FSI_SCRATCHPAD_NR_REGS 4
+
+typedef struct FSIScratchPad {
+ FSILBusDevice parent;
+
+ uint32_t regs[FSI_SCRATCHPAD_NR_REGS];
+} FSIScratchPad;
+
+#endif /* FSI_LBUS_H */
diff --git a/include/hw/fw-path-provider.h b/include/hw/fw-path-provider.h
index 5df893a3d8..8e1d45651c 100644
--- a/include/hw/fw-path-provider.h
+++ b/include/hw/fw-path-provider.h
@@ -18,25 +18,23 @@
#ifndef FW_PATH_PROVIDER_H
#define FW_PATH_PROVIDER_H
-#include "qemu-common.h"
#include "qom/object.h"
#define TYPE_FW_PATH_PROVIDER "fw-path-provider"
-#define FW_PATH_PROVIDER_CLASS(klass) \
- OBJECT_CLASS_CHECK(FWPathProviderClass, (klass), TYPE_FW_PATH_PROVIDER)
-#define FW_PATH_PROVIDER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(FWPathProviderClass, (obj), TYPE_FW_PATH_PROVIDER)
+typedef struct FWPathProviderClass FWPathProviderClass;
+DECLARE_CLASS_CHECKERS(FWPathProviderClass, FW_PATH_PROVIDER,
+ TYPE_FW_PATH_PROVIDER)
#define FW_PATH_PROVIDER(obj) \
INTERFACE_CHECK(FWPathProvider, (obj), TYPE_FW_PATH_PROVIDER)
typedef struct FWPathProvider FWPathProvider;
-typedef struct FWPathProviderClass {
+struct FWPathProviderClass {
InterfaceClass parent_class;
char *(*get_dev_path)(FWPathProvider *p, BusState *bus, DeviceState *dev);
-} FWPathProviderClass;
+};
char *fw_path_provider_get_dev_path(FWPathProvider *p, BusState *bus,
DeviceState *dev);
diff --git a/include/hw/gpio/aspeed_gpio.h b/include/hw/gpio/aspeed_gpio.h
new file mode 100644
index 0000000000..904eecf62c
--- /dev/null
+++ b/include/hw/gpio/aspeed_gpio.h
@@ -0,0 +1,110 @@
+/*
+ * ASPEED GPIO Controller
+ *
+ * Copyright (C) 2017-2018 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef ASPEED_GPIO_H
+#define ASPEED_GPIO_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_ASPEED_GPIO "aspeed.gpio"
+OBJECT_DECLARE_TYPE(AspeedGPIOState, AspeedGPIOClass, ASPEED_GPIO)
+
+#define ASPEED_GPIO_MAX_NR_SETS 8
+#define ASPEED_GPIOS_PER_SET 32
+#define ASPEED_REGS_PER_BANK 14
+#define ASPEED_GPIO_MAX_NR_REGS (ASPEED_REGS_PER_BANK * ASPEED_GPIO_MAX_NR_SETS)
+#define ASPEED_GROUPS_PER_SET 4
+#define ASPEED_GPIO_NR_DEBOUNCE_REGS 3
+#define ASPEED_CHARS_PER_GROUP_LABEL 4
+
+typedef struct GPIOSets GPIOSets;
+
+typedef struct GPIOSetProperties {
+ uint32_t input;
+ uint32_t output;
+ char group_label[ASPEED_GROUPS_PER_SET][ASPEED_CHARS_PER_GROUP_LABEL];
+} GPIOSetProperties;
+
+enum GPIORegType {
+ gpio_not_a_reg,
+ gpio_reg_data_value,
+ gpio_reg_direction,
+ gpio_reg_int_enable,
+ gpio_reg_int_sens_0,
+ gpio_reg_int_sens_1,
+ gpio_reg_int_sens_2,
+ gpio_reg_int_status,
+ gpio_reg_reset_tolerant,
+ gpio_reg_debounce_1,
+ gpio_reg_debounce_2,
+ gpio_reg_cmd_source_0,
+ gpio_reg_cmd_source_1,
+ gpio_reg_data_read,
+ gpio_reg_input_mask,
+};
+
+/* GPIO index mode */
+enum GPIORegIndexType {
+ gpio_reg_idx_data = 0,
+ gpio_reg_idx_direction,
+ gpio_reg_idx_interrupt,
+ gpio_reg_idx_debounce,
+ gpio_reg_idx_tolerance,
+ gpio_reg_idx_cmd_src,
+ gpio_reg_idx_input_mask,
+ gpio_reg_idx_reserved,
+ gpio_reg_idx_new_w_cmd_src,
+ gpio_reg_idx_new_r_cmd_src,
+};
+
+typedef struct AspeedGPIOReg {
+ uint16_t set_idx;
+ enum GPIORegType type;
+} AspeedGPIOReg;
+
+struct AspeedGPIOClass {
+ SysBusDevice parent_obj;
+ const GPIOSetProperties *props;
+ uint32_t nr_gpio_pins;
+ uint32_t nr_gpio_sets;
+ const AspeedGPIOReg *reg_table;
+};
+
+struct AspeedGPIOState {
+ /* <private> */
+ SysBusDevice parent;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ int pending;
+ qemu_irq irq;
+ qemu_irq gpios[ASPEED_GPIO_MAX_NR_SETS][ASPEED_GPIOS_PER_SET];
+
+/* Parallel GPIO Registers */
+ uint32_t debounce_regs[ASPEED_GPIO_NR_DEBOUNCE_REGS];
+ struct GPIOSets {
+ uint32_t data_value; /* Reflects pin values */
+ uint32_t data_read; /* Contains last value written to data value */
+ uint32_t direction;
+ uint32_t int_enable;
+ uint32_t int_sens_0;
+ uint32_t int_sens_1;
+ uint32_t int_sens_2;
+ uint32_t int_status;
+ uint32_t reset_tol;
+ uint32_t cmd_source_0;
+ uint32_t cmd_source_1;
+ uint32_t debounce_1;
+ uint32_t debounce_2;
+ uint32_t input_mask;
+ } sets[ASPEED_GPIO_MAX_NR_SETS];
+};
+
+#endif /* ASPEED_GPIO_H */
diff --git a/include/hw/gpio/bcm2835_gpio.h b/include/hw/gpio/bcm2835_gpio.h
index 9f8e0c720c..1c53a05090 100644
--- a/include/hw/gpio/bcm2835_gpio.h
+++ b/include/hw/gpio/bcm2835_gpio.h
@@ -15,8 +15,10 @@
#define BCM2835_GPIO_H
#include "hw/sd/sd.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
-typedef struct BCM2835GpioState {
+struct BCM2835GpioState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -30,10 +32,9 @@ typedef struct BCM2835GpioState {
uint32_t lev0, lev1;
uint8_t sd_fsel;
qemu_irq out[54];
-} BCM2835GpioState;
+};
#define TYPE_BCM2835_GPIO "bcm2835_gpio"
-#define BCM2835_GPIO(obj) \
- OBJECT_CHECK(BCM2835GpioState, (obj), TYPE_BCM2835_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835GpioState, BCM2835_GPIO)
#endif
diff --git a/include/hw/gpio/bcm2838_gpio.h b/include/hw/gpio/bcm2838_gpio.h
new file mode 100644
index 0000000000..f2a57a697f
--- /dev/null
+++ b/include/hw/gpio/bcm2838_gpio.h
@@ -0,0 +1,45 @@
+/*
+ * Raspberry Pi (BCM2838) GPIO Controller
+ * This implementation is based on bcm2835_gpio (hw/gpio/bcm2835_gpio.c)
+ *
+ * Copyright (c) 2022 Auriga LLC
+ *
+ * Authors:
+ * Lotosh, Aleksey <aleksey.lotosh@auriga.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BCM2838_GPIO_H
+#define BCM2838_GPIO_H
+
+#include "hw/sd/sd.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2838_GPIO "bcm2838-gpio"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2838GpioState, BCM2838_GPIO)
+
+#define BCM2838_GPIO_REGS_SIZE 0x1000
+#define BCM2838_GPIO_NUM 58
+#define GPIO_PUP_PDN_CNTRL_NUM 4
+
+struct BCM2838GpioState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ /* SDBus selector */
+ SDBus sdbus;
+ SDBus *sdbus_sdhci;
+ SDBus *sdbus_sdhost;
+
+ uint8_t fsel[BCM2838_GPIO_NUM];
+ uint32_t lev0, lev1;
+ uint8_t sd_fsel;
+ qemu_irq out[BCM2838_GPIO_NUM];
+ uint32_t pup_cntrl_reg[GPIO_PUP_PDN_CNTRL_NUM];
+};
+
+#endif
diff --git a/include/hw/gpio/imx_gpio.h b/include/hw/gpio/imx_gpio.h
index ffab437f23..227860b9f0 100644
--- a/include/hw/gpio/imx_gpio.h
+++ b/include/hw/gpio/imx_gpio.h
@@ -21,9 +21,10 @@
#define IMX_GPIO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX_GPIO "imx.gpio"
-#define IMX_GPIO(obj) OBJECT_CHECK(IMXGPIOState, (obj), TYPE_IMX_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXGPIOState, IMX_GPIO)
#define IMX_GPIO_MEM_SIZE 0x20
@@ -39,7 +40,7 @@
#define IMX_GPIO_PIN_COUNT 32
-typedef struct IMXGPIOState {
+struct IMXGPIOState {
/*< private >*/
SysBusDevice parent_obj;
@@ -58,6 +59,6 @@ typedef struct IMXGPIOState {
qemu_irq irq[2];
qemu_irq output[IMX_GPIO_PIN_COUNT];
-} IMXGPIOState;
+};
#endif /* IMX_GPIO_H */
diff --git a/include/hw/gpio/npcm7xx_gpio.h b/include/hw/gpio/npcm7xx_gpio.h
new file mode 100644
index 0000000000..b1d771bd77
--- /dev/null
+++ b/include/hw/gpio/npcm7xx_gpio.h
@@ -0,0 +1,55 @@
+/*
+ * Nuvoton NPCM7xx General Purpose Input / Output (GPIO)
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef NPCM7XX_GPIO_H
+#define NPCM7XX_GPIO_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/* Number of pins managed by each controller. */
+#define NPCM7XX_GPIO_NR_PINS (32)
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_GPIO_NR_REGS (0x80 / sizeof(uint32_t))
+
+typedef struct NPCM7xxGPIOState {
+ SysBusDevice parent;
+
+ /* Properties to be defined by the SoC */
+ uint32_t reset_pu;
+ uint32_t reset_pd;
+ uint32_t reset_osrc;
+ uint32_t reset_odsc;
+
+ MemoryRegion mmio;
+
+ qemu_irq irq;
+ qemu_irq output[NPCM7XX_GPIO_NR_PINS];
+
+ uint32_t pin_level;
+ uint32_t ext_level;
+ uint32_t ext_driven;
+
+ uint32_t regs[NPCM7XX_GPIO_NR_REGS];
+} NPCM7xxGPIOState;
+
+#define TYPE_NPCM7XX_GPIO "npcm7xx-gpio"
+#define NPCM7XX_GPIO(obj) \
+ OBJECT_CHECK(NPCM7xxGPIOState, (obj), TYPE_NPCM7XX_GPIO)
+
+#endif /* NPCM7XX_GPIO_H */
diff --git a/include/hw/gpio/nrf51_gpio.h b/include/hw/gpio/nrf51_gpio.h
index 337ee534bb..fcfa2bac17 100644
--- a/include/hw/gpio/nrf51_gpio.h
+++ b/include/hw/gpio/nrf51_gpio.h
@@ -27,8 +27,9 @@
#define NRF51_GPIO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_NRF51_GPIO "nrf51_soc.gpio"
-#define NRF51_GPIO(obj) OBJECT_CHECK(NRF51GPIOState, (obj), TYPE_NRF51_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51GPIOState, NRF51_GPIO)
#define NRF51_GPIO_PINS 32
@@ -42,12 +43,12 @@
#define NRF51_GPIO_REG_DIRSET 0x518
#define NRF51_GPIO_REG_DIRCLR 0x51C
#define NRF51_GPIO_REG_CNF_START 0x700
-#define NRF51_GPIO_REG_CNF_END 0x77F
+#define NRF51_GPIO_REG_CNF_END 0x77C
#define NRF51_GPIO_PULLDOWN 1
#define NRF51_GPIO_PULLUP 3
-typedef struct NRF51GPIOState {
+struct NRF51GPIOState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -63,7 +64,8 @@ typedef struct NRF51GPIOState {
uint32_t old_out_connected;
qemu_irq output[NRF51_GPIO_PINS];
-} NRF51GPIOState;
+ qemu_irq detect;
+};
#endif
diff --git a/include/hw/gpio/pca9552.h b/include/hw/gpio/pca9552.h
new file mode 100644
index 0000000000..c36525f0c3
--- /dev/null
+++ b/include/hw/gpio/pca9552.h
@@ -0,0 +1,38 @@
+/*
+ * PCA9552 I2C LED blinker
+ *
+ * Copyright (c) 2017-2018, IBM Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+#ifndef PCA9552_H
+#define PCA9552_H
+
+#include "hw/i2c/i2c.h"
+#include "qom/object.h"
+
+#define TYPE_PCA9552 "pca9552"
+#define TYPE_PCA955X "pca955x"
+typedef struct PCA955xState PCA955xState;
+DECLARE_INSTANCE_CHECKER(PCA955xState, PCA955X,
+ TYPE_PCA955X)
+
+#define PCA955X_NR_REGS 10
+#define PCA955X_PIN_COUNT_MAX 16
+
+struct PCA955xState {
+ /*< private >*/
+ I2CSlave i2c;
+ /*< public >*/
+
+ uint8_t len;
+ uint8_t pointer;
+
+ uint8_t regs[PCA955X_NR_REGS];
+ qemu_irq gpio_out[PCA955X_PIN_COUNT_MAX];
+ uint8_t ext_state[PCA955X_PIN_COUNT_MAX];
+ char *description; /* For debugging purpose only */
+};
+
+#endif
diff --git a/include/hw/misc/pca9552_regs.h b/include/hw/gpio/pca9552_regs.h
index d8051cfbd6..d8051cfbd6 100644
--- a/include/hw/misc/pca9552_regs.h
+++ b/include/hw/gpio/pca9552_regs.h
diff --git a/include/hw/gpio/pca9554.h b/include/hw/gpio/pca9554.h
new file mode 100644
index 0000000000..54bfc4c4c7
--- /dev/null
+++ b/include/hw/gpio/pca9554.h
@@ -0,0 +1,36 @@
+/*
+ * PCA9554 I/O port
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef PCA9554_H
+#define PCA9554_H
+
+#include "hw/i2c/i2c.h"
+#include "qom/object.h"
+
+#define TYPE_PCA9554 "pca9554"
+typedef struct PCA9554State PCA9554State;
+DECLARE_INSTANCE_CHECKER(PCA9554State, PCA9554,
+ TYPE_PCA9554)
+
+#define PCA9554_NR_REGS 4
+#define PCA9554_PIN_COUNT 8
+
+struct PCA9554State {
+ /*< private >*/
+ I2CSlave i2c;
+ /*< public >*/
+
+ uint8_t len;
+ uint8_t pointer;
+
+ uint8_t regs[PCA9554_NR_REGS];
+ qemu_irq gpio_out[PCA9554_PIN_COUNT];
+ uint8_t ext_state[PCA9554_PIN_COUNT];
+ char *description; /* For debugging purpose only */
+};
+
+#endif
diff --git a/include/hw/gpio/pca9554_regs.h b/include/hw/gpio/pca9554_regs.h
new file mode 100644
index 0000000000..602c4a90e0
--- /dev/null
+++ b/include/hw/gpio/pca9554_regs.h
@@ -0,0 +1,19 @@
+/*
+ * PCA9554 I/O port registers
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef PCA9554_REGS_H
+#define PCA9554_REGS_H
+
+/*
+ * Bits [0:1] are used to address a specific register.
+ */
+#define PCA9554_INPUT 0 /* read only input register */
+#define PCA9554_OUTPUT 1 /* read/write pin output state */
+#define PCA9554_POLARITY 2 /* Set polarity of input register */
+#define PCA9554_CONFIG 3 /* Set pins as inputs or outputs */
+
+#endif
diff --git a/include/hw/gpio/pcf8574.h b/include/hw/gpio/pcf8574.h
new file mode 100644
index 0000000000..3291d7dbbc
--- /dev/null
+++ b/include/hw/gpio/pcf8574.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * NXP PCF8574 8-port I2C GPIO expansion chip.
+ *
+ * Copyright (c) 2024 KNS Group (YADRO).
+ * Written by Dmitrii Sharikhin <d.sharikhin@yadro.com>
+ */
+
+#ifndef _HW_GPIO_PCF8574
+#define _HW_GPIO_PCF8574
+
+#define TYPE_PCF8574 "pcf8574"
+
+#endif /* _HW_GPIO_PCF8574 */
diff --git a/include/hw/gpio/sifive_gpio.h b/include/hw/gpio/sifive_gpio.h
new file mode 100644
index 0000000000..fc53785c9d
--- /dev/null
+++ b/include/hw/gpio/sifive_gpio.h
@@ -0,0 +1,79 @@
+/*
+ * SiFive System-on-Chip general purpose input/output register definition
+ *
+ * Copyright 2019 AdaCore
+ *
+ * Based on nrf51_gpio.c:
+ *
+ * Copyright 2018 Steffen Görtz <contrib@steffen-goertz.de>
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef SIFIVE_GPIO_H
+#define SIFIVE_GPIO_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_SIFIVE_GPIO "sifive_soc.gpio"
+typedef struct SIFIVEGPIOState SIFIVEGPIOState;
+DECLARE_INSTANCE_CHECKER(SIFIVEGPIOState, SIFIVE_GPIO,
+ TYPE_SIFIVE_GPIO)
+
+#define SIFIVE_GPIO_PINS 32
+
+#define SIFIVE_GPIO_SIZE 0x100
+
+#define SIFIVE_GPIO_REG_VALUE 0x000
+#define SIFIVE_GPIO_REG_INPUT_EN 0x004
+#define SIFIVE_GPIO_REG_OUTPUT_EN 0x008
+#define SIFIVE_GPIO_REG_PORT 0x00C
+#define SIFIVE_GPIO_REG_PUE 0x010
+#define SIFIVE_GPIO_REG_DS 0x014
+#define SIFIVE_GPIO_REG_RISE_IE 0x018
+#define SIFIVE_GPIO_REG_RISE_IP 0x01C
+#define SIFIVE_GPIO_REG_FALL_IE 0x020
+#define SIFIVE_GPIO_REG_FALL_IP 0x024
+#define SIFIVE_GPIO_REG_HIGH_IE 0x028
+#define SIFIVE_GPIO_REG_HIGH_IP 0x02C
+#define SIFIVE_GPIO_REG_LOW_IE 0x030
+#define SIFIVE_GPIO_REG_LOW_IP 0x034
+#define SIFIVE_GPIO_REG_IOF_EN 0x038
+#define SIFIVE_GPIO_REG_IOF_SEL 0x03C
+#define SIFIVE_GPIO_REG_OUT_XOR 0x040
+
+struct SIFIVEGPIOState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ qemu_irq irq[SIFIVE_GPIO_PINS];
+ qemu_irq output[SIFIVE_GPIO_PINS];
+
+ uint32_t value; /* Actual value of the pin */
+ uint32_t input_en;
+ uint32_t output_en;
+ uint32_t port; /* Pin value requested by the user */
+ uint32_t pue;
+ uint32_t ds;
+ uint32_t rise_ie;
+ uint32_t rise_ip;
+ uint32_t fall_ie;
+ uint32_t fall_ip;
+ uint32_t high_ie;
+ uint32_t high_ip;
+ uint32_t low_ie;
+ uint32_t low_ip;
+ uint32_t iof_en;
+ uint32_t iof_sel;
+ uint32_t out_xor;
+ uint32_t in;
+ uint32_t in_mask;
+
+ /* config */
+ uint32_t ngpio;
+};
+
+#endif /* SIFIVE_GPIO_H */
diff --git a/include/hw/gpio/stm32l4x5_gpio.h b/include/hw/gpio/stm32l4x5_gpio.h
new file mode 100644
index 0000000000..878bd19fc9
--- /dev/null
+++ b/include/hw/gpio/stm32l4x5_gpio.h
@@ -0,0 +1,71 @@
+/*
+ * STM32L4x5 GPIO (General Purpose Input/Output)
+ *
+ * Copyright (c) 2024 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2024 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_STM32L4X5_GPIO_H
+#define HW_STM32L4X5_GPIO_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_GPIO "stm32l4x5-gpio"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5GpioState, STM32L4X5_GPIO)
+
+#define NUM_GPIOS 8
+#define GPIO_NUM_PINS 16
+
+struct Stm32l4x5GpioState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ /* GPIO registers */
+ uint32_t moder;
+ uint32_t otyper;
+ uint32_t ospeedr;
+ uint32_t pupdr;
+ uint32_t idr;
+ uint32_t odr;
+ uint32_t lckr;
+ uint32_t afrl;
+ uint32_t afrh;
+ uint32_t ascr;
+
+ /* GPIO registers reset values */
+ uint32_t moder_reset;
+ uint32_t ospeedr_reset;
+ uint32_t pupdr_reset;
+
+ /*
+ * External driving of pins.
+ * The pins can be set externally through the device's
+ * anonymous input GPIO lines under certain conditions.
+ * The pin must not be in push-pull output mode,
+ * and can't be set high in open-drain mode.
+ * Pins driven externally and configured to
+ * output mode will in general be "disconnected"
+ * (see `get_gpio_pinmask_to_disconnect()`)
+ */
+ uint16_t disconnected_pins;
+ uint16_t pins_connected_high;
+
+ char *name;
+ Clock *clk;
+ qemu_irq pin[GPIO_NUM_PINS];
+};
+
+#endif
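
One possible reading of the disconnection rule described in the comment above, written as an illustrative helper. This is not the device's get_gpio_pinmask_to_disconnect(); the register encodings assumed here are the usual STM32L4 ones (MODER 0b01 = output, OTYPER 1 = open drain).

    static uint16_t sketch_pins_to_disconnect(const Stm32l4x5GpioState *s)
    {
        uint16_t mask = 0;

        for (unsigned pin = 0; pin < GPIO_NUM_PINS; pin++) {
            bool output = ((s->moder >> (2 * pin)) & 0x3) == 0x1;
            bool open_drain = (s->otyper >> pin) & 0x1;
            bool driven_high = (s->pins_connected_high >> pin) & 0x1;

            /* push-pull output, or open-drain output driven high externally */
            if (output && (!open_drain || driven_high)) {
                mask |= 1 << pin;
            }
        }
        return mask;
    }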
diff --git a/include/hw/hotplug.h b/include/hw/hotplug.h
index 6321e292fd..a9840ed485 100644
--- a/include/hw/hotplug.h
+++ b/include/hw/hotplug.h
@@ -16,10 +16,9 @@
#define TYPE_HOTPLUG_HANDLER "hotplug-handler"
-#define HOTPLUG_HANDLER_CLASS(klass) \
- OBJECT_CLASS_CHECK(HotplugHandlerClass, (klass), TYPE_HOTPLUG_HANDLER)
-#define HOTPLUG_HANDLER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(HotplugHandlerClass, (obj), TYPE_HOTPLUG_HANDLER)
+typedef struct HotplugHandlerClass HotplugHandlerClass;
+DECLARE_CLASS_CHECKERS(HotplugHandlerClass, HOTPLUG_HANDLER,
+ TYPE_HOTPLUG_HANDLER)
#define HOTPLUG_HANDLER(obj) \
INTERFACE_CHECK(HotplugHandler, (obj), TYPE_HOTPLUG_HANDLER)
@@ -49,8 +48,9 @@ typedef void (*hotplug_fn)(HotplugHandler *plug_handler,
* @unplug: unplug callback.
* Used for device removal with devices that implement
* asynchronous and synchronous (surprise) removal.
+ * @is_hotpluggable_bus: called to check if bus/its parent allow hotplug on bus
*/
-typedef struct HotplugHandlerClass {
+struct HotplugHandlerClass {
/* <private> */
InterfaceClass parent;
@@ -59,7 +59,8 @@ typedef struct HotplugHandlerClass {
hotplug_fn plug;
hotplug_fn unplug_request;
hotplug_fn unplug;
-} HotplugHandlerClass;
+ bool (*is_hotpluggable_bus)(HotplugHandler *plug_handler, BusState *bus);
+};
/**
* hotplug_handler_plug:
diff --git a/include/hw/hw.h b/include/hw/hw.h
index ab4950c312..045c1c8b09 100644
--- a/include/hw/hw.h
+++ b/include/hw/hw.h
@@ -1,4 +1,3 @@
-/* Declarations for use by hardware emulation. */
#ifndef QEMU_HW_H
#define QEMU_HW_H
@@ -6,15 +5,6 @@
#error Cannot include hw/hw.h from user emulation
#endif
-#include "exec/cpu-common.h"
-#include "qom/object.h"
-#include "exec/memory.h"
-#include "hw/irq.h"
-#include "migration/vmstate.h"
-#include "migration/qemu-file-types.h"
-#include "qemu/module.h"
-#include "sysemu/reset.h"
-
-void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+G_NORETURN void hw_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#endif
diff --git a/include/hw/hyperv/dynmem-proto.h b/include/hw/hyperv/dynmem-proto.h
new file mode 100644
index 0000000000..68b8b606f2
--- /dev/null
+++ b/include/hw/hyperv/dynmem-proto.h
@@ -0,0 +1,430 @@
+#ifndef HW_HYPERV_DYNMEM_PROTO_H
+#define HW_HYPERV_DYNMEM_PROTO_H
+
+/*
+ * Hyper-V Dynamic Memory Protocol definitions
+ *
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * Based on drivers/hv/hv_balloon.c from Linux kernel:
+ * Copyright (c) 2012, Microsoft Corporation.
+ *
+ * Author: K. Y. Srinivasan <kys@microsoft.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+/*
+ * Protocol versions. The low word is the minor version, the high word the major
+ * version.
+ *
+ * History:
+ * Initial version 1.0
+ * Changed to 0.1 on 2009/03/25
+ * Changes to 0.2 on 2009/05/14
+ * Changes to 0.3 on 2009/12/03
+ * Changed to 1.0 on 2011/04/05
+ * Changed to 2.0 on 2019/12/10
+ */
+
+#define DYNMEM_MAKE_VERSION(Major, Minor) ((uint32_t)(((Major) << 16) | (Minor)))
+#define DYNMEM_MAJOR_VERSION(Version) ((uint32_t)(Version) >> 16)
+#define DYNMEM_MINOR_VERSION(Version) ((uint32_t)(Version) & 0xff)
+
+enum {
+ DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
+ DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
+ DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
+
+ DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
+ DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
+ DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
+
+ DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
+};
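
An illustrative check of the version encoding (these build-time asserts are not part of the protocol header; QEMU_BUILD_BUG_ON is assumed to come from "qemu/compiler.h"):

    /* DYNMEM_MAKE_VERSION(2, 0) == 0x00020000: major in the high word, minor in the low */
    QEMU_BUILD_BUG_ON(DYNMEM_PROTOCOL_VERSION_3 != 0x00020000);
    QEMU_BUILD_BUG_ON(DYNMEM_MAJOR_VERSION(DYNMEM_PROTOCOL_VERSION_3) != 2);
    QEMU_BUILD_BUG_ON(DYNMEM_MINOR_VERSION(DYNMEM_PROTOCOL_VERSION_3) != 0);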
+
+
+
+/*
+ * Message Types
+ */
+
+enum dm_message_type {
+ /*
+ * Version 0.3
+ */
+ DM_ERROR = 0,
+ DM_VERSION_REQUEST = 1,
+ DM_VERSION_RESPONSE = 2,
+ DM_CAPABILITIES_REPORT = 3,
+ DM_CAPABILITIES_RESPONSE = 4,
+ DM_STATUS_REPORT = 5,
+ DM_BALLOON_REQUEST = 6,
+ DM_BALLOON_RESPONSE = 7,
+ DM_UNBALLOON_REQUEST = 8,
+ DM_UNBALLOON_RESPONSE = 9,
+ DM_MEM_HOT_ADD_REQUEST = 10,
+ DM_MEM_HOT_ADD_RESPONSE = 11,
+ DM_VERSION_03_MAX = 11,
+ /*
+ * Version 1.0.
+ */
+ DM_INFO_MESSAGE = 12,
+ DM_VERSION_1_MAX = 12,
+
+ /*
+ * Version 2.0
+ */
+ DM_MEM_HOT_REMOVE_REQUEST = 13,
+ DM_MEM_HOT_REMOVE_RESPONSE = 14
+};
+
+
+/*
+ * Structures defining the dynamic memory management
+ * protocol.
+ */
+
+union dm_version {
+ struct {
+ uint16_t minor_version;
+ uint16_t major_version;
+ };
+ uint32_t version;
+} QEMU_PACKED;
+
+
+union dm_caps {
+ struct {
+ uint64_t balloon:1;
+ uint64_t hot_add:1;
+ /*
+ * To support guests that may have alignment
+ * limitations on hot-add, the guest can specify
+ * its alignment requirements; a value of n
+ * represents an alignment of 2^n in megabytes.
+ */
+ uint64_t hot_add_alignment:4;
+ uint64_t hot_remove:1;
+ uint64_t reservedz:57;
+ } cap_bits;
+ uint64_t caps;
+} QEMU_PACKED;
+
+union dm_mem_page_range {
+ struct {
+ /*
+ * The PFN number of the first page in the range.
+ * 40 bits is the architectural limit of a PFN
+ * number for AMD64.
+ */
+ uint64_t start_page:40;
+ /*
+ * The number of pages in the range.
+ */
+ uint64_t page_cnt:24;
+ } finfo;
+ uint64_t page_range;
+} QEMU_PACKED;
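
An illustrative encoding of a page range using the bitfields above (the helper and its values are hypothetical): 2 MiB of guest memory starting at GPA 0x1'0000'0000, with 4 KiB pages.

    static inline union dm_mem_page_range dm_make_range_example(void)
    {
        union dm_mem_page_range r = { 0 };

        r.finfo.start_page = 0x100000000ULL >> 12;  /* PFN 0x100000, fits in 40 bits */
        r.finfo.page_cnt   = 512;                   /* 512 * 4 KiB = 2 MiB, fits in 24 bits */
        return r;
    }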
+
+
+
+/*
+ * The header for all dynamic memory messages:
+ *
+ * type: Type of the message.
+ * size: Size of the message in bytes; including the header.
+ * trans_id: The guest is responsible for manufacturing this ID.
+ */
+
+struct dm_header {
+ uint16_t type;
+ uint16_t size;
+ uint32_t trans_id;
+} QEMU_PACKED;
+
+/*
+ * A generic message format for dynamic memory.
+ * Specific message formats are defined later in the file.
+ */
+
+struct dm_message {
+ struct dm_header hdr;
+ uint8_t data[]; /* enclosed message */
+} QEMU_PACKED;
+
+
+/*
+ * Specific message types supporting the dynamic memory protocol.
+ */
+
+/*
+ * Version negotiation message. Sent from the guest to the host.
+ * The guest is free to try different versions until the host
+ * accepts the version.
+ *
+ * dm_version: The protocol version requested.
+ * is_last_attempt: If TRUE, this is the last version the guest will request.
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_request {
+ struct dm_header hdr;
+ union dm_version version;
+ uint32_t is_last_attempt:1;
+ uint32_t reservedz:31;
+} QEMU_PACKED;
+
+/*
+ * Version response message; Host to Guest and indicates
+ * if the host has accepted the version sent by the guest.
+ *
+ * is_accepted: If TRUE, host has accepted the version and the guest
+ * should proceed to the next stage of the protocol. FALSE indicates that
+ * guest should re-try with a different version.
+ *
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_response {
+ struct dm_header hdr;
+ uint64_t is_accepted:1;
+ uint64_t reservedz:63;
+} QEMU_PACKED;
+
+/*
+ * Message reporting capabilities. This is sent from the guest to the
+ * host.
+ */
+
+struct dm_capabilities {
+ struct dm_header hdr;
+ union dm_caps caps;
+ uint64_t min_page_cnt;
+ uint64_t max_page_number;
+} QEMU_PACKED;
+
+/*
+ * Response to the capabilities message. This is sent from the host to the
+ * guest. This message notifies if the host has accepted the guest's
+ * capabilities. If the host has not accepted, the guest must shutdown
+ * the service.
+ *
+ * is_accepted: Indicates if the host has accepted guest's capabilities.
+ * reservedz: Must be 0.
+ */
+
+struct dm_capabilities_resp_msg {
+ struct dm_header hdr;
+ uint64_t is_accepted:1;
+ uint64_t hot_remove:1;
+ uint64_t suppress_pressure_reports:1;
+ uint64_t reservedz:61;
+} QEMU_PACKED;
+
+/*
+ * This message is used to report memory pressure from the guest.
+ * This message is not part of any transaction and there is no
+ * response to this message.
+ *
+ * num_avail: Available memory in pages.
+ * num_committed: Committed memory in pages.
+ * page_file_size: The accumulated size of all page files
+ * in the system in pages.
+ * zero_free: The number of zero and free pages.
+ * page_file_writes: The writes to the page file in pages.
+ * io_diff: An indicator of file cache efficiency or page file activity,
+ * calculated as File Cache Page Fault Count - Page Read Count.
+ * This value is in pages.
+ *
+ * Some of these metrics are Windows specific and fortunately
+ * the algorithm on the host side that computes the guest memory
+ * pressure only uses num_committed value.
+ */
+
+struct dm_status {
+ struct dm_header hdr;
+ uint64_t num_avail;
+ uint64_t num_committed;
+ uint64_t page_file_size;
+ uint64_t zero_free;
+ uint32_t page_file_writes;
+ uint32_t io_diff;
+} QEMU_PACKED;
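
A minimal sketch of a guest-side status report with hypothetical values (assumes <string.h> for memset); as the comment above notes, the host-side pressure algorithm only consumes num_committed.

    static inline void dm_status_example(struct dm_status *st, uint32_t trans_id)
    {
        memset(st, 0, sizeof(*st));       /* unused metrics stay zero */
        st->hdr.type = DM_STATUS_REPORT;
        st->hdr.size = sizeof(*st);
        st->hdr.trans_id = trans_id;
        st->num_committed = 262144;       /* 1 GiB committed, in 4 KiB pages */
    }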
+
+
+/*
+ * Message to ask the guest to allocate memory - balloon up message.
+ * This message is sent from the host to the guest. The guest may not be
+ * able to allocate as much memory as requested.
+ *
+ * num_pages: number of pages to allocate.
+ */
+
+struct dm_balloon {
+ struct dm_header hdr;
+ uint32_t num_pages;
+ uint32_t reservedz;
+} QEMU_PACKED;
+
+
+/*
+ * Balloon response message; this message is sent from the guest
+ * to the host in response to the balloon message.
+ *
+ * reservedz: Reserved; must be set to zero.
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * if TRUE there will be at least one more message from the guest.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_balloon_response {
+ struct dm_header hdr;
+ uint32_t reservedz;
+ uint32_t more_pages:1;
+ uint32_t range_count:31;
+ union dm_mem_page_range range_array[];
+} QEMU_PACKED;
+
+/*
+ * Un-balloon message; this message is sent from the host
+ * to the guest to give guest more memory.
+ *
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * if TRUE there will be at least one more message from the guest.
+ *
+ * reservedz: Reserved; must be set to zero.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_unballoon_request {
+ struct dm_header hdr;
+ uint32_t more_pages:1;
+ uint32_t reservedz:31;
+ uint32_t range_count;
+ union dm_mem_page_range range_array[];
+} QEMU_PACKED;
+
+/*
+ * Un-balloon response message; this message is sent from the guest
+ * to the host in response to an unballoon request.
+ *
+ */
+
+struct dm_unballoon_response {
+ struct dm_header hdr;
+} QEMU_PACKED;
+
+
+/*
+ * Hot add request message. Message sent from the host to the guest.
+ *
+ * range: Memory range to hot add.
+ * region: Explicit hot add memory region for guest to use. Optional.
+ *
+ */
+
+struct dm_hot_add {
+ struct dm_header hdr;
+ union dm_mem_page_range range;
+} QEMU_PACKED;
+
+struct dm_hot_add_with_region {
+ struct dm_header hdr;
+ union dm_mem_page_range range;
+ union dm_mem_page_range region;
+} QEMU_PACKED;
+
+/*
+ * Hot add response message.
+ * This message is sent by the guest to report the status of a hot add request.
+ * If page_count is less than the requested page count, then the host should
+ * assume all further hot add requests will fail, since this indicates that
+ * the guest has hit an upper physical memory barrier.
+ *
+ * Hot adds may also fail due to low resources; in this case, the guest must
+ * not complete this message until the hot add can succeed, and the host must
+ * not send a new hot add request until the response is sent.
+ * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
+ * times, it fails the request.
+ *
+ *
+ * page_count: number of pages that were successfully hot added.
+ *
+ * result: result of the operation 1: success, 0: failure.
+ *
+ */
+
+struct dm_hot_add_response {
+ struct dm_header hdr;
+ uint32_t page_count;
+ uint32_t result;
+} QEMU_PACKED;
+
+struct dm_hot_remove {
+ struct dm_header hdr;
+ uint32_t virtual_node;
+ uint32_t page_count;
+ uint32_t qos_flags;
+ uint32_t reservedZ;
+} QEMU_PACKED;
+
+struct dm_hot_remove_response {
+ struct dm_header hdr;
+ uint32_t result;
+ uint32_t range_count;
+ uint64_t more_pages:1;
+ uint64_t reservedz:63;
+ union dm_mem_page_range range_array[];
+} QEMU_PACKED;
+
+#define DM_REMOVE_QOS_LARGE (1 << 0)
+#define DM_REMOVE_QOS_LOCAL (1 << 1)
+#define DM_REMOVE_QOS_MASK (0x3)
+
+/*
+ * Types of information sent from host to the guest.
+ */
+
+enum dm_info_type {
+ INFO_TYPE_MAX_PAGE_CNT = 0,
+ MAX_INFO_TYPE
+};
+
+
+/*
+ * Header for the information message.
+ */
+
+struct dm_info_header {
+ enum dm_info_type type;
+ uint32_t data_size;
+ uint8_t data[];
+} QEMU_PACKED;
+
+/*
+ * This message is sent from the host to the guest to pass
+ * some relevant information (win8 addition).
+ *
+ * reserved: not used.
+ * info_size: size of the information blob.
+ * info: information blob.
+ */
+
+struct dm_info_msg {
+ struct dm_header hdr;
+ uint32_t reserved;
+ uint32_t info_size;
+ uint8_t info[];
+};
+
+#endif
diff --git a/include/hw/hyperv/hv-balloon.h b/include/hw/hyperv/hv-balloon.h
new file mode 100644
index 0000000000..c1efe70fc2
--- /dev/null
+++ b/include/hw/hyperv/hv-balloon.h
@@ -0,0 +1,18 @@
+/*
+ * QEMU Hyper-V Dynamic Memory Protocol driver
+ *
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HV_BALLOON_H
+#define HW_HV_BALLOON_H
+
+#include "qom/object.h"
+
+#define TYPE_HV_BALLOON "hv-balloon"
+OBJECT_DECLARE_SIMPLE_TYPE(HvBalloon, HV_BALLOON)
+
+#endif
diff --git a/include/hw/hyperv/hyperv-proto.h b/include/hw/hyperv/hyperv-proto.h
index 21dc28aee9..4a2297307b 100644
--- a/include/hw/hyperv/hyperv-proto.h
+++ b/include/hw/hyperv/hyperv-proto.h
@@ -24,12 +24,17 @@
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_NOT_ACKNOWLEDGED 20
+#define HV_STATUS_NO_DATA 27
/*
* Hypercall numbers
*/
#define HV_POST_MESSAGE 0x005c
#define HV_SIGNAL_EVENT 0x005d
+#define HV_POST_DEBUG_DATA 0x0069
+#define HV_RETRIEVE_DEBUG_DATA 0x006a
+#define HV_RESET_DEBUG_SESSION 0x006b
#define HV_HYPERCALL_FAST (1u << 16)
/*
@@ -127,4 +132,51 @@ struct hyperv_event_flags_page {
struct hyperv_event_flags slot[HV_SINT_COUNT];
};
+/*
+ * Kernel debugger structures
+ */
+
+/* Options flags for hyperv_reset_debug_session */
+#define HV_DEBUG_PURGE_INCOMING_DATA 0x00000001
+#define HV_DEBUG_PURGE_OUTGOING_DATA 0x00000002
+struct hyperv_reset_debug_session_input {
+ uint32_t options;
+} __attribute__ ((__packed__));
+
+struct hyperv_reset_debug_session_output {
+ uint32_t host_ip;
+ uint32_t target_ip;
+ uint16_t host_port;
+ uint16_t target_port;
+ uint8_t host_mac[6];
+ uint8_t target_mac[6];
+} __attribute__ ((__packed__));
+
+/* Options for hyperv_post_debug_data */
+#define HV_DEBUG_POST_LOOP 0x00000001
+
+struct hyperv_post_debug_data_input {
+ uint32_t count;
+ uint32_t options;
+ /*uint8_t data[HV_HYP_PAGE_SIZE - 2 * sizeof(uint32_t)];*/
+} __attribute__ ((__packed__));
+
+struct hyperv_post_debug_data_output {
+ uint32_t pending_count;
+} __attribute__ ((__packed__));
+
+/* Options for hyperv_retrieve_debug_data */
+#define HV_DEBUG_RETRIEVE_LOOP 0x00000001
+#define HV_DEBUG_RETRIEVE_TEST_ACTIVITY 0x00000002
+
+struct hyperv_retrieve_debug_data_input {
+ uint32_t count;
+ uint32_t options;
+ uint64_t timeout;
+} __attribute__ ((__packed__));
+
+struct hyperv_retrieve_debug_data_output {
+ uint32_t retrieved_count;
+ uint32_t remaining_count;
+} __attribute__ ((__packed__));
#endif
diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h
index 597381cb01..d717b4e13d 100644
--- a/include/hw/hyperv/hyperv.h
+++ b/include/hw/hyperv/hyperv.h
@@ -79,5 +79,68 @@ void hyperv_synic_add(CPUState *cs);
void hyperv_synic_reset(CPUState *cs);
void hyperv_synic_update(CPUState *cs, bool enable,
hwaddr msg_page_addr, hwaddr event_page_addr);
+bool hyperv_is_synic_enabled(void);
+
+/*
+ * Process HVCALL_RESET_DEBUG_SESSION hypercall.
+ */
+uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa);
+/*
+ * Process HVCALL_RETRIEVE_DEBUG_DATA hypercall.
+ */
+uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
+ bool fast);
+/*
+ * Process HVCALL_POST_DEBUG_DATA hypercall.
+ */
+uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast);
+
+uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count);
+uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count);
+void hyperv_syndbg_set_pending_page(uint64_t ingpa);
+uint64_t hyperv_syndbg_query_options(void);
+
+typedef enum HvSynthDbgMsgType {
+ HV_SYNDBG_MSG_CONNECTION_INFO,
+ HV_SYNDBG_MSG_SEND,
+ HV_SYNDBG_MSG_RECV,
+ HV_SYNDBG_MSG_SET_PENDING_PAGE,
+ HV_SYNDBG_MSG_QUERY_OPTIONS
+} HvDbgSynthMsgType;
+
+typedef struct HvSynDbgMsg {
+ HvDbgSynthMsgType type;
+ union {
+ struct {
+ uint32_t host_ip;
+ uint16_t host_port;
+ } connection_info;
+ struct {
+ uint64_t buf_gpa;
+ uint32_t count;
+ uint32_t pending_count;
+ bool is_raw;
+ } send;
+ struct {
+ uint64_t buf_gpa;
+ uint32_t count;
+ uint32_t options;
+ uint64_t timeout;
+ uint32_t retrieved_count;
+ bool is_raw;
+ } recv;
+ struct {
+ uint64_t buf_gpa;
+ } pending_page;
+ struct {
+ uint64_t options;
+ } query_options;
+ } u;
+} HvSynDbgMsg;
+typedef uint16_t (*HvSynDbgHandler)(void *context, HvSynDbgMsg *msg);
+void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context);
+
+bool hyperv_are_vmbus_recommended_features_enabled(void);
+void hyperv_set_vmbus_recommended_features_enabled(void);
#endif
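
A hedged sketch of how a backend might plug into the SynDbg handler hook declared above. The handler body and 'my_state' are hypothetical, and HV_STATUS_SUCCESS is assumed to be defined earlier in hyperv-proto.h.

    static uint16_t example_syndbg_handler(void *context, HvSynDbgMsg *msg)
    {
        switch (msg->type) {
        case HV_SYNDBG_MSG_RECV:
            /* Nothing buffered in this sketch */
            msg->u.recv.retrieved_count = 0;
            return HV_STATUS_NO_DATA;
        default:
            return HV_STATUS_SUCCESS;  /* accept and ignore everything else */
        }
    }

    static void example_syndbg_register(void *my_state)
    {
        hyperv_set_syndbg_handler(example_syndbg_handler, my_state);
    }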
diff --git a/include/hw/hyperv/vmbus-bridge.h b/include/hw/hyperv/vmbus-bridge.h
new file mode 100644
index 0000000000..1e5419574e
--- /dev/null
+++ b/include/hw/hyperv/vmbus-bridge.h
@@ -0,0 +1,34 @@
+/*
+ * QEMU Hyper-V VMBus root bridge
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HYPERV_VMBUS_BRIDGE_H
+#define HW_HYPERV_VMBUS_BRIDGE_H
+
+#include "hw/sysbus.h"
+#include "hw/hyperv/vmbus.h"
+#include "qom/object.h"
+
+#define TYPE_VMBUS_BRIDGE "vmbus-bridge"
+
+struct VMBusBridge {
+ SysBusDevice parent_obj;
+
+ uint8_t irq;
+
+ VMBus *bus;
+};
+
+OBJECT_DECLARE_SIMPLE_TYPE(VMBusBridge, VMBUS_BRIDGE)
+
+static inline VMBusBridge *vmbus_bridge_find(void)
+{
+ return VMBUS_BRIDGE(object_resolve_path_type("", TYPE_VMBUS_BRIDGE, NULL));
+}
+
+#endif
diff --git a/include/hw/hyperv/vmbus-proto.h b/include/hw/hyperv/vmbus-proto.h
new file mode 100644
index 0000000000..4628d3b323
--- /dev/null
+++ b/include/hw/hyperv/vmbus-proto.h
@@ -0,0 +1,222 @@
+/*
+ * QEMU Hyper-V VMBus support
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HYPERV_VMBUS_PROTO_H
+#define HW_HYPERV_VMBUS_PROTO_H
+
+#define VMBUS_VERSION_WS2008 ((0 << 16) | (13))
+#define VMBUS_VERSION_WIN7 ((1 << 16) | (1))
+#define VMBUS_VERSION_WIN8 ((2 << 16) | (4))
+#define VMBUS_VERSION_WIN8_1 ((3 << 16) | (0))
+#define VMBUS_VERSION_WIN10 ((4 << 16) | (0))
+#define VMBUS_VERSION_INVAL -1
+#define VMBUS_VERSION_CURRENT VMBUS_VERSION_WIN10
+
+#define VMBUS_MESSAGE_CONNECTION_ID 1
+#define VMBUS_EVENT_CONNECTION_ID 2
+#define VMBUS_MONITOR_CONNECTION_ID 3
+#define VMBUS_SINT 2
+
+#define VMBUS_MSG_INVALID 0
+#define VMBUS_MSG_OFFERCHANNEL 1
+#define VMBUS_MSG_RESCIND_CHANNELOFFER 2
+#define VMBUS_MSG_REQUESTOFFERS 3
+#define VMBUS_MSG_ALLOFFERS_DELIVERED 4
+#define VMBUS_MSG_OPENCHANNEL 5
+#define VMBUS_MSG_OPENCHANNEL_RESULT 6
+#define VMBUS_MSG_CLOSECHANNEL 7
+#define VMBUS_MSG_GPADL_HEADER 8
+#define VMBUS_MSG_GPADL_BODY 9
+#define VMBUS_MSG_GPADL_CREATED 10
+#define VMBUS_MSG_GPADL_TEARDOWN 11
+#define VMBUS_MSG_GPADL_TORNDOWN 12
+#define VMBUS_MSG_RELID_RELEASED 13
+#define VMBUS_MSG_INITIATE_CONTACT 14
+#define VMBUS_MSG_VERSION_RESPONSE 15
+#define VMBUS_MSG_UNLOAD 16
+#define VMBUS_MSG_UNLOAD_RESPONSE 17
+#define VMBUS_MSG_COUNT 18
+
+#define VMBUS_MESSAGE_SIZE_ALIGN sizeof(uint64_t)
+
+#define VMBUS_PACKET_INVALID 0x0
+#define VMBUS_PACKET_SYNCH 0x1
+#define VMBUS_PACKET_ADD_XFER_PAGESET 0x2
+#define VMBUS_PACKET_RM_XFER_PAGESET 0x3
+#define VMBUS_PACKET_ESTABLISH_GPADL 0x4
+#define VMBUS_PACKET_TEARDOWN_GPADL 0x5
+#define VMBUS_PACKET_DATA_INBAND 0x6
+#define VMBUS_PACKET_DATA_USING_XFER_PAGES 0x7
+#define VMBUS_PACKET_DATA_USING_GPADL 0x8
+#define VMBUS_PACKET_DATA_USING_GPA_DIRECT 0x9
+#define VMBUS_PACKET_CANCEL_REQUEST 0xa
+#define VMBUS_PACKET_COMP 0xb
+#define VMBUS_PACKET_DATA_USING_ADDITIONAL_PKT 0xc
+#define VMBUS_PACKET_ADDITIONAL_DATA 0xd
+
+#define VMBUS_CHANNEL_USER_DATA_SIZE 120
+
+#define VMBUS_OFFER_MONITOR_ALLOCATED 0x1
+#define VMBUS_OFFER_INTERRUPT_DEDICATED 0x1
+
+#define VMBUS_RING_BUFFER_FEAT_PENDING_SZ (1ul << 0)
+
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 0x1
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 0x2
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 0x4
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
+
+#define VMBUS_PACKET_FLAG_REQUEST_COMPLETION 1
+
+typedef struct vmbus_message_header {
+ uint32_t message_type;
+ uint32_t _padding;
+} vmbus_message_header;
+
+typedef struct vmbus_message_initiate_contact {
+ vmbus_message_header header;
+ uint32_t version_requested;
+ uint32_t target_vcpu;
+ uint64_t interrupt_page;
+ uint64_t monitor_page1;
+ uint64_t monitor_page2;
+} vmbus_message_initiate_contact;
+
+typedef struct vmbus_message_version_response {
+ vmbus_message_header header;
+ uint8_t version_supported;
+ uint8_t status;
+} vmbus_message_version_response;
+
+typedef struct vmbus_message_offer_channel {
+ vmbus_message_header header;
+ uint8_t type_uuid[16];
+ uint8_t instance_uuid[16];
+ uint64_t _reserved1;
+ uint64_t _reserved2;
+ uint16_t channel_flags;
+ uint16_t mmio_size_mb;
+ uint8_t user_data[VMBUS_CHANNEL_USER_DATA_SIZE];
+ uint16_t sub_channel_index;
+ uint16_t _reserved3;
+ uint32_t child_relid;
+ uint8_t monitor_id;
+ uint8_t monitor_flags;
+ uint16_t interrupt_flags;
+ uint32_t connection_id;
+} vmbus_message_offer_channel;
+
+typedef struct vmbus_message_rescind_channel_offer {
+ vmbus_message_header header;
+ uint32_t child_relid;
+} vmbus_message_rescind_channel_offer;
+
+typedef struct vmbus_gpa_range {
+ uint32_t byte_count;
+ uint32_t byte_offset;
+ uint64_t pfn_array[];
+} vmbus_gpa_range;
+
+typedef struct vmbus_message_gpadl_header {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t gpadl_id;
+ uint16_t range_buflen;
+ uint16_t rangecount;
+ vmbus_gpa_range range[];
+} QEMU_PACKED vmbus_message_gpadl_header;
+
+typedef struct vmbus_message_gpadl_body {
+ vmbus_message_header header;
+ uint32_t message_number;
+ uint32_t gpadl_id;
+ uint64_t pfn_array[];
+} vmbus_message_gpadl_body;
+
+typedef struct vmbus_message_gpadl_created {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t gpadl_id;
+ uint32_t status;
+} vmbus_message_gpadl_created;
+
+typedef struct vmbus_message_gpadl_teardown {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t gpadl_id;
+} vmbus_message_gpadl_teardown;
+
+typedef struct vmbus_message_gpadl_torndown {
+ vmbus_message_header header;
+ uint32_t gpadl_id;
+} vmbus_message_gpadl_torndown;
+
+typedef struct vmbus_message_open_channel {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t open_id;
+ uint32_t ring_buffer_gpadl_id;
+ uint32_t target_vp;
+ uint32_t ring_buffer_offset;
+ uint8_t user_data[VMBUS_CHANNEL_USER_DATA_SIZE];
+} vmbus_message_open_channel;
+
+typedef struct vmbus_message_open_result {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t open_id;
+ uint32_t status;
+} vmbus_message_open_result;
+
+typedef struct vmbus_message_close_channel {
+ vmbus_message_header header;
+ uint32_t child_relid;
+} vmbus_message_close_channel;
+
+typedef struct vmbus_ring_buffer {
+ uint32_t write_index;
+ uint32_t read_index;
+ uint32_t interrupt_mask;
+ uint32_t pending_send_sz;
+ uint32_t _reserved1[12];
+ uint32_t feature_bits;
+} vmbus_ring_buffer;
+
+typedef struct vmbus_packet_hdr {
+ uint16_t type;
+ uint16_t offset_qwords;
+ uint16_t len_qwords;
+ uint16_t flags;
+ uint64_t transaction_id;
+} vmbus_packet_hdr;
+
+typedef struct vmbus_pkt_gpa_direct {
+ uint32_t _reserved;
+ uint32_t rangecount;
+ vmbus_gpa_range range[];
+} vmbus_pkt_gpa_direct;
+
+typedef struct vmbus_xferpg_range {
+ uint32_t byte_count;
+ uint32_t byte_offset;
+} vmbus_xferpg_range;
+
+typedef struct vmbus_pkt_xferpg {
+ uint16_t buffer_id;
+ uint8_t sender_owns_set;
+ uint8_t _reserved;
+ uint32_t rangecount;
+ vmbus_xferpg_range range[];
+} vmbus_pkt_xferpg;
+
+#endif
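
A consumer of these definitions usually converts the *_qwords fields of vmbus_packet_hdr into byte counts; the scaling below is an assumption inferred from the field names and VMBUS_MESSAGE_SIZE_ALIGN, not something this header spells out.

    /* Sketch: byte-size helpers for a received packet header. */
    static inline size_t vmbus_pkt_offset_bytes(const vmbus_packet_hdr *hdr)
    {
        return (size_t)hdr->offset_qwords * sizeof(uint64_t);
    }

    static inline size_t vmbus_pkt_total_bytes(const vmbus_packet_hdr *hdr)
    {
        return (size_t)hdr->len_qwords * sizeof(uint64_t);
    }
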
diff --git a/include/hw/hyperv/vmbus.h b/include/hw/hyperv/vmbus.h
new file mode 100644
index 0000000000..5c505852f2
--- /dev/null
+++ b/include/hw/hyperv/vmbus.h
@@ -0,0 +1,226 @@
+/*
+ * QEMU Hyper-V VMBus
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HYPERV_VMBUS_H
+#define HW_HYPERV_VMBUS_H
+
+#include "sysemu/sysemu.h"
+#include "sysemu/dma.h"
+#include "hw/qdev-core.h"
+#include "migration/vmstate.h"
+#include "hw/hyperv/vmbus-proto.h"
+#include "qemu/uuid.h"
+#include "qom/object.h"
+
+#define TYPE_VMBUS_DEVICE "vmbus-dev"
+
+OBJECT_DECLARE_TYPE(VMBusDevice, VMBusDeviceClass,
+ VMBUS_DEVICE)
+
+#define TYPE_VMBUS "vmbus"
+OBJECT_DECLARE_SIMPLE_TYPE(VMBus, VMBUS)
+
+/*
+ * Object wrapping a GPADL -- GPA Descriptor List -- an array of guest physical
+ * pages, to be used for various buffers shared between the host and the guest.
+ */
+typedef struct VMBusGpadl VMBusGpadl;
+/*
+ * VMBus channel -- a pair of ring buffers for either direction, placed within
+ * one GPADL, and the associated notification means.
+ */
+typedef struct VMBusChannel VMBusChannel;
+/*
+ * Base class for VMBus devices. Includes one or more channels. Identified by
+ * class GUID and instance GUID.
+ */
+
+typedef void(*VMBusChannelNotifyCb)(struct VMBusChannel *chan);
+
+struct VMBusDeviceClass {
+ DeviceClass parent;
+
+ QemuUUID classid;
+ QemuUUID instanceid; /* Fixed UUID for singleton devices */
+ uint16_t channel_flags;
+ uint16_t mmio_size_mb;
+
+ /* Extensions to standard device callbacks */
+ void (*vmdev_realize)(VMBusDevice *vdev, Error **errp);
+ void (*vmdev_unrealize)(VMBusDevice *vdev);
+ void (*vmdev_reset)(VMBusDevice *vdev);
+ /*
+ * Calculate the number of channels based on the device properties. Called
+ * at realize time.
+ */
+ uint16_t (*num_channels)(VMBusDevice *vdev);
+ /*
+ * Device-specific actions to complete the otherwise successful process of
+ * opening a channel.
+ * Return 0 on success, -errno on failure.
+ */
+ int (*open_channel)(VMBusChannel *chan);
+ /*
+ * Device-specific actions to perform before closing a channel.
+ */
+ void (*close_channel)(VMBusChannel *chan);
+ /*
+ * Main device worker; invoked in response to notifications from either
+ * side, when there's work to do with the data in the channel ring buffers.
+ */
+ VMBusChannelNotifyCb chan_notify_cb;
+};
+
+struct VMBusDevice {
+ DeviceState parent;
+ QemuUUID instanceid;
+ uint16_t num_channels;
+ VMBusChannel *channels;
+ AddressSpace *dma_as;
+};
+
+extern const VMStateDescription vmstate_vmbus_dev;
+
+/*
+ * A unit of work parsed out of a message in the receive (i.e. guest->host)
+ * ring buffer of a channel. It's supposed to be subclassed (through
+ * embedding) by the specific devices.
+ */
+typedef struct VMBusChanReq {
+ VMBusChannel *chan;
+ uint16_t pkt_type;
+ uint32_t msglen;
+ void *msg;
+ uint64_t transaction_id;
+ bool need_comp;
+ QEMUSGList sgl;
+} VMBusChanReq;
+
+VMBusDevice *vmbus_channel_device(VMBusChannel *chan);
+VMBusChannel *vmbus_device_channel(VMBusDevice *dev, uint32_t chan_idx);
+uint32_t vmbus_channel_idx(VMBusChannel *chan);
+bool vmbus_channel_is_open(VMBusChannel *chan);
+
+/*
+ * Notify (on guest's behalf) the host side of the channel that there's data in
+ * the ringbuffer to process.
+ */
+void vmbus_channel_notify_host(VMBusChannel *chan);
+
+/*
+ * Reserve space for a packet in the send (i.e. host->guest) ringbuffer. If
+ * there isn't enough room, indicate that to the guest, to be notified when it
+ * becomes available.
+ * Return 0 on success, negative errno on failure.
+ * The ringbuffer indices are NOT updated, but the requested space indicator may be.
+ */
+int vmbus_channel_reserve(VMBusChannel *chan,
+ uint32_t desclen, uint32_t msglen);
+
+/*
+ * Send a packet to the guest. The space for the packet MUST be reserved
+ * first.
+ * Return total number of bytes placed in the send ringbuffer on success,
+ * negative errno on failure.
+ * The ringbuffer indices are updated on success, and the guest is signaled if
+ * needed.
+ */
+ssize_t vmbus_channel_send(VMBusChannel *chan, uint16_t pkt_type,
+ void *desc, uint32_t desclen,
+ void *msg, uint32_t msglen,
+ bool need_comp, uint64_t transaction_id);
+
+/*
+ * Prepare to fetch a batch of packets from the receive ring buffer.
+ * Return 0 on success, negative errno on failure.
+ */
+int vmbus_channel_recv_start(VMBusChannel *chan);
+
+/*
+ * Shortcut for a common case of sending a simple completion packet with no
+ * auxiliary descriptors.
+ */
+ssize_t vmbus_channel_send_completion(VMBusChanReq *req,
+ void *msg, uint32_t msglen);
+
+/*
+ * Peek at the receive (i.e. guest->host) ring buffer and extract a unit of
+ * work (a device-specific subclass of VMBusChanReq) from a packet if there's
+ * one.
+ * Return an allocated buffer containing the request of @size, with a filled-in
+ * VMBusChanReq at the beginning followed by the message payload, or NULL on
+ * failure.
+ * The ringbuffer indices are NOT updated, nor is the private copy of the read
+ * index.
+ */
+void *vmbus_channel_recv_peek(VMBusChannel *chan, uint32_t size);
+
+/*
+ * Update the private copy of the read index once the preceding peek is deemed
+ * successful.
+ * The ringbuffer indices are NOT updated.
+ */
+void vmbus_channel_recv_pop(VMBusChannel *chan);
+
+/*
+ * Propagate the private copy of the read index into the receive ring buffer,
+ * and thus complete the reception of a series of packets. Notify guest if
+ * needed.
+ * Return the number of bytes popped off the receive ring buffer by the
+ * preceding recv_peek/recv_pop calls on success, negative errno on failure.
+ */
+ssize_t vmbus_channel_recv_done(VMBusChannel *chan);
+
+/*
+ * Free the request allocated by vmbus_channel_recv_peek, together with its
+ * fields.
+ */
+void vmbus_free_req(void *req);
+
+/*
+ * Find and reference a GPADL by @gpadl_id.
+ * If not found return NULL.
+ */
+VMBusGpadl *vmbus_get_gpadl(VMBusChannel *chan, uint32_t gpadl_id);
+
+/*
+ * Unreference @gpadl. If the reference count drops to zero, free it.
+ * @gpadl may be NULL, in which case nothing is done.
+ */
+void vmbus_put_gpadl(VMBusGpadl *gpadl);
+
+/*
+ * Calculate total length in bytes of @gpadl.
+ * @gpadl must be valid.
+ */
+uint32_t vmbus_gpadl_len(VMBusGpadl *gpadl);
+
+/*
+ * Copy data from @iov to @gpadl at offset @off.
+ * Return the number of bytes copied, or a negative status on failure.
+ */
+ssize_t vmbus_iov_to_gpadl(VMBusChannel *chan, VMBusGpadl *gpadl, uint32_t off,
+ const struct iovec *iov, size_t iov_cnt);
+
+/*
+ * Map SGList contained in the request @req, at offset @off and no more than
+ * @len bytes, for io in direction @dir, and populate @iov with the mapped
+ * iovecs.
+ * Return the number of iovecs mapped, or negative status on failure.
+ */
+int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
+ unsigned iov_cnt, size_t len, size_t off);
+
+/*
+ * Unmap *iov mapped with vmbus_map_sgl, marking the number of bytes @accessed.
+ */
+void vmbus_unmap_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
+ unsigned iov_cnt, size_t accessed);
+
+#endif
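
Putting the receive and send primitives together, a device's chan_notify_cb typically loops peek/pop over incoming requests and completes the ones that ask for it; the sketch below assumes a hypothetical MyReq wrapper and omits real request processing.

    typedef struct MyReq {
        VMBusChanReq vmreq;     /* must come first, as peek fills it in */
        /* device-specific fields */
    } MyReq;

    static void my_chan_notify_cb(VMBusChannel *chan)
    {
        if (vmbus_channel_recv_start(chan)) {
            return;
        }

        for (;;) {
            MyReq *req = vmbus_channel_recv_peek(chan, sizeof(*req));
            if (!req) {
                break;
            }
            vmbus_channel_recv_pop(chan);

            /* ... process req->vmreq.msg and req->vmreq.sgl here ... */

            if (req->vmreq.need_comp) {
                uint32_t status = 0;
                if (vmbus_channel_reserve(chan, 0, sizeof(status)) == 0) {
                    vmbus_channel_send_completion(&req->vmreq,
                                                  &status, sizeof(status));
                }
            }
            vmbus_free_req(req);
        }

        vmbus_channel_recv_done(chan);
    }
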
diff --git a/include/hw/i2c/allwinner-i2c.h b/include/hw/i2c/allwinner-i2c.h
new file mode 100644
index 0000000000..0e325d265e
--- /dev/null
+++ b/include/hw/i2c/allwinner-i2c.h
@@ -0,0 +1,61 @@
+/*
+ * Allwinner I2C Bus Serial Interface registers definition
+ *
+ * Copyright (C) 2022 Strahinja Jankovic. <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from IMX I2C controller,
+ * by Jean-Christophe DUBOIS.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef ALLWINNER_I2C_H
+#define ALLWINNER_I2C_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_AW_I2C "allwinner.i2c"
+
+/** Allwinner I2C sun6i family and newer (A31, H2+, H3, etc) */
+#define TYPE_AW_I2C_SUN6I TYPE_AW_I2C "-sun6i"
+
+OBJECT_DECLARE_SIMPLE_TYPE(AWI2CState, AW_I2C)
+
+#define AW_I2C_MEM_SIZE 0x24
+
+struct AWI2CState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ I2CBus *bus;
+ qemu_irq irq;
+
+ uint8_t addr;
+ uint8_t xaddr;
+ uint8_t data;
+ uint8_t cntr;
+ uint8_t stat;
+ uint8_t ccr;
+ uint8_t srst;
+ uint8_t efr;
+ uint8_t lcr;
+
+ bool irq_clear_inverted;
+};
+
+#endif /* ALLWINNER_I2C_H */
diff --git a/include/hw/i2c/arm_sbcon_i2c.h b/include/hw/i2c/arm_sbcon_i2c.h
new file mode 100644
index 0000000000..da9b5e8f83
--- /dev/null
+++ b/include/hw/i2c/arm_sbcon_i2c.h
@@ -0,0 +1,36 @@
+/*
+ * ARM SBCon two-wire serial bus interface (I2C bitbang)
+ * a.k.a.
+ * ARM Versatile I2C controller
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Copyright (c) 2012 Oskar Andero <oskar.andero@gmail.com>
+ * Copyright (C) 2020 Philippe Mathieu-Daudé <f4bug@amsat.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_I2C_ARM_SBCON_I2C_H
+#define HW_I2C_ARM_SBCON_I2C_H
+
+#include "hw/sysbus.h"
+#include "hw/i2c/bitbang_i2c.h"
+#include "qom/object.h"
+
+#define TYPE_ARM_SBCON_I2C "versatile_i2c"
+
+typedef struct ArmSbconI2CState ArmSbconI2CState;
+DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, ARM_SBCON_I2C, TYPE_ARM_SBCON_I2C)
+
+struct ArmSbconI2CState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion iomem;
+ bitbang_i2c_interface bitbang;
+ int out;
+ int in;
+};
+
+#endif /* HW_I2C_ARM_SBCON_I2C_H */
diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h
index f9020acdef..a064479e59 100644
--- a/include/hw/i2c/aspeed_i2c.h
+++ b/include/hw/i2c/aspeed_i2c.h
@@ -17,46 +17,373 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
+
#ifndef ASPEED_I2C_H
#define ASPEED_I2C_H
#include "hw/i2c/i2c.h"
+#include "hw/sysbus.h"
+#include "hw/registerfields.h"
+#include "qom/object.h"
#define TYPE_ASPEED_I2C "aspeed.i2c"
-#define ASPEED_I2C(obj) \
- OBJECT_CHECK(AspeedI2CState, (obj), TYPE_ASPEED_I2C)
+#define TYPE_ASPEED_2400_I2C TYPE_ASPEED_I2C "-ast2400"
+#define TYPE_ASPEED_2500_I2C TYPE_ASPEED_I2C "-ast2500"
+#define TYPE_ASPEED_2600_I2C TYPE_ASPEED_I2C "-ast2600"
+#define TYPE_ASPEED_1030_I2C TYPE_ASPEED_I2C "-ast1030"
+OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C)
+
+#define ASPEED_I2C_NR_BUSSES 16
+#define ASPEED_I2C_MAX_POOL_SIZE 0x800
+#define ASPEED_I2C_OLD_NUM_REG 11
+#define ASPEED_I2C_NEW_NUM_REG 22
+
+#define A_I2CD_M_STOP_CMD BIT(5)
+#define A_I2CD_M_RX_CMD BIT(3)
+#define A_I2CD_M_TX_CMD BIT(1)
+#define A_I2CD_M_START_CMD BIT(0)
+
+#define A_I2CD_MASTER_EN BIT(0)
-#define ASPEED_I2C_NR_BUSSES 14
+/* Tx State Machine */
+#define I2CD_TX_STATE_MASK 0xf
+#define I2CD_IDLE 0x0
+#define I2CD_MACTIVE 0x8
+#define I2CD_MSTART 0x9
+#define I2CD_MSTARTR 0xa
+#define I2CD_MSTOP 0xb
+#define I2CD_MTXD 0xc
+#define I2CD_MRXACK 0xd
+#define I2CD_MRXD 0xe
+#define I2CD_MTXACK 0xf
+#define I2CD_SWAIT 0x1
+#define I2CD_SRXD 0x4
+#define I2CD_STXACK 0x5
+#define I2CD_STXD 0x6
+#define I2CD_SRXACK 0x7
+#define I2CD_RECOVER 0x3
+
+/* I2C Global Register */
+REG32(I2C_CTRL_STATUS, 0x0) /* Device Interrupt Status */
+REG32(I2C_CTRL_ASSIGN, 0x8) /* Device Interrupt Target Assignment */
+REG32(I2C_CTRL_GLOBAL, 0xC) /* Global Control Register */
+ FIELD(I2C_CTRL_GLOBAL, REG_MODE, 2, 1)
+ FIELD(I2C_CTRL_GLOBAL, SRAM_EN, 0, 1)
+REG32(I2C_CTRL_NEW_CLK_DIVIDER, 0x10) /* New mode clock divider */
+
+/* I2C Old Mode Device (Bus) Register */
+REG32(I2CD_FUN_CTRL, 0x0) /* I2CD Function Control */
+ FIELD(I2CD_FUN_CTRL, POOL_PAGE_SEL, 20, 3) /* AST2400 */
+ SHARED_FIELD(M_SDA_LOCK_EN, 16, 1)
+ SHARED_FIELD(MULTI_MASTER_DIS, 15, 1)
+ SHARED_FIELD(M_SCL_DRIVE_EN, 14, 1)
+ SHARED_FIELD(MSB_STS, 9, 1)
+ SHARED_FIELD(SDA_DRIVE_IT_EN, 8, 1)
+ SHARED_FIELD(M_SDA_DRIVE_IT_EN, 7, 1)
+ SHARED_FIELD(M_HIGH_SPEED_EN, 6, 1)
+ SHARED_FIELD(DEF_ADDR_EN, 5, 1)
+ SHARED_FIELD(DEF_ALERT_EN, 4, 1)
+ SHARED_FIELD(DEF_ARP_EN, 3, 1)
+ SHARED_FIELD(DEF_GCALL_EN, 2, 1)
+ SHARED_FIELD(SLAVE_EN, 1, 1)
+ SHARED_FIELD(MASTER_EN, 0, 1)
+REG32(I2CD_AC_TIMING1, 0x04) /* Clock and AC Timing Control #1 */
+REG32(I2CD_AC_TIMING2, 0x08) /* Clock and AC Timing Control #2 */
+REG32(I2CD_INTR_CTRL, 0x0C) /* I2CD Interrupt Control */
+REG32(I2CD_INTR_STS, 0x10) /* I2CD Interrupt Status */
+ SHARED_FIELD(SLAVE_ADDR_MATCH, 31, 1) /* 0: addr1 1: addr2 */
+ SHARED_FIELD(SLAVE_ADDR_RX_PENDING, 29, 1)
+ SHARED_FIELD(SLAVE_INACTIVE_TIMEOUT, 15, 1)
+ SHARED_FIELD(SDA_DL_TIMEOUT, 14, 1)
+ SHARED_FIELD(BUS_RECOVER_DONE, 13, 1)
+ SHARED_FIELD(SMBUS_ALERT, 12, 1) /* Bus [0-3] only */
+ FIELD(I2CD_INTR_STS, SMBUS_ARP_ADDR, 11, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, SMBUS_DEV_ALERT_ADDR, 10, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, SMBUS_DEF_ADDR, 9, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, GCALL_ADDR, 8, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, SLAVE_ADDR_RX_MATCH, 7, 1) /* use RX_DONE */
+ SHARED_FIELD(SCL_TIMEOUT, 6, 1)
+ SHARED_FIELD(ABNORMAL, 5, 1)
+ SHARED_FIELD(NORMAL_STOP, 4, 1)
+ SHARED_FIELD(ARBIT_LOSS, 3, 1)
+ SHARED_FIELD(RX_DONE, 2, 1)
+ SHARED_FIELD(TX_NAK, 1, 1)
+ SHARED_FIELD(TX_ACK, 0, 1)
+REG32(I2CD_CMD, 0x14) /* I2CD Command/Status */
+ SHARED_FIELD(SDA_OE, 28, 1)
+ SHARED_FIELD(SDA_O, 27, 1)
+ SHARED_FIELD(SCL_OE, 26, 1)
+ SHARED_FIELD(SCL_O, 25, 1)
+ SHARED_FIELD(TX_TIMING, 23, 2)
+ SHARED_FIELD(TX_STATE, 19, 4)
+ SHARED_FIELD(SCL_LINE_STS, 18, 1)
+ SHARED_FIELD(SDA_LINE_STS, 17, 1)
+ SHARED_FIELD(BUS_BUSY_STS, 16, 1)
+ SHARED_FIELD(SDA_OE_OUT_DIR, 15, 1)
+ SHARED_FIELD(SDA_O_OUT_DIR, 14, 1)
+ SHARED_FIELD(SCL_OE_OUT_DIR, 13, 1)
+ SHARED_FIELD(SCL_O_OUT_DIR, 12, 1)
+ SHARED_FIELD(BUS_RECOVER_CMD_EN, 11, 1)
+ SHARED_FIELD(S_ALT_EN, 10, 1)
+ /* Command Bits */
+ SHARED_FIELD(RX_DMA_EN, 9, 1)
+ SHARED_FIELD(TX_DMA_EN, 8, 1)
+ SHARED_FIELD(RX_BUFF_EN, 7, 1)
+ SHARED_FIELD(TX_BUFF_EN, 6, 1)
+ SHARED_FIELD(M_STOP_CMD, 5, 1)
+ SHARED_FIELD(M_S_RX_CMD_LAST, 4, 1)
+ SHARED_FIELD(M_RX_CMD, 3, 1)
+ SHARED_FIELD(S_TX_CMD, 2, 1)
+ SHARED_FIELD(M_TX_CMD, 1, 1)
+ SHARED_FIELD(M_START_CMD, 0, 1)
+REG32(I2CD_DEV_ADDR, 0x18) /* Slave Device Address */
+ SHARED_FIELD(SLAVE_DEV_ADDR1, 0, 7)
+REG32(I2CD_POOL_CTRL, 0x1C) /* Pool Buffer Control */
+ SHARED_FIELD(RX_COUNT, 24, 6)
+ SHARED_FIELD(RX_SIZE, 16, 5)
+ SHARED_FIELD(TX_COUNT, 8, 5)
+ FIELD(I2CD_POOL_CTRL, OFFSET, 2, 6) /* AST2400 */
+ SHARED_FIELD(BUF_ORGANIZATION, 0, 1) /* AST2600 */
+REG32(I2CD_BYTE_BUF, 0x20) /* Transmit/Receive Byte Buffer */
+ SHARED_FIELD(RX_BUF, 8, 8)
+ SHARED_FIELD(TX_BUF, 0, 8)
+REG32(I2CD_DMA_ADDR, 0x24) /* DMA Buffer Address */
+REG32(I2CD_DMA_LEN, 0x28) /* DMA Transfer Length < 4KB */
+
+/* I2C New Mode Device (Bus) Register */
+REG32(I2CC_FUN_CTRL, 0x0)
+ FIELD(I2CC_FUN_CTRL, RB_EARLY_DONE_EN, 22, 1)
+ FIELD(I2CC_FUN_CTRL, DMA_DIS_AUTO_RECOVER, 21, 1)
+ FIELD(I2CC_FUN_CTRL, S_SAVE_ADDR, 20, 1)
+ FIELD(I2CC_FUN_CTRL, M_PKT_RETRY_CNT, 18, 2)
+ /* 17:0 shared with I2CD_FUN_CTRL[17:0] */
+REG32(I2CC_AC_TIMING, 0x04)
+REG32(I2CC_MS_TXRX_BYTE_BUF, 0x08)
+ /* 31:16 shared with I2CD_CMD[31:16] */
+ /* 15:0 shared with I2CD_BYTE_BUF[15:0] */
+REG32(I2CC_POOL_CTRL, 0x0c)
+ /* 31:0 shared with I2CD_POOL_CTRL[31:0] */
+REG32(I2CM_INTR_CTRL, 0x10)
+REG32(I2CM_INTR_STS, 0x14)
+ FIELD(I2CM_INTR_STS, PKT_STATE, 28, 4)
+ FIELD(I2CM_INTR_STS, PKT_CMD_TIMEOUT, 18, 1)
+ FIELD(I2CM_INTR_STS, PKT_CMD_FAIL, 17, 1)
+ FIELD(I2CM_INTR_STS, PKT_CMD_DONE, 16, 1)
+ FIELD(I2CM_INTR_STS, BUS_RECOVER_FAIL, 15, 1)
+ /* 14:0 shared with I2CD_INTR_STS[14:0] */
+REG32(I2CM_CMD, 0x18)
+ FIELD(I2CM_CMD, W1_CTRL, 31, 1)
+ FIELD(I2CM_CMD, PKT_DEV_ADDR, 24, 7)
+ FIELD(I2CM_CMD, HS_MASTER_MODE_LSB, 17, 3)
+ FIELD(I2CM_CMD, PKT_OP_EN, 16, 1)
+ /* 15:0 shared with I2CD_CMD[15:0] */
+REG32(I2CM_DMA_LEN, 0x1c)
+ FIELD(I2CM_DMA_LEN, RX_BUF_LEN_W1T, 31, 1)
+ FIELD(I2CM_DMA_LEN, RX_BUF_LEN, 16, 11)
+ FIELD(I2CM_DMA_LEN, TX_BUF_LEN_W1T, 15, 1)
+ FIELD(I2CM_DMA_LEN, TX_BUF_LEN, 0, 11)
+REG32(I2CS_INTR_CTRL, 0x20)
+ FIELD(I2CS_INTR_CTRL, PKT_CMD_FAIL, 17, 1)
+ FIELD(I2CS_INTR_CTRL, PKT_CMD_DONE, 16, 1)
+REG32(I2CS_INTR_STS, 0x24)
+ /* 31:29 shared with I2CD_INTR_STS[31:29] */
+ FIELD(I2CS_INTR_STS, SLAVE_PARKING_STS, 24, 2)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR3_NAK, 22, 1)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR2_NAK, 21, 1)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR1_NAK, 20, 1)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR_INDICATOR, 18, 2)
+ FIELD(I2CS_INTR_STS, PKT_CMD_FAIL, 17, 1)
+ FIELD(I2CS_INTR_STS, PKT_CMD_DONE, 16, 1)
+ /* 14:0 shared with I2CD_INTR_STS[14:0] */
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR_RX_MATCH, 7, 1)
+REG32(I2CS_CMD, 0x28)
+ FIELD(I2CS_CMD, W1_CTRL, 31, 1)
+ FIELD(I2CS_CMD, PKT_MODE_ACTIVE_ADDR, 17, 2)
+ FIELD(I2CS_CMD, PKT_MODE_EN, 16, 1)
+ FIELD(I2CS_CMD, AUTO_NAK_INACTIVE_ADDR, 15, 1)
+ FIELD(I2CS_CMD, AUTO_NAK_ACTIVE_ADDR, 14, 1)
+ /* 13:0 shared with I2CD_CMD[13:0] */
+REG32(I2CS_DMA_LEN, 0x2c)
+ FIELD(I2CS_DMA_LEN, RX_BUF_LEN_W1T, 31, 1)
+ FIELD(I2CS_DMA_LEN, RX_BUF_LEN, 16, 11)
+ FIELD(I2CS_DMA_LEN, TX_BUF_LEN_W1T, 15, 1)
+ FIELD(I2CS_DMA_LEN, TX_BUF_LEN, 0, 11)
+REG32(I2CM_DMA_TX_ADDR, 0x30)
+ FIELD(I2CM_DMA_TX_ADDR, ADDR, 0, 31)
+REG32(I2CM_DMA_RX_ADDR, 0x34)
+ FIELD(I2CM_DMA_RX_ADDR, ADDR, 0, 31)
+REG32(I2CS_DMA_TX_ADDR, 0x38)
+ FIELD(I2CS_DMA_TX_ADDR, ADDR, 0, 31)
+REG32(I2CS_DMA_RX_ADDR, 0x3c)
+ FIELD(I2CS_DMA_RX_ADDR, ADDR, 0, 31)
+REG32(I2CS_DEV_ADDR, 0x40)
+REG32(I2CM_DMA_LEN_STS, 0x48)
+ FIELD(I2CM_DMA_LEN_STS, RX_LEN, 16, 13)
+ FIELD(I2CM_DMA_LEN_STS, TX_LEN, 0, 13)
+REG32(I2CS_DMA_LEN_STS, 0x4c)
+ FIELD(I2CS_DMA_LEN_STS, RX_LEN, 16, 13)
+ FIELD(I2CS_DMA_LEN_STS, TX_LEN, 0, 13)
+REG32(I2CC_DMA_ADDR, 0x50)
+REG32(I2CC_DMA_LEN, 0x54)
struct AspeedI2CState;
-typedef struct AspeedI2CBus {
+#define TYPE_ASPEED_I2C_BUS "aspeed.i2c.bus"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedI2CBus, ASPEED_I2C_BUS)
+struct AspeedI2CBus {
+ SysBusDevice parent_obj;
+
struct AspeedI2CState *controller;
+ /* slave mode */
+ I2CSlave *slave;
+
MemoryRegion mr;
I2CBus *bus;
uint8_t id;
+ qemu_irq irq;
- uint32_t ctrl;
- uint32_t timing[2];
- uint32_t intr_ctrl;
- uint32_t intr_status;
- uint32_t cmd;
- uint32_t buf;
-} AspeedI2CBus;
+ uint32_t regs[ASPEED_I2C_NEW_NUM_REG];
+};
-typedef struct AspeedI2CState {
+struct AspeedI2CState {
SysBusDevice parent_obj;
MemoryRegion iomem;
qemu_irq irq;
uint32_t intr_status;
+ uint32_t ctrl_global;
+ uint32_t new_clk_divider;
+ MemoryRegion pool_iomem;
+ uint8_t pool[ASPEED_I2C_MAX_POOL_SIZE];
AspeedI2CBus busses[ASPEED_I2C_NR_BUSSES];
-} AspeedI2CState;
+ MemoryRegion *dram_mr;
+ AddressSpace dram_as;
+};
+
+#define TYPE_ASPEED_I2C_BUS_SLAVE "aspeed.i2c.slave"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedI2CBusSlave, ASPEED_I2C_BUS_SLAVE)
+struct AspeedI2CBusSlave {
+ I2CSlave i2c;
+};
+
+struct AspeedI2CClass {
+ SysBusDeviceClass parent_class;
+
+ uint8_t num_busses;
+ uint8_t reg_size;
+ uint8_t gap;
+ qemu_irq (*bus_get_irq)(AspeedI2CBus *);
+
+ uint64_t pool_size;
+ hwaddr pool_base;
+ uint8_t *(*bus_pool_base)(AspeedI2CBus *);
+ bool check_sram;
+ bool has_dma;
+
+};
+
+static inline bool aspeed_i2c_is_new_mode(AspeedI2CState *s)
+{
+ return FIELD_EX32(s->ctrl_global, I2C_CTRL_GLOBAL, REG_MODE);
+}
+
+static inline bool aspeed_i2c_bus_pkt_mode_en(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return ARRAY_FIELD_EX32(bus->regs, I2CM_CMD, PKT_OP_EN);
+ }
+ return false;
+}
+
+static inline uint32_t aspeed_i2c_bus_ctrl_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_FUN_CTRL;
+ }
+ return R_I2CD_FUN_CTRL;
+}
+
+static inline uint32_t aspeed_i2c_bus_cmd_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CM_CMD;
+ }
+ return R_I2CD_CMD;
+}
+
+static inline uint32_t aspeed_i2c_bus_dev_addr_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CS_DEV_ADDR;
+ }
+ return R_I2CD_DEV_ADDR;
+}
+
+static inline uint32_t aspeed_i2c_bus_intr_ctrl_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CM_INTR_CTRL;
+ }
+ return R_I2CD_INTR_CTRL;
+}
+
+static inline uint32_t aspeed_i2c_bus_intr_sts_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CM_INTR_STS;
+ }
+ return R_I2CD_INTR_STS;
+}
+
+static inline uint32_t aspeed_i2c_bus_pool_ctrl_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_POOL_CTRL;
+ }
+ return R_I2CD_POOL_CTRL;
+}
+
+static inline uint32_t aspeed_i2c_bus_byte_buf_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_MS_TXRX_BYTE_BUF;
+ }
+ return R_I2CD_BYTE_BUF;
+}
+
+static inline uint32_t aspeed_i2c_bus_dma_len_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_DMA_LEN;
+ }
+ return R_I2CD_DMA_LEN;
+}
+
+static inline uint32_t aspeed_i2c_bus_dma_addr_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_DMA_ADDR;
+ }
+ return R_I2CD_DMA_ADDR;
+}
+
+static inline bool aspeed_i2c_bus_is_master(AspeedI2CBus *bus)
+{
+ return SHARED_ARRAY_FIELD_EX32(bus->regs, aspeed_i2c_bus_ctrl_offset(bus),
+ MASTER_EN);
+}
+
+static inline bool aspeed_i2c_bus_is_enabled(AspeedI2CBus *bus)
+{
+ uint32_t ctrl_reg = aspeed_i2c_bus_ctrl_offset(bus);
+ return SHARED_ARRAY_FIELD_EX32(bus->regs, ctrl_reg, MASTER_EN) ||
+ SHARED_ARRAY_FIELD_EX32(bus->regs, ctrl_reg, SLAVE_EN);
+}
-I2CBus *aspeed_i2c_get_bus(DeviceState *dev, int busnr);
+I2CBus *aspeed_i2c_get_bus(AspeedI2CState *s, int busnr);
#endif /* ASPEED_I2C_H */
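
Board code normally picks one of the controller's busses with aspeed_i2c_get_bus() and attaches slave devices to it; a minimal sketch, where the controller field, device type and address are illustrative wiring rather than anything mandated by this header.

    I2CBus *bus = aspeed_i2c_get_bus(&soc->i2c, 5);
    i2c_slave_create_simple(bus, "tmp105", 0x4d);
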
diff --git a/include/hw/i2c/bcm2835_i2c.h b/include/hw/i2c/bcm2835_i2c.h
new file mode 100644
index 0000000000..0a56df4720
--- /dev/null
+++ b/include/hw/i2c/bcm2835_i2c.h
@@ -0,0 +1,80 @@
+/*
+ * Broadcom Serial Controller (BSC)
+ *
+ * Copyright (c) 2024 Rayhan Faizel <rayhan.faizel@gmail.com>
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/i2c/i2c.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_I2C "bcm2835-i2c"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835I2CState, BCM2835_I2C)
+
+#define BCM2835_I2C_C 0x0 /* Control */
+#define BCM2835_I2C_S 0x4 /* Status */
+#define BCM2835_I2C_DLEN 0x8 /* Data Length */
+#define BCM2835_I2C_A 0xc /* Slave Address */
+#define BCM2835_I2C_FIFO 0x10 /* FIFO */
+#define BCM2835_I2C_DIV 0x14 /* Clock Divider */
+#define BCM2835_I2C_DEL 0x18 /* Data Delay */
+#define BCM2835_I2C_CLKT 0x20 /* Clock Stretch Timeout */
+
+#define BCM2835_I2C_C_I2CEN BIT(15) /* I2C enable */
+#define BCM2835_I2C_C_INTR BIT(10) /* Interrupt on RXR */
+#define BCM2835_I2C_C_INTT BIT(9) /* Interrupt on TXW */
+#define BCM2835_I2C_C_INTD BIT(8) /* Interrupt on DONE */
+#define BCM2835_I2C_C_ST BIT(7) /* Start transfer */
+#define BCM2835_I2C_C_CLEAR (BIT(5) | BIT(4)) /* Clear FIFO */
+#define BCM2835_I2C_C_READ BIT(0) /* I2C read mode */
+
+#define BCM2835_I2C_S_CLKT BIT(9) /* Clock stretch timeout */
+#define BCM2835_I2C_S_ERR BIT(8) /* Slave error */
+#define BCM2835_I2C_S_RXF BIT(7) /* RX FIFO full */
+#define BCM2835_I2C_S_TXE BIT(6) /* TX FIFO empty */
+#define BCM2835_I2C_S_RXD BIT(5) /* RX bytes available */
+#define BCM2835_I2C_S_TXD BIT(4) /* TX space available */
+#define BCM2835_I2C_S_RXR BIT(3) /* RX FIFO needs reading */
+#define BCM2835_I2C_S_TXW BIT(2) /* TX FIFO needs writing */
+#define BCM2835_I2C_S_DONE BIT(1) /* I2C Transfer complete */
+#define BCM2835_I2C_S_TA BIT(0) /* I2C Transfer active */
+
+struct BCM2835I2CState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+ I2CBus *bus;
+ qemu_irq irq;
+
+ uint32_t c;
+ uint32_t s;
+ uint32_t dlen;
+ uint32_t a;
+ uint32_t div;
+ uint32_t del;
+ uint32_t clkt;
+
+ uint32_t last_dlen;
+};
diff --git a/include/hw/i2c/bitbang_i2c.h b/include/hw/i2c/bitbang_i2c.h
new file mode 100644
index 0000000000..a079e6d70f
--- /dev/null
+++ b/include/hw/i2c/bitbang_i2c.h
@@ -0,0 +1,52 @@
+#ifndef BITBANG_I2C_H
+#define BITBANG_I2C_H
+
+#include "hw/i2c/i2c.h"
+
+#define TYPE_GPIO_I2C "gpio_i2c"
+
+typedef struct bitbang_i2c_interface bitbang_i2c_interface;
+
+#define BITBANG_I2C_SDA 0
+#define BITBANG_I2C_SCL 1
+
+typedef enum bitbang_i2c_state {
+ STOPPED = 0,
+ SENDING_BIT7,
+ SENDING_BIT6,
+ SENDING_BIT5,
+ SENDING_BIT4,
+ SENDING_BIT3,
+ SENDING_BIT2,
+ SENDING_BIT1,
+ SENDING_BIT0,
+ WAITING_FOR_ACK,
+ RECEIVING_BIT7,
+ RECEIVING_BIT6,
+ RECEIVING_BIT5,
+ RECEIVING_BIT4,
+ RECEIVING_BIT3,
+ RECEIVING_BIT2,
+ RECEIVING_BIT1,
+ RECEIVING_BIT0,
+ SENDING_ACK,
+ SENT_NACK
+} bitbang_i2c_state;
+
+struct bitbang_i2c_interface {
+ I2CBus *bus;
+ bitbang_i2c_state state;
+ int last_data;
+ int last_clock;
+ int device_out;
+ uint8_t buffer;
+ int current_addr;
+};
+
+/**
+ * bitbang_i2c_init: in-place initialize the bitbang_i2c_interface struct
+ */
+void bitbang_i2c_init(bitbang_i2c_interface *s, I2CBus *bus);
+int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level);
+
+#endif
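
A GPIO-backed controller embeds bitbang_i2c_interface, initializes it against its bus, and feeds every SDA/SCL transition into bitbang_i2c_set(), whose return value is the level the slave side drives back on SDA; the state type and field names below are hypothetical.

    /* Sketch: called from the device's GPIO input handler. */
    static void my_gpio_set(MyI2CState *s, int line, int level)
    {
        if (line == BITBANG_I2C_SCL) {
            s->sda_out = bitbang_i2c_set(&s->bitbang, BITBANG_I2C_SCL, level);
        } else {
            s->sda_out = bitbang_i2c_set(&s->bitbang, BITBANG_I2C_SDA, level);
        }
    }

    /* ... at realize time ... */
    bitbang_i2c_init(&s->bitbang, i2c_init_bus(DEVICE(s), "i2c"));
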
diff --git a/include/hw/i2c/i2c.h b/include/hw/i2c/i2c.h
index cf4c45a98f..2a3abacd1b 100644
--- a/include/hw/i2c/i2c.h
+++ b/include/hw/i2c/i2c.h
@@ -1,7 +1,8 @@
#ifndef QEMU_I2C_H
#define QEMU_I2C_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
/* The QEMU I2C implementation only supports simple transfers that complete
immediately. It does not support slave devices that need to be able to
@@ -11,32 +12,31 @@
enum i2c_event {
I2C_START_RECV,
I2C_START_SEND,
+ I2C_START_SEND_ASYNC,
I2C_FINISH,
I2C_NACK /* Master NACKed a receive byte. */
};
-typedef struct I2CSlave I2CSlave;
+typedef struct I2CNodeList I2CNodeList;
#define TYPE_I2C_SLAVE "i2c-slave"
-#define I2C_SLAVE(obj) \
- OBJECT_CHECK(I2CSlave, (obj), TYPE_I2C_SLAVE)
-#define I2C_SLAVE_CLASS(klass) \
- OBJECT_CLASS_CHECK(I2CSlaveClass, (klass), TYPE_I2C_SLAVE)
-#define I2C_SLAVE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(I2CSlaveClass, (obj), TYPE_I2C_SLAVE)
-
-typedef struct I2CSlaveClass {
+OBJECT_DECLARE_TYPE(I2CSlave, I2CSlaveClass,
+ I2C_SLAVE)
+
+struct I2CSlaveClass {
DeviceClass parent_class;
/* Master to slave. Returns non-zero for a NAK, 0 for success. */
int (*send)(I2CSlave *s, uint8_t data);
+ /* Master to slave (asynchronous). Receiving slave must call i2c_ack(). */
+ void (*send_async)(I2CSlave *s, uint8_t data);
+
/*
* Slave to master. This cannot fail, the device should always
- * return something here. Negative values probably result in 0xff
- * and a possible log from the driver, and shouldn't be used.
+ * return something here.
*/
- int (*recv)(I2CSlave *s);
+ uint8_t (*recv)(I2CSlave *s);
/*
* Notify the slave of a bus state change. For start event,
@@ -44,7 +44,17 @@ typedef struct I2CSlaveClass {
* return code is not used and should be zero.
*/
int (*event)(I2CSlave *s, enum i2c_event event);
-} I2CSlaveClass;
+
+ /*
+ * Check if this device matches the address provided. Returns true if it
+ * matches (or if broadcast is requested), in which case it also updates
+ * @current_devs; returns false otherwise.
+ *
+ * If broadcast is true, match should add the device and return true.
+ */
+ bool (*match_and_add)(I2CSlave *candidate, uint8_t address, bool broadcast,
+ I2CNodeList *current_devs);
+};
struct I2CSlave {
DeviceState qdev;
@@ -54,7 +64,7 @@ struct I2CSlave {
};
#define TYPE_I2C_BUS "i2c-bus"
-#define I2C_BUS(obj) OBJECT_CHECK(I2CBus, (obj), TYPE_I2C_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(I2CBus, I2C_BUS)
typedef struct I2CNode I2CNode;
@@ -63,29 +73,145 @@ struct I2CNode {
QLIST_ENTRY(I2CNode) next;
};
+typedef struct I2CPendingMaster I2CPendingMaster;
+
+struct I2CPendingMaster {
+ QEMUBH *bh;
+ QSIMPLEQ_ENTRY(I2CPendingMaster) entry;
+};
+
+typedef QLIST_HEAD(I2CNodeList, I2CNode) I2CNodeList;
+typedef QSIMPLEQ_HEAD(I2CPendingMasters, I2CPendingMaster) I2CPendingMasters;
+
struct I2CBus {
BusState qbus;
- QLIST_HEAD(, I2CNode) current_devs;
+ I2CNodeList current_devs;
+ I2CPendingMasters pending_masters;
uint8_t saved_address;
bool broadcast;
+
+ /* Set from slave currently mastering the bus. */
+ QEMUBH *bh;
};
I2CBus *i2c_init_bus(DeviceState *parent, const char *name);
-void i2c_set_slave_address(I2CSlave *dev, uint8_t address);
int i2c_bus_busy(I2CBus *bus);
-int i2c_start_transfer(I2CBus *bus, uint8_t address, int recv);
+
+/**
+ * i2c_start_transfer: start a transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ * @is_recv: indicates the transfer direction
+ *
+ * When @is_recv is a known boolean constant, use the
+ * i2c_start_recv() or i2c_start_send() helper instead.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int i2c_start_transfer(I2CBus *bus, uint8_t address, bool is_recv);
+
+/**
+ * i2c_start_recv: start a 'receive' transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int i2c_start_recv(I2CBus *bus, uint8_t address);
+
+/**
+ * i2c_start_send: start a 'send' transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int i2c_start_send(I2CBus *bus, uint8_t address);
+
+/**
+ * i2c_start_send_async: start an asynchronous 'send' transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ *
+ * Return: 0 on success, -1 on error
+ */
+int i2c_start_send_async(I2CBus *bus, uint8_t address);
+
+void i2c_schedule_pending_master(I2CBus *bus);
+
void i2c_end_transfer(I2CBus *bus);
void i2c_nack(I2CBus *bus);
-int i2c_send_recv(I2CBus *bus, uint8_t *data, bool send);
+void i2c_ack(I2CBus *bus);
+void i2c_bus_master(I2CBus *bus, QEMUBH *bh);
+void i2c_bus_release(I2CBus *bus);
int i2c_send(I2CBus *bus, uint8_t data);
-int i2c_recv(I2CBus *bus);
-
-DeviceState *i2c_create_slave(I2CBus *bus, const char *name, uint8_t addr);
-
-typedef struct bitbang_i2c_interface bitbang_i2c_interface;
-
-/* lm832x.c */
-void lm832x_key_event(DeviceState *dev, int key, int state);
+int i2c_send_async(I2CBus *bus, uint8_t data);
+uint8_t i2c_recv(I2CBus *bus);
+bool i2c_scan_bus(I2CBus *bus, uint8_t address, bool broadcast,
+ I2CNodeList *current_devs);
+
+/**
+ * Create an I2C slave device on the heap.
+ * @name: a device type name
+ * @addr: I2C address of the slave when put on a bus
+ *
+ * This only initializes the device state structure and allows
+ * properties to be set. Type @name must exist. The device still
+ * needs to be realized. See qdev-core.h.
+ */
+I2CSlave *i2c_slave_new(const char *name, uint8_t addr);
+
+/**
+ * Create and realize an I2C slave device on the heap.
+ * @bus: I2C bus to put it on
+ * @name: I2C slave device type name
+ * @addr: I2C address of the slave when put on a bus
+ *
+ * Create the device state structure, initialize it, put it on the
+ * specified @bus, and drop the reference to it (the device is realized).
+ */
+I2CSlave *i2c_slave_create_simple(I2CBus *bus, const char *name, uint8_t addr);
+
+/**
+ * Realize and drop a reference to an I2C slave device
+ * @dev: I2C slave device to realize
+ * @bus: I2C bus to put it on
+ * @addr: I2C address of the slave on the bus
+ * @errp: pointer to NULL initialized error object
+ *
+ * Returns: %true on success, %false on failure.
+ *
+ * Call 'realize' on @dev, put it on the specified @bus, and drop the
+ * reference to it.
+ *
+ * This function is useful if you have created @dev via qdev_new(),
+ * i2c_slave_new() or i2c_slave_try_new() (which take a reference to
+ * the device it returns to you), so that you can set properties on it
+ * before realizing it. If you don't need to set properties then
+ * i2c_slave_create_simple() is probably better (as it does the create,
+ * init and realize in one step).
+ *
+ * If you are embedding the I2C slave into another QOM device and
+ * initialized it via some variant on object_initialize_child() then
+ * do not use this function, because that family of functions arrange
+ * for the only reference to the child device to be held by the parent
+ * via the child<> property, and so the reference-count-drop done here
+ * would be incorrect. (Instead you would want i2c_slave_realize(),
+ * which doesn't currently exist but would be trivial to create if we
+ * had any code that wanted it.)
+ */
+bool i2c_slave_realize_and_unref(I2CSlave *dev, I2CBus *bus, Error **errp);
+
+/**
+ * Set the I2C bus address of a slave device
+ * @dev: I2C slave device
+ * @address: I2C address of the slave when put on a bus
+ */
+void i2c_slave_set_address(I2CSlave *dev, uint8_t address);
extern const VMStateDescription vmstate_i2c_slave;
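
Taken together, the split start helpers make a typical register read on the bus look like the sketch below; the address handling and the NAK before STOP follow common master behaviour and are not mandated by this header.

    /* Sketch: read one register from a slave (addr/reg are illustrative). */
    static int my_read_reg(I2CBus *bus, uint8_t addr, uint8_t reg, uint8_t *val)
    {
        if (i2c_start_send(bus, addr)) {
            return -1;
        }
        i2c_send(bus, reg);
        if (i2c_start_recv(bus, addr)) {    /* repeated START, now receiving */
            i2c_end_transfer(bus);
            return -1;
        }
        *val = i2c_recv(bus);
        i2c_nack(bus);                      /* NAK the final byte */
        i2c_end_transfer(bus);
        return 0;
    }
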
diff --git a/include/hw/i2c/i2c_mux_pca954x.h b/include/hw/i2c/i2c_mux_pca954x.h
new file mode 100644
index 0000000000..3dd25ec983
--- /dev/null
+++ b/include/hw/i2c/i2c_mux_pca954x.h
@@ -0,0 +1,19 @@
+#ifndef QEMU_I2C_MUX_PCA954X_H
+#define QEMU_I2C_MUX_PCA954X_H
+
+#include "hw/i2c/i2c.h"
+
+#define TYPE_PCA9546 "pca9546"
+#define TYPE_PCA9548 "pca9548"
+
+/**
+ * Retrieves the i2c bus associated with the specified channel on this i2c
+ * mux.
+ * @mux: an i2c mux device.
+ * @channel: the i2c channel requested
+ *
+ * Returns: a pointer to the associated i2c bus.
+ */
+I2CBus *pca954x_i2c_get_bus(I2CSlave *mux, uint8_t channel);
+
+#endif
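
Board code obtains a channel's bus from a realized mux and hangs devices off it; a sketch, with the parent bus, device type and addresses purely illustrative.

    I2CSlave *mux = i2c_slave_create_simple(parent_bus, TYPE_PCA9548, 0x70);
    I2CBus *chan2 = pca954x_i2c_get_bus(mux, 2);
    i2c_slave_create_simple(chan2, "tmp105", 0x48);
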
diff --git a/include/hw/i2c/imx_i2c.h b/include/hw/i2c/imx_i2c.h
index 7c73a1fa28..e4f91339f5 100644
--- a/include/hw/i2c/imx_i2c.h
+++ b/include/hw/i2c/imx_i2c.h
@@ -22,9 +22,10 @@
#define IMX_I2C_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX_I2C "imx.i2c"
-#define IMX_I2C(obj) OBJECT_CHECK(IMXI2CState, (obj), TYPE_IMX_I2C)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXI2CState, IMX_I2C)
#define IMX_I2C_MEM_SIZE 0x14
@@ -65,7 +66,7 @@
#define ADDR_RESET 0xFF00
-typedef struct IMXI2CState {
+struct IMXI2CState {
/*< private >*/
SysBusDevice parent_obj;
@@ -82,6 +83,6 @@ typedef struct IMXI2CState {
uint16_t i2sr;
uint16_t i2dr_read;
uint16_t i2dr_write;
-} IMXI2CState;
+};
#endif /* IMX_I2C_H */
diff --git a/include/hw/i2c/microbit_i2c.h b/include/hw/i2c/microbit_i2c.h
new file mode 100644
index 0000000000..3c29e09bf3
--- /dev/null
+++ b/include/hw/i2c/microbit_i2c.h
@@ -0,0 +1,42 @@
+/*
+ * Microbit stub for Nordic Semiconductor nRF51 SoC Two-Wire Interface
+ * http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.1.pdf
+ *
+ * Copyright 2019 Red Hat, Inc.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef MICROBIT_I2C_H
+#define MICROBIT_I2C_H
+
+#include "hw/sysbus.h"
+#include "hw/arm/nrf51.h"
+#include "qom/object.h"
+
+#define NRF51_TWI_TASK_STARTRX 0x000
+#define NRF51_TWI_TASK_STARTTX 0x008
+#define NRF51_TWI_TASK_STOP 0x014
+#define NRF51_TWI_EVENT_STOPPED 0x104
+#define NRF51_TWI_EVENT_RXDREADY 0x108
+#define NRF51_TWI_EVENT_TXDSENT 0x11c
+#define NRF51_TWI_REG_ENABLE 0x500
+#define NRF51_TWI_REG_RXD 0x518
+#define NRF51_TWI_REG_TXD 0x51c
+#define NRF51_TWI_REG_ADDRESS 0x588
+
+#define TYPE_MICROBIT_I2C "microbit.i2c"
+OBJECT_DECLARE_SIMPLE_TYPE(MicrobitI2CState, MICROBIT_I2C)
+
+#define MICROBIT_I2C_NREGS (NRF51_PERIPHERAL_SIZE / sizeof(uint32_t))
+
+struct MicrobitI2CState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ uint32_t regs[MICROBIT_I2C_NREGS];
+ uint32_t read_idx;
+};
+
+#endif /* MICROBIT_I2C_H */
diff --git a/include/hw/i2c/npcm7xx_smbus.h b/include/hw/i2c/npcm7xx_smbus.h
new file mode 100644
index 0000000000..dc45963c0e
--- /dev/null
+++ b/include/hw/i2c/npcm7xx_smbus.h
@@ -0,0 +1,112 @@
+/*
+ * Nuvoton NPCM7xx SMBus Module.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_SMBUS_H
+#define NPCM7XX_SMBUS_H
+
+#include "exec/memory.h"
+#include "hw/i2c/i2c.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of addresses this module contains. Do not change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_SMBUS_NR_ADDRS 10
+
+/* Size of the FIFO buffer. */
+#define NPCM7XX_SMBUS_FIFO_SIZE 16
+
+typedef enum NPCM7xxSMBusStatus {
+ NPCM7XX_SMBUS_STATUS_IDLE,
+ NPCM7XX_SMBUS_STATUS_SENDING,
+ NPCM7XX_SMBUS_STATUS_RECEIVING,
+ NPCM7XX_SMBUS_STATUS_NEGACK,
+ NPCM7XX_SMBUS_STATUS_STOPPING_LAST_RECEIVE,
+ NPCM7XX_SMBUS_STATUS_STOPPING_NEGACK,
+} NPCM7xxSMBusStatus;
+
+/*
+ * struct NPCM7xxSMBusState - System Management Bus device state.
+ * @bus: The underlying I2C Bus.
+ * @irq: GIC interrupt line to fire on events (if enabled).
+ * @sda: The serial data register.
+ * @st: The status register.
+ * @cst: The control status register.
+ * @cst2: The control status register 2.
+ * @cst3: The control status register 3.
+ * @ctl1: The control register 1.
+ * @ctl2: The control register 2.
+ * @ctl3: The control register 3.
+ * @ctl4: The control register 4.
+ * @ctl5: The control register 5.
+ * @addr: The SMBus module's own addresses on the I2C bus.
+ * @scllt: The SCL low time register.
+ * @sclht: The SCL high time register.
+ * @fif_ctl: The FIFO control register.
+ * @fif_cts: The FIFO control status register.
+ * @fair_per: The fair period register.
+ * @txf_ctl: The transmit FIFO control register.
+ * @t_out: The SMBus timeout register.
+ * @txf_sts: The transmit FIFO status register.
+ * @rxf_sts: The receive FIFO status register.
+ * @rxf_ctl: The receive FIFO control register.
+ * @rx_fifo: The FIFO buffer for receiving in FIFO mode.
+ * @rx_cur: The current position of rx_fifo.
+ * @status: The current status of the SMBus.
+ */
+struct NPCM7xxSMBusState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ I2CBus *bus;
+ qemu_irq irq;
+
+ uint8_t sda;
+ uint8_t st;
+ uint8_t cst;
+ uint8_t cst2;
+ uint8_t cst3;
+ uint8_t ctl1;
+ uint8_t ctl2;
+ uint8_t ctl3;
+ uint8_t ctl4;
+ uint8_t ctl5;
+ uint8_t addr[NPCM7XX_SMBUS_NR_ADDRS];
+
+ uint8_t scllt;
+ uint8_t sclht;
+
+ uint8_t fif_ctl;
+ uint8_t fif_cts;
+ uint8_t fair_per;
+ uint8_t txf_ctl;
+ uint8_t t_out;
+ uint8_t txf_sts;
+ uint8_t rxf_sts;
+ uint8_t rxf_ctl;
+
+ uint8_t rx_fifo[NPCM7XX_SMBUS_FIFO_SIZE];
+ uint8_t rx_cur;
+
+ NPCM7xxSMBusStatus status;
+};
+
+#define TYPE_NPCM7XX_SMBUS "npcm7xx-smbus"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxSMBusState, NPCM7XX_SMBUS)
+
+#endif /* NPCM7XX_SMBUS_H */
diff --git a/include/hw/i2c/pm_smbus.h b/include/hw/i2c/pm_smbus.h
index 060d3c6ac0..0d74207efb 100644
--- a/include/hw/i2c/pm_smbus.h
+++ b/include/hw/i2c/pm_smbus.h
@@ -1,6 +1,9 @@
#ifndef PM_SMBUS_H
#define PM_SMBUS_H
+#include "exec/memory.h"
+#include "hw/i2c/smbus_master.h"
+
#define PM_SMBUS_MAX_MSG_SIZE 32
typedef struct PMSMBus {
@@ -31,8 +34,23 @@ typedef struct PMSMBus {
/* Set on block transfers after the last byte has been read, so the
INTR bit can be set at the right time. */
bool op_done;
+
+ /* Set during an I2C block read, so we know how to handle data. */
+ bool in_i2c_block_read;
+
+ /* Used to work around a bug in AMIBIOS, see smb_transaction_start() */
+ bool start_transaction_on_status_read;
} PMSMBus;
void pm_smbus_init(DeviceState *parent, PMSMBus *smb, bool force_aux_blk);
+/*
+ * For backwards compatibility on migration, older versions don't have
+ * working migration for pm_smbus, this lets us ignore the migrations
+ * for older machine versions.
+ */
+bool pm_smbus_vmstate_needed(void);
+
+extern const VMStateDescription pmsmb_vmstate;
+
#endif /* PM_SMBUS_H */
diff --git a/include/hw/i2c/pmbus_device.h b/include/hw/i2c/pmbus_device.h
new file mode 100644
index 0000000000..f195c11384
--- /dev/null
+++ b/include/hw/i2c/pmbus_device.h
@@ -0,0 +1,564 @@
+/*
+ * QEMU PMBus device emulation
+ *
+ * Copyright 2021 Google LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PMBUS_DEVICE_H
+#define HW_PMBUS_DEVICE_H
+
+#include "qemu/bitops.h"
+#include "hw/i2c/smbus_slave.h"
+
+enum pmbus_registers {
+ PMBUS_PAGE = 0x00, /* R/W byte */
+ PMBUS_OPERATION = 0x01, /* R/W byte */
+ PMBUS_ON_OFF_CONFIG = 0x02, /* R/W byte */
+ PMBUS_CLEAR_FAULTS = 0x03, /* Send Byte */
+ PMBUS_PHASE = 0x04, /* R/W byte */
+ PMBUS_PAGE_PLUS_WRITE = 0x05, /* Block Write-only */
+ PMBUS_PAGE_PLUS_READ = 0x06, /* Block Read-only */
+ PMBUS_WRITE_PROTECT = 0x10, /* R/W byte */
+ PMBUS_STORE_DEFAULT_ALL = 0x11, /* Send Byte */
+ PMBUS_RESTORE_DEFAULT_ALL = 0x12, /* Send Byte */
+ PMBUS_STORE_DEFAULT_CODE = 0x13, /* Write-only Byte */
+ PMBUS_RESTORE_DEFAULT_CODE = 0x14, /* Write-only Byte */
+ PMBUS_STORE_USER_ALL = 0x15, /* Send Byte */
+ PMBUS_RESTORE_USER_ALL = 0x16, /* Send Byte */
+ PMBUS_STORE_USER_CODE = 0x17, /* Write-only Byte */
+ PMBUS_RESTORE_USER_CODE = 0x18, /* Write-only Byte */
+ PMBUS_CAPABILITY = 0x19, /* Read-Only byte */
+ PMBUS_QUERY = 0x1A, /* Write-Only */
+ PMBUS_SMBALERT_MASK = 0x1B, /* Block read, Word write */
+ PMBUS_VOUT_MODE = 0x20, /* R/W byte */
+ PMBUS_VOUT_COMMAND = 0x21, /* R/W word */
+ PMBUS_VOUT_TRIM = 0x22, /* R/W word */
+ PMBUS_VOUT_CAL_OFFSET = 0x23, /* R/W word */
+ PMBUS_VOUT_MAX = 0x24, /* R/W word */
+ PMBUS_VOUT_MARGIN_HIGH = 0x25, /* R/W word */
+ PMBUS_VOUT_MARGIN_LOW = 0x26, /* R/W word */
+ PMBUS_VOUT_TRANSITION_RATE = 0x27, /* R/W word */
+ PMBUS_VOUT_DROOP = 0x28, /* R/W word */
+ PMBUS_VOUT_SCALE_LOOP = 0x29, /* R/W word */
+ PMBUS_VOUT_SCALE_MONITOR = 0x2A, /* R/W word */
+ PMBUS_VOUT_MIN = 0x2B, /* R/W word */
+ PMBUS_COEFFICIENTS = 0x30, /* Read-only block 5 bytes */
+ PMBUS_POUT_MAX = 0x31, /* R/W word */
+ PMBUS_MAX_DUTY = 0x32, /* R/W word */
+ PMBUS_FREQUENCY_SWITCH = 0x33, /* R/W word */
+ PMBUS_VIN_ON = 0x35, /* R/W word */
+ PMBUS_VIN_OFF = 0x36, /* R/W word */
+ PMBUS_INTERLEAVE = 0x37, /* R/W word */
+ PMBUS_IOUT_CAL_GAIN = 0x38, /* R/W word */
+ PMBUS_IOUT_CAL_OFFSET = 0x39, /* R/W word */
+ PMBUS_FAN_CONFIG_1_2 = 0x3A, /* R/W byte */
+ PMBUS_FAN_COMMAND_1 = 0x3B, /* R/W word */
+ PMBUS_FAN_COMMAND_2 = 0x3C, /* R/W word */
+ PMBUS_FAN_CONFIG_3_4 = 0x3D, /* R/W byte */
+ PMBUS_FAN_COMMAND_3 = 0x3E, /* R/W word */
+ PMBUS_FAN_COMMAND_4 = 0x3F, /* R/W word */
+ PMBUS_VOUT_OV_FAULT_LIMIT = 0x40, /* R/W word */
+ PMBUS_VOUT_OV_FAULT_RESPONSE = 0x41, /* R/W byte */
+ PMBUS_VOUT_OV_WARN_LIMIT = 0x42, /* R/W word */
+ PMBUS_VOUT_UV_WARN_LIMIT = 0x43, /* R/W word */
+ PMBUS_VOUT_UV_FAULT_LIMIT = 0x44, /* R/W word */
+ PMBUS_VOUT_UV_FAULT_RESPONSE = 0x45, /* R/W byte */
+ PMBUS_IOUT_OC_FAULT_LIMIT = 0x46, /* R/W word */
+ PMBUS_IOUT_OC_FAULT_RESPONSE = 0x47, /* R/W byte */
+ PMBUS_IOUT_OC_LV_FAULT_LIMIT = 0x48, /* R/W word */
+ PMBUS_IOUT_OC_LV_FAULT_RESPONSE = 0x49, /* R/W byte */
+ PMBUS_IOUT_OC_WARN_LIMIT = 0x4A, /* R/W word */
+ PMBUS_IOUT_UC_FAULT_LIMIT = 0x4B, /* R/W word */
+ PMBUS_IOUT_UC_FAULT_RESPONSE = 0x4C, /* R/W byte */
+ PMBUS_OT_FAULT_LIMIT = 0x4F, /* R/W word */
+ PMBUS_OT_FAULT_RESPONSE = 0x50, /* R/W byte */
+ PMBUS_OT_WARN_LIMIT = 0x51, /* R/W word */
+ PMBUS_UT_WARN_LIMIT = 0x52, /* R/W word */
+ PMBUS_UT_FAULT_LIMIT = 0x53, /* R/W word */
+ PMBUS_UT_FAULT_RESPONSE = 0x54, /* R/W byte */
+ PMBUS_VIN_OV_FAULT_LIMIT = 0x55, /* R/W word */
+ PMBUS_VIN_OV_FAULT_RESPONSE = 0x56, /* R/W byte */
+ PMBUS_VIN_OV_WARN_LIMIT = 0x57, /* R/W word */
+ PMBUS_VIN_UV_WARN_LIMIT = 0x58, /* R/W word */
+ PMBUS_VIN_UV_FAULT_LIMIT = 0x59, /* R/W word */
+ PMBUS_VIN_UV_FAULT_RESPONSE = 0x5A, /* R/W byte */
+ PMBUS_IIN_OC_FAULT_LIMIT = 0x5B, /* R/W word */
+ PMBUS_IIN_OC_FAULT_RESPONSE = 0x5C, /* R/W byte */
+ PMBUS_IIN_OC_WARN_LIMIT = 0x5D, /* R/W word */
+ PMBUS_POWER_GOOD_ON = 0x5E, /* R/W word */
+ PMBUS_POWER_GOOD_OFF = 0x5F, /* R/W word */
+ PMBUS_TON_DELAY = 0x60, /* R/W word */
+ PMBUS_TON_RISE = 0x61, /* R/W word */
+ PMBUS_TON_MAX_FAULT_LIMIT = 0x62, /* R/W word */
+ PMBUS_TON_MAX_FAULT_RESPONSE = 0x63, /* R/W byte */
+ PMBUS_TOFF_DELAY = 0x64, /* R/W word */
+ PMBUS_TOFF_FALL = 0x65, /* R/W word */
+ PMBUS_TOFF_MAX_WARN_LIMIT = 0x66, /* R/W word */
+ PMBUS_POUT_OP_FAULT_LIMIT = 0x68, /* R/W word */
+ PMBUS_POUT_OP_FAULT_RESPONSE = 0x69, /* R/W byte */
+ PMBUS_POUT_OP_WARN_LIMIT = 0x6A, /* R/W word */
+ PMBUS_PIN_OP_WARN_LIMIT = 0x6B, /* R/W word */
+ PMBUS_STATUS_BYTE = 0x78, /* R/W byte */
+ PMBUS_STATUS_WORD = 0x79, /* R/W word */
+ PMBUS_STATUS_VOUT = 0x7A, /* R/W byte */
+ PMBUS_STATUS_IOUT = 0x7B, /* R/W byte */
+ PMBUS_STATUS_INPUT = 0x7C, /* R/W byte */
+ PMBUS_STATUS_TEMPERATURE = 0x7D, /* R/W byte */
+ PMBUS_STATUS_CML = 0x7E, /* R/W byte */
+ PMBUS_STATUS_OTHER = 0x7F, /* R/W byte */
+ PMBUS_STATUS_MFR_SPECIFIC = 0x80, /* R/W byte */
+ PMBUS_STATUS_FANS_1_2 = 0x81, /* R/W byte */
+ PMBUS_STATUS_FANS_3_4 = 0x82, /* R/W byte */
+ PMBUS_READ_EIN = 0x86, /* Read-Only block 5 bytes */
+ PMBUS_READ_EOUT = 0x87, /* Read-Only block 5 bytes */
+ PMBUS_READ_VIN = 0x88, /* Read-Only word */
+ PMBUS_READ_IIN = 0x89, /* Read-Only word */
+ PMBUS_READ_VCAP = 0x8A, /* Read-Only word */
+ PMBUS_READ_VOUT = 0x8B, /* Read-Only word */
+ PMBUS_READ_IOUT = 0x8C, /* Read-Only word */
+ PMBUS_READ_TEMPERATURE_1 = 0x8D, /* Read-Only word */
+ PMBUS_READ_TEMPERATURE_2 = 0x8E, /* Read-Only word */
+ PMBUS_READ_TEMPERATURE_3 = 0x8F, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_1 = 0x90, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_2 = 0x91, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_3 = 0x92, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_4 = 0x93, /* Read-Only word */
+ PMBUS_READ_DUTY_CYCLE = 0x94, /* Read-Only word */
+ PMBUS_READ_FREQUENCY = 0x95, /* Read-Only word */
+ PMBUS_READ_POUT = 0x96, /* Read-Only word */
+ PMBUS_READ_PIN = 0x97, /* Read-Only word */
+ PMBUS_REVISION = 0x98, /* Read-Only byte */
+ PMBUS_MFR_ID = 0x99, /* R/W block */
+ PMBUS_MFR_MODEL = 0x9A, /* R/W block */
+ PMBUS_MFR_REVISION = 0x9B, /* R/W block */
+ PMBUS_MFR_LOCATION = 0x9C, /* R/W block */
+ PMBUS_MFR_DATE = 0x9D, /* R/W block */
+ PMBUS_MFR_SERIAL = 0x9E, /* R/W block */
+ PMBUS_APP_PROFILE_SUPPORT = 0x9F, /* Read-Only block-read */
+ PMBUS_MFR_VIN_MIN = 0xA0, /* Read-Only word */
+ PMBUS_MFR_VIN_MAX = 0xA1, /* Read-Only word */
+ PMBUS_MFR_IIN_MAX = 0xA2, /* Read-Only word */
+ PMBUS_MFR_PIN_MAX = 0xA3, /* Read-Only word */
+ PMBUS_MFR_VOUT_MIN = 0xA4, /* Read-Only word */
+ PMBUS_MFR_VOUT_MAX = 0xA5, /* Read-Only word */
+ PMBUS_MFR_IOUT_MAX = 0xA6, /* Read-Only word */
+ PMBUS_MFR_POUT_MAX = 0xA7, /* Read-Only word */
+ PMBUS_MFR_TAMBIENT_MAX = 0xA8, /* Read-Only word */
+ PMBUS_MFR_TAMBIENT_MIN = 0xA9, /* Read-Only word */
+ PMBUS_MFR_EFFICIENCY_LL = 0xAA, /* Read-Only block 14 bytes */
+ PMBUS_MFR_EFFICIENCY_HL = 0xAB, /* Read-Only block 14 bytes */
+ PMBUS_MFR_PIN_ACCURACY = 0xAC, /* Read-Only byte */
+ PMBUS_IC_DEVICE_ID = 0xAD, /* Read-Only block-read */
+ PMBUS_IC_DEVICE_REV = 0xAE, /* Read-Only block-read */
+ PMBUS_MFR_MAX_TEMP_1 = 0xC0, /* R/W word */
+ PMBUS_MFR_MAX_TEMP_2 = 0xC1, /* R/W word */
+ PMBUS_MFR_MAX_TEMP_3 = 0xC2, /* R/W word */
+ PMBUS_IDLE_STATE = 0xFF,
+};
+
+/* STATUS_WORD */
+#define PB_STATUS_VOUT BIT(15)
+#define PB_STATUS_IOUT_POUT BIT(14)
+#define PB_STATUS_INPUT BIT(13)
+#define PB_STATUS_WORD_MFR BIT(12)
+#define PB_STATUS_POWER_GOOD_N BIT(11)
+#define PB_STATUS_FAN BIT(10)
+#define PB_STATUS_OTHER BIT(9)
+#define PB_STATUS_UNKNOWN BIT(8)
+/* STATUS_BYTE */
+#define PB_STATUS_BUSY BIT(7)
+#define PB_STATUS_OFF BIT(6)
+#define PB_STATUS_VOUT_OV BIT(5)
+#define PB_STATUS_IOUT_OC BIT(4)
+#define PB_STATUS_VIN_UV BIT(3)
+#define PB_STATUS_TEMPERATURE BIT(2)
+#define PB_STATUS_CML BIT(1)
+#define PB_STATUS_NONE_ABOVE BIT(0)
+
+/* STATUS_VOUT */
+#define PB_STATUS_VOUT_OV_FAULT BIT(7) /* Output Overvoltage Fault */
+#define PB_STATUS_VOUT_OV_WARN BIT(6) /* Output Overvoltage Warning */
+#define PB_STATUS_VOUT_UV_WARN BIT(5) /* Output Undervoltage Warning */
+#define PB_STATUS_VOUT_UV_FAULT BIT(4) /* Output Undervoltage Fault */
+#define PB_STATUS_VOUT_MAX BIT(3)
+#define PB_STATUS_VOUT_TON_MAX_FAULT BIT(2)
+#define PB_STATUS_VOUT_TOFF_MAX_WARN BIT(1)
+
+/* STATUS_IOUT */
+#define PB_STATUS_IOUT_OC_FAULT BIT(7) /* Output Overcurrent Fault */
+#define PB_STATUS_IOUT_OC_LV_FAULT BIT(6) /* Output OC And Low Voltage Fault */
+#define PB_STATUS_IOUT_OC_WARN BIT(5) /* Output Overcurrent Warning */
+#define PB_STATUS_IOUT_UC_FAULT BIT(4) /* Output Undercurrent Fault */
+#define PB_STATUS_CURR_SHARE BIT(3) /* Current Share Fault */
+#define PB_STATUS_PWR_LIM_MODE BIT(2) /* In Power Limiting Mode */
+#define PB_STATUS_POUT_OP_FAULT BIT(1) /* Output Overpower Fault */
+#define PB_STATUS_POUT_OP_WARN BIT(0) /* Output Overpower Warning */
+
+/* STATUS_INPUT */
+#define PB_STATUS_INPUT_VIN_OV_FAULT BIT(7) /* Input Overvoltage Fault */
+#define PB_STATUS_INPUT_VIN_OV_WARN BIT(6) /* Input Overvoltage Warning */
+#define PB_STATUS_INPUT_VIN_UV_WARN BIT(5) /* Input Undervoltage Warning */
+#define PB_STATUS_INPUT_VIN_UV_FAULT BIT(4) /* Input Undervoltage Fault */
+#define PB_STATUS_INPUT_IIN_OC_FAULT BIT(2) /* Input Overcurrent Fault */
+#define PB_STATUS_INPUT_IIN_OC_WARN BIT(1) /* Input Overcurrent Warning */
+#define PB_STATUS_INPUT_PIN_OP_WARN BIT(0) /* Input Overpower Warning */
+
+/* STATUS_TEMPERATURE */
+#define PB_STATUS_OT_FAULT BIT(7) /* Overtemperature Fault */
+#define PB_STATUS_OT_WARN BIT(6) /* Overtemperature Warning */
+#define PB_STATUS_UT_WARN BIT(5) /* Undertemperature Warning */
+#define PB_STATUS_UT_FAULT BIT(4) /* Undertemperature Fault */
+
+/* STATUS_CML */
+#define PB_CML_FAULT_INVALID_CMD BIT(7) /* Invalid/Unsupported Command */
+#define PB_CML_FAULT_INVALID_DATA BIT(6) /* Invalid/Unsupported Data */
+#define PB_CML_FAULT_PEC BIT(5) /* Packet Error Check Failed */
+#define PB_CML_FAULT_MEMORY BIT(4) /* Memory Fault Detected */
+#define PB_CML_FAULT_PROCESSOR BIT(3) /* Processor Fault Detected */
+#define PB_CML_FAULT_OTHER_COMM BIT(1) /* Other communication fault */
+#define PB_CML_FAULT_OTHER_MEM_LOGIC BIT(0) /* Other Memory Or Logic Fault */
+
+/* OPERATION */
+#define PB_OP_ON BIT(7) /* PSU is switched on */
+#define PB_OP_MARGIN_HIGH BIT(5) /* PSU vout is set to margin high */
+#define PB_OP_MARGIN_LOW BIT(4) /* PSU vout is set to margin low */
+
+/* PAGES */
+#define PB_MAX_PAGES 0x1F
+#define PB_ALL_PAGES 0xFF
+
+#define PMBUS_ERR_BYTE 0xFF
+
+#define TYPE_PMBUS_DEVICE "pmbus-device"
+OBJECT_DECLARE_TYPE(PMBusDevice, PMBusDeviceClass,
+ PMBUS_DEVICE)
+
+/* flags */
+#define PB_HAS_COEFFICIENTS BIT_ULL(9)
+#define PB_HAS_VIN BIT_ULL(10)
+#define PB_HAS_VOUT BIT_ULL(11)
+#define PB_HAS_VOUT_MARGIN BIT_ULL(12)
+#define PB_HAS_VIN_RATING BIT_ULL(13)
+#define PB_HAS_VOUT_RATING BIT_ULL(14)
+#define PB_HAS_VOUT_MODE BIT_ULL(15)
+#define PB_HAS_VCAP BIT_ULL(16)
+#define PB_HAS_IOUT BIT_ULL(21)
+#define PB_HAS_IIN BIT_ULL(22)
+#define PB_HAS_IOUT_RATING BIT_ULL(23)
+#define PB_HAS_IIN_RATING BIT_ULL(24)
+#define PB_HAS_IOUT_GAIN BIT_ULL(25)
+#define PB_HAS_POUT BIT_ULL(30)
+#define PB_HAS_PIN BIT_ULL(31)
+#define PB_HAS_EIN BIT_ULL(32)
+#define PB_HAS_EOUT BIT_ULL(33)
+#define PB_HAS_POUT_RATING BIT_ULL(34)
+#define PB_HAS_PIN_RATING BIT_ULL(35)
+#define PB_HAS_TEMPERATURE BIT_ULL(40)
+#define PB_HAS_TEMP2 BIT_ULL(41)
+#define PB_HAS_TEMP3 BIT_ULL(42)
+#define PB_HAS_TEMP_RATING BIT_ULL(43)
+#define PB_HAS_FAN BIT_ULL(44)
+#define PB_HAS_MFR_INFO BIT_ULL(50)
+#define PB_HAS_STATUS_MFR_SPECIFIC BIT_ULL(51)
+
+struct PMBusDeviceClass {
+ SMBusDeviceClass parent_class;
+ uint8_t device_num_pages;
+
+ /**
+ * Implement quick_cmd, receive_byte, and write_data to support non-standard
+ * PMBus functionality.
+ */
+ void (*quick_cmd)(PMBusDevice *dev, uint8_t read);
+ int (*write_data)(PMBusDevice *dev, const uint8_t *buf, uint8_t len);
+ uint8_t (*receive_byte)(PMBusDevice *dev);
+};
+
+/*
+ * According to the spec, each page may offer the full range of PMBus commands
+ * available for each output or non-PMBus device.
+ * Therefore, we can't assume that any registers will always be the same across
+ * all pages.
+ * The page 0xFF is intended for writes to all pages.
+ */
+typedef struct PMBusPage {
+ uint64_t page_flags;
+
+ uint8_t page; /* R/W byte */
+ uint8_t operation; /* R/W byte */
+ uint8_t on_off_config; /* R/W byte */
+ uint8_t write_protect; /* R/W byte */
+ uint8_t phase; /* R/W byte */
+ uint8_t vout_mode; /* R/W byte */
+ uint16_t vout_command; /* R/W word */
+ uint16_t vout_trim; /* R/W word */
+ uint16_t vout_cal_offset; /* R/W word */
+ uint16_t vout_max; /* R/W word */
+ uint16_t vout_margin_high; /* R/W word */
+ uint16_t vout_margin_low; /* R/W word */
+ uint16_t vout_transition_rate; /* R/W word */
+ uint16_t vout_droop; /* R/W word */
+ uint16_t vout_scale_loop; /* R/W word */
+ uint16_t vout_scale_monitor; /* R/W word */
+ uint16_t vout_min; /* R/W word */
+ uint8_t coefficients[5]; /* Read-only block 5 bytes */
+ uint16_t pout_max; /* R/W word */
+ uint16_t max_duty; /* R/W word */
+ uint16_t frequency_switch; /* R/W word */
+ uint16_t vin_on; /* R/W word */
+ uint16_t vin_off; /* R/W word */
+ uint16_t iout_cal_gain; /* R/W word */
+ uint16_t iout_cal_offset; /* R/W word */
+ uint8_t fan_config_1_2; /* R/W byte */
+ uint16_t fan_command_1; /* R/W word */
+ uint16_t fan_command_2; /* R/W word */
+ uint8_t fan_config_3_4; /* R/W byte */
+ uint16_t fan_command_3; /* R/W word */
+ uint16_t fan_command_4; /* R/W word */
+ uint16_t vout_ov_fault_limit; /* R/W word */
+ uint8_t vout_ov_fault_response; /* R/W byte */
+ uint16_t vout_ov_warn_limit; /* R/W word */
+ uint16_t vout_uv_warn_limit; /* R/W word */
+ uint16_t vout_uv_fault_limit; /* R/W word */
+ uint8_t vout_uv_fault_response; /* R/W byte */
+ uint16_t iout_oc_fault_limit; /* R/W word */
+ uint8_t iout_oc_fault_response; /* R/W byte */
+ uint16_t iout_oc_lv_fault_limit; /* R/W word */
+ uint8_t iout_oc_lv_fault_response; /* R/W byte */
+ uint16_t iout_oc_warn_limit; /* R/W word */
+ uint16_t iout_uc_fault_limit; /* R/W word */
+ uint8_t iout_uc_fault_response; /* R/W byte */
+ uint16_t ot_fault_limit; /* R/W word */
+ uint8_t ot_fault_response; /* R/W byte */
+ uint16_t ot_warn_limit; /* R/W word */
+ uint16_t ut_warn_limit; /* R/W word */
+ uint16_t ut_fault_limit; /* R/W word */
+ uint8_t ut_fault_response; /* R/W byte */
+ uint16_t vin_ov_fault_limit; /* R/W word */
+ uint8_t vin_ov_fault_response; /* R/W byte */
+ uint16_t vin_ov_warn_limit; /* R/W word */
+ uint16_t vin_uv_warn_limit; /* R/W word */
+ uint16_t vin_uv_fault_limit; /* R/W word */
+ uint8_t vin_uv_fault_response; /* R/W byte */
+ uint16_t iin_oc_fault_limit; /* R/W word */
+ uint8_t iin_oc_fault_response; /* R/W byte */
+ uint16_t iin_oc_warn_limit; /* R/W word */
+ uint16_t power_good_on; /* R/W word */
+ uint16_t power_good_off; /* R/W word */
+ uint16_t ton_delay; /* R/W word */
+ uint16_t ton_rise; /* R/W word */
+ uint16_t ton_max_fault_limit; /* R/W word */
+ uint8_t ton_max_fault_response; /* R/W byte */
+ uint16_t toff_delay; /* R/W word */
+ uint16_t toff_fall; /* R/W word */
+ uint16_t toff_max_warn_limit; /* R/W word */
+ uint16_t pout_op_fault_limit; /* R/W word */
+ uint8_t pout_op_fault_response; /* R/W byte */
+ uint16_t pout_op_warn_limit; /* R/W word */
+ uint16_t pin_op_warn_limit; /* R/W word */
+ uint16_t status_word; /* R/W word */
+ uint8_t status_vout; /* R/W byte */
+ uint8_t status_iout; /* R/W byte */
+ uint8_t status_input; /* R/W byte */
+ uint8_t status_temperature; /* R/W byte */
+ uint8_t status_cml; /* R/W byte */
+ uint8_t status_other; /* R/W byte */
+ uint8_t status_mfr_specific; /* R/W byte */
+ uint8_t status_fans_1_2; /* R/W byte */
+ uint8_t status_fans_3_4; /* R/W byte */
+ uint8_t read_ein[5]; /* Read-Only block 5 bytes */
+ uint8_t read_eout[5]; /* Read-Only block 5 bytes */
+ uint16_t read_vin; /* Read-Only word */
+ uint16_t read_iin; /* Read-Only word */
+ uint16_t read_vcap; /* Read-Only word */
+ uint16_t read_vout; /* Read-Only word */
+ uint16_t read_iout; /* Read-Only word */
+ uint16_t read_temperature_1; /* Read-Only word */
+ uint16_t read_temperature_2; /* Read-Only word */
+ uint16_t read_temperature_3; /* Read-Only word */
+ uint16_t read_fan_speed_1; /* Read-Only word */
+ uint16_t read_fan_speed_2; /* Read-Only word */
+ uint16_t read_fan_speed_3; /* Read-Only word */
+ uint16_t read_fan_speed_4; /* Read-Only word */
+ uint16_t read_duty_cycle; /* Read-Only word */
+ uint16_t read_frequency; /* Read-Only word */
+ uint16_t read_pout; /* Read-Only word */
+ uint16_t read_pin; /* Read-Only word */
+ uint8_t revision; /* Read-Only byte */
+ const char *mfr_id; /* R/W block */
+ const char *mfr_model; /* R/W block */
+ const char *mfr_revision; /* R/W block */
+ const char *mfr_location; /* R/W block */
+ const char *mfr_date; /* R/W block */
+ const char *mfr_serial; /* R/W block */
+ const char *app_profile_support; /* Read-Only block-read */
+ uint16_t mfr_vin_min; /* Read-Only word */
+ uint16_t mfr_vin_max; /* Read-Only word */
+ uint16_t mfr_iin_max; /* Read-Only word */
+ uint16_t mfr_pin_max; /* Read-Only word */
+ uint16_t mfr_vout_min; /* Read-Only word */
+ uint16_t mfr_vout_max; /* Read-Only word */
+ uint16_t mfr_iout_max; /* Read-Only word */
+ uint16_t mfr_pout_max; /* Read-Only word */
+ uint16_t mfr_tambient_max; /* Read-Only word */
+ uint16_t mfr_tambient_min; /* Read-Only word */
+ uint8_t mfr_efficiency_ll[14]; /* Read-Only block 14 bytes */
+ uint8_t mfr_efficiency_hl[14]; /* Read-Only block 14 bytes */
+ uint8_t mfr_pin_accuracy; /* Read-Only byte */
+ uint16_t mfr_max_temp_1; /* R/W word */
+ uint16_t mfr_max_temp_2; /* R/W word */
+ uint16_t mfr_max_temp_3; /* R/W word */
+} PMBusPage;
+
+/* State */
+struct PMBusDevice {
+ SMBusDevice smb;
+
+ uint8_t num_pages;
+ uint8_t code;
+ uint8_t page;
+
+ /*
+ * PMBus registers are stored in a PMBusPage structure allocated by
+ * calling pmbus_pages_alloc()
+ */
+ PMBusPage *pages;
+ uint8_t capability;
+
+
+ int32_t in_buf_len;
+ uint8_t *in_buf;
+ int32_t out_buf_len;
+ uint8_t out_buf[SMBUS_DATA_MAX_LEN];
+};
+
+/**
+ * Direct mode coefficients
+ * @var m - mantissa
+ * @var b - offset
+ * @var R - exponent
+ */
+typedef struct PMBusCoefficients {
+ int32_t m; /* mantissa */
+ int64_t b; /* offset */
+ int32_t R; /* exponent */
+} PMBusCoefficients;
+
+/**
+ * VOUT_Mode bit fields
+ */
+typedef struct PMBusVoutMode {
+ uint8_t mode:3;
+ int8_t exp:5;
+} PMBusVoutMode;
+
+/**
+ * Convert sensor values to direct mode format
+ *
+ * Y = (m * X + b) * 10^R
+ *
+ * @return uint16_t
+ */
+uint16_t pmbus_data2direct_mode(PMBusCoefficients c, uint32_t value);
+
+/**
+ * Convert direct mode formatted data into sensor reading
+ *
+ * X = (Y * 10^-R - b) / m
+ *
+ * @return uint32_t
+ */
+uint32_t pmbus_direct_mode2data(PMBusCoefficients c, uint16_t value);
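
For illustration, here is a standalone sketch of the DIRECT-format round trip these two declarations describe; it uses made-up coefficients (m = 1, b = 0, R = -1) and a local reimplementation of the documented formulas rather than QEMU's own code:

    /* Standalone illustration; build with: cc direct_demo.c -lm */
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int32_t m;   /* coefficient */
        int64_t b;   /* offset */
        int32_t R;   /* exponent */
    } Coefficients;

    /* Local sketch of the documented math, not QEMU's implementation. */
    static uint16_t data2direct(Coefficients c, uint32_t x)
    {
        return (uint16_t)((c.m * x + c.b) * pow(10, c.R));  /* Y = (m * X + b) * 10^R */
    }

    static uint32_t direct2data(Coefficients c, uint16_t y)
    {
        return (uint32_t)((y * pow(10, -c.R) - c.b) / c.m); /* X = (Y * 10^-R - b) / m */
    }

    int main(void)
    {
        Coefficients c = { .m = 1, .b = 0, .R = -1 };  /* made-up coefficients */
        uint16_t wire = data2direct(c, 12000);         /* 12000 mV -> 1200 on the wire */
        printf("0x%04x -> %u mV\n", (unsigned)wire, (unsigned)direct2data(c, wire));
        return 0;
    }
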
+
+/**
+ * Convert sensor values to linear mode format
+ *
+ * L = D * 2^(-e)
+ *
+ * @return uint16_t
+ */
+uint16_t pmbus_data2linear_mode(uint16_t value, int exp);
+
+/**
+ * Convert linear mode formatted data into sensor reading
+ *
+ * D = L * 2^e
+ *
+ * @return uint16_t
+ */
+uint16_t pmbus_linear_mode2data(uint16_t value, int exp);
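
Linear mode keeps the exponent outside the data word (for VOUT it comes from the 5-bit two's complement exp field of VOUT_MODE shown above). A standalone sketch of the documented math, with an assumed exponent of -1:

    #include <stdint.h>
    #include <stdio.h>

    /* Local sketch of L = D * 2^(-e) and D = L * 2^e; not QEMU's code. */
    static uint16_t data2linear(uint16_t d, int exp)
    {
        return exp < 0 ? d << -exp : d >> exp;
    }

    static uint16_t linear2data(uint16_t l, int exp)
    {
        return exp < 0 ? l >> -exp : l << exp;
    }

    int main(void)
    {
        int exp = -1;                             /* would come from VOUT_MODE */
        uint16_t wire = data2linear(12000, exp);  /* 12000 mV -> 24000 on the wire */
        printf("%u -> %u mV\n", (unsigned)wire, (unsigned)linear2data(wire, exp));
        return 0;
    }
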
+
+/**
+ * @brief Send a block of data over PMBus.
+ * Assumes that the bytes in the block are already ordered correctly and that
+ * the length has been prepended to the block if necessary.
+ * | low_byte | ... | high_byte |
+ * @param state - maintains state of the PMBus device
+ * @param data - byte array to be sent by the device
+ * @param len - number of bytes in data
+ */
+void pmbus_send(PMBusDevice *state, const uint8_t *data, uint16_t len);
+void pmbus_send8(PMBusDevice *state, uint8_t data);
+void pmbus_send16(PMBusDevice *state, uint16_t data);
+void pmbus_send32(PMBusDevice *state, uint32_t data);
+void pmbus_send64(PMBusDevice *state, uint64_t data);
+
+/**
+ * @brief Send a string over PMBus with length prepended.
+ * Length is calculated using strlen().
+ */
+void pmbus_send_string(PMBusDevice *state, const char *data);
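
As a usage sketch, a device model's receive_byte hook would typically queue its response with these helpers; the command code 0xD0 and the word payload below are hypothetical, and only the pmbus_send*/PMBUS_ERR_BYTE names come from this header:

    #include "qemu/osdep.h"
    #include "qemu/log.h"
    #include "hw/i2c/pmbus_device.h"

    #define TESTDEV_MFR_FAULT_COUNT 0xD0   /* hypothetical MFR-specific command */

    static uint8_t testdev_receive_byte(PMBusDevice *pmdev)
    {
        switch (pmdev->code) {             /* command code latched by the core */
        case TESTDEV_MFR_FAULT_COUNT:
            pmbus_send16(pmdev, 0x1234);   /* queue a word for the next read */
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "%s: unsupported command 0x%02x\n",
                          __func__, pmdev->code);
            pmbus_send8(pmdev, PMBUS_ERR_BYTE);
            break;
        }
        return 0;
    }
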
+
+/**
+ * @brief Receive data sent with Block Write.
+ * @param dest - memory with enough capacity to receive the write
+ * @param len - the capacity of dest
+ */
+uint8_t pmbus_receive_block(PMBusDevice *pmdev, uint8_t *dest, size_t len);
+
+/**
+ * @brief Receive data over PMBus.
+ * These functions track how much data has been received over PMBus and
+ * log a GUEST_ERROR if too much or too little is sent.
+ */
+uint8_t pmbus_receive8(PMBusDevice *pmdev);
+uint16_t pmbus_receive16(PMBusDevice *pmdev);
+uint32_t pmbus_receive32(PMBusDevice *pmdev);
+uint64_t pmbus_receive64(PMBusDevice *pmdev);
+
+/**
+ * PMBus page config must be called before any page is first used.
+ * It will allocate memory for all the pages if needed.
+ * Flags passed in overwrite any existing flags.
+ * @param page_index the page to which the flags are applied; setting page_index
+ * to 0xFF applies the passed-in flags to all pages.
+ * @param flags
+ */
+int pmbus_page_config(PMBusDevice *pmdev, uint8_t page_index, uint64_t flags);
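
A hedged sketch of a device's instance_init declaring its feature flags for page 0; the flag combination here is illustrative rather than modelled on a real part:

    #include "qemu/osdep.h"
    #include "hw/i2c/pmbus_device.h"

    static void testdev_instance_init(Object *obj)
    {
        PMBusDevice *pmdev = PMBUS_DEVICE(obj);
        uint64_t flags = PB_HAS_VOUT | PB_HAS_VOUT_MODE | PB_HAS_IOUT |
                         PB_HAS_TEMPERATURE | PB_HAS_MFR_INFO;

        /*
         * Declare the sensor groups page 0 implements; passing PB_ALL_PAGES
         * (0xFF) as the page index would apply the flags to every page.
         */
        pmbus_page_config(pmdev, 0, flags);
    }
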
+
+/**
+ * Update the status registers when sensor values change.
+ * Useful when modifying sensors through QMP, so that the status registers
+ * get updated accordingly.
+ */
+void pmbus_check_limits(PMBusDevice *pmdev);
+
+/**
+ * Enter an idle state where only the PMBUS_ERR_BYTE will be returned
+ * indefinitely until a new command is issued.
+ */
+void pmbus_idle(PMBusDevice *pmdev);
+
+extern const VMStateDescription vmstate_pmbus_device;
+
+#define VMSTATE_PMBUS_DEVICE(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(PMBusDevice), \
+ .vmsd = &vmstate_pmbus_device, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, PMBusDevice), \
+}
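
A hedged sketch of embedding this macro in a parent device's migration description; TestDevState and its custom_reg field are invented for illustration:

    #include "qemu/osdep.h"
    #include "migration/vmstate.h"
    #include "hw/i2c/pmbus_device.h"

    /* Hypothetical device state wrapping the common PMBus state. */
    typedef struct TestDevState {
        PMBusDevice parent;
        uint16_t custom_reg;
    } TestDevState;

    static const VMStateDescription vmstate_testdev = {
        .name = "testdev",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (const VMStateField[]) {
            VMSTATE_PMBUS_DEVICE(parent, TestDevState),
            VMSTATE_UINT16(custom_reg, TestDevState),
            VMSTATE_END_OF_LIST()
        }
    };
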
+
+#endif
diff --git a/include/hw/i2c/pnv_i2c_regs.h b/include/hw/i2c/pnv_i2c_regs.h
new file mode 100644
index 0000000000..85e96ff480
--- /dev/null
+++ b/include/hw/i2c/pnv_i2c_regs.h
@@ -0,0 +1,143 @@
+/*
+ * PowerNV I2C Controller Register Definitions
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PNV_I2C_REGS_H
+#define PNV_I2C_REGS_H
+
+/* I2C FIFO register */
+#define I2C_FIFO_REG 0x4
+#define I2C_FIFO PPC_BITMASK(0, 7)
+
+/* I2C command register */
+#define I2C_CMD_REG 0x5
+#define I2C_CMD_WITH_START PPC_BIT(0)
+#define I2C_CMD_WITH_ADDR PPC_BIT(1)
+#define I2C_CMD_READ_CONT PPC_BIT(2)
+#define I2C_CMD_WITH_STOP PPC_BIT(3)
+#define I2C_CMD_INTR_STEERING PPC_BITMASK(6, 7) /* P9 */
+#define I2C_CMD_INTR_STEER_HOST 1
+#define I2C_CMD_INTR_STEER_OCC 2
+#define I2C_CMD_DEV_ADDR PPC_BITMASK(8, 14)
+#define I2C_CMD_READ_NOT_WRITE PPC_BIT(15)
+#define I2C_CMD_LEN_BYTES PPC_BITMASK(16, 31)
+#define I2C_MAX_TFR_LEN 0xfff0ull
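
These command-register fields use QEMU's MSB0 bit numbering, where bit 0 is the most significant bit of the 64-bit register. Below is a standalone sketch of packing and extracting two of the fields; PPC_BIT/PPC_BITMASK are re-derived locally so the snippet compiles on its own (in QEMU they come from the PPC headers):

    #include <stdint.h>
    #include <stdio.h>

    /* MSB0 numbering: bit 0 is the most significant bit of a 64-bit word. */
    #define PPC_BIT(bit)        (0x8000000000000000ULL >> (bit))
    #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

    #define I2C_CMD_DEV_ADDR    PPC_BITMASK(8, 14)
    #define I2C_CMD_LEN_BYTES   PPC_BITMASK(16, 31)

    /* Extract an MSB0 field: mask, then shift right by (63 - last_bit). */
    static uint64_t get_field(uint64_t reg, uint64_t mask, int last_bit)
    {
        return (reg & mask) >> (63 - last_bit);
    }

    int main(void)
    {
        uint64_t cmd = 0;
        cmd |= (uint64_t)0x50 << (63 - 14);   /* 7-bit device address 0x50 */
        cmd |= (uint64_t)16 << (63 - 31);     /* transfer length: 16 bytes */

        printf("addr=0x%02x len=%u\n",
               (unsigned)get_field(cmd, I2C_CMD_DEV_ADDR, 14),
               (unsigned)get_field(cmd, I2C_CMD_LEN_BYTES, 31));
        return 0;
    }
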
+
+/* I2C mode register */
+#define I2C_MODE_REG 0x6
+#define I2C_MODE_BIT_RATE_DIV PPC_BITMASK(0, 15)
+#define I2C_MODE_PORT_NUM PPC_BITMASK(16, 21)
+#define I2C_MODE_ENHANCED PPC_BIT(28)
+#define I2C_MODE_DIAGNOSTIC PPC_BIT(29)
+#define I2C_MODE_PACING_ALLOW PPC_BIT(30)
+#define I2C_MODE_WRAP PPC_BIT(31)
+
+/* I2C watermark register */
+#define I2C_WATERMARK_REG 0x7
+#define I2C_WATERMARK_HIGH PPC_BITMASK(16, 19)
+#define I2C_WATERMARK_LOW PPC_BITMASK(24, 27)
+
+/*
+ * I2C interrupt mask and condition registers
+ *
+ * NB: The function of 0x9 and 0xa changes depending on whether you're reading
+ * or writing to them. When read they return the interrupt condition bits
+ * and on writes they update the interrupt mask register.
+ *
+ * The bit definitions are the same for all the interrupt registers.
+ */
+#define I2C_INTR_MASK_REG 0x8
+
+#define I2C_INTR_RAW_COND_REG 0x9 /* read */
+#define I2C_INTR_MASK_OR_REG 0x9 /* write */
+
+#define I2C_INTR_COND_REG 0xa /* read */
+#define I2C_INTR_MASK_AND_REG 0xa /* write */
+
+#define I2C_INTR_ALL PPC_BITMASK(16, 31)
+#define I2C_INTR_INVALID_CMD PPC_BIT(16)
+#define I2C_INTR_LBUS_PARITY_ERR PPC_BIT(17)
+#define I2C_INTR_BKEND_OVERRUN_ERR PPC_BIT(18)
+#define I2C_INTR_BKEND_ACCESS_ERR PPC_BIT(19)
+#define I2C_INTR_ARBT_LOST_ERR PPC_BIT(20)
+#define I2C_INTR_NACK_RCVD_ERR PPC_BIT(21)
+#define I2C_INTR_DATA_REQ PPC_BIT(22)
+#define I2C_INTR_CMD_COMP PPC_BIT(23)
+#define I2C_INTR_STOP_ERR PPC_BIT(24)
+#define I2C_INTR_I2C_BUSY PPC_BIT(25)
+#define I2C_INTR_NOT_I2C_BUSY PPC_BIT(26)
+#define I2C_INTR_SCL_EQ_1 PPC_BIT(28)
+#define I2C_INTR_SCL_EQ_0 PPC_BIT(29)
+#define I2C_INTR_SDA_EQ_1 PPC_BIT(30)
+#define I2C_INTR_SDA_EQ_0 PPC_BIT(31)
+
+/* I2C status register */
+#define I2C_RESET_I2C_REG 0xb /* write */
+#define I2C_RESET_ERRORS 0xc
+#define I2C_STAT_REG 0xb /* read */
+#define I2C_STAT_INVALID_CMD PPC_BIT(0)
+#define I2C_STAT_LBUS_PARITY_ERR PPC_BIT(1)
+#define I2C_STAT_BKEND_OVERRUN_ERR PPC_BIT(2)
+#define I2C_STAT_BKEND_ACCESS_ERR PPC_BIT(3)
+#define I2C_STAT_ARBT_LOST_ERR PPC_BIT(4)
+#define I2C_STAT_NACK_RCVD_ERR PPC_BIT(5)
+#define I2C_STAT_DATA_REQ PPC_BIT(6)
+#define I2C_STAT_CMD_COMP PPC_BIT(7)
+#define I2C_STAT_STOP_ERR PPC_BIT(8)
+#define I2C_STAT_UPPER_THRS PPC_BITMASK(9, 15)
+#define I2C_STAT_ANY_I2C_INTR PPC_BIT(16)
+#define I2C_STAT_PORT_HISTORY_BUSY PPC_BIT(19)
+#define I2C_STAT_SCL_INPUT_LEVEL PPC_BIT(20)
+#define I2C_STAT_SDA_INPUT_LEVEL PPC_BIT(21)
+#define I2C_STAT_PORT_BUSY PPC_BIT(22)
+#define I2C_STAT_INTERFACE_BUSY PPC_BIT(23)
+#define I2C_STAT_FIFO_ENTRY_COUNT PPC_BITMASK(24, 31)
+
+#define I2C_STAT_ANY_ERR (I2C_STAT_INVALID_CMD | I2C_STAT_LBUS_PARITY_ERR | \
+ I2C_STAT_BKEND_OVERRUN_ERR | \
+ I2C_STAT_BKEND_ACCESS_ERR | I2C_STAT_ARBT_LOST_ERR | \
+ I2C_STAT_NACK_RCVD_ERR | I2C_STAT_STOP_ERR)
+
+
+#define I2C_INTR_ACTIVE \
+ ((I2C_STAT_ANY_ERR >> 16) | I2C_INTR_CMD_COMP | I2C_INTR_DATA_REQ)
+
+/* Pseudo-status used for timeouts */
+#define I2C_STAT_PSEUDO_TIMEOUT PPC_BIT(63)
+
+/* I2C extended status register */
+#define I2C_EXTD_STAT_REG 0xc
+#define I2C_EXTD_STAT_FIFO_SIZE PPC_BITMASK(0, 7)
+#define I2C_EXTD_STAT_MSM_CURSTATE PPC_BITMASK(11, 15)
+#define I2C_EXTD_STAT_SCL_IN_SYNC PPC_BIT(16)
+#define I2C_EXTD_STAT_SDA_IN_SYNC PPC_BIT(17)
+#define I2C_EXTD_STAT_S_SCL PPC_BIT(18)
+#define I2C_EXTD_STAT_S_SDA PPC_BIT(19)
+#define I2C_EXTD_STAT_M_SCL PPC_BIT(20)
+#define I2C_EXTD_STAT_M_SDA PPC_BIT(21)
+#define I2C_EXTD_STAT_HIGH_WATER PPC_BIT(22)
+#define I2C_EXTD_STAT_LOW_WATER PPC_BIT(23)
+#define I2C_EXTD_STAT_I2C_BUSY PPC_BIT(24)
+#define I2C_EXTD_STAT_SELF_BUSY PPC_BIT(25)
+#define I2C_EXTD_STAT_I2C_VERSION PPC_BITMASK(27, 31)
+
+/* I2C residual front end/back end length */
+#define I2C_RESIDUAL_LEN_REG 0xd
+#define I2C_RESIDUAL_FRONT_END PPC_BITMASK(0, 15)
+#define I2C_RESIDUAL_BACK_END PPC_BITMASK(16, 31)
+
+/* Port busy register */
+#define I2C_PORT_BUSY_REG 0xe
+#define I2C_SET_S_SCL_REG 0xd
+#define I2C_RESET_S_SCL_REG 0xf
+#define I2C_SET_S_SDA_REG 0x10
+#define I2C_RESET_S_SDA_REG 0x11
+
+#define PNV_I2C_FIFO_SIZE 8
+#define PNV_I2C_MAX_BUSSES 64
+
+#endif /* PNV_I2C_REGS_H */
diff --git a/include/hw/i2c/ppc4xx_i2c.h b/include/hw/i2c/ppc4xx_i2c.h
index b3450bacf7..4e882fa3c8 100644
--- a/include/hw/i2c/ppc4xx_i2c.h
+++ b/include/hw/i2c/ppc4xx_i2c.h
@@ -27,14 +27,14 @@
#ifndef PPC4XX_I2C_H
#define PPC4XX_I2C_H
-#include "qemu-common.h"
#include "hw/sysbus.h"
-#include "hw/i2c/i2c.h"
+#include "hw/i2c/bitbang_i2c.h"
+#include "qom/object.h"
#define TYPE_PPC4xx_I2C "ppc4xx-i2c"
-#define PPC4xx_I2C(obj) OBJECT_CHECK(PPC4xxI2CState, (obj), TYPE_PPC4xx_I2C)
+OBJECT_DECLARE_SIMPLE_TYPE(PPC4xxI2CState, PPC4xx_I2C)
-typedef struct PPC4xxI2CState {
+struct PPC4xxI2CState {
/*< private >*/
SysBusDevice parent_obj;
@@ -42,7 +42,7 @@ typedef struct PPC4xxI2CState {
I2CBus *bus;
qemu_irq irq;
MemoryRegion iomem;
- bitbang_i2c_interface *bitbang;
+ bitbang_i2c_interface bitbang;
int mdidx;
uint8_t mdata[4];
uint8_t lmadr;
@@ -58,6 +58,6 @@ typedef struct PPC4xxI2CState {
uint8_t xfrcnt;
uint8_t xtcntlss;
uint8_t directcntl;
-} PPC4xxI2CState;
+};
#endif /* PPC4XX_I2C_H */
diff --git a/include/hw/i2c/smbus_eeprom.h b/include/hw/i2c/smbus_eeprom.h
new file mode 100644
index 0000000000..68b0063ab6
--- /dev/null
+++ b/include/hw/i2c/smbus_eeprom.h
@@ -0,0 +1,36 @@
+/*
+ * QEMU SMBus EEPROM API
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SMBUS_EEPROM_H
+#define HW_SMBUS_EEPROM_H
+
+#include "exec/cpu-common.h"
+#include "hw/i2c/i2c.h"
+
+void smbus_eeprom_init_one(I2CBus *bus, uint8_t address, uint8_t *eeprom_buf);
+void smbus_eeprom_init(I2CBus *bus, int nb_eeprom,
+ const uint8_t *eeprom_spd, int size);
+
+enum sdram_type { SDR = 0x4, DDR = 0x7, DDR2 = 0x8 };
+uint8_t *spd_data_generate(enum sdram_type type, ram_addr_t size);
+
+#endif
diff --git a/include/hw/i2c/smbus.h b/include/hw/i2c/smbus_master.h
index d8b1b9ee81..bb13bc423c 100644
--- a/include/hw/i2c/smbus.h
+++ b/include/hw/i2c/smbus_master.h
@@ -1,8 +1,5 @@
-#ifndef QEMU_SMBUS_H
-#define QEMU_SMBUS_H
-
/*
- * QEMU SMBus API
+ * QEMU SMBus host (master) API
*
* Copyright (c) 2007 Arastra, Inc.
*
@@ -25,44 +22,10 @@
* THE SOFTWARE.
*/
-#include "hw/i2c/i2c.h"
-
-#define TYPE_SMBUS_DEVICE "smbus-device"
-#define SMBUS_DEVICE(obj) \
- OBJECT_CHECK(SMBusDevice, (obj), TYPE_SMBUS_DEVICE)
-#define SMBUS_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SMBusDeviceClass, (klass), TYPE_SMBUS_DEVICE)
-#define SMBUS_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SMBusDeviceClass, (obj), TYPE_SMBUS_DEVICE)
-
-typedef struct SMBusDeviceClass
-{
- I2CSlaveClass parent_class;
- void (*quick_cmd)(SMBusDevice *dev, uint8_t read);
- void (*send_byte)(SMBusDevice *dev, uint8_t val);
- uint8_t (*receive_byte)(SMBusDevice *dev);
- /* We can't distinguish between a word write and a block write with
- length 1, so pass the whole data block including the length byte
- (if present). The device is responsible figuring out what type of
- command this is. */
- void (*write_data)(SMBusDevice *dev, uint8_t cmd, uint8_t *buf, int len);
- /* Likewise we can't distinguish between different reads, or even know
- the length of the read until the read is complete, so read data a
- byte at a time. The device is responsible for adding the length
- byte on block reads. */
- uint8_t (*read_data)(SMBusDevice *dev, uint8_t cmd, int n);
-} SMBusDeviceClass;
+#ifndef HW_SMBUS_MASTER_H
+#define HW_SMBUS_MASTER_H
-struct SMBusDevice {
- /* The SMBus protocol is implemented on top of I2C. */
- I2CSlave i2c;
-
- /* Remaining fields for internal use only. */
- int mode;
- int data_len;
- uint8_t data_buf[34]; /* command + len + 32 bytes of data. */
- uint8_t command;
-};
+#include "hw/i2c/i2c.h"
/* Master device commands. */
int smbus_quick_command(I2CBus *bus, uint8_t addr, int read);
@@ -89,8 +52,4 @@ int smbus_read_block(I2CBus *bus, uint8_t addr, uint8_t command, uint8_t *data,
int smbus_write_block(I2CBus *bus, uint8_t addr, uint8_t command, uint8_t *data,
int len, bool send_len);
-void smbus_eeprom_init_one(I2CBus *smbus, uint8_t address, uint8_t *eeprom_buf);
-void smbus_eeprom_init(I2CBus *smbus, int nb_eeprom,
- const uint8_t *eeprom_spd, int size);
-
#endif
diff --git a/include/hw/i2c/smbus_slave.h b/include/hw/i2c/smbus_slave.h
new file mode 100644
index 0000000000..86bfe0a79e
--- /dev/null
+++ b/include/hw/i2c/smbus_slave.h
@@ -0,0 +1,95 @@
+/*
+ * QEMU SMBus device (slave) API
+ *
+ * Copyright (c) 2007 Arastra, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SMBUS_SLAVE_H
+#define HW_SMBUS_SLAVE_H
+
+#include "hw/i2c/i2c.h"
+#include "qom/object.h"
+
+#define TYPE_SMBUS_DEVICE "smbus-device"
+OBJECT_DECLARE_TYPE(SMBusDevice, SMBusDeviceClass,
+ SMBUS_DEVICE)
+
+
+struct SMBusDeviceClass {
+ I2CSlaveClass parent_class;
+
+ /*
+ * An operation with no data, special in SMBus.
+ * This may be NULL; quick commands are ignored in that case.
+ */
+ void (*quick_cmd)(SMBusDevice *dev, uint8_t read);
+
+ /*
+ * We can't distinguish between a word write and a block write with
+ * length 1, so pass the whole data block including the length byte
+ * (if present). The device is responsible for figuring out what type of
+ * command this is.
+ * This may be NULL if no data is written to the device. Writes
+ * will be ignored in that case.
+ */
+ int (*write_data)(SMBusDevice *dev, uint8_t *buf, uint8_t len);
+
+ /*
+ * Likewise we can't distinguish between different reads, or even know
+ * the length of the read until the read is complete, so read data a
+ * byte at a time. The device is responsible for adding the length
+ * byte on block reads. This call cannot fail; it should return
+ * something, preferably 0xff if nothing is available.
+ * This may be NULL if no data is read from the device. Reads will
+ * return 0xff in that case.
+ */
+ uint8_t (*receive_byte)(SMBusDevice *dev);
+};
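
A hedged sketch of a minimal slave filling in these callbacks; the register-file device and its type are hypothetical, and the QOM TypeInfo registration is omitted:

    #include "qemu/osdep.h"
    #include "hw/i2c/smbus_slave.h"

    /* Hypothetical 256-byte register file behind an SMBus slave. */
    typedef struct DemoSMBusState {
        SMBusDevice parent;
        uint8_t regs[256];
        uint8_t ptr;                      /* current register pointer */
    } DemoSMBusState;

    static int demo_write_data(SMBusDevice *dev, uint8_t *buf, uint8_t len)
    {
        /* parent is the first member, so a plain cast stands in for a QOM cast */
        DemoSMBusState *s = (DemoSMBusState *)dev;

        if (len < 1) {
            return -1;
        }
        s->ptr = buf[0];                  /* first byte is the command/offset */
        for (int i = 1; i < len; i++) {   /* any remaining bytes are data */
            s->regs[s->ptr++] = buf[i];
        }
        return 0;
    }

    static uint8_t demo_receive_byte(SMBusDevice *dev)
    {
        DemoSMBusState *s = (DemoSMBusState *)dev;

        return s->regs[s->ptr++];         /* one byte per call, cannot fail */
    }

    static void demo_class_init(ObjectClass *klass, void *data)
    {
        SMBusDeviceClass *sc = SMBUS_DEVICE_CLASS(klass);

        sc->write_data = demo_write_data;
        sc->receive_byte = demo_receive_byte;
    }
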
+
+#define SMBUS_DATA_MAX_LEN 34 /* command + len + 32 bytes of data. */
+
+struct SMBusDevice {
+ /* The SMBus protocol is implemented on top of I2C. */
+ I2CSlave i2c;
+
+ /* Remaining fields for internal use only. */
+ int32_t mode;
+ int32_t data_len;
+ uint8_t data_buf[SMBUS_DATA_MAX_LEN];
+};
+
+extern const VMStateDescription vmstate_smbus_device;
+
+#define VMSTATE_SMBUS_DEVICE(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(SMBusDevice), \
+ .vmsd = &vmstate_smbus_device, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, SMBusDevice), \
+}
+
+/*
+ * Users should call this in their .needed functions to know if the
+ * SMBus slave data needs to be transferred.
+ */
+bool smbus_vmstate_needed(SMBusDevice *dev);
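
Per the comment above, a device's migration description (or subsection) can gate on this helper from its .needed callback; a minimal sketch reusing the hypothetical DemoSMBusState from the earlier sketch:

    #include "migration/vmstate.h"

    static bool demo_vmstate_needed(void *opaque)
    {
        DemoSMBusState *s = opaque;

        /* Skip the section when the core says nothing needs transferring. */
        return smbus_vmstate_needed(&s->parent);
    }

    static const VMStateDescription vmstate_demo = {
        .name = "demo-smbus-device",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = demo_vmstate_needed,
        .fields = (const VMStateField[]) {
            VMSTATE_SMBUS_DEVICE(parent, DemoSMBusState),
            VMSTATE_UINT8_ARRAY(regs, DemoSMBusState, 256),
            VMSTATE_UINT8(ptr, DemoSMBusState),
            VMSTATE_END_OF_LIST()
        }
    };
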
+
+#endif
diff --git a/include/hw/i386/apic.h b/include/hw/i386/apic.h
index a9f6c0aa33..eb606d6076 100644
--- a/include/hw/i386/apic.h
+++ b/include/hw/i386/apic.h
@@ -1,19 +1,16 @@
#ifndef APIC_H
#define APIC_H
-#include "qemu-common.h"
/* apic.c */
-void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
- uint8_t vector_num, uint8_t trigger_mode);
+void apic_set_max_apic_id(uint32_t max_apic_id);
int apic_accept_pic_intr(DeviceState *s);
void apic_deliver_pic_intr(DeviceState *s, int level);
void apic_deliver_nmi(DeviceState *d);
int apic_get_interrupt(DeviceState *s);
-void apic_reset_irq_delivered(void);
-int apic_get_irq_delivered(void);
-void cpu_set_apic_base(DeviceState *s, uint64_t val);
+int cpu_set_apic_base(DeviceState *s, uint64_t val);
uint64_t cpu_get_apic_base(DeviceState *s);
+bool cpu_is_apic_enabled(DeviceState *s);
void cpu_set_apic_tpr(DeviceState *s, uint8_t val);
uint8_t cpu_get_apic_tpr(DeviceState *s);
void apic_init_reset(DeviceState *s);
@@ -21,6 +18,9 @@ void apic_sipi(DeviceState *s);
void apic_poll_irq(DeviceState *d);
void apic_designate_bsp(DeviceState *d, bool bsp);
int apic_get_highest_priority_irr(DeviceState *dev);
+int apic_msr_read(int index, uint64_t *val);
+int apic_msr_write(int index, uint64_t val);
+bool is_x2apic_mode(DeviceState *d);
/* pc.c */
DeviceState *cpu_get_current_apic(void);
diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h
index 1209eb483a..d6e85833da 100644
--- a/include/hw/i386/apic_internal.h
+++ b/include/hw/i386/apic_internal.h
@@ -7,7 +7,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,6 +24,8 @@
#include "cpu.h"
#include "exec/memory.h"
#include "qemu/timer.h"
+#include "target/i386/cpu-qom.h"
+#include "qom/object.h"
/* APIC Local Vector Table */
#define APIC_LVT_TIMER 0
@@ -44,8 +46,10 @@
#define APIC_DM_EXTINT 7
/* APIC destination mode */
-#define APIC_DESTMODE_FLAT 0xf
-#define APIC_DESTMODE_CLUSTER 1
+#define APIC_DESTMODE_PHYSICAL 0
+#define APIC_DESTMODE_LOGICAL 1
+#define APIC_DESTMODE_LOGICAL_FLAT 0xf
+#define APIC_DESTMODE_LOGICAL_CLUSTER 0
#define APIC_TRIGGER_EDGE 0
#define APIC_TRIGGER_LEVEL 1
@@ -124,20 +128,16 @@
typedef struct APICCommonState APICCommonState;
#define TYPE_APIC_COMMON "apic-common"
-#define APIC_COMMON(obj) \
- OBJECT_CHECK(APICCommonState, (obj), TYPE_APIC_COMMON)
-#define APIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(APICCommonClass, (klass), TYPE_APIC_COMMON)
-#define APIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(APICCommonClass, (obj), TYPE_APIC_COMMON)
-
-typedef struct APICCommonClass
-{
+typedef struct APICCommonClass APICCommonClass;
+DECLARE_OBJ_CHECKERS(APICCommonState, APICCommonClass,
+ APIC_COMMON, TYPE_APIC_COMMON)
+
+struct APICCommonClass {
DeviceClass parent_class;
DeviceRealize realize;
DeviceUnrealize unrealize;
- void (*set_base)(APICCommonState *s, uint64_t val);
+ int (*set_base)(APICCommonState *s, uint64_t val);
void (*set_tpr)(APICCommonState *s, uint8_t val);
uint8_t (*get_tpr)(APICCommonState *s);
void (*enable_tpr_reporting)(APICCommonState *s, bool enable);
@@ -150,7 +150,7 @@ typedef struct APICCommonClass
* device, but it's convenient to have it here for now.
*/
void (*send_msi)(MSIMessage *msi);
-} APICCommonClass;
+};
struct APICCommonState {
/*< private >*/
@@ -189,6 +189,7 @@ struct APICCommonState {
DeviceState *vapic;
hwaddr vapic_paddr; /* note: persistence via kvmvapic */
bool legacy_instance_id;
+ uint32_t extended_log_dest;
};
typedef struct VAPICState {
@@ -201,7 +202,6 @@ typedef struct VAPICState {
extern bool apic_report_tpr_access;
-void apic_report_irq_delivered(int delivered);
bool apic_next_timer(APICCommonState *s, int64_t current_time);
void apic_enable_tpr_access_reporting(DeviceState *d, bool enable);
void apic_enable_vapic(DeviceState *d, hwaddr paddr);
@@ -210,6 +210,7 @@ void vapic_report_tpr_access(DeviceState *dev, CPUState *cpu, target_ulong ip,
TPRAccess access);
int apic_get_ppr(APICCommonState *s);
+uint32_t apic_get_current_count(APICCommonState *s);
static inline void apic_set_bit(uint32_t *tab, int index)
{
@@ -227,6 +228,6 @@ static inline int apic_get_bit(uint32_t *tab, int index)
return !!(tab[i] & mask);
}
-APICCommonClass *apic_get_class(void);
+APICCommonClass *apic_get_class(Error **errp);
#endif /* QEMU_APIC_INTERNAL_H */
diff --git a/include/hw/i386/hostmem-epc.h b/include/hw/i386/hostmem-epc.h
new file mode 100644
index 0000000000..846c726085
--- /dev/null
+++ b/include/hw/i386/hostmem-epc.h
@@ -0,0 +1,28 @@
+/*
+ * SGX EPC backend
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Authors:
+ * Sean Christopherson <sean.j.christopherson@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_HOSTMEM_EPC_H
+#define QEMU_HOSTMEM_EPC_H
+
+#include "sysemu/hostmem.h"
+
+#define TYPE_MEMORY_BACKEND_EPC "memory-backend-epc"
+
+#define MEMORY_BACKEND_EPC(obj) \
+ OBJECT_CHECK(HostMemoryBackendEpc, (obj), TYPE_MEMORY_BACKEND_EPC)
+
+typedef struct HostMemoryBackendEpc HostMemoryBackendEpc;
+
+struct HostMemoryBackendEpc {
+ HostMemoryBackend parent_obj;
+};
+
+#endif
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index a321cc9691..7fa0a695c8 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -21,17 +21,13 @@
#ifndef INTEL_IOMMU_H
#define INTEL_IOMMU_H
-#include "hw/qdev.h"
-#include "sysemu/dma.h"
+
#include "hw/i386/x86-iommu.h"
-#include "hw/i386/ioapic.h"
-#include "hw/pci/msi.h"
-#include "hw/sysbus.h"
#include "qemu/iova-tree.h"
+#include "qom/object.h"
#define TYPE_INTEL_IOMMU_DEVICE "intel-iommu"
-#define INTEL_IOMMU_DEVICE(obj) \
- OBJECT_CHECK(IntelIOMMUState, (obj), TYPE_INTEL_IOMMU_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(IntelIOMMUState, INTEL_IOMMU_DEVICE)
#define TYPE_INTEL_IOMMU_MEMORY_REGION "intel-iommu-iommu-memory-region"
@@ -60,17 +56,24 @@
typedef struct VTDContextEntry VTDContextEntry;
typedef struct VTDContextCacheEntry VTDContextCacheEntry;
-typedef struct IntelIOMMUState IntelIOMMUState;
typedef struct VTDAddressSpace VTDAddressSpace;
typedef struct VTDIOTLBEntry VTDIOTLBEntry;
-typedef struct VTDBus VTDBus;
typedef union VTD_IR_TableEntry VTD_IR_TableEntry;
typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress;
+typedef struct VTDPASIDDirEntry VTDPASIDDirEntry;
+typedef struct VTDPASIDEntry VTDPASIDEntry;
/* Context-Entry */
struct VTDContextEntry {
- uint64_t lo;
- uint64_t hi;
+ union {
+ struct {
+ uint64_t lo;
+ uint64_t hi;
+ };
+ struct {
+ uint64_t val[4];
+ };
+ };
};
struct VTDContextCacheEntry {
@@ -81,30 +84,74 @@ struct VTDContextCacheEntry {
struct VTDContextEntry context_entry;
};
+/* PASID Directory Entry */
+struct VTDPASIDDirEntry {
+ uint64_t val;
+};
+
+/* PASID Table Entry */
+struct VTDPASIDEntry {
+ uint64_t val[8];
+};
+
struct VTDAddressSpace {
PCIBus *bus;
uint8_t devfn;
+ uint32_t pasid;
AddressSpace as;
IOMMUMemoryRegion iommu;
- MemoryRegion root;
- MemoryRegion sys_alias;
+ MemoryRegion root; /* The root container of the device */
+ MemoryRegion nodmar; /* The alias of shared nodmar MR */
MemoryRegion iommu_ir; /* Interrupt region: 0xfeeXXXXX */
+ MemoryRegion iommu_ir_fault; /* Interrupt region for catching fault */
IntelIOMMUState *iommu_state;
VTDContextCacheEntry context_cache_entry;
QLIST_ENTRY(VTDAddressSpace) next;
/* Superset of notifier flags that this address space has */
IOMMUNotifierFlag notifier_flags;
- IOVATree *iova_tree; /* Traces mapped IOVA ranges */
-};
-
-struct VTDBus {
- PCIBus* bus; /* A reference to the bus to provide translation for */
- VTDAddressSpace *dev_as[0]; /* A table of VTDAddressSpace objects indexed by devfn */
+ /*
+ * @iova_tree traces mapped IOVA ranges.
+ *
+ * The tree is not needed if no MAP notifier is registered with current
+ * VTD address space, because all guest invalidate commands can be
+ * directly passed to the IOMMU UNMAP notifiers without any further
+ * reshuffling.
+ *
+ * The tree OTOH is required for MAP typed iommu notifiers for a few
+ * reasons.
+ *
+ * Firstly, there's no way to identify whether a PSI (Page Selective
+ * Invalidation) or DSI (Domain Selective Invalidation) event is a
+ * MAP or UNMAP event within the message itself. Without prior
+ * knowledge of the existing state, the vIOMMU doesn't know whether it should
+ * notify MAP or UNMAP for a PSI message it received when caching mode
+ * is enabled (for MAP notifiers).
+ *
+ * Secondly, PSI messages received from the guest driver can be enlarged in
+ * range, covering but not limited to what the guest driver wanted to
+ * invalidate. When the range to invalidate gets bigger than the
+ * limit of a PSI message, it can even become a DSI which will
+ * invalidate the whole domain. If the vIOMMU directly notifies the
+ * registered device with the unmodified range, it may confuse the
+ * registered drivers (e.g. vfio-pci) on either:
+ *
+ * (1) Trying to map the same region more than once (for
+ * VFIO_IOMMU_MAP_DMA, -EEXIST will trigger), or,
+ *
+ * (2) Trying to UNMAP a range that is still partially mapped.
+ *
+ * That accuracy is not required for UNMAP-only notifiers, but it is a
+ * must-have for notifiers registered with MAP events, because the
+ * vIOMMU needs to make sure the shadow page table is always in sync
+ * with the guest IOMMU pgtables for a device.
+ */
+ IOVATree *iova_tree;
};
struct VTDIOTLBEntry {
uint64_t gfn;
uint16_t domain_id;
+ uint32_t pasid;
uint64_t slpte;
uint64_t mask;
uint8_t access_flags;
@@ -130,38 +177,40 @@ enum {
/* Interrupt Remapping Table Entry Definition */
union VTD_IR_TableEntry {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
- uint32_t __reserved_1:8; /* Reserved 1 */
- uint32_t vector:8; /* Interrupt Vector */
- uint32_t irte_mode:1; /* IRTE Mode */
- uint32_t __reserved_0:3; /* Reserved 0 */
- uint32_t __avail:4; /* Available spaces for software */
- uint32_t delivery_mode:3; /* Delivery Mode */
- uint32_t trigger_mode:1; /* Trigger Mode */
- uint32_t redir_hint:1; /* Redirection Hint */
- uint32_t dest_mode:1; /* Destination Mode */
- uint32_t fault_disable:1; /* Fault Processing Disable */
- uint32_t present:1; /* Whether entry present/available */
+#if HOST_BIG_ENDIAN
+ uint64_t dest_id:32; /* Destination ID */
+ uint64_t __reserved_1:8; /* Reserved 1 */
+ uint64_t vector:8; /* Interrupt Vector */
+ uint64_t irte_mode:1; /* IRTE Mode */
+ uint64_t __reserved_0:3; /* Reserved 0 */
+ uint64_t __avail:4; /* Available spaces for software */
+ uint64_t delivery_mode:3; /* Delivery Mode */
+ uint64_t trigger_mode:1; /* Trigger Mode */
+ uint64_t redir_hint:1; /* Redirection Hint */
+ uint64_t dest_mode:1; /* Destination Mode */
+ uint64_t fault_disable:1; /* Fault Processing Disable */
+ uint64_t present:1; /* Whether entry present/available */
#else
- uint32_t present:1; /* Whether entry present/available */
- uint32_t fault_disable:1; /* Fault Processing Disable */
- uint32_t dest_mode:1; /* Destination Mode */
- uint32_t redir_hint:1; /* Redirection Hint */
- uint32_t trigger_mode:1; /* Trigger Mode */
- uint32_t delivery_mode:3; /* Delivery Mode */
- uint32_t __avail:4; /* Available spaces for software */
- uint32_t __reserved_0:3; /* Reserved 0 */
- uint32_t irte_mode:1; /* IRTE Mode */
- uint32_t vector:8; /* Interrupt Vector */
- uint32_t __reserved_1:8; /* Reserved 1 */
+ uint64_t present:1; /* Whether entry present/available */
+ uint64_t fault_disable:1; /* Fault Processing Disable */
+ uint64_t dest_mode:1; /* Destination Mode */
+ uint64_t redir_hint:1; /* Redirection Hint */
+ uint64_t trigger_mode:1; /* Trigger Mode */
+ uint64_t delivery_mode:3; /* Delivery Mode */
+ uint64_t __avail:4; /* Available spaces for software */
+ uint64_t __reserved_0:3; /* Reserved 0 */
+ uint64_t irte_mode:1; /* IRTE Mode */
+ uint64_t vector:8; /* Interrupt Vector */
+ uint64_t __reserved_1:8; /* Reserved 1 */
+ uint64_t dest_id:32; /* Destination ID */
#endif
- uint32_t dest_id; /* Destination ID */
- uint16_t source_id; /* Source-ID */
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint64_t __reserved_2:44; /* Reserved 2 */
uint64_t sid_vtype:2; /* Source-ID Validation Type */
uint64_t sid_q:2; /* Source-ID Qualifier */
+ uint64_t source_id:16; /* Source-ID */
#else
+ uint64_t source_id:16; /* Source-ID */
uint64_t sid_q:2; /* Source-ID Qualifier */
uint64_t sid_vtype:2; /* Source-ID Validation Type */
uint64_t __reserved_2:44; /* Reserved 2 */
@@ -176,7 +225,7 @@ union VTD_IR_TableEntry {
/* Programming format for MSI/MSI-X addresses */
union VTD_IR_MSIAddress {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint32_t __head:12; /* Should always be: 0x0fee */
uint32_t index_l:15; /* Interrupt index bit 14-0 */
uint32_t int_mode:1; /* Interrupt format */
@@ -202,22 +251,28 @@ union VTD_IR_MSIAddress {
struct IntelIOMMUState {
X86IOMMUState x86_iommu;
MemoryRegion csrmem;
+ MemoryRegion mr_nodmar;
+ MemoryRegion mr_ir;
+ MemoryRegion mr_sys_alias;
uint8_t csr[DMAR_REG_SIZE]; /* register values */
uint8_t wmask[DMAR_REG_SIZE]; /* R/W bytes */
uint8_t w1cmask[DMAR_REG_SIZE]; /* RW1C(Write 1 to Clear) bytes */
uint8_t womask[DMAR_REG_SIZE]; /* WO (write only - read returns 0) */
uint32_t version;
- bool caching_mode; /* RO - is cap CM enabled? */
+ bool caching_mode; /* RO - is cap CM enabled? */
+ bool scalable_mode; /* RO - is Scalable Mode supported? */
+ bool snoop_control; /* RO - is SNP field supported? */
dma_addr_t root; /* Current root table pointer */
- bool root_extended; /* Type of root table (extended or not) */
+ bool root_scalable; /* Type of root table (scalable or not) */
bool dmar_enabled; /* Set if DMA remapping is enabled */
uint16_t iq_head; /* Current invalidation queue head */
uint16_t iq_tail; /* Current invalidation queue tail */
dma_addr_t iq; /* Current invalidation queue pointer */
uint16_t iq_size; /* IQ Size in number of entries */
+ bool iq_dw; /* IQ descriptor width 256bit or not */
bool qi_enabled; /* Set if the QI is enabled */
uint8_t iq_last_desc_type; /* The type of last completed descriptor */
@@ -232,8 +287,8 @@ struct IntelIOMMUState {
uint32_t context_cache_gen; /* Should be in [1,MAX] */
GHashTable *iotlb; /* IOTLB */
- GHashTable *vtd_as_by_busptr; /* VTDBus objects indexed by PCIBus* reference */
- VTDBus *vtd_as_by_bus_num[VTD_PCI_BUS_MAX]; /* VTDBus objects indexed by bus number */
+ GHashTable *vtd_address_spaces; /* VTD address spaces */
+ VTDAddressSpace *vtd_as_cache[VTD_PCI_BUS_MAX]; /* VTD address space cache */
/* list of registered notifiers */
QLIST_HEAD(, VTDAddressSpace) vtd_as_with_notifiers;
@@ -246,6 +301,8 @@ struct IntelIOMMUState {
bool buggy_eim; /* Force buggy EIM unless eim=off */
uint8_t aw_bits; /* Host/IOVA address width (in bits) */
bool dma_drain; /* Whether DMA r/w draining enabled */
+ bool dma_translation; /* Whether DMA translation supported */
+ bool pasid; /* Whether to support PASID */
/*
* Protects IOMMU states in general. Currently it protects the
@@ -257,6 +314,7 @@ struct IntelIOMMUState {
/* Find the VTD Address space associated with the given bus pointer,
* create a new one if none exists
*/
-VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn);
+VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
+ int devfn, unsigned int pasid);
#endif
diff --git a/include/hw/i386/ioapic_internal.h b/include/hw/i386/ioapic_internal.h
deleted file mode 100644
index 9848f391bb..0000000000
--- a/include/hw/i386/ioapic_internal.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * IOAPIC emulation logic - internal interfaces
- *
- * Copyright (c) 2004-2005 Fabrice Bellard
- * Copyright (c) 2009 Xiantao Zhang, Intel
- * Copyright (c) 2011 Jan Kiszka, Siemens AG
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef QEMU_IOAPIC_INTERNAL_H
-#define QEMU_IOAPIC_INTERNAL_H
-
-#include "hw/hw.h"
-#include "exec/memory.h"
-#include "hw/sysbus.h"
-#include "qemu/notify.h"
-
-#define MAX_IOAPICS 1
-
-#define IOAPIC_LVT_DEST_SHIFT 56
-#define IOAPIC_LVT_DEST_IDX_SHIFT 48
-#define IOAPIC_LVT_MASKED_SHIFT 16
-#define IOAPIC_LVT_TRIGGER_MODE_SHIFT 15
-#define IOAPIC_LVT_REMOTE_IRR_SHIFT 14
-#define IOAPIC_LVT_POLARITY_SHIFT 13
-#define IOAPIC_LVT_DELIV_STATUS_SHIFT 12
-#define IOAPIC_LVT_DEST_MODE_SHIFT 11
-#define IOAPIC_LVT_DELIV_MODE_SHIFT 8
-
-#define IOAPIC_LVT_MASKED (1 << IOAPIC_LVT_MASKED_SHIFT)
-#define IOAPIC_LVT_TRIGGER_MODE (1 << IOAPIC_LVT_TRIGGER_MODE_SHIFT)
-#define IOAPIC_LVT_REMOTE_IRR (1 << IOAPIC_LVT_REMOTE_IRR_SHIFT)
-#define IOAPIC_LVT_POLARITY (1 << IOAPIC_LVT_POLARITY_SHIFT)
-#define IOAPIC_LVT_DELIV_STATUS (1 << IOAPIC_LVT_DELIV_STATUS_SHIFT)
-#define IOAPIC_LVT_DEST_MODE (1 << IOAPIC_LVT_DEST_MODE_SHIFT)
-#define IOAPIC_LVT_DELIV_MODE (7 << IOAPIC_LVT_DELIV_MODE_SHIFT)
-
-/* Bits that are read-only for IOAPIC entry */
-#define IOAPIC_RO_BITS (IOAPIC_LVT_REMOTE_IRR | \
- IOAPIC_LVT_DELIV_STATUS)
-#define IOAPIC_RW_BITS (~(uint64_t)IOAPIC_RO_BITS)
-
-#define IOAPIC_TRIGGER_EDGE 0
-#define IOAPIC_TRIGGER_LEVEL 1
-
-/*io{apic,sapic} delivery mode*/
-#define IOAPIC_DM_FIXED 0x0
-#define IOAPIC_DM_LOWEST_PRIORITY 0x1
-#define IOAPIC_DM_PMI 0x2
-#define IOAPIC_DM_NMI 0x4
-#define IOAPIC_DM_INIT 0x5
-#define IOAPIC_DM_SIPI 0x6
-#define IOAPIC_DM_EXTINT 0x7
-#define IOAPIC_DM_MASK 0x7
-
-#define IOAPIC_VECTOR_MASK 0xff
-
-#define IOAPIC_IOREGSEL 0x00
-#define IOAPIC_IOWIN 0x10
-#define IOAPIC_EOI 0x40
-
-#define IOAPIC_REG_ID 0x00
-#define IOAPIC_REG_VER 0x01
-#define IOAPIC_REG_ARB 0x02
-#define IOAPIC_REG_REDTBL_BASE 0x10
-#define IOAPIC_ID 0x00
-
-#define IOAPIC_ID_SHIFT 24
-#define IOAPIC_ID_MASK 0xf
-
-#define IOAPIC_VER_ENTRIES_SHIFT 16
-
-typedef struct IOAPICCommonState IOAPICCommonState;
-
-#define TYPE_IOAPIC_COMMON "ioapic-common"
-#define IOAPIC_COMMON(obj) \
- OBJECT_CHECK(IOAPICCommonState, (obj), TYPE_IOAPIC_COMMON)
-#define IOAPIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(IOAPICCommonClass, (klass), TYPE_IOAPIC_COMMON)
-#define IOAPIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IOAPICCommonClass, (obj), TYPE_IOAPIC_COMMON)
-
-typedef struct IOAPICCommonClass {
- SysBusDeviceClass parent_class;
-
- DeviceRealize realize;
- void (*pre_save)(IOAPICCommonState *s);
- void (*post_load)(IOAPICCommonState *s);
-} IOAPICCommonClass;
-
-struct IOAPICCommonState {
- SysBusDevice busdev;
- MemoryRegion io_memory;
- uint8_t id;
- uint8_t ioregsel;
- uint32_t irr;
- uint64_t ioredtbl[IOAPIC_NUM_PINS];
- Notifier machine_done;
- uint8_t version;
- uint64_t irq_count[IOAPIC_NUM_PINS];
- int irq_level[IOAPIC_NUM_PINS];
-};
-
-void ioapic_reset_common(DeviceState *dev);
-
-void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s);
-void ioapic_stat_update_irq(IOAPICCommonState *s, int irq, int level);
-
-#endif /* QEMU_IOAPIC_INTERNAL_H */
diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h
new file mode 100644
index 0000000000..fad97a891d
--- /dev/null
+++ b/include/hw/i386/microvm.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 Intel Corporation
+ * Copyright (c) 2019 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_I386_MICROVM_H
+#define HW_I386_MICROVM_H
+
+#include "exec/hwaddr.h"
+#include "qemu/notify.h"
+
+#include "hw/boards.h"
+#include "hw/i386/x86.h"
+#include "hw/acpi/acpi_dev_interface.h"
+#include "hw/pci-host/gpex.h"
+#include "qom/object.h"
+
+/*
+ * IRQ | pc | microvm (acpi=on)
+ * --------+------------+------------------
+ * 0 | pit |
+ * 1 | kbd |
+ * 2 | cascade |
+ * 3 | serial 1 |
+ * 4 | serial 0 | serial
+ * 5 | - |
+ * 6 | floppy |
+ * 7 | parallel |
+ * 8 | rtc | rtc (rtc=on)
+ * 9 | acpi | acpi (ged)
+ * 10 | pci lnk | xhci (usb=on)
+ * 11 | pci lnk |
+ * 12 | ps2 | pcie
+ * 13 | fpu | pcie
+ * 14 | ide 0 | pcie
+ * 15 | ide 1 | pcie
+ * 16-23 | pci gsi | virtio
+ */
+
+/* Platform virtio definitions */
+#define VIRTIO_MMIO_BASE 0xfeb00000
+#define VIRTIO_CMDLINE_MAXLEN 64
+
+#define GED_MMIO_BASE 0xfea00000
+#define GED_MMIO_BASE_MEMHP (GED_MMIO_BASE + 0x100)
+#define GED_MMIO_BASE_REGS (GED_MMIO_BASE + 0x200)
+#define GED_MMIO_IRQ 9
+
+#define MICROVM_XHCI_BASE 0xfe900000
+#define MICROVM_XHCI_IRQ 10
+
+#define PCIE_MMIO_BASE 0xc0000000
+#define PCIE_MMIO_SIZE 0x20000000
+#define PCIE_ECAM_BASE 0xe0000000
+#define PCIE_ECAM_SIZE 0x10000000
+
+/* Machine type options */
+#define MICROVM_MACHINE_RTC "rtc"
+#define MICROVM_MACHINE_PCIE "pcie"
+#define MICROVM_MACHINE_IOAPIC2 "ioapic2"
+#define MICROVM_MACHINE_ISA_SERIAL "isa-serial"
+#define MICROVM_MACHINE_OPTION_ROMS "x-option-roms"
+#define MICROVM_MACHINE_AUTO_KERNEL_CMDLINE "auto-kernel-cmdline"
+
+struct MicrovmMachineClass {
+ X86MachineClass parent;
+ HotplugHandler *(*orig_hotplug_handler)(MachineState *machine,
+ DeviceState *dev);
+};
+
+struct MicrovmMachineState {
+ X86MachineState parent;
+
+ /* Machine type options */
+ OnOffAuto rtc;
+ OnOffAuto pcie;
+ OnOffAuto ioapic2;
+ bool isa_serial;
+ bool option_roms;
+ bool auto_kernel_cmdline;
+
+ /* Machine state */
+ uint32_t pcie_irq_base;
+ uint32_t virtio_irq_base;
+ uint32_t virtio_num_transports;
+ bool kernel_cmdline_fixed;
+ Notifier machine_done;
+ Notifier powerdown_req;
+ struct GPEXConfig gpex;
+
+ /* device tree */
+ void *fdt;
+ uint32_t ioapic_phandle[2];
+};
+
+#define TYPE_MICROVM_MACHINE MACHINE_TYPE_NAME("microvm")
+OBJECT_DECLARE_TYPE(MicrovmMachineState, MicrovmMachineClass, MICROVM_MACHINE)
+
+#endif
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 0abbe45637..e52290916c 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -1,32 +1,29 @@
#ifndef HW_PC_H
#define HW_PC_H
-#include "qemu-common.h"
-#include "exec/memory.h"
+#include "qemu/notify.h"
+#include "qapi/qapi-types-common.h"
+#include "qemu/uuid.h"
#include "hw/boards.h"
-#include "hw/isa/isa.h"
#include "hw/block/fdc.h"
-#include "net/net.h"
-#include "hw/i386/ioapic.h"
+#include "hw/block/flash.h"
+#include "hw/i386/x86.h"
-#include "qemu/range.h"
-#include "qemu/bitmap.h"
-#include "sysemu/sysemu.h"
-#include "hw/pci/pci.h"
-#include "hw/mem/pc-dimm.h"
-#include "hw/mem/nvdimm.h"
-#include "hw/acpi/acpi_dev_interface.h"
+#include "hw/hotplug.h"
+#include "qom/object.h"
+#include "hw/i386/sgx-epc.h"
+#include "hw/cxl/cxl.h"
-#define HPET_INTCAP "hpet-intcap"
+#define MAX_IDE_BUS 2
/**
* PCMachineState:
* @acpi_dev: link to ACPI PM device that performs ACPI hotplug handling
* @boot_cpus: number of present VCPUs
*/
-struct PCMachineState {
+typedef struct PCMachineState {
/*< private >*/
- MachineState parent_obj;
+ X86MachineState parent_obj;
/* <public> */
@@ -34,51 +31,43 @@ struct PCMachineState {
Notifier machine_done;
/* Pointers to devices and objects: */
- HotplugHandler *acpi_dev;
- ISADevice *rtc;
- PCIBus *bus;
- FWCfgState *fw_cfg;
- qemu_irq *gsi;
+ PCIBus *pcibus;
+ I2CBus *smbus;
+ PFlashCFI01 *flash[2];
+ ISADevice *pcspk;
+ DeviceState *iommu;
+ BusState *idebus[MAX_IDE_BUS];
/* Configuration options: */
uint64_t max_ram_below_4g;
OnOffAuto vmport;
- OnOffAuto smm;
-
- AcpiNVDIMMState acpi_nvdimm_state;
+ SmbiosEntryPointType smbios_entry_point_type;
+ const char *south_bridge;
bool acpi_build_enabled;
bool smbus_enabled;
bool sata_enabled;
- bool pit_enabled;
-
- /* RAM information (sizes, addresses, configuration): */
- ram_addr_t below_4g_mem_size, above_4g_mem_size;
+ bool hpet_enabled;
+ bool i8042_enabled;
+ bool default_bus_bypass_iommu;
+ bool fd_bootchk;
+ uint64_t max_fw_size;
- /* CPU and apic information: */
- bool apic_xrupt_override;
- unsigned apic_id_limit;
- uint16_t boot_cpus;
+ /* ACPI Memory hotplug IO base address */
+ hwaddr memhp_io_base;
- /* NUMA information: */
- uint64_t numa_nodes;
- uint64_t *node_mem;
-
- /* Address space used by IOAPIC device. All IOAPIC interrupts
- * will be translated to MSI messages in the address space. */
- AddressSpace *ioapic_as;
-};
+ SGXEPCState sgx_epc;
+ CXLState cxl_devices_state;
+} PCMachineState;
#define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device"
-#define PC_MACHINE_DEVMEM_REGION_SIZE "device-memory-region-size"
#define PC_MACHINE_MAX_RAM_BELOW_4G "max-ram-below-4g"
#define PC_MACHINE_VMPORT "vmport"
-#define PC_MACHINE_SMM "smm"
-#define PC_MACHINE_NVDIMM "nvdimm"
-#define PC_MACHINE_NVDIMM_PERSIST "nvdimm-persistence"
#define PC_MACHINE_SMBUS "smbus"
#define PC_MACHINE_SATA "sata"
-#define PC_MACHINE_PIT "pit"
+#define PC_MACHINE_I8042 "i8042"
+#define PC_MACHINE_MAX_FW_SIZE "max-fw-size"
+#define PC_MACHINE_SMBIOS_EP "smbios-entry-point-type"
/**
* PCMachineClass:
@@ -98,98 +87,73 @@ struct PCMachineState {
*/
struct PCMachineClass {
/*< private >*/
- MachineClass parent_class;
+ X86MachineClass parent_class;
/*< public >*/
/* Device configuration: */
bool pci_enabled;
- bool kvmclock_enabled;
- const char *default_nic_model;
+ const char *default_south_bridge;
/* Compat options: */
+ /* Default CPU model version. See x86_cpu_set_default_version(). */
+ int default_cpu_version;
+
/* ACPI compat: */
bool has_acpi_build;
bool rsdp_in_ram;
int legacy_acpi_table_size;
unsigned acpi_data_size;
+ int pci_root_uid;
/* SMBIOS compat: */
bool smbios_defaults;
bool smbios_legacy_mode;
bool smbios_uuid_encoded;
+ SmbiosEntryPointType default_smbios_ep_type;
/* RAM / address space compat: */
bool gigabyte_align;
bool has_reserved_memory;
bool enforce_aligned_dimm;
bool broken_reserved_end;
+ bool enforce_amd_1tb_hole;
- /* TSC rate migration: */
- bool save_tsc_khz;
/* generate legacy CPU hotplug AML */
bool legacy_cpu_hotplug;
- /* use DMA capable linuxboot option rom */
- bool linuxboot_dma_enabled;
-};
-
-#define TYPE_PC_MACHINE "generic-pc-machine"
-#define PC_MACHINE(obj) \
- OBJECT_CHECK(PCMachineState, (obj), TYPE_PC_MACHINE)
-#define PC_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCMachineClass, (obj), TYPE_PC_MACHINE)
-#define PC_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCMachineClass, (klass), TYPE_PC_MACHINE)
+ /* use PVH to load kernels that support this feature */
+ bool pvh_enabled;
-/* i8259.c */
+ /* create kvmclock device even when KVM PV features are not exposed */
+ bool kvmclock_create_always;
-extern DeviceState *isa_pic;
-qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq);
-qemu_irq *kvm_i8259_init(ISABus *bus);
-int pic_read_irq(DeviceState *d);
-int pic_get_output(DeviceState *d);
+ /* resizable acpi blob compat */
+ bool resizable_acpi_blob;
-/* ioapic.c */
-
-/* Global System Interrupts */
-
-#define GSI_NUM_PINS IOAPIC_NUM_PINS
-
-typedef struct GSIState {
- qemu_irq i8259_irq[ISA_NUM_IRQS];
- qemu_irq ioapic_irq[IOAPIC_NUM_PINS];
-} GSIState;
-
-void gsi_handler(void *opaque, int n, int level);
+ /*
+ * whether the machine type implements the broken 32-bit address space
+ * bounds check for memory.
+ */
+ bool broken_32bit_mem_addr_check;
+};
-/* vmport.c */
-#define TYPE_VMPORT "vmport"
-typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
+#define TYPE_PC_MACHINE "generic-pc-machine"
+OBJECT_DECLARE_TYPE(PCMachineState, PCMachineClass, PC_MACHINE)
-static inline void vmport_init(ISABus *bus)
-{
- isa_create_simple(bus, TYPE_VMPORT);
-}
+/* ioapic.c */
-void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque);
-void vmmouse_get_data(uint32_t *data);
-void vmmouse_set_data(const uint32_t *data);
+GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled);
/* pc.c */
-extern int fd_bootchk;
-bool pc_machine_is_smm_enabled(PCMachineState *pcms);
-void pc_register_ferr_irq(qemu_irq irq);
void pc_acpi_smi_interrupt(void *opaque, int irq, int level);
-void pc_cpus_init(PCMachineState *pcms);
-void pc_hot_add_cpu(const int64_t id, Error **errp);
-void pc_acpi_init(const char *default_dsdt);
-
-void pc_guest_info_init(PCMachineState *pcms);
-
+#define PCI_HOST_PROP_RAM_MEM "ram-mem"
+#define PCI_HOST_PROP_PCI_MEM "pci-mem"
+#define PCI_HOST_PROP_SYSTEM_MEM "system-mem"
+#define PCI_HOST_PROP_IO_MEM "io-mem"
#define PCI_HOST_PROP_PCI_HOLE_START "pci-hole-start"
#define PCI_HOST_PROP_PCI_HOLE_END "pci-hole-end"
#define PCI_HOST_PROP_PCI_HOLE64_START "pci-hole64-start"
@@ -197,101 +161,91 @@ void pc_guest_info_init(PCMachineState *pcms);
#define PCI_HOST_PROP_PCI_HOLE64_SIZE "pci-hole64-size"
#define PCI_HOST_BELOW_4G_MEM_SIZE "below-4g-mem-size"
#define PCI_HOST_ABOVE_4G_MEM_SIZE "above-4g-mem-size"
+#define PCI_HOST_PROP_SMM_RANGES "smm-ranges"
-void pc_pci_as_mapping_init(Object *owner, MemoryRegion *system_memory,
+void pc_pci_as_mapping_init(MemoryRegion *system_memory,
MemoryRegion *pci_address_space);
void xen_load_linux(PCMachineState *pcms);
void pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
- MemoryRegion **ram_memory);
+ uint64_t pci_hole64_size);
uint64_t pc_pci_hole64_start(void);
-qemu_irq pc_allocate_cpu_irq(void);
DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus);
-void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
- ISADevice **rtc_state,
+void pc_basic_device_init(struct PCMachineState *pcms,
+ ISABus *isa_bus, qemu_irq *gsi,
+ ISADevice *rtc_state,
bool create_fdctrl,
- bool no_vmport,
- bool has_pit,
uint32_t hpet_irqs);
-void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd);
-void pc_cmos_init(PCMachineState *pcms,
- BusState *ide0, BusState *ide1,
- ISADevice *s);
void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus);
-void pc_pci_device_init(PCIBus *pci_bus);
-typedef void (*cpu_set_smm_t)(int smm, void *arg);
+void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs);
-void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name);
+/* port92.c */
+#define PORT92_A20_LINE "a20"
-ISADevice *pc_find_fdc0(void);
-int cmos_get_fd_drive_type(FloppyDriveType fd0);
+#define TYPE_PORT92 "port92"
-#define FW_CFG_IO_BASE 0x510
+/* pc_sysfw.c */
+void pc_system_flash_create(PCMachineState *pcms);
+void pc_system_flash_cleanup_unused(PCMachineState *pcms);
+void pc_system_firmware_init(PCMachineState *pcms, MemoryRegion *rom_memory);
+bool pc_system_ovmf_table_find(const char *entry, uint8_t **data,
+ int *data_len);
+void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size);
-#define PORT92_A20_LINE "a20"
+/* sgx.c */
+void pc_machine_init_sgx_epc(PCMachineState *pcms);
-/* acpi_piix.c */
+extern GlobalProperty pc_compat_9_0[];
+extern const size_t pc_compat_9_0_len;
-I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
- qemu_irq sci_irq, qemu_irq smi_irq,
- int smm_enabled, DeviceState **piix4_pm);
+extern GlobalProperty pc_compat_8_2[];
+extern const size_t pc_compat_8_2_len;
-/* hpet.c */
-extern int no_hpet;
+extern GlobalProperty pc_compat_8_1[];
+extern const size_t pc_compat_8_1_len;
-/* piix_pci.c */
-struct PCII440FXState;
-typedef struct PCII440FXState PCII440FXState;
+extern GlobalProperty pc_compat_8_0[];
+extern const size_t pc_compat_8_0_len;
-#define TYPE_I440FX_PCI_HOST_BRIDGE "i440FX-pcihost"
-#define TYPE_I440FX_PCI_DEVICE "i440FX"
+extern GlobalProperty pc_compat_7_2[];
+extern const size_t pc_compat_7_2_len;
-#define TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE "igd-passthrough-i440FX"
+extern GlobalProperty pc_compat_7_1[];
+extern const size_t pc_compat_7_1_len;
-/*
- * Reset Control Register: PCI-accessible ISA-Compatible Register at address
- * 0xcf9, provided by the PCI/ISA bridge (PIIX3 PCI function 0, 8086:7000).
- */
-#define RCR_IOPORT 0xcf9
-
-PCIBus *i440fx_init(const char *host_type, const char *pci_type,
- PCII440FXState **pi440fx_state, int *piix_devfn,
- ISABus **isa_bus, qemu_irq *pic,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
- ram_addr_t ram_size,
- ram_addr_t below_4g_mem_size,
- ram_addr_t above_4g_mem_size,
- MemoryRegion *pci_memory,
- MemoryRegion *ram_memory);
-
-PCIBus *find_i440fx(void);
-/* piix4.c */
-extern PCIDevice *piix4_dev;
-int piix4_init(PCIBus *bus, ISABus **isa_bus, int devfn);
+extern GlobalProperty pc_compat_7_0[];
+extern const size_t pc_compat_7_0_len;
-/* pc_sysfw.c */
-void pc_system_firmware_init(MemoryRegion *rom_memory,
- bool isapc_ram_fw);
+extern GlobalProperty pc_compat_6_2[];
+extern const size_t pc_compat_6_2_len;
+
+extern GlobalProperty pc_compat_6_1[];
+extern const size_t pc_compat_6_1_len;
+
+extern GlobalProperty pc_compat_6_0[];
+extern const size_t pc_compat_6_0_len;
+
+extern GlobalProperty pc_compat_5_2[];
+extern const size_t pc_compat_5_2_len;
-/* acpi-build.c */
-void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
- const CPUArchIdList *apic_ids, GArray *entry);
+extern GlobalProperty pc_compat_5_1[];
+extern const size_t pc_compat_5_1_len;
-/* e820 types */
-#define E820_RAM 1
-#define E820_RESERVED 2
-#define E820_ACPI 3
-#define E820_NVS 4
-#define E820_UNUSABLE 5
+extern GlobalProperty pc_compat_5_0[];
+extern const size_t pc_compat_5_0_len;
-int e820_add_entry(uint64_t, uint64_t, uint32_t);
-int e820_get_num_entries(void);
-bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
+extern GlobalProperty pc_compat_4_2[];
+extern const size_t pc_compat_4_2_len;
+
+extern GlobalProperty pc_compat_4_1[];
+extern const size_t pc_compat_4_1_len;
+
+extern GlobalProperty pc_compat_4_0[];
+extern const size_t pc_compat_4_0_len;
extern GlobalProperty pc_compat_3_1[];
extern const size_t pc_compat_3_1_len;
@@ -338,26 +292,6 @@ extern const size_t pc_compat_2_1_len;
extern GlobalProperty pc_compat_2_0[];
extern const size_t pc_compat_2_0_len;
-extern GlobalProperty pc_compat_1_7[];
-extern const size_t pc_compat_1_7_len;
-
-extern GlobalProperty pc_compat_1_6[];
-extern const size_t pc_compat_1_6_len;
-
-extern GlobalProperty pc_compat_1_5[];
-extern const size_t pc_compat_1_5_len;
-
-extern GlobalProperty pc_compat_1_4[];
-extern const size_t pc_compat_1_4_len;
-
-/* Helper for setting model-id for CPU models that changed model-id
- * depending on QEMU versions up to QEMU 2.4.
- */
-#define PC_CPU_MODEL_IDS(v) \
- { "qemu32-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
- { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
- { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },
-
#define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \
static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \
{ \
@@ -376,5 +310,4 @@ extern const size_t pc_compat_1_4_len;
} \
type_init(pc_machine_init_##suffix)
-extern void igd_passthrough_isa_bridge_create(PCIBus *bus, uint16_t gpu_dev_id);
#endif
diff --git a/include/hw/i386/sgx-epc.h b/include/hw/i386/sgx-epc.h
new file mode 100644
index 0000000000..3e00efd870
--- /dev/null
+++ b/include/hw/i386/sgx-epc.h
@@ -0,0 +1,71 @@
+/*
+ * SGX EPC device
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Authors:
+ * Sean Christopherson <sean.j.christopherson@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_SGX_EPC_H
+#define QEMU_SGX_EPC_H
+
+#include "hw/qdev-core.h"
+#include "hw/i386/hostmem-epc.h"
+
+#define TYPE_SGX_EPC "sgx-epc"
+#define SGX_EPC(obj) \
+ OBJECT_CHECK(SGXEPCDevice, (obj), TYPE_SGX_EPC)
+#define SGX_EPC_CLASS(oc) \
+ OBJECT_CLASS_CHECK(SGXEPCDeviceClass, (oc), TYPE_SGX_EPC)
+#define SGX_EPC_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SGXEPCDeviceClass, (obj), TYPE_SGX_EPC)
+
+#define SGX_EPC_ADDR_PROP "addr"
+#define SGX_EPC_SIZE_PROP "size"
+#define SGX_EPC_MEMDEV_PROP "memdev"
+#define SGX_EPC_NUMA_NODE_PROP "node"
+
+/**
+ * SGXEPCDevice:
+ * @addr: starting guest physical address where @SGXEPCDevice is mapped.
+ *        Default value: 0, which means the address is auto-allocated.
+ * @hostmem: host memory backend providing memory for @SGXEPCDevice
+ */
+typedef struct SGXEPCDevice {
+ /* private */
+ DeviceState parent_obj;
+
+ /* public */
+ uint64_t addr;
+ uint32_t node;
+ HostMemoryBackendEpc *hostmem;
+} SGXEPCDevice;
+
+/*
+ * @base: address in guest physical address space where EPC regions start
+ * @mr: address space container for memory devices
+ */
+typedef struct SGXEPCState {
+ uint64_t base;
+ uint64_t size;
+
+ MemoryRegion mr;
+
+ struct SGXEPCDevice **sections;
+ int nr_sections;
+} SGXEPCState;
+
+bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size);
+void sgx_epc_build_srat(GArray *table_data);
+
+static inline uint64_t sgx_epc_above_4g_end(SGXEPCState *sgx_epc)
+{
+ assert(sgx_epc != NULL && sgx_epc->base >= 0x100000000ULL);
+
+ return sgx_epc->base + sgx_epc->size;
+}
+
+#endif
diff --git a/include/hw/i386/topology.h b/include/hw/i386/topology.h
index 1ebaee0f76..d4eeb7ab82 100644
--- a/include/hw/i386/topology.h
+++ b/include/hw/i386/topology.h
@@ -24,14 +24,15 @@
#ifndef HW_I386_TOPOLOGY_H
#define HW_I386_TOPOLOGY_H
-/* This file implements the APIC-ID-based CPU topology enumeration logic,
+/*
+ * This file implements the APIC-ID-based CPU topology enumeration logic,
* documented at the following document:
* Intel® 64 Architecture Processor Topology Enumeration
* http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/
*
* This code should be compatible with AMD's "Extended Method" described at:
* AMD CPUID Specification (Publication #25481)
- * Section 3: Multiple Core Calcuation
+ * Section 3: Multiple Core Calculation
* as long as:
* nr_threads is set to 1;
* OFFSET_IDX is assumed to be 0;
@@ -41,18 +42,25 @@
#include "qemu/bitops.h"
-/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support
+/*
+ * APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support
*/
typedef uint32_t apic_id_t;
-typedef struct X86CPUTopoInfo {
+typedef struct X86CPUTopoIDs {
unsigned pkg_id;
+ unsigned die_id;
unsigned core_id;
unsigned smt_id;
+} X86CPUTopoIDs;
+
+typedef struct X86CPUTopoInfo {
+ unsigned dies_per_pkg;
+ unsigned cores_per_die;
+ unsigned threads_per_core;
} X86CPUTopoInfo;
-/* Return the bit width needed for 'count' IDs
- */
+/* Return the bit width needed for 'count' IDs */
static unsigned apicid_bitwidth_for_count(unsigned count)
{
g_assert(count >= 1);
@@ -60,89 +68,104 @@ static unsigned apicid_bitwidth_for_count(unsigned count)
return count ? 32 - clz32(count) : 0;
}
-/* Bit width of the SMT_ID (thread ID) field on the APIC ID
- */
-static inline unsigned apicid_smt_width(unsigned nr_cores, unsigned nr_threads)
+/* Bit width of the SMT_ID (thread ID) field on the APIC ID */
+static inline unsigned apicid_smt_width(X86CPUTopoInfo *topo_info)
{
- return apicid_bitwidth_for_count(nr_threads);
+ return apicid_bitwidth_for_count(topo_info->threads_per_core);
}
-/* Bit width of the Core_ID field
- */
-static inline unsigned apicid_core_width(unsigned nr_cores, unsigned nr_threads)
+/* Bit width of the Core_ID field */
+static inline unsigned apicid_core_width(X86CPUTopoInfo *topo_info)
{
- return apicid_bitwidth_for_count(nr_cores);
+ return apicid_bitwidth_for_count(topo_info->cores_per_die);
}
-/* Bit offset of the Core_ID field
- */
-static inline unsigned apicid_core_offset(unsigned nr_cores,
- unsigned nr_threads)
+/* Bit width of the Die_ID field */
+static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info)
{
- return apicid_smt_width(nr_cores, nr_threads);
+ return apicid_bitwidth_for_count(topo_info->dies_per_pkg);
}
-/* Bit offset of the Pkg_ID (socket ID) field
- */
-static inline unsigned apicid_pkg_offset(unsigned nr_cores, unsigned nr_threads)
+/* Bit offset of the Core_ID field */
+static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info)
{
- return apicid_core_offset(nr_cores, nr_threads) +
- apicid_core_width(nr_cores, nr_threads);
+ return apicid_smt_width(topo_info);
}
-/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
+/* Bit offset of the Die_ID field */
+static inline unsigned apicid_die_offset(X86CPUTopoInfo *topo_info)
+{
+ return apicid_core_offset(topo_info) + apicid_core_width(topo_info);
+}
+
+/* Bit offset of the Pkg_ID (socket ID) field */
+static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info)
+{
+ return apicid_die_offset(topo_info) + apicid_die_width(topo_info);
+}
+
+/*
+ * Make APIC ID for the CPU based on Pkg_ID, Die_ID, Core_ID, SMT_ID
*
* The caller must make sure core_id < nr_cores and smt_id < nr_threads.
*/
-static inline apic_id_t apicid_from_topo_ids(unsigned nr_cores,
- unsigned nr_threads,
- const X86CPUTopoInfo *topo)
+static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info,
+ const X86CPUTopoIDs *topo_ids)
{
- return (topo->pkg_id << apicid_pkg_offset(nr_cores, nr_threads)) |
- (topo->core_id << apicid_core_offset(nr_cores, nr_threads)) |
- topo->smt_id;
+ return (topo_ids->pkg_id << apicid_pkg_offset(topo_info)) |
+ (topo_ids->die_id << apicid_die_offset(topo_info)) |
+ (topo_ids->core_id << apicid_core_offset(topo_info)) |
+ topo_ids->smt_id;
}
-/* Calculate thread/core/package IDs for a specific topology,
+/*
+ * Calculate thread/core/die/package IDs for a specific topology,
* based on (contiguous) CPU index
*/
-static inline void x86_topo_ids_from_idx(unsigned nr_cores,
- unsigned nr_threads,
+static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info,
unsigned cpu_index,
- X86CPUTopoInfo *topo)
+ X86CPUTopoIDs *topo_ids)
{
- unsigned core_index = cpu_index / nr_threads;
- topo->smt_id = cpu_index % nr_threads;
- topo->core_id = core_index % nr_cores;
- topo->pkg_id = core_index / nr_cores;
+ unsigned nr_dies = topo_info->dies_per_pkg;
+ unsigned nr_cores = topo_info->cores_per_die;
+ unsigned nr_threads = topo_info->threads_per_core;
+
+ topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads);
+ topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies;
+ topo_ids->core_id = cpu_index / nr_threads % nr_cores;
+ topo_ids->smt_id = cpu_index % nr_threads;
}
-/* Calculate thread/core/package IDs for a specific topology,
+/*
+ * Calculate thread/core/die/package IDs for a specific topology,
* based on APIC ID
*/
static inline void x86_topo_ids_from_apicid(apic_id_t apicid,
- unsigned nr_cores,
- unsigned nr_threads,
- X86CPUTopoInfo *topo)
+ X86CPUTopoInfo *topo_info,
+ X86CPUTopoIDs *topo_ids)
{
- topo->smt_id = apicid &
- ~(0xFFFFFFFFUL << apicid_smt_width(nr_cores, nr_threads));
- topo->core_id = (apicid >> apicid_core_offset(nr_cores, nr_threads)) &
- ~(0xFFFFFFFFUL << apicid_core_width(nr_cores, nr_threads));
- topo->pkg_id = apicid >> apicid_pkg_offset(nr_cores, nr_threads);
+ topo_ids->smt_id = apicid &
+ ~(0xFFFFFFFFUL << apicid_smt_width(topo_info));
+ topo_ids->core_id =
+ (apicid >> apicid_core_offset(topo_info)) &
+ ~(0xFFFFFFFFUL << apicid_core_width(topo_info));
+ topo_ids->die_id =
+ (apicid >> apicid_die_offset(topo_info)) &
+ ~(0xFFFFFFFFUL << apicid_die_width(topo_info));
+ topo_ids->pkg_id = apicid >> apicid_pkg_offset(topo_info);
}
-/* Make APIC ID for the CPU 'cpu_index'
+/*
+ * Make APIC ID for the CPU 'cpu_index'
*
* 'cpu_index' is a sequential, contiguous ID for the CPU.
*/
-static inline apic_id_t x86_apicid_from_cpu_idx(unsigned nr_cores,
- unsigned nr_threads,
+static inline apic_id_t x86_apicid_from_cpu_idx(X86CPUTopoInfo *topo_info,
unsigned cpu_index)
{
- X86CPUTopoInfo topo;
- x86_topo_ids_from_idx(nr_cores, nr_threads, cpu_index, &topo);
- return apicid_from_topo_ids(nr_cores, nr_threads, &topo);
+ X86CPUTopoIDs topo_ids;
+ x86_topo_ids_from_idx(topo_info, cpu_index, &topo_ids);
+ return x86_apicid_from_topo_ids(topo_info, &topo_ids);
}
#endif /* HW_I386_TOPOLOGY_H */
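A worked example makes the encoding above easier to check. The standalone sketch below (not part of the patch) mirrors the arithmetic of x86_topo_ids_from_idx() and x86_apicid_from_topo_ids() for a hypothetical topology of 2 dies per package, 3 cores per die and 2 threads per core; bitwidth_for_count() is a stand-in for apicid_bitwidth_for_count(), assumed to return the number of bits needed to distinguish 'count' IDs.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for apicid_bitwidth_for_count(): 0 bits for a single ID,
 * otherwise the width needed to encode (count - 1). */
static unsigned bitwidth_for_count(unsigned count)
{
    count -= 1;
    return count ? 32 - __builtin_clz(count) : 0;
}

int main(void)
{
    /* Hypothetical topology, mirroring X86CPUTopoInfo. */
    unsigned dies = 2, cores = 3, threads = 2;

    unsigned smt_width  = bitwidth_for_count(threads);        /* 1 */
    unsigned core_width = bitwidth_for_count(cores);          /* 2 */
    unsigned core_off   = smt_width;                          /* 1 */
    unsigned die_off    = core_off + core_width;              /* 3 */
    unsigned pkg_off    = die_off + bitwidth_for_count(dies); /* 4 */

    /* Same splitting as x86_topo_ids_from_idx() for cpu_index 11. */
    unsigned idx     = 11;
    unsigned pkg_id  = idx / (dies * cores * threads);        /* 0 */
    unsigned die_id  = idx / (cores * threads) % dies;        /* 1 */
    unsigned core_id = idx / threads % cores;                 /* 2 */
    unsigned smt_id  = idx % threads;                         /* 1 */

    /* Same packing as x86_apicid_from_topo_ids(). */
    uint32_t apicid = (pkg_id << pkg_off) | (die_id << die_off) |
                      (core_id << core_off) | smt_id;
    printf("cpu_index %u -> APIC ID 0x%x\n", idx, apicid);   /* 0xd */
    return 0;
}

Because 3 cores still need a 2-bit Core_ID field, cpu_index 11 encodes to APIC ID 0xd rather than 0xb: APIC IDs stop being contiguous as soon as any topology level has a non-power-of-two count.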
diff --git a/include/hw/i386/vmport.h b/include/hw/i386/vmport.h
new file mode 100644
index 0000000000..8f5e27c6f5
--- /dev/null
+++ b/include/hw/i386/vmport.h
@@ -0,0 +1,28 @@
+#ifndef HW_VMPORT_H
+#define HW_VMPORT_H
+
+#include "hw/isa/isa.h"
+
+#define TYPE_VMPORT "vmport"
+typedef uint32_t VMPortReadFunc(void *opaque, uint32_t address);
+
+typedef enum {
+ VMPORT_CMD_GETVERSION = 10,
+ VMPORT_CMD_GETBIOSUUID = 19,
+ VMPORT_CMD_GETRAMSIZE = 20,
+ VMPORT_CMD_VMMOUSE_DATA = 39,
+ VMPORT_CMD_VMMOUSE_STATUS = 40,
+ VMPORT_CMD_VMMOUSE_COMMAND = 41,
+ VMPORT_CMD_GETHZ = 45,
+ VMPORT_CMD_GET_VCPU_INFO = 68,
+ VMPORT_ENTRIES
+} VMPortCommand;
+
+static inline void vmport_init(ISABus *bus)
+{
+ isa_create_simple(bus, TYPE_VMPORT);
+}
+
+void vmport_register(VMPortCommand command, VMPortReadFunc *func, void *opaque);
+
+#endif
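As a usage note, the registration interface above is driven from device code: a handler with the VMPortReadFunc signature is attached to one of the VMPortCommand slots once the vmport ISA device exists. The fragment below is only an illustrative sketch against this header; the handler name, its return value and the setup function are hypothetical, not code from this series.

#include "qemu/osdep.h"
#include "hw/i386/vmport.h"

/* Hypothetical handler for the GETVERSION backdoor command. */
static uint32_t example_vmport_getversion(void *opaque, uint32_t address)
{
    return 6; /* placeholder value; a real handler derives this from state */
}

static void example_vmport_setup(ISABus *isa_bus)
{
    vmport_init(isa_bus);   /* instantiates the TYPE_VMPORT ISA device */
    vmport_register(VMPORT_CMD_GETVERSION, example_vmport_getversion, NULL);
}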
diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h
index dcd9719a2c..bfd21649d0 100644
--- a/include/hw/i386/x86-iommu.h
+++ b/include/hw/i386/x86-iommu.h
@@ -17,34 +17,21 @@
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef IOMMU_COMMON_H
-#define IOMMU_COMMON_H
+#ifndef HW_I386_X86_IOMMU_H
+#define HW_I386_X86_IOMMU_H
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
+#include "qom/object.h"
#define TYPE_X86_IOMMU_DEVICE ("x86-iommu")
-#define X86_IOMMU_DEVICE(obj) \
- OBJECT_CHECK(X86IOMMUState, (obj), TYPE_X86_IOMMU_DEVICE)
-#define X86_IOMMU_CLASS(klass) \
- OBJECT_CLASS_CHECK(X86IOMMUClass, (klass), TYPE_X86_IOMMU_DEVICE)
-#define X86_IOMMU_GET_CLASS(obj) \
- OBJECT_GET_CLASS(X86IOMMUClass, obj, TYPE_X86_IOMMU_DEVICE)
+OBJECT_DECLARE_TYPE(X86IOMMUState, X86IOMMUClass, X86_IOMMU_DEVICE)
#define X86_IOMMU_SID_INVALID (0xffff)
-typedef struct X86IOMMUState X86IOMMUState;
-typedef struct X86IOMMUClass X86IOMMUClass;
typedef struct X86IOMMUIrq X86IOMMUIrq;
typedef struct X86IOMMU_MSIMessage X86IOMMU_MSIMessage;
-typedef enum IommuType {
- TYPE_INTEL,
- TYPE_AMD,
- TYPE_NONE
-} IommuType;
-
struct X86IOMMUClass {
SysBusDeviceClass parent;
/* Intel/AMD specific realize() hook */
@@ -77,7 +64,6 @@ struct X86IOMMUState {
OnOffAuto intr_supported; /* Whether vIOMMU supports IR */
bool dt_supported; /* Whether vIOMMU supports DT */
bool pt_supported; /* Whether vIOMMU supports pass-through */
- IommuType type; /* IOMMU type - AMD/Intel */
QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */
};
@@ -100,41 +86,43 @@ struct X86IOMMUIrq {
struct X86IOMMU_MSIMessage {
union {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
- uint32_t __addr_head:12; /* 0xfee */
- uint32_t dest:8;
- uint32_t __reserved:8;
- uint32_t redir_hint:1;
- uint32_t dest_mode:1;
- uint32_t __not_used:2;
+#if HOST_BIG_ENDIAN
+ uint64_t __addr_hi:32;
+ uint64_t __addr_head:12; /* 0xfee */
+ uint64_t dest:8;
+ uint64_t __reserved:8;
+ uint64_t redir_hint:1;
+ uint64_t dest_mode:1;
+ uint64_t __not_used:2;
#else
- uint32_t __not_used:2;
- uint32_t dest_mode:1;
- uint32_t redir_hint:1;
- uint32_t __reserved:8;
- uint32_t dest:8;
- uint32_t __addr_head:12; /* 0xfee */
+ uint64_t __not_used:2;
+ uint64_t dest_mode:1;
+ uint64_t redir_hint:1;
+ uint64_t __reserved:8;
+ uint64_t dest:8;
+ uint64_t __addr_head:12; /* 0xfee */
+ uint64_t __addr_hi:32;
#endif
- uint32_t __addr_hi;
} QEMU_PACKED;
uint64_t msi_addr;
};
union {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
- uint16_t trigger_mode:1;
- uint16_t level:1;
- uint16_t __resved:3;
- uint16_t delivery_mode:3;
- uint16_t vector:8;
+#if HOST_BIG_ENDIAN
+ uint32_t __resved1:16;
+ uint32_t trigger_mode:1;
+ uint32_t level:1;
+ uint32_t __resved:3;
+ uint32_t delivery_mode:3;
+ uint32_t vector:8;
#else
- uint16_t vector:8;
- uint16_t delivery_mode:3;
- uint16_t __resved:3;
- uint16_t level:1;
- uint16_t trigger_mode:1;
+ uint32_t vector:8;
+ uint32_t delivery_mode:3;
+ uint32_t __resved:3;
+ uint32_t level:1;
+ uint32_t trigger_mode:1;
+ uint32_t __resved1:16;
#endif
- uint16_t __resved1;
} QEMU_PACKED;
uint32_t msi_data;
};
@@ -146,11 +134,6 @@ struct X86IOMMU_MSIMessage {
*/
X86IOMMUState *x86_iommu_get_default(void);
-/*
- * x86_iommu_get_type - get IOMMU type
- */
-IommuType x86_iommu_get_type(void);
-
/**
* x86_iommu_iec_register_notifier - register IEC (Interrupt Entry
* Cache) notifiers
diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h
new file mode 100644
index 0000000000..4dc30dcb4d
--- /dev/null
+++ b/include/hw/i386/x86.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_I386_X86_H
+#define HW_I386_X86_H
+
+#include "exec/hwaddr.h"
+
+#include "hw/boards.h"
+#include "hw/intc/ioapic.h"
+#include "hw/isa/isa.h"
+#include "qom/object.h"
+
+struct X86MachineClass {
+ /*< private >*/
+ MachineClass parent;
+
+ /*< public >*/
+
+ /* TSC rate migration: */
+ bool save_tsc_khz;
+ /* use DMA capable linuxboot option rom */
+ bool fwcfg_dma_enabled;
+ /* CPU and apic information: */
+ bool apic_xrupt_override;
+};
+
+struct X86MachineState {
+ /*< private >*/
+ MachineState parent;
+
+ /*< public >*/
+
+ /* Pointers to devices and objects: */
+ ISADevice *rtc;
+ FWCfgState *fw_cfg;
+ qemu_irq *gsi;
+ DeviceState *ioapic2;
+ GMappedFile *initrd_mapped_file;
+ HotplugHandler *acpi_dev;
+
+ /* RAM information (sizes, addresses, configuration): */
+ ram_addr_t below_4g_mem_size, above_4g_mem_size;
+
+ /* Start address of the initial RAM above 4G */
+ uint64_t above_4g_mem_start;
+
+ /* CPU and apic information: */
+ unsigned pci_irq_mask;
+ unsigned apic_id_limit;
+ uint16_t boot_cpus;
+ SgxEPCList *sgx_epc_list;
+
+ OnOffAuto smm;
+ OnOffAuto acpi;
+ OnOffAuto pit;
+ OnOffAuto pic;
+
+ char *oem_id;
+ char *oem_table_id;
+ /*
+     * Address space used by the IOAPIC device. All IOAPIC interrupts
+     * will be translated to MSI messages in this address space.
+ */
+ AddressSpace *ioapic_as;
+
+ /*
+     * Rate limit enforced on bus locks detected in the guest.
+     * The default value of bus_lock_ratelimit is 0 per second,
+     * which means the guest's bus locks are not limited.
+ */
+ uint64_t bus_lock_ratelimit;
+};
+
+#define X86_MACHINE_SMM "smm"
+#define X86_MACHINE_ACPI "acpi"
+#define X86_MACHINE_PIT "pit"
+#define X86_MACHINE_PIC "pic"
+#define X86_MACHINE_OEM_ID "x-oem-id"
+#define X86_MACHINE_OEM_TABLE_ID "x-oem-table-id"
+#define X86_MACHINE_BUS_LOCK_RATELIMIT "bus-lock-ratelimit"
+
+#define TYPE_X86_MACHINE MACHINE_TYPE_NAME("x86")
+OBJECT_DECLARE_TYPE(X86MachineState, X86MachineClass, X86_MACHINE)
+
+uint32_t x86_cpu_apic_id_from_index(X86MachineState *pcms,
+ unsigned int cpu_index);
+
+void x86_cpu_new(X86MachineState *pcms, int64_t apic_id, Error **errp);
+void x86_cpus_init(X86MachineState *pcms, int default_cpu_version);
+CpuInstanceProperties x86_cpu_index_to_props(MachineState *ms,
+ unsigned cpu_index);
+int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx);
+const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms);
+CPUArchId *x86_find_cpu_slot(MachineState *ms, uint32_t id, int *idx);
+void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count);
+void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void x86_cpu_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void x86_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void x86_cpu_unplug_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+
+void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
+ MemoryRegion *rom_memory, bool isapc_ram_fw);
+
+void x86_load_linux(X86MachineState *x86ms,
+ FWCfgState *fw_cfg,
+ int acpi_data_size,
+ bool pvh_enabled);
+
+bool x86_machine_is_smm_enabled(const X86MachineState *x86ms);
+bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms);
+
+/* Global System Interrupts */
+
+#define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
+
+typedef struct GSIState {
+ qemu_irq i8259_irq[ISA_NUM_IRQS];
+ qemu_irq ioapic_irq[IOAPIC_NUM_PINS];
+ qemu_irq ioapic2_irq[IOAPIC_NUM_PINS];
+} GSIState;
+
+qemu_irq x86_allocate_cpu_irq(void);
+void gsi_handler(void *opaque, int n, int level);
+void ioapic_init_gsi(GSIState *gsi_state, Object *parent);
+DeviceState *ioapic_init_secondary(GSIState *gsi_state);
+
+/* pc_sysfw.c */
+void x86_firmware_configure(void *ptr, int size);
+
+#endif
diff --git a/include/hw/i386/xen_arch_hvm.h b/include/hw/i386/xen_arch_hvm.h
new file mode 100644
index 0000000000..1000f8f543
--- /dev/null
+++ b/include/hw/i386/xen_arch_hvm.h
@@ -0,0 +1,11 @@
+#ifndef HW_XEN_ARCH_I386_HVM_H
+#define HW_XEN_ARCH_I386_HVM_H
+
+#include <xen/hvm/ioreq.h>
+#include "hw/xen/xen-hvm-common.h"
+
+void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
+void arch_xen_set_memory(XenIOState *state,
+ MemoryRegionSection *section,
+ bool add);
+#endif
diff --git a/include/hw/ide.h b/include/hw/ide.h
deleted file mode 100644
index 3ae087c572..0000000000
--- a/include/hw/ide.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef HW_IDE_H
-#define HW_IDE_H
-
-#include "hw/isa/isa.h"
-#include "hw/pci/pci.h"
-#include "exec/memory.h"
-
-#define MAX_IDE_DEVS 2
-
-/* ide-isa.c */
-ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq,
- DriveInfo *hd0, DriveInfo *hd1);
-
-/* ide-pci.c */
-void pci_cmd646_ide_init(PCIBus *bus, DriveInfo **hd_table,
- int secondary_ide_enabled);
-PCIDevice *pci_piix3_xen_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn);
-PCIDevice *pci_piix3_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn);
-PCIDevice *pci_piix4_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn);
-int pci_piix3_xen_ide_unplug(DeviceState *dev, bool aux);
-void vt82c686b_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn);
-
-/* ide-mmio.c */
-void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1);
-
-int ide_get_geometry(BusState *bus, int unit,
- int16_t *cyls, int8_t *heads, int8_t *secs);
-int ide_get_bios_chs_trans(BusState *bus, int unit);
-
-/* ide/core.c */
-void ide_drive_get(DriveInfo **hd, int max_bus);
-
-#endif /* HW_IDE_H */
diff --git a/include/hw/ide/ahci-pci.h b/include/hw/ide/ahci-pci.h
new file mode 100644
index 0000000000..c2ee616962
--- /dev/null
+++ b/include/hw/ide/ahci-pci.h
@@ -0,0 +1,22 @@
+/*
+ * QEMU AHCI Emulation (PCI devices)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_IDE_AHCI_PCI_H
+#define HW_IDE_AHCI_PCI_H
+
+#include "qom/object.h"
+#include "hw/ide/ahci.h"
+#include "hw/pci/pci_device.h"
+
+#define TYPE_ICH9_AHCI "ich9-ahci"
+OBJECT_DECLARE_SIMPLE_TYPE(AHCIPCIState, ICH9_AHCI)
+
+struct AHCIPCIState {
+ PCIDevice parent_obj;
+
+ AHCIState ahci;
+};
+
+#endif
diff --git a/include/hw/ide/ahci-sysbus.h b/include/hw/ide/ahci-sysbus.h
new file mode 100644
index 0000000000..06eaac8cb6
--- /dev/null
+++ b/include/hw/ide/ahci-sysbus.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU AHCI Emulation (MMIO-mapped devices)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_IDE_AHCI_SYSBUS_H
+#define HW_IDE_AHCI_SYSBUS_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/ide/ahci.h"
+
+#define TYPE_SYSBUS_AHCI "sysbus-ahci"
+OBJECT_DECLARE_SIMPLE_TYPE(SysbusAHCIState, SYSBUS_AHCI)
+
+struct SysbusAHCIState {
+ SysBusDevice parent_obj;
+
+ AHCIState ahci;
+};
+
+#define TYPE_ALLWINNER_AHCI "allwinner-ahci"
+OBJECT_DECLARE_SIMPLE_TYPE(AllwinnerAHCIState, ALLWINNER_AHCI)
+
+#define ALLWINNER_AHCI_MMIO_OFF 0x80
+#define ALLWINNER_AHCI_MMIO_SIZE 0x80
+
+struct AllwinnerAHCIState {
+ SysbusAHCIState parent_obj;
+
+ MemoryRegion mmio;
+ uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE / 4];
+};
+
+#endif
diff --git a/include/hw/ide/ahci.h b/include/hw/ide/ahci.h
index b7bb2b02d6..ba31e75ff9 100644
--- a/include/hw/ide/ahci.h
+++ b/include/hw/ide/ahci.h
@@ -9,7 +9,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,7 +24,7 @@
#ifndef HW_IDE_AHCI_H
#define HW_IDE_AHCI_H
-#include "hw/sysbus.h"
+#include "exec/memory.h"
typedef struct AHCIDevice AHCIDevice;
@@ -45,41 +45,12 @@ typedef struct AHCIState {
MemoryRegion idp; /* Index-Data Pair I/O port space */
unsigned idp_offset; /* Offset of index in I/O port space */
uint32_t idp_index; /* Current IDP index */
- int32_t ports;
+ uint32_t ports;
qemu_irq irq;
AddressSpace *as;
} AHCIState;
-typedef struct AHCIPCIState AHCIPCIState;
-#define TYPE_ICH9_AHCI "ich9-ahci"
-
-int32_t ahci_get_num_ports(PCIDevice *dev);
-void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd);
-
-#define TYPE_SYSBUS_AHCI "sysbus-ahci"
-
-typedef struct SysbusAHCIState {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- AHCIState ahci;
- uint32_t num_ports;
-} SysbusAHCIState;
-
-#define TYPE_ALLWINNER_AHCI "allwinner-ahci"
-
-#define ALLWINNER_AHCI_MMIO_OFF 0x80
-#define ALLWINNER_AHCI_MMIO_SIZE 0x80
-
-struct AllwinnerAHCIState {
- /*< private >*/
- SysbusAHCIState parent_obj;
- /*< public >*/
-
- MemoryRegion mmio;
- uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE/4];
-};
+void ahci_ide_create_devs(AHCIState *ahci, DriveInfo **hd);
#endif /* HW_IDE_AHCI_H */
diff --git a/include/hw/ide/ide-bus.h b/include/hw/ide/ide-bus.h
new file mode 100644
index 0000000000..4841a7dcd6
--- /dev/null
+++ b/include/hw/ide/ide-bus.h
@@ -0,0 +1,42 @@
+#ifndef HW_IDE_BUS_H
+#define HW_IDE_BUS_H
+
+#include "exec/ioport.h"
+#include "hw/ide/ide-dev.h"
+#include "hw/ide/ide-dma.h"
+
+struct IDEBus {
+ BusState qbus;
+ IDEDevice *master;
+ IDEDevice *slave;
+ IDEState ifs[2];
+ QEMUBH *bh;
+
+ int bus_id;
+ int max_units;
+ IDEDMA *dma;
+ uint8_t unit;
+ uint8_t cmd;
+ qemu_irq irq; /* bus output */
+
+ int error_status;
+ uint8_t retry_unit;
+ int64_t retry_sector_num;
+ uint32_t retry_nsector;
+ PortioList portio_list;
+ PortioList portio2_list;
+ VMChangeStateEntry *vmstate;
+};
+
+#define TYPE_IDE_BUS "IDE"
+OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS)
+
+void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
+ int bus_id, int max_units);
+IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive);
+
+int ide_get_geometry(BusState *bus, int unit,
+ int16_t *cyls, int8_t *heads, int8_t *secs);
+int ide_get_bios_chs_trans(BusState *bus, int unit);
+
+#endif
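For orientation, ide_bus_init() and ide_bus_create_drive() are typically paired when a controller realizes a channel. The fragment below is an illustrative sketch against this header only; the function name, unit choice and extra include are assumptions, not code from this series.

#include "qemu/osdep.h"
#include "hw/ide/ide-bus.h"
#include "sysemu/blockdev.h"    /* DriveInfo */

/* Hypothetical realize fragment wiring one two-unit IDE channel. */
static void example_ide_channel_setup(DeviceState *dev, IDEBus *bus,
                                      DriveInfo *di)
{
    ide_bus_init(bus, sizeof(*bus), dev, 0 /* bus_id */, 2 /* max_units */);
    if (di) {
        ide_bus_create_drive(bus, 0 /* unit */, di);
    }
}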
diff --git a/include/hw/ide/ide-dev.h b/include/hw/ide/ide-dev.h
new file mode 100644
index 0000000000..9a0d71db4e
--- /dev/null
+++ b/include/hw/ide/ide-dev.h
@@ -0,0 +1,186 @@
+/*
+ * ide device definitions
+ *
+ * Copyright (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef IDE_DEV_H
+#define IDE_DEV_H
+
+#include "sysemu/dma.h"
+#include "hw/qdev-properties.h"
+#include "hw/block/block.h"
+
+typedef struct IDEDevice IDEDevice;
+typedef struct IDEState IDEState;
+typedef struct IDEBus IDEBus;
+
+typedef void EndTransferFunc(IDEState *);
+
+#define MAX_IDE_DEVS 2
+
+#define TYPE_IDE_DEVICE "ide-device"
+OBJECT_DECLARE_TYPE(IDEDevice, IDEDeviceClass, IDE_DEVICE)
+
+typedef enum { IDE_HD, IDE_CD, IDE_CFATA } IDEDriveKind;
+
+struct unreported_events {
+ bool eject_request;
+ bool new_media;
+};
+
+enum ide_dma_cmd {
+ IDE_DMA_READ = 0,
+ IDE_DMA_WRITE,
+ IDE_DMA_TRIM,
+ IDE_DMA_ATAPI,
+ IDE_DMA__COUNT
+};
+
+/* NOTE: IDEState in fact represents a single drive */
+struct IDEState {
+ IDEBus *bus;
+ uint8_t unit;
+ /* ide config */
+ IDEDriveKind drive_kind;
+ int drive_heads, drive_sectors;
+ int cylinders, heads, sectors, chs_trans;
+ int64_t nb_sectors;
+ int mult_sectors;
+ int identify_set;
+ uint8_t identify_data[512];
+ int drive_serial;
+ char drive_serial_str[21];
+ char drive_model_str[41];
+ bool win2k_install_hack;
+ uint64_t wwn;
+ /* ide regs */
+ uint8_t feature;
+ uint8_t error;
+ uint32_t nsector;
+ uint8_t sector;
+ uint8_t lcyl;
+ uint8_t hcyl;
+ /* other part of tf for lba48 support */
+ uint8_t hob_feature;
+ uint8_t hob_nsector;
+ uint8_t hob_sector;
+ uint8_t hob_lcyl;
+ uint8_t hob_hcyl;
+
+ uint8_t select;
+ uint8_t status;
+
+ bool io8;
+ bool reset_reverts;
+
+ /* set for lba48 access */
+ uint8_t lba48;
+ BlockBackend *blk;
+ char version[9];
+ /* ATAPI specific */
+ struct unreported_events events;
+ uint8_t sense_key;
+ uint8_t asc;
+ bool tray_open;
+ bool tray_locked;
+ uint8_t cdrom_changed;
+ int packet_transfer_size;
+ int elementary_transfer_size;
+ int32_t io_buffer_index;
+ int lba;
+ int cd_sector_size;
+ int atapi_dma; /* true if dma is requested for the packet cmd */
+ BlockAcctCookie acct;
+ BlockAIOCB *pio_aiocb;
+ QEMUIOVector qiov;
+ QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
+ /* ATA DMA state */
+ uint64_t io_buffer_offset;
+ int32_t io_buffer_size;
+ QEMUSGList sg;
+ /* PIO transfer handling */
+ int req_nb_sectors; /* number of sectors per interrupt */
+ EndTransferFunc *end_transfer_func;
+ uint8_t *data_ptr;
+ uint8_t *data_end;
+ uint8_t *io_buffer;
+ /* PIO save/restore */
+ int32_t io_buffer_total_len;
+ int32_t cur_io_buffer_offset;
+ int32_t cur_io_buffer_len;
+ uint8_t end_transfer_fn_idx;
+ QEMUTimer *sector_write_timer; /* only used for win2k install hack */
+ uint32_t irq_count; /* counts IRQs when using win2k install hack */
+ /* CF-ATA extended error */
+ uint8_t ext_error;
+ /* CF-ATA metadata storage */
+ uint32_t mdata_size;
+ uint8_t *mdata_storage;
+ int media_changed;
+ enum ide_dma_cmd dma_cmd;
+ /* SMART */
+ uint8_t smart_enabled;
+ uint8_t smart_autosave;
+ int smart_errors;
+ uint8_t smart_selftest_count;
+ uint8_t *smart_selftest_data;
+ /* AHCI */
+ int ncq_queues;
+};
+
+struct IDEDeviceClass {
+ DeviceClass parent_class;
+ void (*realize)(IDEDevice *dev, Error **errp);
+};
+
+struct IDEDevice {
+ DeviceState qdev;
+ uint32_t unit;
+ BlockConf conf;
+ int chs_trans;
+ char *version;
+ char *serial;
+ char *model;
+ uint64_t wwn;
+ /*
+ * 0x0000 - rotation rate not reported
+ * 0x0001 - non-rotating medium (SSD)
+ * 0x0002-0x0400 - reserved
+     * 0x0401-0xfffe - rotations per minute
+ * 0xffff - reserved
+ */
+ uint16_t rotation_rate;
+ bool win2k_install_hack;
+};
+
+typedef struct IDEDrive {
+ IDEDevice dev;
+} IDEDrive;
+
+#define DEFINE_IDE_DEV_PROPERTIES() \
+ DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \
+ DEFINE_BLOCK_ERROR_PROPERTIES(IDEDrive, dev.conf), \
+ DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \
+ DEFINE_PROP_UINT64("wwn", IDEDrive, dev.wwn, 0), \
+ DEFINE_PROP_STRING("serial", IDEDrive, dev.serial),\
+ DEFINE_PROP_STRING("model", IDEDrive, dev.model)
+
+void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp);
+
+void ide_drive_get(DriveInfo **hd, int max_bus);
+
+#endif
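As a usage note, DEFINE_IDE_DEV_PROPERTIES() is meant to be expanded inside a qdev Property list of a drive model built around IDEDrive. The sketch below is illustrative only; the array name and the rotation-rate entry with its default are assumptions, not code from this series.

/* Hypothetical property list for a drive model built around IDEDrive. */
static Property ide_drive_example_properties[] = {
    DEFINE_IDE_DEV_PROPERTIES(),
    /* 0 = rotation rate not reported, per the encoding documented above */
    DEFINE_PROP_UINT16("rotation_rate", IDEDrive, dev.rotation_rate, 0),
    DEFINE_PROP_END_OF_LIST(),
};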
diff --git a/include/hw/ide/ide-dma.h b/include/hw/ide/ide-dma.h
new file mode 100644
index 0000000000..d0b19ac9c5
--- /dev/null
+++ b/include/hw/ide/ide-dma.h
@@ -0,0 +1,37 @@
+#ifndef HW_IDE_DMA_H
+#define HW_IDE_DMA_H
+
+#include "block/aio.h"
+#include "qemu/iov.h"
+
+typedef struct IDEState IDEState;
+typedef struct IDEDMAOps IDEDMAOps;
+typedef struct IDEDMA IDEDMA;
+
+typedef void DMAStartFunc(const IDEDMA *, IDEState *, BlockCompletionFunc *);
+typedef void DMAVoidFunc(const IDEDMA *);
+typedef int DMAIntFunc(const IDEDMA *, bool);
+typedef int32_t DMAInt32Func(const IDEDMA *, int32_t len);
+typedef void DMAu32Func(const IDEDMA *, uint32_t);
+typedef void DMAStopFunc(const IDEDMA *, bool);
+
+struct IDEDMAOps {
+ DMAStartFunc *start_dma;
+ DMAVoidFunc *pio_transfer;
+ DMAInt32Func *prepare_buf;
+ DMAu32Func *commit_buf;
+ DMAIntFunc *rw_buf;
+ DMAVoidFunc *restart;
+ DMAVoidFunc *restart_dma;
+ DMAStopFunc *set_inactive;
+ DMAVoidFunc *cmd_done;
+ DMAVoidFunc *reset;
+};
+
+struct IDEDMA {
+ const IDEDMAOps *ops;
+ QEMUIOVector qiov;
+ BlockAIOCB *aiocb;
+};
+
+#endif
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
deleted file mode 100644
index 880413ddc7..0000000000
--- a/include/hw/ide/internal.h
+++ /dev/null
@@ -1,649 +0,0 @@
-#ifndef HW_IDE_INTERNAL_H
-#define HW_IDE_INTERNAL_H
-
-/*
- * QEMU IDE Emulation -- internal header file
- * only files in hw/ide/ are supposed to include this file.
- * non-internal declarations are in hw/ide.h
- */
-#include "hw/ide.h"
-#include "hw/isa/isa.h"
-#include "sysemu/dma.h"
-#include "sysemu/sysemu.h"
-#include "hw/block/block.h"
-#include "scsi/constants.h"
-
-/* debug IDE devices */
-#define USE_DMA_CDROM
-
-typedef struct IDEBus IDEBus;
-typedef struct IDEDevice IDEDevice;
-typedef struct IDEState IDEState;
-typedef struct IDEDMA IDEDMA;
-typedef struct IDEDMAOps IDEDMAOps;
-
-#define TYPE_IDE_BUS "IDE"
-#define IDE_BUS(obj) OBJECT_CHECK(IDEBus, (obj), TYPE_IDE_BUS)
-
-/* Bits of HD_STATUS */
-#define ERR_STAT 0x01
-#define INDEX_STAT 0x02
-#define ECC_STAT 0x04 /* Corrected error */
-#define DRQ_STAT 0x08
-#define SEEK_STAT 0x10
-#define SRV_STAT 0x10
-#define WRERR_STAT 0x20
-#define READY_STAT 0x40
-#define BUSY_STAT 0x80
-
-/* Bits for HD_ERROR */
-#define MARK_ERR 0x01 /* Bad address mark */
-#define TRK0_ERR 0x02 /* couldn't find track 0 */
-#define ABRT_ERR 0x04 /* Command aborted */
-#define MCR_ERR 0x08 /* media change request */
-#define ID_ERR 0x10 /* ID field not found */
-#define MC_ERR 0x20 /* media changed */
-#define ECC_ERR 0x40 /* Uncorrectable ECC error */
-#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
-#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
-
-/* Bits of HD_NSECTOR */
-#define CD 0x01
-#define IO 0x02
-#define REL 0x04
-#define TAG_MASK 0xf8
-
-#define IDE_CMD_RESET 0x04
-#define IDE_CMD_DISABLE_IRQ 0x02
-
-/* ACS-2 T13/2015-D Table B.2 Command codes */
-#define WIN_NOP 0x00
-/* reserved 0x01..0x02 */
-#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */
-/* reserved 0x04..0x05 */
-#define WIN_DSM 0x06
-/* reserved 0x07 */
-#define WIN_DEVICE_RESET 0x08
-/* reserved 0x09..0x0a */
-/* REQUEST SENSE DATA EXT 0x0B */
-/* reserved 0x0C..0x0F */
-#define WIN_RECAL 0x10 /* obsolete since ATA4 */
-/* obsolete since ATA3, retired in ATA4 0x11..0x1F */
-#define WIN_READ 0x20 /* 28-Bit */
-#define WIN_READ_ONCE 0x21 /* 28-Bit w/o retries, obsolete since ATA5 */
-/* obsolete since ATA4 0x22..0x23 */
-#define WIN_READ_EXT 0x24 /* 48-Bit */
-#define WIN_READDMA_EXT 0x25 /* 48-Bit */
-#define WIN_READDMA_QUEUED_EXT 0x26 /* 48-Bit, obsolete since ACS2 */
-#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */
-/* reserved 0x28 */
-#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */
-/* READ STREAM DMA EXT 0x2A */
-/* READ STREAM EXT 0x2B */
-/* reserved 0x2C..0x2E */
-/* READ LOG EXT 0x2F */
-#define WIN_WRITE 0x30 /* 28-Bit */
-#define WIN_WRITE_ONCE 0x31 /* 28-Bit w/o retries, obsolete since ATA5 */
-/* obsolete since ATA4 0x32..0x33 */
-#define WIN_WRITE_EXT 0x34 /* 48-Bit */
-#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */
-#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */
-#define WIN_SET_MAX_EXT 0x37 /* 48-Bit, obsolete since ACS2 */
-#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */
-#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */
-#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */
-/* WRITE STREAM DMA EXT 0x3A */
-/* WRITE STREAM EXT 0x3B */
-#define WIN_WRITE_VERIFY 0x3C /* 28-Bit, obsolete since ATA4 */
-/* WRITE DMA FUA EXT 0x3D */
-/* obsolete since ACS2 0x3E */
-/* WRITE LOG EXT 0x3F */
-#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */
-#define WIN_VERIFY_ONCE 0x41 /* 28-Bit - w/o retries, obsolete since ATA5 */
-#define WIN_VERIFY_EXT 0x42 /* 48-Bit */
-/* reserved 0x43..0x44 */
-/* WRITE UNCORRECTABLE EXT 0x45 */
-/* reserved 0x46 */
-/* READ LOG DMA EXT 0x47 */
-/* reserved 0x48..0x4F */
-/* obsolete since ATA4 0x50 */
-/* CONFIGURE STREAM 0x51 */
-/* reserved 0x52..0x56 */
-/* WRITE LOG DMA EXT 0x57 */
-/* reserved 0x58..0x5A */
-/* TRUSTED NON DATA 0x5B */
-/* TRUSTED RECEIVE 0x5C */
-/* TRUSTED RECEIVE DMA 0x5D */
-/* TRUSTED SEND 0x5E */
-/* TRUSTED SEND DMA 0x5F */
-/* READ FPDMA QUEUED 0x60 */
-/* WRITE FPDMA QUEUED 0x61 */
-/* reserved 0x62->0x6F */
-#define WIN_SEEK 0x70 /* obsolete since ATA7 */
-/* reserved 0x71-0x7F */
-/* vendor specific 0x80-0x86 */
-#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */
-/* vendor specific 0x88-0x8F */
-#define WIN_DIAGNOSE 0x90
-#define WIN_SPECIFY 0x91 /* set drive geometry translation, obsolete since ATA6 */
-#define WIN_DOWNLOAD_MICROCODE 0x92
-/* DOWNLOAD MICROCODE DMA 0x93 */
-#define WIN_STANDBYNOW2 0x94 /* retired in ATA4 */
-#define WIN_IDLEIMMEDIATE2 0x95 /* force drive to become "ready", retired in ATA4 */
-#define WIN_STANDBY2 0x96 /* retired in ATA4 */
-#define WIN_SETIDLE2 0x97 /* retired in ATA4 */
-#define WIN_CHECKPOWERMODE2 0x98 /* retired in ATA4 */
-#define WIN_SLEEPNOW2 0x99 /* retired in ATA4 */
-/* vendor specific 0x9A */
-/* reserved 0x9B..0x9F */
-#define WIN_PACKETCMD 0xA0 /* Send a packet command. */
-#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */
-#define WIN_QUEUED_SERVICE 0xA2 /* obsolete since ACS2 */
-/* reserved 0xA3..0xAF */
-#define WIN_SMART 0xB0 /* self-monitoring and reporting */
-/* Device Configuration Overlay 0xB1 */
-/* reserved 0xB2..0xB3 */
-/* Sanitize Device 0xB4 */
-/* reserved 0xB5 */
-/* NV Cache 0xB6 */
-/* reserved for CFA 0xB7..0xBB */
-#define CFA_ACCESS_METADATA_STORAGE 0xB8
-/* reserved 0xBC..0xBF */
-#define CFA_ERASE_SECTORS 0xC0 /* microdrives implement as NOP */
-/* vendor specific 0xC1..0xC3 */
-#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/
-#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */
-#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */
-#define WIN_READDMA_QUEUED 0xC7 /* read sectors using Queued DMA transfers, obsolete since ACS2 */
-#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */
-#define WIN_READDMA_ONCE 0xC9 /* 28-Bit - w/o retries, obsolete since ATA5 */
-#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */
-#define WIN_WRITEDMA_ONCE 0xCB /* 28-Bit - w/o retries, obsolete since ATA5 */
-#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers, obsolete since ACS2 */
-#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */
-/* WRITE MULTIPLE FUA EXT 0xCE */
-/* reserved 0xCF..0xDO */
-/* CHECK MEDIA CARD TYPE 0xD1 */
-/* reserved for media card pass through 0xD2..0xD4 */
-/* reserved 0xD5..0xD9 */
-#define WIN_GETMEDIASTATUS 0xDA /* obsolete since ATA8 */
-/* obsolete since ATA3, retired in ATA4 0xDB..0xDD */
-#define WIN_DOORLOCK 0xDE /* lock door on removable drives, obsolete since ATA8 */
-#define WIN_DOORUNLOCK 0xDF /* unlock door on removable drives, obsolete since ATA8 */
-#define WIN_STANDBYNOW1 0xE0
-#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */
-#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */
-#define WIN_SETIDLE1 0xE3
-#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */
-#define WIN_CHECKPOWERMODE1 0xE5
-#define WIN_SLEEPNOW1 0xE6
-#define WIN_FLUSH_CACHE 0xE7
-#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */
-/* READ BUFFER DMA 0xE9 */
-#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */
-/* WRITE BUFFER DMA 0xEB */
-#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */
-#define WIN_MEDIAEJECT 0xED /* obsolete since ATA8 */
-/* obsolete since ATA4 0xEE */
-#define WIN_SETFEATURES 0xEF /* set special drive features */
-#define IBM_SENSE_CONDITION 0xF0 /* measure disk temperature, vendor specific */
-#define WIN_SECURITY_SET_PASS 0xF1
-#define WIN_SECURITY_UNLOCK 0xF2
-#define WIN_SECURITY_ERASE_PREPARE 0xF3
-#define WIN_SECURITY_ERASE_UNIT 0xF4
-#define WIN_SECURITY_FREEZE_LOCK 0xF5
-#define CFA_WEAR_LEVEL 0xF5 /* microdrives implement as NOP; not specified in T13! */
-#define WIN_SECURITY_DISABLE 0xF6
-/* vendor specific 0xF7 */
-#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */
-#define WIN_SET_MAX 0xF9
-/* vendor specific 0xFA..0xFF */
-
-/* set to 1 set disable mult support */
-#define MAX_MULT_SECTORS 16
-
-#define IDE_DMA_BUF_SECTORS 256
-
-/* feature values for Data Set Management */
-#define DSM_TRIM 0x01
-
-#if (IDE_DMA_BUF_SECTORS < MAX_MULT_SECTORS)
-#error "IDE_DMA_BUF_SECTORS must be bigger or equal to MAX_MULT_SECTORS"
-#endif
-
-/* ATAPI defines */
-
-#define ATAPI_PACKET_SIZE 12
-
-/* The generic packet command opcodes for CD/DVD Logical Units,
- * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-#define GPCMD_BLANK 0xa1
-#define GPCMD_CLOSE_TRACK 0x5b
-#define GPCMD_FLUSH_CACHE 0x35
-#define GPCMD_FORMAT_UNIT 0x04
-#define GPCMD_GET_CONFIGURATION 0x46
-#define GPCMD_GET_EVENT_STATUS_NOTIFICATION 0x4a
-#define GPCMD_GET_PERFORMANCE 0xac
-#define GPCMD_INQUIRY 0x12
-#define GPCMD_LOAD_UNLOAD 0xa6
-#define GPCMD_MECHANISM_STATUS 0xbd
-#define GPCMD_MODE_SELECT_10 0x55
-#define GPCMD_MODE_SENSE_10 0x5a
-#define GPCMD_PAUSE_RESUME 0x4b
-#define GPCMD_PLAY_AUDIO_10 0x45
-#define GPCMD_PLAY_AUDIO_MSF 0x47
-#define GPCMD_PLAY_AUDIO_TI 0x48
-#define GPCMD_PLAY_CD 0xbc
-#define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
-#define GPCMD_READ_10 0x28
-#define GPCMD_READ_12 0xa8
-#define GPCMD_READ_CDVD_CAPACITY 0x25
-#define GPCMD_READ_CD 0xbe
-#define GPCMD_READ_CD_MSF 0xb9
-#define GPCMD_READ_DISC_INFO 0x51
-#define GPCMD_READ_DVD_STRUCTURE 0xad
-#define GPCMD_READ_FORMAT_CAPACITIES 0x23
-#define GPCMD_READ_HEADER 0x44
-#define GPCMD_READ_TRACK_RZONE_INFO 0x52
-#define GPCMD_READ_SUBCHANNEL 0x42
-#define GPCMD_READ_TOC_PMA_ATIP 0x43
-#define GPCMD_REPAIR_RZONE_TRACK 0x58
-#define GPCMD_REPORT_KEY 0xa4
-#define GPCMD_REQUEST_SENSE 0x03
-#define GPCMD_RESERVE_RZONE_TRACK 0x53
-#define GPCMD_SCAN 0xba
-#define GPCMD_SEEK 0x2b
-#define GPCMD_SEND_DVD_STRUCTURE 0xad
-#define GPCMD_SEND_EVENT 0xa2
-#define GPCMD_SEND_KEY 0xa3
-#define GPCMD_SEND_OPC 0x54
-#define GPCMD_SET_READ_AHEAD 0xa7
-#define GPCMD_SET_STREAMING 0xb6
-#define GPCMD_START_STOP_UNIT 0x1b
-#define GPCMD_STOP_PLAY_SCAN 0x4e
-#define GPCMD_TEST_UNIT_READY 0x00
-#define GPCMD_VERIFY_10 0x2f
-#define GPCMD_WRITE_10 0x2a
-#define GPCMD_WRITE_AND_VERIFY_10 0x2e
-/* This is listed as optional in ATAPI 2.6, but is (curiously)
- * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji
- * Table 377 as an MMC command for SCSi devices though... Most ATAPI
- * drives support it. */
-#define GPCMD_SET_SPEED 0xbb
-/* This seems to be a SCSI specific CD-ROM opcode
- * to play data at track/index */
-#define GPCMD_PLAYAUDIO_TI 0x48
-/*
- * From MS Media Status Notification Support Specification. For
- * older drives only.
- */
-#define GPCMD_GET_MEDIA_STATUS 0xda
-#define GPCMD_MODE_SENSE_6 0x1a
-
-#define ATAPI_INT_REASON_CD 0x01 /* 0 = data transfer */
-#define ATAPI_INT_REASON_IO 0x02 /* 1 = transfer to the host */
-#define ATAPI_INT_REASON_REL 0x04
-#define ATAPI_INT_REASON_TAG 0xf8
-
-/* same constants as bochs */
-#define ASC_NO_SEEK_COMPLETE 0x02
-#define ASC_ILLEGAL_OPCODE 0x20
-#define ASC_LOGICAL_BLOCK_OOR 0x21
-#define ASC_INV_FIELD_IN_CMD_PACKET 0x24
-#define ASC_MEDIUM_MAY_HAVE_CHANGED 0x28
-#define ASC_INCOMPATIBLE_FORMAT 0x30
-#define ASC_MEDIUM_NOT_PRESENT 0x3a
-#define ASC_SAVING_PARAMETERS_NOT_SUPPORTED 0x39
-#define ASC_DATA_PHASE_ERROR 0x4b
-#define ASC_MEDIA_REMOVAL_PREVENTED 0x53
-
-#define CFA_NO_ERROR 0x00
-#define CFA_MISC_ERROR 0x09
-#define CFA_INVALID_COMMAND 0x20
-#define CFA_INVALID_ADDRESS 0x21
-#define CFA_ADDRESS_OVERFLOW 0x2f
-
-#define SMART_READ_DATA 0xd0
-#define SMART_READ_THRESH 0xd1
-#define SMART_ATTR_AUTOSAVE 0xd2
-#define SMART_SAVE_ATTR 0xd3
-#define SMART_EXECUTE_OFFLINE 0xd4
-#define SMART_READ_LOG 0xd5
-#define SMART_WRITE_LOG 0xd6
-#define SMART_ENABLE 0xd8
-#define SMART_DISABLE 0xd9
-#define SMART_STATUS 0xda
-
-typedef enum { IDE_HD, IDE_CD, IDE_CFATA } IDEDriveKind;
-
-typedef void EndTransferFunc(IDEState *);
-
-typedef void DMAStartFunc(IDEDMA *, IDEState *, BlockCompletionFunc *);
-typedef void DMAVoidFunc(IDEDMA *);
-typedef int DMAIntFunc(IDEDMA *, int);
-typedef int32_t DMAInt32Func(IDEDMA *, int32_t len);
-typedef void DMAu32Func(IDEDMA *, uint32_t);
-typedef void DMAStopFunc(IDEDMA *, bool);
-typedef void DMARestartFunc(void *, int, RunState);
-
-struct unreported_events {
- bool eject_request;
- bool new_media;
-};
-
-enum ide_dma_cmd {
- IDE_DMA_READ = 0,
- IDE_DMA_WRITE,
- IDE_DMA_TRIM,
- IDE_DMA_ATAPI,
- IDE_DMA__COUNT
-};
-
-extern const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT];
-
-#define ide_cmd_is_read(s) \
- ((s)->dma_cmd == IDE_DMA_READ)
-
-typedef struct IDEBufferedRequest {
- QLIST_ENTRY(IDEBufferedRequest) list;
- struct iovec iov;
- QEMUIOVector qiov;
- QEMUIOVector *original_qiov;
- BlockCompletionFunc *original_cb;
- void *original_opaque;
- bool orphaned;
-} IDEBufferedRequest;
-
-/* NOTE: IDEState represents in fact one drive */
-struct IDEState {
- IDEBus *bus;
- uint8_t unit;
- /* ide config */
- IDEDriveKind drive_kind;
- int cylinders, heads, sectors, chs_trans;
- int64_t nb_sectors;
- int mult_sectors;
- int identify_set;
- uint8_t identify_data[512];
- int drive_serial;
- char drive_serial_str[21];
- char drive_model_str[41];
- uint64_t wwn;
- /* ide regs */
- uint8_t feature;
- uint8_t error;
- uint32_t nsector;
- uint8_t sector;
- uint8_t lcyl;
- uint8_t hcyl;
- /* other part of tf for lba48 support */
- uint8_t hob_feature;
- uint8_t hob_nsector;
- uint8_t hob_sector;
- uint8_t hob_lcyl;
- uint8_t hob_hcyl;
-
- uint8_t select;
- uint8_t status;
-
- /* set for lba48 access */
- uint8_t lba48;
- BlockBackend *blk;
- char version[9];
- /* ATAPI specific */
- struct unreported_events events;
- uint8_t sense_key;
- uint8_t asc;
- bool tray_open;
- bool tray_locked;
- uint8_t cdrom_changed;
- int packet_transfer_size;
- int elementary_transfer_size;
- int32_t io_buffer_index;
- int lba;
- int cd_sector_size;
- int atapi_dma; /* true if dma is requested for the packet cmd */
- BlockAcctCookie acct;
- BlockAIOCB *pio_aiocb;
- struct iovec iov;
- QEMUIOVector qiov;
- QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
- /* ATA DMA state */
- uint64_t io_buffer_offset;
- int32_t io_buffer_size;
- QEMUSGList sg;
- /* PIO transfer handling */
- int req_nb_sectors; /* number of sectors per interrupt */
- EndTransferFunc *end_transfer_func;
- uint8_t *data_ptr;
- uint8_t *data_end;
- uint8_t *io_buffer;
- /* PIO save/restore */
- int32_t io_buffer_total_len;
- int32_t cur_io_buffer_offset;
- int32_t cur_io_buffer_len;
- uint8_t end_transfer_fn_idx;
- QEMUTimer *sector_write_timer; /* only used for win2k install hack */
- uint32_t irq_count; /* counts IRQs when using win2k install hack */
- /* CF-ATA extended error */
- uint8_t ext_error;
- /* CF-ATA metadata storage */
- uint32_t mdata_size;
- uint8_t *mdata_storage;
- int media_changed;
- enum ide_dma_cmd dma_cmd;
- /* SMART */
- uint8_t smart_enabled;
- uint8_t smart_autosave;
- int smart_errors;
- uint8_t smart_selftest_count;
- uint8_t *smart_selftest_data;
- /* AHCI */
- int ncq_queues;
-};
-
-struct IDEDMAOps {
- DMAStartFunc *start_dma;
- DMAVoidFunc *pio_transfer;
- DMAInt32Func *prepare_buf;
- DMAu32Func *commit_buf;
- DMAIntFunc *rw_buf;
- DMAVoidFunc *restart;
- DMAVoidFunc *restart_dma;
- DMAStopFunc *set_inactive;
- DMAVoidFunc *cmd_done;
- DMAVoidFunc *reset;
-};
-
-struct IDEDMA {
- const struct IDEDMAOps *ops;
- struct iovec iov;
- QEMUIOVector qiov;
- BlockAIOCB *aiocb;
-};
-
-struct IDEBus {
- BusState qbus;
- IDEDevice *master;
- IDEDevice *slave;
- IDEState ifs[2];
- QEMUBH *bh;
-
- int bus_id;
- int max_units;
- IDEDMA *dma;
- uint8_t unit;
- uint8_t cmd;
- qemu_irq irq;
-
- int error_status;
- uint8_t retry_unit;
- int64_t retry_sector_num;
- uint32_t retry_nsector;
- PortioList portio_list;
- PortioList portio2_list;
- VMChangeStateEntry *vmstate;
-};
-
-#define TYPE_IDE_DEVICE "ide-device"
-#define IDE_DEVICE(obj) \
- OBJECT_CHECK(IDEDevice, (obj), TYPE_IDE_DEVICE)
-#define IDE_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(IDEDeviceClass, (klass), TYPE_IDE_DEVICE)
-#define IDE_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IDEDeviceClass, (obj), TYPE_IDE_DEVICE)
-
-typedef struct IDEDeviceClass {
- DeviceClass parent_class;
- void (*realize)(IDEDevice *dev, Error **errp);
-} IDEDeviceClass;
-
-struct IDEDevice {
- DeviceState qdev;
- uint32_t unit;
- BlockConf conf;
- int chs_trans;
- char *version;
- char *serial;
- char *model;
- uint64_t wwn;
- /*
- * 0x0000 - rotation rate not reported
- * 0x0001 - non-rotating medium (SSD)
- * 0x0002-0x0400 - reserved
- * 0x0401-0xffe - rotations per minute
- * 0xffff - reserved
- */
- uint16_t rotation_rate;
-};
-
-/* These are used for the error_status field of IDEBus */
-#define IDE_RETRY_MASK 0xf8
-#define IDE_RETRY_DMA 0x08
-#define IDE_RETRY_PIO 0x10
-#define IDE_RETRY_ATAPI 0x20 /* reused IDE_RETRY_READ bit */
-#define IDE_RETRY_READ 0x20
-#define IDE_RETRY_FLUSH 0x40
-#define IDE_RETRY_TRIM 0x80
-#define IDE_RETRY_HBA 0x100
-
-#define IS_IDE_RETRY_DMA(_status) \
- ((_status) & IDE_RETRY_DMA)
-
-#define IS_IDE_RETRY_PIO(_status) \
- ((_status) & IDE_RETRY_PIO)
-
-/*
- * The method of the IDE_RETRY_ATAPI determination is to use a previously
- * impossible bit combination as a new status value.
- */
-#define IS_IDE_RETRY_ATAPI(_status) \
- (((_status) & IDE_RETRY_MASK) == IDE_RETRY_ATAPI)
-
-static inline uint8_t ide_dma_cmd_to_retry(uint8_t dma_cmd)
-{
- switch (dma_cmd) {
- case IDE_DMA_READ:
- return IDE_RETRY_DMA | IDE_RETRY_READ;
- case IDE_DMA_WRITE:
- return IDE_RETRY_DMA;
- case IDE_DMA_TRIM:
- return IDE_RETRY_DMA | IDE_RETRY_TRIM;
- case IDE_DMA_ATAPI:
- return IDE_RETRY_ATAPI;
- default:
- break;
- }
- return 0;
-}
-
-static inline IDEState *idebus_active_if(IDEBus *bus)
-{
- return bus->ifs + bus->unit;
-}
-
-static inline void ide_set_irq(IDEBus *bus)
-{
- if (!(bus->cmd & IDE_CMD_DISABLE_IRQ)) {
- qemu_irq_raise(bus->irq);
- }
-}
-
-/* hw/ide/core.c */
-extern const VMStateDescription vmstate_ide_bus;
-
-#define VMSTATE_IDE_BUS(_field, _state) \
- VMSTATE_STRUCT(_field, _state, 1, vmstate_ide_bus, IDEBus)
-
-#define VMSTATE_IDE_BUS_ARRAY(_field, _state, _num) \
- VMSTATE_STRUCT_ARRAY(_field, _state, _num, 1, vmstate_ide_bus, IDEBus)
-
-extern const VMStateDescription vmstate_ide_drive;
-
-#define VMSTATE_IDE_DRIVES(_field, _state) \
- VMSTATE_STRUCT_ARRAY(_field, _state, 2, 3, vmstate_ide_drive, IDEState)
-
-#define VMSTATE_IDE_DRIVE(_field, _state) \
- VMSTATE_STRUCT(_field, _state, 1, vmstate_ide_drive, IDEState)
-
-void ide_bus_reset(IDEBus *bus);
-int64_t ide_get_sector(IDEState *s);
-void ide_set_sector(IDEState *s, int64_t sector_num);
-
-void ide_start_dma(IDEState *s, BlockCompletionFunc *cb);
-void dma_buf_commit(IDEState *s, uint32_t tx_bytes);
-void ide_dma_error(IDEState *s);
-void ide_abort_command(IDEState *s);
-
-void ide_atapi_cmd_ok(IDEState *s);
-void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc);
-void ide_atapi_dma_restart(IDEState *s);
-void ide_atapi_io_error(IDEState *s, int ret);
-
-void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val);
-uint32_t ide_ioport_read(void *opaque, uint32_t addr1);
-uint32_t ide_status_read(void *opaque, uint32_t addr);
-void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val);
-void ide_data_writew(void *opaque, uint32_t addr, uint32_t val);
-uint32_t ide_data_readw(void *opaque, uint32_t addr);
-void ide_data_writel(void *opaque, uint32_t addr, uint32_t val);
-uint32_t ide_data_readl(void *opaque, uint32_t addr);
-
-int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
- const char *version, const char *serial, const char *model,
- uint64_t wwn,
- uint32_t cylinders, uint32_t heads, uint32_t secs,
- int chs_trans, Error **errp);
-void ide_init2(IDEBus *bus, qemu_irq irq);
-void ide_exit(IDEState *s);
-void ide_init_ioport(IDEBus *bus, ISADevice *isa, int iobase, int iobase2);
-void ide_register_restart_cb(IDEBus *bus);
-
-void ide_exec_cmd(IDEBus *bus, uint32_t val);
-
-void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
- EndTransferFunc *end_transfer_func);
-bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
- EndTransferFunc *end_transfer_func);
-void ide_transfer_stop(IDEState *s);
-void ide_set_inactive(IDEState *s, bool more);
-BlockAIOCB *ide_issue_trim(
- int64_t offset, QEMUIOVector *qiov,
- BlockCompletionFunc *cb, void *cb_opaque, void *opaque);
-BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque);
-void ide_cancel_dma_sync(IDEState *s);
-
-/* hw/ide/atapi.c */
-void ide_atapi_cmd(IDEState *s);
-void ide_atapi_cmd_reply_end(IDEState *s);
-
-/* hw/ide/qdev.c */
-void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
- int bus_id, int max_units);
-IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive);
-
-int ide_handle_rw_error(IDEState *s, int error, int op);
-
-#endif /* HW_IDE_INTERNAL_H */
diff --git a/include/hw/ide/isa.h b/include/hw/ide/isa.h
new file mode 100644
index 0000000000..1cd0ff1fa6
--- /dev/null
+++ b/include/hw/ide/isa.h
@@ -0,0 +1,20 @@
+/*
+ * QEMU IDE Emulation: ISA Bus support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef HW_IDE_ISA_H
+#define HW_IDE_ISA_H
+
+#include "qom/object.h"
+
+#define TYPE_ISA_IDE "isa-ide"
+OBJECT_DECLARE_SIMPLE_TYPE(ISAIDEState, ISA_IDE)
+
+ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int irqnum,
+ DriveInfo *hd0, DriveInfo *hd1);
+
+#endif
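
As a usage sketch only (the bus variable, drives and resources below are illustrative, not taken from this patch), a board would typically wire the legacy primary IDE channel through the prototype declared above:

    /* Sketch: legacy primary IDE channel on the ISA bus (illustrative values). */
    DriveInfo *hd0 = drive_get(IF_IDE, 0, 0);
    DriveInfo *hd1 = drive_get(IF_IDE, 0, 1);
    isa_ide_init(isa_bus, 0x1f0, 0x3f6, 14, hd0, hd1);
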
diff --git a/include/hw/ide/mmio.h b/include/hw/ide/mmio.h
new file mode 100644
index 0000000000..d726a49848
--- /dev/null
+++ b/include/hw/ide/mmio.h
@@ -0,0 +1,26 @@
+/*
+ * QEMU IDE Emulation: mmio support (for embedded).
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef HW_IDE_MMIO_H
+#define HW_IDE_MMIO_H
+
+#include "qom/object.h"
+
+/*
+ * QEMU interface:
+ * + sysbus IRQ 0: asserted by the IDE channel
+ * + sysbus MMIO region 0: data registers
+ * + sysbus MMIO region 1: status & control registers
+ */
+#define TYPE_MMIO_IDE "mmio-ide"
+OBJECT_DECLARE_SIMPLE_TYPE(MMIOIDEState, MMIO_IDE)
+
+void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1);
+
+#endif
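
The interface comment above lists one sysbus IRQ and two MMIO regions; a minimal wiring sketch (addresses, IRQ line and drive variables are placeholders, not part of this patch) could look like:

    /* Sketch: create, realize and map the mmio-ide device. */
    DeviceState *dev = qdev_new(TYPE_MMIO_IDE);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_connect_irq(sbd, 0, ide_irq);      /* sysbus IRQ 0 */
    sysbus_mmio_map(sbd, 0, 0x14000000);      /* data registers */
    sysbus_mmio_map(sbd, 1, 0x1400002c);      /* status & control registers */
    mmio_ide_init_drives(dev, hd0, hd1);
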
diff --git a/include/hw/ide/pci.h b/include/hw/ide/pci.h
index dbc6a0383d..ef03764caa 100644
--- a/include/hw/ide/pci.h
+++ b/include/hw/ide/pci.h
@@ -1,7 +1,9 @@
#ifndef HW_IDE_PCI_H
#define HW_IDE_PCI_H
-#include "hw/ide/internal.h"
+#include "hw/ide/ide-bus.h"
+#include "hw/pci/pci_device.h"
+#include "qom/object.h"
#define BM_STATUS_DMAING 0x01
#define BM_STATUS_ERROR 0x02
@@ -37,40 +39,31 @@ typedef struct BMDMAState {
struct PCIIDEState *pci_dev;
} BMDMAState;
-typedef struct CMD646BAR {
- MemoryRegion cmd;
- MemoryRegion data;
- IDEBus *bus;
- struct PCIIDEState *pci_dev;
-} CMD646BAR;
-
#define TYPE_PCI_IDE "pci-ide"
-#define PCI_IDE(obj) OBJECT_CHECK(PCIIDEState, (obj), TYPE_PCI_IDE)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIIDEState, PCI_IDE)
-typedef struct PCIIDEState {
+struct PCIIDEState {
/*< private >*/
PCIDevice parent_obj;
/*< public >*/
IDEBus bus[2];
BMDMAState bmdma[2];
+ qemu_irq isa_irq[2];
uint32_t secondary; /* used only for cmd646 */
MemoryRegion bmdma_bar;
- CMD646BAR cmd646_bar[2]; /* used only for cmd646 */
-} PCIIDEState;
-
-
-static inline IDEState *bmdma_active_if(BMDMAState *bmdma)
-{
- assert(bmdma->bus->retry_unit != (uint8_t)-1);
- return bmdma->bus->ifs + bmdma->bus->retry_unit;
-}
-
+ MemoryRegion cmd_bar[2];
+ MemoryRegion data_bar[2];
+};
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d);
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val);
+void bmdma_status_writeb(BMDMAState *bm, uint32_t val);
extern MemoryRegionOps bmdma_addr_ioport_ops;
-void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table);
+void pci_ide_create_devs(PCIDevice *dev);
+void pci_ide_update_mode(PCIIDEState *s);
extern const VMStateDescription vmstate_ide_pci;
+extern const MemoryRegionOps pci_ide_cmd_le_ops;
+extern const MemoryRegionOps pci_ide_data_le_ops;
#endif
diff --git a/include/hw/ide/piix.h b/include/hw/ide/piix.h
new file mode 100644
index 0000000000..ef3ef3d62d
--- /dev/null
+++ b/include/hw/ide/piix.h
@@ -0,0 +1,7 @@
+#ifndef HW_IDE_PIIX_H
+#define HW_IDE_PIIX_H
+
+#define TYPE_PIIX3_IDE "piix3-ide"
+#define TYPE_PIIX4_IDE "piix4-ide"
+
+#endif /* HW_IDE_PIIX_H */
diff --git a/include/hw/input/adb.h b/include/hw/input/adb.h
index f99d478252..20fced15f7 100644
--- a/include/hw/input/adb.h
+++ b/include/hw/input/adb.h
@@ -26,21 +26,23 @@
#ifndef ADB_H
#define ADB_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
#define MAX_ADB_DEVICES 16
#define ADB_MAX_OUT_LEN 16
-typedef struct ADBBusState ADBBusState;
typedef struct ADBDevice ADBDevice;
/* buf = NULL means polling */
typedef int ADBDeviceRequest(ADBDevice *d, uint8_t *buf_out,
const uint8_t *buf, int len);
+typedef bool ADBDeviceHasData(ADBDevice *d);
+
#define TYPE_ADB_DEVICE "adb-device"
-#define ADB_DEVICE(obj) OBJECT_CHECK(ADBDevice, (obj), TYPE_ADB_DEVICE)
+OBJECT_DECLARE_TYPE(ADBDevice, ADBDeviceClass, ADB_DEVICE)
struct ADBDevice {
/*< private >*/
@@ -49,24 +51,23 @@ struct ADBDevice {
int devaddr;
int handler;
- bool disable_direct_reg3_writes;
};
-#define ADB_DEVICE_CLASS(cls) \
- OBJECT_CLASS_CHECK(ADBDeviceClass, (cls), TYPE_ADB_DEVICE)
-#define ADB_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ADBDeviceClass, (obj), TYPE_ADB_DEVICE)
-typedef struct ADBDeviceClass {
+struct ADBDeviceClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
ADBDeviceRequest *devreq;
-} ADBDeviceClass;
+ ADBDeviceHasData *devhasdata;
+};
#define TYPE_ADB_BUS "apple-desktop-bus"
-#define ADB_BUS(obj) OBJECT_CHECK(ADBBusState, (obj), TYPE_ADB_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(ADBBusState, ADB_BUS)
+
+#define ADB_STATUS_BUSTIMEOUT 0x1
+#define ADB_STATUS_POLLREPLY 0x2
struct ADBBusState {
/*< private >*/
@@ -74,14 +75,33 @@ struct ADBBusState {
/*< public >*/
ADBDevice *devices[MAX_ADB_DEVICES];
+ uint16_t pending;
int nb_devices;
int poll_index;
+ uint8_t status;
+
+ QEMUTimer *autopoll_timer;
+ bool autopoll_enabled;
+ bool autopoll_blocked;
+ uint8_t autopoll_rate_ms;
+ uint16_t autopoll_mask;
+ void (*autopoll_cb)(void *opaque);
+ void *autopoll_cb_opaque;
};
int adb_request(ADBBusState *s, uint8_t *buf_out,
const uint8_t *buf, int len);
int adb_poll(ADBBusState *s, uint8_t *buf_out, uint16_t poll_mask);
+void adb_autopoll_block(ADBBusState *s);
+void adb_autopoll_unblock(ADBBusState *s);
+
+void adb_set_autopoll_enabled(ADBBusState *s, bool enabled);
+void adb_set_autopoll_rate_ms(ADBBusState *s, int rate_ms);
+void adb_set_autopoll_mask(ADBBusState *s, uint16_t mask);
+void adb_register_autopoll_callback(ADBBusState *s, void (*cb)(void *opaque),
+ void *opaque);
+
#define TYPE_ADB_KEYBOARD "adb-keyboard"
#define TYPE_ADB_MOUSE "adb-mouse"
diff --git a/include/hw/input/hid.h b/include/hw/input/hid.h
index 2127c7ce45..6a9d7bf466 100644
--- a/include/hw/input/hid.h
+++ b/include/hw/input/hid.h
@@ -1,7 +1,6 @@
#ifndef QEMU_HID_H
#define QEMU_HID_H
-#include "migration/vmstate.h"
#include "ui/input.h"
#define HID_MOUSE 1
diff --git a/include/hw/input/i8042.h b/include/hw/input/i8042.h
index f6ff146364..e90f008b66 100644
--- a/include/hw/input/i8042.h
+++ b/include/hw/input/i8042.h
@@ -8,17 +8,101 @@
#ifndef HW_INPUT_I8042_H
#define HW_INPUT_I8042_H
-#include "hw/hw.h"
#include "hw/isa/isa.h"
+#include "hw/sysbus.h"
+#include "hw/input/ps2.h"
+#include "qom/object.h"
+
+#define I8042_KBD_IRQ 0
+#define I8042_MOUSE_IRQ 1
+
+typedef struct KBDState {
+ uint8_t write_cmd; /* if non-zero, a write to data port 0x60 is expected */
+ uint8_t status;
+ uint8_t mode;
+ uint8_t outport;
+ uint32_t migration_flags;
+ uint32_t obsrc;
+ bool outport_present;
+ bool extended_state;
+ bool extended_state_loaded;
+ /* Bitmask of devices with data available. */
+ uint8_t pending;
+ uint8_t obdata;
+ uint8_t cbdata;
+ uint8_t pending_tmp;
+ PS2KbdState ps2kbd;
+ PS2MouseState ps2mouse;
+ QEMUTimer *throttle_timer;
+
+ qemu_irq irqs[2];
+ qemu_irq a20_out;
+ hwaddr mask;
+} KBDState;
+
+/*
+ * QEMU interface:
+ * + Named GPIO input "ps2-kbd-input-irq": set to 1 if the downstream PS2
+ * keyboard device has asserted its irq
+ * + Named GPIO input "ps2-mouse-input-irq": set to 1 if the downstream PS2
+ * mouse device has asserted its irq
+ * + Named GPIO output "a20": A20 line for x86 PCs
+ * + Unnamed GPIO output 0-1: i8042 output irqs for keyboard (0) or mouse (1)
+ */
#define TYPE_I8042 "i8042"
+OBJECT_DECLARE_SIMPLE_TYPE(ISAKBDState, I8042)
+
+struct ISAKBDState {
+ ISADevice parent_obj;
+
+ KBDState kbd;
+ bool kbd_throttle;
+ MemoryRegion io[2];
+ uint8_t kbd_irq;
+ uint8_t mouse_irq;
+};
+
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion defining the command/status/data
+ * registers (access determined by mask property and access type)
+ * + Named GPIO input "ps2-kbd-input-irq": set to 1 if the downstream PS2
+ * keyboard device has asserted its irq
+ * + Named GPIO input "ps2-mouse-input-irq": set to 1 if the downstream PS2
+ * mouse device has asserted its irq
+ * + Unnamed GPIO output 0-1: i8042 output irqs for keyboard (0) or mouse (1)
+ */
+
+#define TYPE_I8042_MMIO "i8042-mmio"
+OBJECT_DECLARE_SIMPLE_TYPE(MMIOKBDState, I8042_MMIO)
+
+struct MMIOKBDState {
+ SysBusDevice parent_obj;
+
+ KBDState kbd;
+ uint32_t size;
+ MemoryRegion region;
+};
#define I8042_A20_LINE "a20"
-void i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq,
- MemoryRegion *region, ram_addr_t size,
- hwaddr mask);
-void i8042_isa_mouse_fake_event(void *opaque);
-void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out);
+
+void i8042_isa_mouse_fake_event(ISAKBDState *isa);
+
+static inline bool i8042_present(void)
+{
+ bool amb = false;
+ return object_resolve_path_type("", TYPE_I8042, &amb) || amb;
+}
+
+/*
+ * ACPI v2, Table 5-10 - Fixed ACPI Description Table Boot Architecture
+ * Flags, bit offset 1 - 8042.
+ */
+static inline uint16_t iapc_boot_arch_8042(void)
+{
+ return i8042_present() ? 0x1 << 1 : 0x0;
+}
#endif /* HW_INPUT_I8042_H */
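
The iapc_boot_arch_8042() helper above yields the FADT "IA-PC Boot Architecture Flags" bit for the 8042. A hedged sketch of how firmware-table code could consume it (the variable is illustrative, not the exact QEMU ACPI structures):

    /* Sketch: fold the 8042 flag into the boot-architecture flags word. */
    uint16_t boot_arch_flags = 0;
    boot_arch_flags |= iapc_boot_arch_8042();   /* bit 1 set iff an i8042 exists */
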
diff --git a/include/hw/input/lasips2.h b/include/hw/input/lasips2.h
new file mode 100644
index 0000000000..01911c50f9
--- /dev/null
+++ b/include/hw/input/lasips2.h
@@ -0,0 +1,80 @@
+/*
+ * QEMU LASI PS/2 emulation
+ *
+ * Copyright (c) 2019 Sven Schnelle
+ *
+ */
+
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion defining the LASI PS2 keyboard
+ * registers
+ * + sysbus MMIO region 1: MemoryRegion defining the LASI PS2 mouse
+ * registers
+ * + sysbus IRQ 0: LASI PS2 output irq
+ * + Named GPIO input "lasips2-port-input-irq[0..1]": set to 1 if the downstream
+ * LASIPS2Port has asserted its irq
+ */
+
+#ifndef HW_INPUT_LASIPS2_H
+#define HW_INPUT_LASIPS2_H
+
+#include "exec/hwaddr.h"
+#include "hw/sysbus.h"
+#include "hw/input/ps2.h"
+
+#define TYPE_LASIPS2_PORT "lasips2-port"
+OBJECT_DECLARE_TYPE(LASIPS2Port, LASIPS2PortDeviceClass, LASIPS2_PORT)
+
+struct LASIPS2PortDeviceClass {
+ DeviceClass parent;
+
+ DeviceRealize parent_realize;
+};
+
+typedef struct LASIPS2State LASIPS2State;
+
+struct LASIPS2Port {
+ DeviceState parent_obj;
+
+ LASIPS2State *lasips2;
+ MemoryRegion reg;
+ PS2State *ps2dev;
+ uint8_t id;
+ uint8_t control;
+ uint8_t buf;
+ bool loopback_rbne;
+ qemu_irq irq;
+};
+
+#define TYPE_LASIPS2_KBD_PORT "lasips2-kbd-port"
+OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2KbdPort, LASIPS2_KBD_PORT)
+
+struct LASIPS2KbdPort {
+ LASIPS2Port parent_obj;
+
+ PS2KbdState kbd;
+};
+
+#define TYPE_LASIPS2_MOUSE_PORT "lasips2-mouse-port"
+OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2MousePort, LASIPS2_MOUSE_PORT)
+
+struct LASIPS2MousePort {
+ LASIPS2Port parent_obj;
+
+ PS2MouseState mouse;
+};
+
+struct LASIPS2State {
+ SysBusDevice parent_obj;
+
+ LASIPS2KbdPort kbd_port;
+ LASIPS2MousePort mouse_port;
+ uint8_t int_status;
+ qemu_irq irq;
+};
+
+#define TYPE_LASIPS2 "lasips2"
+OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2State, LASIPS2)
+
+#endif /* HW_INPUT_LASIPS2_H */
diff --git a/include/hw/input/lm832x.h b/include/hw/input/lm832x.h
new file mode 100644
index 0000000000..e0e5d5ef20
--- /dev/null
+++ b/include/hw/input/lm832x.h
@@ -0,0 +1,28 @@
+/*
+ * National Semiconductor LM8322/8323 GPIO keyboard & PWM chips.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_INPUT_LM832X_H
+#define HW_INPUT_LM832X_H
+
+#define TYPE_LM8323 "lm8323"
+
+void lm832x_key_event(DeviceState *dev, int key, int state);
+
+#endif
diff --git a/include/hw/input/pl050.h b/include/hw/input/pl050.h
new file mode 100644
index 0000000000..4cb8985f31
--- /dev/null
+++ b/include/hw/input/pl050.h
@@ -0,0 +1,58 @@
+/*
+ * Arm PrimeCell PL050 Keyboard / Mouse Interface
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifndef HW_PL050_H
+#define HW_PL050_H
+
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/input/ps2.h"
+#include "hw/irq.h"
+
+struct PL050DeviceClass {
+ SysBusDeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+#define TYPE_PL050 "pl050"
+OBJECT_DECLARE_TYPE(PL050State, PL050DeviceClass, PL050)
+
+struct PL050State {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ PS2State *ps2dev;
+ uint32_t cr;
+ uint32_t clk;
+ uint32_t last;
+ int pending;
+ qemu_irq irq;
+ bool is_mouse;
+};
+
+#define TYPE_PL050_KBD_DEVICE "pl050_keyboard"
+OBJECT_DECLARE_SIMPLE_TYPE(PL050KbdState, PL050_KBD_DEVICE)
+
+struct PL050KbdState {
+ PL050State parent_obj;
+
+ PS2KbdState kbd;
+};
+
+#define TYPE_PL050_MOUSE_DEVICE "pl050_mouse"
+OBJECT_DECLARE_SIMPLE_TYPE(PL050MouseState, PL050_MOUSE_DEVICE)
+
+struct PL050MouseState {
+ PL050State parent_obj;
+
+ PS2MouseState mouse;
+};
+
+#endif
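
A board could instantiate the two PL050 variants with the usual sysbus helper; the addresses and IRQ lines below are placeholders, not values from this patch:

    /* Sketch: one keyboard and one mouse interface. */
    sysbus_create_simple(TYPE_PL050_KBD_DEVICE,   0x18000000, kbd_irq);
    sysbus_create_simple(TYPE_PL050_MOUSE_DEVICE, 0x19000000, mouse_irq);
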
diff --git a/include/hw/input/ps2.h b/include/hw/input/ps2.h
index 213aa16aa3..cd61a634c3 100644
--- a/include/hw/input/ps2.h
+++ b/include/hw/input/ps2.h
@@ -25,25 +25,89 @@
#ifndef HW_PS2_H
#define HW_PS2_H
+#include "hw/sysbus.h"
+
#define PS2_MOUSE_BUTTON_LEFT 0x01
#define PS2_MOUSE_BUTTON_RIGHT 0x02
#define PS2_MOUSE_BUTTON_MIDDLE 0x04
#define PS2_MOUSE_BUTTON_SIDE 0x08
#define PS2_MOUSE_BUTTON_EXTRA 0x10
+struct PS2DeviceClass {
+ SysBusDeviceClass parent_class;
+
+ ResettablePhases parent_phases;
+};
+
+/*
+ * PS/2 buffer size. Keep 256 bytes for compatibility with
+ * older QEMU versions.
+ */
+#define PS2_BUFFER_SIZE 256
+
+typedef struct {
+ uint8_t data[PS2_BUFFER_SIZE];
+ int rptr, wptr, cwptr, count;
+} PS2Queue;
+
+/* Output IRQ */
+#define PS2_DEVICE_IRQ 0
+
+struct PS2State {
+ SysBusDevice parent_obj;
+
+ PS2Queue queue;
+ int32_t write_cmd;
+ qemu_irq irq;
+};
+
+#define TYPE_PS2_DEVICE "ps2-device"
+OBJECT_DECLARE_TYPE(PS2State, PS2DeviceClass, PS2_DEVICE)
+
+struct PS2KbdState {
+ PS2State parent_obj;
+
+ int scan_enabled;
+ int translate;
+ int scancode_set; /* 1=XT, 2=AT, 3=PS/2 */
+ int ledstate;
+ bool need_high_bit;
+ unsigned int modifiers; /* bitmask of MOD_* constants (defined in hw/input/ps2.c) */
+};
+
+#define TYPE_PS2_KBD_DEVICE "ps2-kbd"
+OBJECT_DECLARE_SIMPLE_TYPE(PS2KbdState, PS2_KBD_DEVICE)
+
+struct PS2MouseState {
+ PS2State parent_obj;
+
+ uint8_t mouse_status;
+ uint8_t mouse_resolution;
+ uint8_t mouse_sample_rate;
+ uint8_t mouse_wrap;
+ uint8_t mouse_type; /* 0 = PS2, 3 = IMPS/2, 4 = IMEX */
+ uint8_t mouse_detect_state;
+ int mouse_dx; /* current values, needed for 'poll' mode */
+ int mouse_dy;
+ int mouse_dz;
+ int mouse_dw;
+ uint8_t mouse_buttons;
+};
+
+#define TYPE_PS2_MOUSE_DEVICE "ps2-mouse"
+OBJECT_DECLARE_SIMPLE_TYPE(PS2MouseState, PS2_MOUSE_DEVICE)
+
/* ps2.c */
-void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg);
-void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg);
-void ps2_write_mouse(void *, int val);
-void ps2_write_keyboard(void *, int val);
+void ps2_write_mouse(PS2MouseState *s, int val);
+void ps2_write_keyboard(PS2KbdState *s, int val);
uint32_t ps2_read_data(PS2State *s);
void ps2_queue_noirq(PS2State *s, int b);
-void ps2_raise_irq(PS2State *s);
void ps2_queue(PS2State *s, int b);
void ps2_queue_2(PS2State *s, int b1, int b2);
void ps2_queue_3(PS2State *s, int b1, int b2, int b3);
void ps2_queue_4(PS2State *s, int b1, int b2, int b3, int b4);
-void ps2_keyboard_set_translation(void *opaque, int mode);
-void ps2_mouse_fake_event(void *opaque);
+void ps2_keyboard_set_translation(PS2KbdState *s, int mode);
+void ps2_mouse_fake_event(PS2MouseState *s);
+int ps2_queue_empty(PS2State *s);
#endif /* HW_PS2_H */
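
PS2Queue above is a fixed-size ring buffer indexed by rptr/wptr with a running count; a minimal sketch of how a byte could be appended (illustrative only, the real enqueue helpers live in hw/input/ps2.c):

    static void example_ps2_queue_push(PS2Queue *q, uint8_t b)
    {
        if (q->count < PS2_BUFFER_SIZE) {
            q->data[q->wptr] = b;
            q->wptr = (q->wptr + 1) % PS2_BUFFER_SIZE;
            q->count++;
        }
        /* in this sketch a full queue simply drops the byte */
    }
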
diff --git a/include/hw/input/stellaris_gamepad.h b/include/hw/input/stellaris_gamepad.h
new file mode 100644
index 0000000000..51085e166c
--- /dev/null
+++ b/include/hw/input/stellaris_gamepad.h
@@ -0,0 +1,37 @@
+/*
+ * Gamepad style buttons connected to IRQ/GPIO lines
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_INPUT_STELLARIS_GAMEPAD_H
+#define HW_INPUT_STELLARIS_GAMEPAD_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+/*
+ * QEMU interface:
+ * + QOM array property "keycodes": uint32_t QEMU keycodes to handle
+ * (these are QCodes, ie the Q_KEY_* values)
+ * + unnamed GPIO outputs: one per keycode, in the same order as the
+ * "keycodes" array property entries; asserted when key is down
+ */
+
+#define TYPE_STELLARIS_GAMEPAD "stellaris-gamepad"
+OBJECT_DECLARE_SIMPLE_TYPE(StellarisGamepad, STELLARIS_GAMEPAD)
+
+struct StellarisGamepad {
+ SysBusDevice parent_obj;
+
+ uint32_t num_buttons;
+ qemu_irq *irqs;
+ uint32_t *keycodes;
+ uint8_t *pressed;
+};
+
+#endif
diff --git a/include/hw/input/tsc2xxx.h b/include/hw/input/tsc2xxx.h
new file mode 100644
index 0000000000..00eca17674
--- /dev/null
+++ b/include/hw/input/tsc2xxx.h
@@ -0,0 +1,41 @@
+/*
+ * TI touchscreen controller
+ *
+ * Copyright (c) 2006 Andrzej Zaborowski
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_INPUT_TSC2XXX_H
+#define HW_INPUT_TSC2XXX_H
+
+typedef struct MouseTransformInfo {
+ /* Touchscreen resolution */
+ int x;
+ int y;
+ /* Calibration values as used/generated by tslib */
+ int a[7];
+} MouseTransformInfo;
+
+typedef struct uWireSlave {
+ uint16_t (*receive)(void *opaque);
+ void (*send)(void *opaque, uint16_t data);
+ void *opaque;
+} uWireSlave;
+
+/* tsc210x.c */
+uWireSlave *tsc2102_init(qemu_irq pint);
+uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav);
+I2SCodec *tsc210x_codec(uWireSlave *chip);
+uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len);
+void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info);
+void tsc210x_key_event(uWireSlave *chip, int key, int down);
+
+/* tsc2005.c */
+void *tsc2005_init(qemu_irq pintdav);
+uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len);
+void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info);
+
+#endif
diff --git a/include/hw/intc/allwinner-a10-pic.h b/include/hw/intc/allwinner-a10-pic.h
index 1d314a70d9..b8364d3ed4 100644
--- a/include/hw/intc/allwinner-a10-pic.h
+++ b/include/hw/intc/allwinner-a10-pic.h
@@ -1,8 +1,11 @@
#ifndef ALLWINNER_A10_PIC_H
#define ALLWINNER_A10_PIC_H
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
#define TYPE_AW_A10_PIC "allwinner-a10-pic"
-#define AW_A10_PIC(obj) OBJECT_CHECK(AwA10PICState, (obj), TYPE_AW_A10_PIC)
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10PICState, AW_A10_PIC)
#define AW_A10_PIC_VECTOR 0
#define AW_A10_PIC_BASE_ADDR 4
@@ -17,7 +20,7 @@
#define AW_A10_PIC_INT_NR 95
#define AW_A10_PIC_REG_NUM DIV_ROUND_UP(AW_A10_PIC_INT_NR, 32)
-typedef struct AwA10PICState {
+struct AwA10PICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -35,6 +38,6 @@ typedef struct AwA10PICState {
uint32_t enable[AW_A10_PIC_REG_NUM];
uint32_t mask[AW_A10_PIC_REG_NUM];
/*priority setting here*/
-} AwA10PICState;
+};
#endif
diff --git a/include/hw/intc/arm_gic.h b/include/hw/intc/arm_gic.h
index ed703a1720..48f6a51a70 100644
--- a/include/hw/intc/arm_gic.h
+++ b/include/hw/intc/arm_gic.h
@@ -65,24 +65,27 @@
#define HW_ARM_GIC_H
#include "arm_gic_common.h"
+#include "qom/object.h"
/* Number of SGI target-list bits */
#define GIC_TARGETLIST_BITS 8
+#define GIC_MAX_PRIORITY_BITS 8
+#define GIC_MIN_PRIORITY_BITS 4
#define TYPE_ARM_GIC "arm_gic"
-#define ARM_GIC(obj) \
- OBJECT_CHECK(GICState, (obj), TYPE_ARM_GIC)
-#define ARM_GIC_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICClass, (klass), TYPE_ARM_GIC)
-#define ARM_GIC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICClass, (obj), TYPE_ARM_GIC)
+typedef struct ARMGICClass ARMGICClass;
+/* This is reusing the GICState typedef from TYPE_ARM_GIC_COMMON */
+DECLARE_OBJ_CHECKERS(GICState, ARMGICClass,
+ ARM_GIC, TYPE_ARM_GIC)
-typedef struct ARMGICClass {
+struct ARMGICClass {
/*< private >*/
ARMGICCommonClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
-} ARMGICClass;
+};
+
+const char *gic_class_name(void);
#endif
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index b5585fec45..97fea4102d 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -22,6 +22,7 @@
#define HW_ARM_GIC_COMMON_H
#include "hw/sysbus.h"
+#include "qom/object.h"
/* Maximum number of possible interrupts, determined by the GIC architecture */
#define GIC_MAXIRQ 1020
@@ -61,7 +62,7 @@ typedef struct gic_irq_state {
uint8_t group;
} gic_irq_state;
-typedef struct GICState {
+struct GICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -70,6 +71,8 @@ typedef struct GICState {
qemu_irq parent_fiq[GIC_NCPU];
qemu_irq parent_virq[GIC_NCPU];
qemu_irq parent_vfiq[GIC_NCPU];
+ qemu_irq parent_nmi[GIC_NCPU];
+ qemu_irq parent_vnmi[GIC_NCPU];
qemu_irq maintenance_irq[GIC_NCPU];
/* GICD_CTLR; for a GIC with the security extensions the NS banked version
@@ -96,6 +99,7 @@ typedef struct GICState {
uint16_t priority_mask[GIC_NCPU_VCPU];
uint16_t running_priority[GIC_NCPU_VCPU];
uint16_t current_pending[GIC_NCPU_VCPU];
+ uint32_t n_prio_bits;
/* If we present the GICv2 without security extensions to a guest,
* the guest can configure the GICC_CTLR to configure group 1 binary point
@@ -142,24 +146,22 @@ typedef struct GICState {
bool irq_reset_nonsecure; /* configure IRQs as group 1 (NS) on reset? */
int dev_fd; /* kvm device fd if backed by kvm vgic support */
Error *migration_blocker;
-} GICState;
+};
+typedef struct GICState GICState;
#define TYPE_ARM_GIC_COMMON "arm_gic_common"
-#define ARM_GIC_COMMON(obj) \
- OBJECT_CHECK(GICState, (obj), TYPE_ARM_GIC_COMMON)
-#define ARM_GIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICCommonClass, (klass), TYPE_ARM_GIC_COMMON)
-#define ARM_GIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICCommonClass, (obj), TYPE_ARM_GIC_COMMON)
-
-typedef struct ARMGICCommonClass {
+typedef struct ARMGICCommonClass ARMGICCommonClass;
+DECLARE_OBJ_CHECKERS(GICState, ARMGICCommonClass,
+ ARM_GIC_COMMON, TYPE_ARM_GIC_COMMON)
+
+struct ARMGICCommonClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
void (*pre_save)(GICState *s);
void (*post_load)(GICState *s);
-} ARMGICCommonClass;
+};
void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
const MemoryRegionOps *ops,
diff --git a/include/hw/intc/arm_gicv3.h b/include/hw/intc/arm_gicv3.h
index 4a6fd85e22..a81a6ae7ec 100644
--- a/include/hw/intc/arm_gicv3.h
+++ b/include/hw/intc/arm_gicv3.h
@@ -13,20 +13,20 @@
#define HW_ARM_GICV3_H
#include "arm_gicv3_common.h"
+#include "qom/object.h"
#define TYPE_ARM_GICV3 "arm-gicv3"
-#define ARM_GICV3(obj) OBJECT_CHECK(GICv3State, (obj), TYPE_ARM_GICV3)
-#define ARM_GICV3_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICv3Class, (klass), TYPE_ARM_GICV3)
-#define ARM_GICV3_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICv3Class, (obj), TYPE_ARM_GICV3)
+typedef struct ARMGICv3Class ARMGICv3Class;
+/* This is reusing the GICState typedef from TYPE_ARM_GICV3_COMMON */
+DECLARE_OBJ_CHECKERS(GICv3State, ARMGICv3Class,
+ ARM_GICV3, TYPE_ARM_GICV3)
-typedef struct ARMGICv3Class {
+struct ARMGICv3Class {
/*< private >*/
ARMGICv3CommonClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
-} ARMGICv3Class;
+};
#endif
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index 31ec9a1ae4..cd09bee3bc 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -26,6 +26,7 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gic_common.h"
+#include "qom/object.h"
/*
* Maximum number of possible interrupts, determined by the GIC architecture.
@@ -35,7 +36,14 @@
#define GICV3_MAXIRQ 1020
#define GICV3_MAXSPI (GICV3_MAXIRQ - GIC_INTERNAL)
+#define GICV3_LPI_INTID_START 8192
+
+/*
+ * The redistributor in GICv3 has two 64KB frames per CPU; in
+ * GICv4 it has four 64KB frames per CPU.
+ */
#define GICV3_REDIST_SIZE 0x20000
+#define GICV4_REDIST_SIZE 0x40000
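
For example, the memory needed for a redistributor region covering num_cpu CPUs scales with the per-CPU frame size; a sketch, with variable names assumed rather than taken from this patch:

    /* Sketch: region size for num_cpu CPUs, GICv3 vs GICv4 frames. */
    uint64_t redist_size = (uint64_t)num_cpu *
        (revision == 3 ? GICV3_REDIST_SIZE : GICV4_REDIST_SIZE);
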
/* Number of SGI target-list bits */
#define GICV3_TARGETLIST_BITS 16
@@ -43,11 +51,6 @@
/* Maximum number of list registers (architectural limit) */
#define GICV3_LR_MAX 16
-/* Minimum BPR for Secure, or when security not enabled */
-#define GIC_MIN_BPR 0
-/* Minimum BPR for Nonsecure when security is enabled */
-#define GIC_MIN_BPR_NS (GIC_MIN_BPR + 1)
-
/* For some distributor fields we want to model the array of 32-bit
* register values which hold various bitmaps corresponding to enabled,
* pending, etc bits. These macros and functions facilitate that; the
@@ -143,6 +146,7 @@ typedef struct {
int irq;
uint8_t prio;
int grp;
+ bool nmi;
} PendingIrq;
struct GICv3CPUState {
@@ -152,7 +156,8 @@ struct GICv3CPUState {
qemu_irq parent_fiq;
qemu_irq parent_virq;
qemu_irq parent_vfiq;
- qemu_irq maintenance_irq;
+ qemu_irq parent_nmi;
+ qemu_irq parent_vnmi;
/* Redistributor */
uint32_t level; /* Current IRQ level */
@@ -168,10 +173,14 @@ struct GICv3CPUState {
uint32_t gicr_ienabler0;
uint32_t gicr_ipendr0;
uint32_t gicr_iactiver0;
+ uint32_t gicr_inmir0;
uint32_t edge_trigger; /* ICFGR0 and ICFGR1 even bits */
uint32_t gicr_igrpmodr0;
uint32_t gicr_nsacr;
uint8_t gicr_ipriorityr[GIC_INTERNAL];
+ /* VLPI_base page registers */
+ uint64_t gicr_vpropbaser;
+ uint64_t gicr_vpendbaser;
/* CPU interface */
uint64_t icc_sre_el1;
@@ -196,36 +205,71 @@ struct GICv3CPUState {
int num_list_regs;
int vpribits; /* number of virtual priority bits */
int vprebits; /* number of virtual preemption bits */
+ int pribits; /* number of physical priority bits */
+ int prebits; /* number of physical preemption bits */
/* Current highest priority pending interrupt for this CPU.
* This is cached information that can be recalculated from the
* real state above; it doesn't need to be migrated.
*/
PendingIrq hppi;
+
+ /*
+ * Cached information recalculated from LPI tables
+ * in guest memory
+ */
+ PendingIrq hpplpi;
+
+ /* Cached information recalculated from vLPI tables in guest memory */
+ PendingIrq hppvlpi;
+
/* This is temporary working state, to avoid a malloc in gicv3_update() */
bool seenbetter;
+
+ /*
+ * Whether the CPU interface has NMI support (FEAT_GICv3_NMI). The
+ * CPU interface may support NMIs even when the GIC proper (what the
+ * spec calls the IRI; the redistributors and distributor) does not.
+ */
+ bool nmi_support;
};
+/*
+ * The redistributor pages might be split into more than one region
+ * on some machine types if there are many CPUs.
+ */
+typedef struct GICv3RedistRegion {
+ GICv3State *gic;
+ MemoryRegion iomem;
+ uint32_t cpuidx; /* index of first CPU this region covers */
+} GICv3RedistRegion;
+
struct GICv3State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem_dist; /* Distributor */
- MemoryRegion *iomem_redist; /* Redistributor Regions */
+ GICv3RedistRegion *redist_regions; /* Redistributor Regions */
uint32_t *redist_region_count; /* redistributor count within each region */
uint32_t nb_redist_regions; /* number of redist regions */
uint32_t num_cpu;
uint32_t num_irq;
uint32_t revision;
+ bool lpi_enable;
+ bool nmi_support;
bool security_extn;
+ bool force_8bit_prio;
bool irq_reset_nonsecure;
bool gicd_no_migration_shift_bug;
int dev_fd; /* kvm device fd if backed by kvm vgic support */
Error *migration_blocker;
+ MemoryRegion *dma;
+ AddressSpace dma_as;
+
/* Distributor */
/* for a GIC with the security extensions the NS banked version of this
@@ -240,6 +284,7 @@ struct GICv3State {
GIC_DECLARE_BITMAP(active); /* GICD_ISACTIVER */
GIC_DECLARE_BITMAP(level); /* Current level */
GIC_DECLARE_BITMAP(edge_trigger); /* GICD_ICFGR even bits */
+ GIC_DECLARE_BITMAP(nmi); /* GICD_INMIR */
uint8_t gicd_ipriority[GICV3_MAXIRQ];
uint64_t gicd_irouter[GICV3_MAXIRQ];
/* Cached information: pointer to the cpu i/f for the CPUs specified
@@ -249,6 +294,8 @@ struct GICv3State {
uint32_t gicd_nsacr[DIV_ROUND_UP(GICV3_MAXIRQ, 16)];
GICv3CPUState *cpu;
+ /* List of all ITSes connected to this GIC */
+ GPtrArray *itslist;
};
#define GICV3_BITMAP_ACCESSORS(BMP) \
@@ -277,25 +324,33 @@ GICV3_BITMAP_ACCESSORS(pending)
GICV3_BITMAP_ACCESSORS(active)
GICV3_BITMAP_ACCESSORS(level)
GICV3_BITMAP_ACCESSORS(edge_trigger)
+GICV3_BITMAP_ACCESSORS(nmi)
#define TYPE_ARM_GICV3_COMMON "arm-gicv3-common"
-#define ARM_GICV3_COMMON(obj) \
- OBJECT_CHECK(GICv3State, (obj), TYPE_ARM_GICV3_COMMON)
-#define ARM_GICV3_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICv3CommonClass, (klass), TYPE_ARM_GICV3_COMMON)
-#define ARM_GICV3_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICv3CommonClass, (obj), TYPE_ARM_GICV3_COMMON)
-
-typedef struct ARMGICv3CommonClass {
+typedef struct ARMGICv3CommonClass ARMGICv3CommonClass;
+DECLARE_OBJ_CHECKERS(GICv3State, ARMGICv3CommonClass,
+ ARM_GICV3_COMMON, TYPE_ARM_GICV3_COMMON)
+
+struct ARMGICv3CommonClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
void (*pre_save)(GICv3State *s);
void (*post_load)(GICv3State *s);
-} ARMGICv3CommonClass;
+};
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
- const MemoryRegionOps *ops, Error **errp);
+ const MemoryRegionOps *ops);
+
+/**
+ * gicv3_class_name
+ *
+ * Return name of GICv3 class to use depending on whether KVM acceleration is
+ * in use. May throw an error if the chosen implementation is not available.
+ *
+ * Returns: class name to use
+ */
+const char *gicv3_class_name(void);
#endif
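
gicv3_class_name() lets board code pick the TCG or KVM implementation at run time. A hedged sketch of typical use; the property values are illustrative:

    DeviceState *gicdev = qdev_new(gicv3_class_name());

    qdev_prop_set_uint32(gicdev, "num-cpu", num_cpus);
    qdev_prop_set_uint32(gicdev, "num-irq", num_irqs);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(gicdev), &error_fatal);
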
diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h
index fd1fe64c03..7dc712b38d 100644
--- a/include/hw/intc/arm_gicv3_its_common.h
+++ b/include/hw/intc/arm_gicv3_its_common.h
@@ -23,6 +23,9 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gicv3_common.h"
+#include "qom/object.h"
+
+#define TYPE_ARM_GICV3_ITS "arm-gicv3-its"
#define ITS_CONTROL_SIZE 0x10000
#define ITS_TRANS_SIZE 0x10000
@@ -30,11 +33,27 @@
#define GITS_CTLR 0x0
#define GITS_IIDR 0x4
+#define GITS_TYPER 0x8
#define GITS_CBASER 0x80
#define GITS_CWRITER 0x88
#define GITS_CREADR 0x90
#define GITS_BASER 0x100
+#define GITS_TRANSLATER 0x0040
+
+typedef struct {
+ bool indirect;
+ uint16_t entry_sz;
+ uint32_t page_sz;
+ uint32_t num_entries;
+ uint64_t base_addr;
+} TableDesc;
+
+typedef struct {
+ uint32_t num_entries;
+ uint64_t base_addr;
+} CmdQDesc;
+
struct GICv3ITSState {
SysBusDevice parent_obj;
@@ -51,25 +70,47 @@ struct GICv3ITSState {
/* Registers */
uint32_t ctlr;
uint32_t iidr;
+ uint64_t typer;
uint64_t cbaser;
uint64_t cwriter;
uint64_t creadr;
uint64_t baser[8];
+ TableDesc dt;
+ TableDesc ct;
+ TableDesc vpet;
+ CmdQDesc cq;
+
Error *migration_blocker;
};
typedef struct GICv3ITSState GICv3ITSState;
-void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops);
+void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
+ const MemoryRegionOps *tops);
+
+/*
+ * The ITS should call this when it is realized to add itself
+ * to its GIC's list of connected ITSes.
+ */
+static inline void gicv3_add_its(GICv3State *s, DeviceState *its)
+{
+ g_ptr_array_add(s->itslist, its);
+}
+
+/*
+ * The ITS can use this for operations that must be performed on
+ * every ITS connected to the same GIC as itself.
+ */
+static inline void gicv3_foreach_its(GICv3State *s, GFunc func, void *opaque)
+{
+ g_ptr_array_foreach(s->itslist, func, opaque);
+}
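
gicv3_foreach_its() takes a glib GFunc, so each connected ITS is passed as the data argument; a hypothetical callback (name and body invented for illustration) would be used like this:

    static void example_its_cb(gpointer data, gpointer opaque)
    {
        DeviceState *its = DEVICE(data);
        /* operate on each ITS connected to this GIC */
    }

    /* ... */
    gicv3_foreach_its(s, example_its_cb, NULL);
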
#define TYPE_ARM_GICV3_ITS_COMMON "arm-gicv3-its-common"
-#define ARM_GICV3_ITS_COMMON(obj) \
- OBJECT_CHECK(GICv3ITSState, (obj), TYPE_ARM_GICV3_ITS_COMMON)
-#define ARM_GICV3_ITS_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(GICv3ITSCommonClass, (klass), TYPE_ARM_GICV3_ITS_COMMON)
-#define ARM_GICV3_ITS_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(GICv3ITSCommonClass, (obj), TYPE_ARM_GICV3_ITS_COMMON)
+typedef struct GICv3ITSCommonClass GICv3ITSCommonClass;
+DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSCommonClass,
+ ARM_GICV3_ITS_COMMON, TYPE_ARM_GICV3_ITS_COMMON)
struct GICv3ITSCommonClass {
/*< private >*/
@@ -81,6 +122,14 @@ struct GICv3ITSCommonClass {
void (*post_load)(GICv3ITSState *s);
};
-typedef struct GICv3ITSCommonClass GICv3ITSCommonClass;
+/**
+ * its_class_name:
+ *
+ * Return the ITS class name to use depending on whether KVM acceleration
+ * and KVM CAP_SIGNAL_MSI are supported
+ *
+ * Returns: class name to use or NULL
+ */
+const char *its_class_name(void);
#endif
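
Because its_class_name() may return NULL when neither the emulated nor the KVM ITS is usable, callers guard the instantiation; a sketch:

    const char *itsclass = its_class_name();

    if (itsclass) {
        DeviceState *its = qdev_new(itsclass);
        /* ... link the ITS to its GIC and map the GITS frames ... */
    }
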
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index a472c9b8f0..89fe8aedaa 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -10,14 +10,13 @@
#ifndef HW_ARM_ARMV7M_NVIC_H
#define HW_ARM_ARMV7M_NVIC_H
-#include "target/arm/cpu.h"
+#include "target/arm/cpu-qom.h"
#include "hw/sysbus.h"
#include "hw/timer/armv7m_systick.h"
+#include "qom/object.h"
#define TYPE_NVIC "armv7m_nvic"
-
-#define NVIC(obj) \
- OBJECT_CHECK(NVICState, (obj), TYPE_NVIC)
+OBJECT_DECLARE_SIMPLE_TYPE(NVICState, NVIC)
/* Highest permitted number of exceptions (architectural limit) */
#define NVIC_MAX_VECTORS 512
@@ -35,7 +34,7 @@ typedef struct VecInfo {
uint8_t level; /* exceptions <=15 never set level */
} VecInfo;
-typedef struct NVICState {
+struct NVICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -75,19 +74,136 @@ typedef struct NVICState {
*/
bool vectpending_is_s_banked;
int exception_prio; /* group prio of the highest prio active exception */
- int vectpending_prio; /* group prio of the exeception in vectpending */
+ int vectpending_prio; /* group prio of the exception in vectpending */
MemoryRegion sysregmem;
- MemoryRegion sysreg_ns_mem;
- MemoryRegion systickmem;
- MemoryRegion systick_ns_mem;
- MemoryRegion container;
uint32_t num_irq;
qemu_irq excpout;
qemu_irq sysresetreq;
+};
- SysTickState systick[M_REG_NUM_BANKS];
-} NVICState;
+/* Interface between CPU and Interrupt controller. */
+/**
+ * armv7m_nvic_set_pending: mark the specified exception as pending
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Marks the specified exception as pending. Note that we will assert()
+ * if @secure is true and @irq does not specify one of the fixed set
+ * of architecturally banked exceptions.
+ */
+void armv7m_nvic_set_pending(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_set_pending_derived: mark this derived exception as pending
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for derived
+ * exceptions (exceptions generated in the course of trying to take
+ * a different exception).
+ */
+void armv7m_nvic_set_pending_derived(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for exceptions
+ * generated in the course of lazy stacking of FP registers.
+ */
+void armv7m_nvic_set_pending_lazyfp(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_get_pending_irq_info: return highest priority pending
+ * exception, and whether it targets Secure state
+ * @s: the NVIC
+ * @pirq: set to pending exception number
+ * @ptargets_secure: set to whether pending exception targets Secure
+ *
+ * This function writes the number of the highest priority pending
+ * exception (the one which would be made active by
+ * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
+ * to true if the current highest priority pending exception should
+ * be taken to Secure state, false for NS.
+ */
+void armv7m_nvic_get_pending_irq_info(NVICState *s, int *pirq,
+ bool *ptargets_secure);
+/**
+ * armv7m_nvic_acknowledge_irq: make highest priority pending exception active
+ * @s: the NVIC
+ *
+ * Move the current highest priority pending exception from the pending
+ * state to the active state, and update v7m.exception to indicate that
+ * it is the exception currently being handled.
+ */
+void armv7m_nvic_acknowledge_irq(NVICState *s);
+/**
+ * armv7m_nvic_complete_irq: complete specified interrupt or exception
+ * @s: the NVIC
+ * @irq: the exception number to complete
+ * @secure: true if this exception was secure
+ *
+ * Returns: -1 if the irq was not active
+ * 1 if completing this irq brought us back to base (no active irqs)
+ * 0 if there is still an irq active after this one was completed
+ * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
+ */
+int armv7m_nvic_complete_irq(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_get_ready_status(NVICState *s, int irq, bool secure)
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Return whether an exception is "ready", i.e. whether the exception is
+ * enabled and is configured at a priority which would allow it to
+ * interrupt the current execution priority. This controls whether the
+ * RDY bit for it in the FPCCR is set.
+ */
+bool armv7m_nvic_get_ready_status(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_raw_execution_priority: return the raw execution priority
+ * @s: the NVIC
+ *
+ * Returns: the raw execution priority as defined by the v8M architecture.
+ * This is the execution priority minus the effects of AIRCR.PRIS,
+ * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
+ * (v8M ARM ARM I_PKLD.)
+ */
+int armv7m_nvic_raw_execution_priority(NVICState *s);
+/**
+ * armv7m_nvic_neg_prio_requested: return true if the requested execution
+ * priority is negative for the specified security state.
+ * @s: the NVIC
+ * @secure: the security state to test
+ * This corresponds to the pseudocode IsReqExecPriNeg().
+ */
+#ifndef CONFIG_USER_ONLY
+bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure);
+#else
+static inline bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure)
+{
+ return false;
+}
+#endif
+#ifndef CONFIG_USER_ONLY
+bool armv7m_nvic_can_take_pending_exception(NVICState *s);
+#else
+static inline bool armv7m_nvic_can_take_pending_exception(NVICState *s)
+{
+ return true;
+}
+#endif
#endif
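
The doc comments above describe the CPU-facing exception lifecycle; a condensed sketch of the order in which the helpers are meant to be called (the SysTick exception number is only an example):

    int irq;
    bool targets_secure;

    armv7m_nvic_set_pending(nvic, ARMV7M_EXCP_SYSTICK, false);
    if (armv7m_nvic_can_take_pending_exception(nvic)) {
        armv7m_nvic_get_pending_irq_info(nvic, &irq, &targets_secure);
        armv7m_nvic_acknowledge_irq(nvic);
        /* ... the CPU runs the handler ... */
        armv7m_nvic_complete_irq(nvic, irq, targets_secure);
    }
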
diff --git a/include/hw/intc/aspeed_vic.h b/include/hw/intc/aspeed_vic.h
index 107ff17c3b..68d6ab997a 100644
--- a/include/hw/intc/aspeed_vic.h
+++ b/include/hw/intc/aspeed_vic.h
@@ -14,13 +14,14 @@
#define ASPEED_VIC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_VIC "aspeed.vic"
-#define ASPEED_VIC(obj) OBJECT_CHECK(AspeedVICState, (obj), TYPE_ASPEED_VIC)
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedVICState, ASPEED_VIC)
#define ASPEED_VIC_NR_IRQS 51
-typedef struct AspeedVICState {
+struct AspeedVICState {
/*< private >*/
SysBusDevice parent_obj;
@@ -43,6 +44,6 @@ typedef struct AspeedVICState {
/* 0=low-sensitive/falling-edge, 1=high-sensitive/rising-edge */
uint64_t event;
-} AspeedVICState;
+};
#endif /* ASPEED_VIC_H */
diff --git a/include/hw/intc/bcm2835_ic.h b/include/hw/intc/bcm2835_ic.h
index fb75fa0064..588eb76c5c 100644
--- a/include/hw/intc/bcm2835_ic.h
+++ b/include/hw/intc/bcm2835_ic.h
@@ -1,20 +1,23 @@
/*
* Raspberry Pi emulation (c) 2012 Gregory Estrade
- * This code is licensed under the GNU GPLv2 and later.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_IC_H
#define BCM2835_IC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_BCM2835_IC "bcm2835-ic"
-#define BCM2835_IC(obj) OBJECT_CHECK(BCM2835ICState, (obj), TYPE_BCM2835_IC)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835ICState, BCM2835_IC)
#define BCM2835_IC_GPU_IRQ "gpu-irq"
#define BCM2835_IC_ARM_IRQ "arm-irq"
-typedef struct BCM2835ICState {
+struct BCM2835ICState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -28,6 +31,6 @@ typedef struct BCM2835ICState {
uint8_t arm_irq_level, arm_irq_enable;
bool fiq_enable;
uint8_t fiq_select;
-} BCM2835ICState;
+};
#endif
diff --git a/include/hw/intc/bcm2836_control.h b/include/hw/intc/bcm2836_control.h
index 613f3c4186..a410c817e8 100644
--- a/include/hw/intc/bcm2836_control.h
+++ b/include/hw/intc/bcm2836_control.h
@@ -5,23 +5,28 @@
 * Raspberry Pi 2 emulation and refactoring Copyright (c) 2015, Microsoft
* Written by Andrew Baumann
*
- * This code is licensed under the GNU GPLv2 and later.
+ * ARM Local Timer IRQ Copyright (c) 2019. Zoltán Baldaszti
+ * Added basic IRQ_TIMER interrupt support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2836_CONTROL_H
#define BCM2836_CONTROL_H
#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
/* 4 mailboxes per core, for 16 total */
#define BCM2836_NCORES 4
#define BCM2836_MBPERCORE 4
#define TYPE_BCM2836_CONTROL "bcm2836-control"
-#define BCM2836_CONTROL(obj) \
- OBJECT_CHECK(BCM2836ControlState, (obj), TYPE_BCM2836_CONTROL)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2836ControlState, BCM2836_CONTROL)
-typedef struct BCM2836ControlState {
+struct BCM2836ControlState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -39,6 +44,11 @@ typedef struct BCM2836ControlState {
bool gpu_irq, gpu_fiq;
uint8_t timerirqs[BCM2836_NCORES];
+ /* local timer */
+ QEMUTimer timer;
+ uint32_t local_timer_control;
+ uint8_t route_localtimer;
+
/* interrupt source registers, post-routing (also input-derived; visible) */
uint32_t irqsrc[BCM2836_NCORES];
uint32_t fiqsrc[BCM2836_NCORES];
@@ -46,6 +56,6 @@ typedef struct BCM2836ControlState {
/* outputs to CPU cores */
qemu_irq irq[BCM2836_NCORES];
qemu_irq fiq[BCM2836_NCORES];
-} BCM2836ControlState;
+};
#endif
diff --git a/include/hw/intc/exynos4210_combiner.h b/include/hw/intc/exynos4210_combiner.h
new file mode 100644
index 0000000000..bd207a7e6e
--- /dev/null
+++ b/include/hw/intc/exynos4210_combiner.h
@@ -0,0 +1,57 @@
+/*
+ * Samsung exynos4210 Interrupt Combiner
+ *
+ * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd.
+ * All rights reserved.
+ *
+ * Evgeny Voevodin <e.voevodin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_INTC_EXYNOS4210_COMBINER_H
+#define HW_INTC_EXYNOS4210_COMBINER_H
+
+#include "hw/sysbus.h"
+
+/*
+ * State for each output signal of the internal combiner
+ */
+typedef struct CombinerGroupState {
+ uint8_t src_mask; /* 1 - source enabled, 0 - disabled */
+ uint8_t src_pending; /* Pending source interrupts before masking */
+} CombinerGroupState;
+
+#define TYPE_EXYNOS4210_COMBINER "exynos4210.combiner"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210CombinerState, EXYNOS4210_COMBINER)
+
+/* Number of groups and total number of interrupts for the internal combiner */
+#define IIC_NGRP 64
+#define IIC_NIRQ (IIC_NGRP * 8)
+#define IIC_REGSET_SIZE 0x41
+
+struct Exynos4210CombinerState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ struct CombinerGroupState group[IIC_NGRP];
+ uint32_t reg_set[IIC_REGSET_SIZE];
+ uint32_t icipsr[2];
+ uint32_t external; /* 1 means that this combiner is external */
+
+ qemu_irq output_irq[IIC_NGRP];
+};
+
+#endif
diff --git a/include/hw/intc/exynos4210_gic.h b/include/hw/intc/exynos4210_gic.h
new file mode 100644
index 0000000000..f64c4069c6
--- /dev/null
+++ b/include/hw/intc/exynos4210_gic.h
@@ -0,0 +1,43 @@
+/*
+ * Samsung exynos4210 GIC implementation. Based on hw/arm_gic.c
+ *
+ * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd.
+ * All rights reserved.
+ *
+ * Evgeny Voevodin <e.voevodin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HW_INTC_EXYNOS4210_GIC_H
+#define HW_INTC_EXYNOS4210_GIC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_EXYNOS4210_GIC "exynos4210.gic"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210GicState, EXYNOS4210_GIC)
+
+#define EXYNOS4210_GIC_NCPUS 2
+
+struct Exynos4210GicState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion cpu_container;
+ MemoryRegion dist_container;
+ MemoryRegion cpu_alias[EXYNOS4210_GIC_NCPUS];
+ MemoryRegion dist_alias[EXYNOS4210_GIC_NCPUS];
+ uint32_t num_cpu;
+ DeviceState *gic;
+};
+
+#endif
diff --git a/include/hw/intc/goldfish_pic.h b/include/hw/intc/goldfish_pic.h
new file mode 100644
index 0000000000..3e79580367
--- /dev/null
+++ b/include/hw/intc/goldfish_pic.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Goldfish PIC
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#ifndef HW_INTC_GOLDFISH_PIC_H
+#define HW_INTC_GOLDFISH_PIC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_GOLDFISH_PIC "goldfish_pic"
+OBJECT_DECLARE_SIMPLE_TYPE(GoldfishPICState, GOLDFISH_PIC)
+
+#define GOLDFISH_PIC_IRQ_NB 32
+
+struct GoldfishPICState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t pending;
+ uint32_t enabled;
+
+ /* statistics */
+ uint64_t stats_irq_count[32];
+ /* for tracing */
+ uint8_t idx;
+};
+
+#endif
diff --git a/include/hw/intc/grlib_irqmp.h b/include/hw/intc/grlib_irqmp.h
new file mode 100644
index 0000000000..a76acbf940
--- /dev/null
+++ b/include/hw/intc/grlib_irqmp.h
@@ -0,0 +1,41 @@
+/*
+ * QEMU GRLIB Components
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2010-2024 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef GRLIB_IRQMP_H
+#define GRLIB_IRQMP_H
+
+#include "hw/sysbus.h"
+
+/* Emulation of the GRLIB device is based on the GRLIB IP Core User's Manual:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ */
+
+/* IRQMP */
+#define TYPE_GRLIB_IRQMP "grlib-irqmp"
+
+void grlib_irqmp_ack(DeviceState *dev, unsigned int cpu, int intno);
+
+#endif /* GRLIB_IRQMP_H */
diff --git a/include/hw/intc/heathrow_pic.h b/include/hw/intc/heathrow_pic.h
index 56c2ef339f..c0a7f6f546 100644
--- a/include/hw/intc/heathrow_pic.h
+++ b/include/hw/intc/heathrow_pic.h
@@ -23,11 +23,14 @@
* THE SOFTWARE.
*/
-#ifndef HEATHROW_H
-#define HEATHROW_H
+#ifndef HW_INTC_HEATHROW_PIC_H
+#define HW_INTC_HEATHROW_PIC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_HEATHROW "heathrow"
-#define HEATHROW(obj) OBJECT_CHECK(HeathrowState, (obj), TYPE_HEATHROW)
+OBJECT_DECLARE_SIMPLE_TYPE(HeathrowState, HEATHROW)
typedef struct HeathrowPICState {
uint32_t events;
@@ -36,14 +39,14 @@ typedef struct HeathrowPICState {
uint32_t level_triggered;
} HeathrowPICState;
-typedef struct HeathrowState {
+struct HeathrowState {
SysBusDevice parent_obj;
MemoryRegion mem;
HeathrowPICState pics[2];
qemu_irq irqs[1];
-} HeathrowState;
+};
#define HEATHROW_NUM_IRQS 64
-#endif /* HEATHROW_H */
+#endif /* HW_INTC_HEATHROW_PIC_H */
diff --git a/include/hw/intc/i8259.h b/include/hw/intc/i8259.h
new file mode 100644
index 0000000000..c412575775
--- /dev/null
+++ b/include/hw/intc/i8259.h
@@ -0,0 +1,20 @@
+#ifndef HW_I8259_H
+#define HW_I8259_H
+
+/* i8259.c */
+
+extern PICCommonState *isa_pic;
+
+/*
+ * i8259_init()
+ *
+ * Create an i8259 device on an ISA @bus,
+ * connect its output to @parent_irq_in,
+ * return an (allocated) array of 16 input IRQs.
+ */
+qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in);
+qemu_irq *kvm_i8259_init(ISABus *bus);
+int pic_get_output(PICCommonState *s);
+int pic_read_irq(PICCommonState *s);
+
+#endif
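
Following the comment above, a sketch of the call pattern (the parent IRQ comes from the board; variable names are illustrative):

    /* Sketch: create the cascaded PIC pair and keep the 16 input lines. */
    qemu_irq *isa_irqs = i8259_init(isa_bus, parent_irq_in);
    /* isa_irqs[0] .. isa_irqs[15] are then handed out to ISA devices. */
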
diff --git a/include/hw/intc/imx_avic.h b/include/hw/intc/imx_avic.h
index 1b80769018..75fbd1a89c 100644
--- a/include/hw/intc/imx_avic.h
+++ b/include/hw/intc/imx_avic.h
@@ -18,9 +18,10 @@
#define IMX_AVIC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX_AVIC "imx.avic"
-#define IMX_AVIC(obj) OBJECT_CHECK(IMXAVICState, (obj), TYPE_IMX_AVIC)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXAVICState, IMX_AVIC)
#define IMX_AVIC_NUM_IRQS 64
@@ -36,7 +37,7 @@
#define PRIO_PER_WORD (sizeof(uint32_t) * 8 / 4)
#define PRIO_WORDS (IMX_AVIC_NUM_IRQS/PRIO_PER_WORD)
-typedef struct IMXAVICState{
+struct IMXAVICState {
/*< private >*/
SysBusDevice parent_obj;
@@ -50,6 +51,6 @@ typedef struct IMXAVICState{
qemu_irq irq;
qemu_irq fiq;
uint32_t prio[PRIO_WORDS]; /* Priorities are 4-bits each */
-} IMXAVICState;
+};
#endif /* IMX_AVIC_H */
diff --git a/include/hw/intc/imx_gpcv2.h b/include/hw/intc/imx_gpcv2.h
index ed978b24bb..7bdee7e80a 100644
--- a/include/hw/intc/imx_gpcv2.h
+++ b/include/hw/intc/imx_gpcv2.h
@@ -2,21 +2,22 @@
#define IMX_GPCV2_H
#include "hw/sysbus.h"
+#include "qom/object.h"
enum IMXGPCv2Registers {
GPC_NUM = 0xE00 / sizeof(uint32_t),
};
-typedef struct IMXGPCv2State {
+struct IMXGPCv2State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
uint32_t regs[GPC_NUM];
-} IMXGPCv2State;
+};
#define TYPE_IMX_GPCV2 "imx-gpcv2"
-#define IMX_GPCV2(obj) OBJECT_CHECK(IMXGPCv2State, (obj), TYPE_IMX_GPCV2)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXGPCv2State, IMX_GPCV2)
#endif /* IMX_GPCV2_H */
diff --git a/include/hw/intc/intc.h b/include/hw/intc/intc.h
index fb3e8e621f..7018f608ca 100644
--- a/include/hw/intc/intc.h
+++ b/include/hw/intc/intc.h
@@ -5,19 +5,16 @@
#define TYPE_INTERRUPT_STATS_PROVIDER "intctrl"
-#define INTERRUPT_STATS_PROVIDER_CLASS(klass) \
- OBJECT_CLASS_CHECK(InterruptStatsProviderClass, (klass), \
+typedef struct InterruptStatsProviderClass InterruptStatsProviderClass;
+DECLARE_CLASS_CHECKERS(InterruptStatsProviderClass, INTERRUPT_STATS_PROVIDER,
TYPE_INTERRUPT_STATS_PROVIDER)
-#define INTERRUPT_STATS_PROVIDER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(InterruptStatsProviderClass, (obj), \
- TYPE_INTERRUPT_STATS_PROVIDER)
#define INTERRUPT_STATS_PROVIDER(obj) \
INTERFACE_CHECK(InterruptStatsProvider, (obj), \
TYPE_INTERRUPT_STATS_PROVIDER)
typedef struct InterruptStatsProvider InterruptStatsProvider;
-typedef struct InterruptStatsProviderClass {
+struct InterruptStatsProviderClass {
InterfaceClass parent;
/* The returned pointer and statistics must remain valid until
@@ -26,6 +23,6 @@ typedef struct InterruptStatsProviderClass {
bool (*get_statistics)(InterruptStatsProvider *obj, uint64_t **irq_counts,
unsigned int *nb_irqs);
void (*print_info)(InterruptStatsProvider *obj, Monitor *mon);
-} InterruptStatsProviderClass;
+};
#endif
diff --git a/include/hw/i386/ioapic.h b/include/hw/intc/ioapic.h
index 59fcb158a7..aa122e25e3 100644
--- a/include/hw/i386/ioapic.h
+++ b/include/hw/intc/ioapic.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -17,15 +17,17 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HW_IOAPIC_H
-#define HW_IOAPIC_H
+#ifndef HW_INTC_IOAPIC_H
+#define HW_INTC_IOAPIC_H
#define IOAPIC_NUM_PINS 24
#define IO_APIC_DEFAULT_ADDRESS 0xfec00000
+#define IO_APIC_SECONDARY_ADDRESS (IO_APIC_DEFAULT_ADDRESS + 0x10000)
+#define IO_APIC_SECONDARY_IRQBASE 24 /* primary 0 -> 23, secondary 24 -> 47 */
#define TYPE_KVM_IOAPIC "kvm-ioapic"
#define TYPE_IOAPIC "ioapic"
void ioapic_eoi_broadcast(int vector);
-#endif /* HW_IOAPIC_H */
+#endif /* HW_INTC_IOAPIC_H */
diff --git a/include/hw/intc/kvm_irqcount.h b/include/hw/intc/kvm_irqcount.h
new file mode 100644
index 0000000000..0ed5999e49
--- /dev/null
+++ b/include/hw/intc/kvm_irqcount.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#ifndef KVM_IRQCOUNT_H
+#define KVM_IRQCOUNT_H
+
+void kvm_report_irq_delivered(int delivered);
+void kvm_reset_irq_delivered(void);
+int kvm_get_irq_delivered(void);
+
+#endif
diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h
new file mode 100644
index 0000000000..a0a46b888c
--- /dev/null
+++ b/include/hw/intc/loongarch_extioi.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 3A5000 ext interrupt controller definitions
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "hw/sysbus.h"
+#include "hw/loongarch/virt.h"
+
+#ifndef LOONGARCH_EXTIOI_H
+#define LOONGARCH_EXTIOI_H
+
+#define LS3A_INTC_IP 8
+#define EXTIOI_IRQS (256)
+#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8)
+/* IRQs from the EXTIOI are routed to at most 4 CPUs */
+#define EXTIOI_CPUS (4)
+/* one IP-number mapping entry per 32 IRQs */
+#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32)
+#define EXTIOI_IRQS_COREMAP_SIZE 256
+#define EXTIOI_IRQS_NODETYPE_COUNT 16
+#define EXTIOI_IRQS_GROUP_COUNT 8
+
+#define APIC_OFFSET 0x400
+#define APIC_BASE (0x1000ULL + APIC_OFFSET)
+
+#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET)
+#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET)
+#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET)
+#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET)
+#define EXTIOI_ISR_START (0x700 - APIC_OFFSET)
+#define EXTIOI_ISR_END (0x720 - APIC_OFFSET)
+#define EXTIOI_COREISR_START (0x800 - APIC_OFFSET)
+#define EXTIOI_COREISR_END (0xB20 - APIC_OFFSET)
+#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET)
+#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET)
+
+typedef struct ExtIOICore {
+ uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT];
+ DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS);
+ qemu_irq parent_irq[LS3A_INTC_IP];
+} ExtIOICore;
+
+#define TYPE_LOONGARCH_EXTIOI "loongarch.extioi"
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI)
+struct LoongArchExtIOI {
+ SysBusDevice parent_obj;
+ uint32_t num_cpu;
+ /* hardware state */
+ uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2];
+ uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT];
+ uint32_t isr[EXTIOI_IRQS / 32];
+ uint32_t enable[EXTIOI_IRQS / 32];
+ uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4];
+ uint32_t coremap[EXTIOI_IRQS / 4];
+ uint32_t sw_pending[EXTIOI_IRQS / 32];
+ uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE];
+ uint8_t sw_coremap[EXTIOI_IRQS];
+ qemu_irq irq[EXTIOI_IRQS];
+ ExtIOICore *cpu;
+ MemoryRegion extioi_system_mem;
+};
+#endif /* LOONGARCH_EXTIOI_H */
diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h
new file mode 100644
index 0000000000..1c1e834849
--- /dev/null
+++ b/include/hw/intc/loongarch_ipi.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch IPI interrupt header file
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LOONGARCH_IPI_H
+#define HW_LOONGARCH_IPI_H
+
+#include "hw/sysbus.h"
+
+/* Mainly used by iocsr reads and writes */
+#define SMP_IPI_MAILBOX 0x1000ULL
+#define CORE_STATUS_OFF 0x0
+#define CORE_EN_OFF 0x4
+#define CORE_SET_OFF 0x8
+#define CORE_CLEAR_OFF 0xc
+#define CORE_BUF_20 0x20
+#define CORE_BUF_28 0x28
+#define CORE_BUF_30 0x30
+#define CORE_BUF_38 0x38
+#define IOCSR_IPI_SEND 0x40
+#define IOCSR_MAIL_SEND 0x48
+#define IOCSR_ANY_SEND 0x158
+
+#define MAIL_SEND_ADDR (SMP_IPI_MAILBOX + IOCSR_MAIL_SEND)
+#define MAIL_SEND_OFFSET 0
+#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND)
+
+#define IPI_MBX_NUM 4
+
+#define TYPE_LOONGARCH_IPI "loongarch_ipi"
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchIPI, LOONGARCH_IPI)
+
+typedef struct IPICore {
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ /* each 64-bit buf is divided into two 32-bit bufs */
+ uint32_t buf[IPI_MBX_NUM * 2];
+ qemu_irq irq;
+} IPICore;
+
+struct LoongArchIPI {
+ SysBusDevice parent_obj;
+ MemoryRegion ipi_iocsr_mem;
+ MemoryRegion ipi64_iocsr_mem;
+ uint32_t num_cpu;
+ IPICore *cpu;
+};
+
+#endif
diff --git a/include/hw/intc/loongarch_pch_msi.h b/include/hw/intc/loongarch_pch_msi.h
new file mode 100644
index 0000000000..b8586fb3b6
--- /dev/null
+++ b/include/hw/intc/loongarch_pch_msi.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 7A1000 I/O interrupt controller definitions
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "hw/sysbus.h"
+
+#define TYPE_LOONGARCH_PCH_MSI "loongarch_pch_msi"
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHMSI, LOONGARCH_PCH_MSI)
+
+/* MSI IRQs run from 32 to 255 */
+#define PCH_MSI_IRQ_START 32
+#define PCH_MSI_IRQ_END 255
+#define PCH_MSI_IRQ_NUM 224
+
+struct LoongArchPCHMSI {
+ SysBusDevice parent_obj;
+ qemu_irq *pch_msi_irq;
+ MemoryRegion msi_mmio;
+ /* irq base passed to upper extioi intc */
+ unsigned int irq_base;
+ unsigned int irq_num;
+};
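For reference, PCH_MSI_IRQ_NUM is simply the size of the inclusive vector range noted above: 255 - 32 + 1 = 224.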
diff --git a/include/hw/intc/loongarch_pch_pic.h b/include/hw/intc/loongarch_pch_pic.h
new file mode 100644
index 0000000000..d5437e88f2
--- /dev/null
+++ b/include/hw/intc/loongarch_pch_pic.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 7A1000 I/O interrupt controller definitions
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "hw/sysbus.h"
+
+#define TYPE_LOONGARCH_PCH_PIC "loongarch_pch_pic"
+#define PCH_PIC_NAME(name) TYPE_LOONGARCH_PCH_PIC#name
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHPIC, LOONGARCH_PCH_PIC)
+
+#define PCH_PIC_INT_ID_VAL 0x7000000UL
+#define PCH_PIC_INT_ID_VER 0x1UL
+
+#define PCH_PIC_INT_ID_LO 0x00
+#define PCH_PIC_INT_ID_HI 0x04
+#define PCH_PIC_INT_MASK_LO 0x20
+#define PCH_PIC_INT_MASK_HI 0x24
+#define PCH_PIC_HTMSI_EN_LO 0x40
+#define PCH_PIC_HTMSI_EN_HI 0x44
+#define PCH_PIC_INT_EDGE_LO 0x60
+#define PCH_PIC_INT_EDGE_HI 0x64
+#define PCH_PIC_INT_CLEAR_LO 0x80
+#define PCH_PIC_INT_CLEAR_HI 0x84
+#define PCH_PIC_AUTO_CTRL0_LO 0xc0
+#define PCH_PIC_AUTO_CTRL0_HI 0xc4
+#define PCH_PIC_AUTO_CTRL1_LO 0xe0
+#define PCH_PIC_AUTO_CTRL1_HI 0xe4
+#define PCH_PIC_ROUTE_ENTRY_OFFSET 0x100
+#define PCH_PIC_ROUTE_ENTRY_END 0x13f
+#define PCH_PIC_HTMSI_VEC_OFFSET 0x200
+#define PCH_PIC_HTMSI_VEC_END 0x23f
+#define PCH_PIC_INT_STATUS_LO 0x3a0
+#define PCH_PIC_INT_STATUS_HI 0x3a4
+#define PCH_PIC_INT_POL_LO 0x3e0
+#define PCH_PIC_INT_POL_HI 0x3e4
+
+#define STATUS_LO_START 0
+#define STATUS_HI_START 0x4
+#define POL_LO_START 0x40
+#define POL_HI_START 0x44
+struct LoongArchPCHPIC {
+ SysBusDevice parent_obj;
+ qemu_irq parent_irq[64];
+ uint64_t int_mask; /* 0x020 interrupt mask register */
+ uint64_t htmsi_en; /* 0x040 1 = MSI enabled */
+ uint64_t intedge; /* 0x060 1 = edge-triggered, 0 = level-triggered */
+ uint64_t intclr; /* 0x080 clears edge interrupts: write 1 to clear, writing 0 is ignored */
+ uint64_t auto_crtl0; /* 0x0c0 */
+ uint64_t auto_crtl1; /* 0x0e0 */
+ uint64_t last_intirr; /* edge detection */
+ uint64_t intirr; /* 0x380 interrupt request register */
+ uint64_t intisr; /* 0x3a0 interrupt service register */
+ /*
+ * 0x3e0 interrupt level polarity selection
+ * register; 0 selects high-level trigger
+ */
+ uint64_t int_polarity;
+
+ uint8_t route_entry[64]; /* 0x100 - 0x138 */
+ uint8_t htmsi_vector[64]; /* 0x200 - 0x238 */
+
+ MemoryRegion iomem32_low;
+ MemoryRegion iomem32_high;
+ MemoryRegion iomem8;
+ unsigned int irq_num;
+};
diff --git a/include/hw/intc/loongson_liointc.h b/include/hw/intc/loongson_liointc.h
new file mode 100644
index 0000000000..848e65eb35
--- /dev/null
+++ b/include/hw/intc/loongson_liointc.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2020 Huacai Chen <chenhc@lemote.com>
+ * Copyright (c) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ */
+
+#ifndef LOONGSON_LIOINTC_H
+#define LOONGSON_LIOINTC_H
+
+#include "qemu/units.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_LOONGSON_LIOINTC "loongson.liointc"
+DECLARE_INSTANCE_CHECKER(struct loongson_liointc, LOONGSON_LIOINTC,
+ TYPE_LOONGSON_LIOINTC)
+
+#endif /* LOONGSON_LIOINTC_H */
diff --git a/include/hw/intc/m68k_irqc.h b/include/hw/intc/m68k_irqc.h
new file mode 100644
index 0000000000..693e33b0aa
--- /dev/null
+++ b/include/hw/intc/m68k_irqc.h
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * QEMU Motorola 680x0 IRQ Controller
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#ifndef M68K_IRQC_H
+#define M68K_IRQC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_M68K_IRQC "m68k-irq-controller"
+#define M68K_IRQC(obj) OBJECT_CHECK(M68KIRQCState, (obj), \
+ TYPE_M68K_IRQC)
+
+#define M68K_IRQC_AUTOVECTOR_BASE 25
+
+enum {
+ M68K_IRQC_LEVEL_1 = 0,
+ M68K_IRQC_LEVEL_2,
+ M68K_IRQC_LEVEL_3,
+ M68K_IRQC_LEVEL_4,
+ M68K_IRQC_LEVEL_5,
+ M68K_IRQC_LEVEL_6,
+ M68K_IRQC_LEVEL_7,
+};
+#define M68K_IRQC_LEVEL_NUM (M68K_IRQC_LEVEL_7 - M68K_IRQC_LEVEL_1 + 1)
+
+typedef struct M68KIRQCState {
+ SysBusDevice parent_obj;
+
+ uint8_t ipr;
+ ArchCPU *cpu;
+
+ /* statistics */
+ uint64_t stats_irq_count[M68K_IRQC_LEVEL_NUM];
+} M68KIRQCState;
+
+#endif
diff --git a/include/hw/intc/mips_gic.h b/include/hw/intc/mips_gic.h
index 902a12b178..5e4c71edd4 100644
--- a/include/hw/intc/mips_gic.h
+++ b/include/hw/intc/mips_gic.h
@@ -13,7 +13,9 @@
#include "qemu/units.h"
#include "hw/timer/mips_gictimer.h"
+#include "hw/sysbus.h"
#include "cpu.h"
+#include "qom/object.h"
/*
* GIC Specific definitions
*/
@@ -169,13 +171,12 @@
#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
#define TYPE_MIPS_GIC "mips-gic"
-#define MIPS_GIC(obj) OBJECT_CHECK(MIPSGICState, (obj), TYPE_MIPS_GIC)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSGICState, MIPS_GIC)
/* Support up to 32 VPs and 256 IRQs */
#define GIC_MAX_VPS 32
#define GIC_MAX_INTRS 256
-typedef struct MIPSGICState MIPSGICState;
typedef struct MIPSGICIRQState MIPSGICIRQState;
typedef struct MIPSGICVPState MIPSGICVPState;
@@ -210,8 +211,8 @@ struct MIPSGICState {
/* GIC VP Timer */
MIPSGICTimerState *gic_timer;
- int32_t num_vps;
- int32_t num_irq;
+ uint32_t num_vps;
+ uint32_t num_irq;
};
#endif /* MIPS_GIC_H */
diff --git a/include/hw/intc/ppc-uic.h b/include/hw/intc/ppc-uic.h
new file mode 100644
index 0000000000..4d82e9a3c6
--- /dev/null
+++ b/include/hw/intc/ppc-uic.h
@@ -0,0 +1,78 @@
+/*
+ * "Universal" Interrupt Controller for PowerPC 4xx embedded processors
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_INTC_PPC_UIC_H
+#define HW_INTC_PPC_UIC_H
+
+#include "hw/ppc/ppc4xx.h"
+
+#define TYPE_PPC_UIC "ppc-uic"
+OBJECT_DECLARE_SIMPLE_TYPE(PPCUIC, PPC_UIC)
+
+/*
+ * QEMU interface:
+ * QOM property "cpu": link to the PPC CPU
+ * (no default, must be set)
+ * QOM property "dcr-base": base of the bank of DCR registers for the UIC
+ * (default 0x30)
+ * QOM property "use-vectors": true if the UIC has vector registers
+ * (default true)
+ * unnamed GPIO inputs 0..UIC_MAX_IRQ: input IRQ lines
+ * sysbus IRQs:
+ * 0 (PPCUIC_OUTPUT_INT): output INT line to the CPU
+ * 1 (PPCUIC_OUTPUT_CINT): output CINT line to the CPU
+ */
+
+#define UIC_MAX_IRQ 32
+
+/* Symbolic constants for the sysbus IRQ outputs */
+enum {
+ PPCUIC_OUTPUT_INT = 0,
+ PPCUIC_OUTPUT_CINT = 1,
+ PPCUIC_OUTPUT_NB,
+};
+
+struct PPCUIC {
+ /*< private >*/
+ Ppc4xxDcrDeviceState parent_obj;
+
+ /*< public >*/
+ qemu_irq output_int;
+ qemu_irq output_cint;
+
+ /* properties */
+ uint32_t dcr_base;
+ bool use_vectors;
+
+ uint32_t level; /* Remembers the state of level-triggered interrupts. */
+ uint32_t uicsr; /* Status register */
+ uint32_t uicer; /* Enable register */
+ uint32_t uiccr; /* Critical register */
+ uint32_t uicpr; /* Polarity register */
+ uint32_t uictr; /* Triggering register */
+ uint32_t uicvcr; /* Vector configuration register */
+ uint32_t uicvr;
+};
+
+#endif
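The interface comment above describes the UIC entirely in terms of QOM properties, unnamed GPIO inputs and sysbus outputs, so a board model can wire it with the generic qdev helpers. A rough sketch, assuming the device realizes like any other sysbus device; the cpu object, the destination IRQ lines and the property values are placeholders:

    /* Hypothetical board wiring for a PPC UIC (sketch only). */
    DeviceState *uic = qdev_new(TYPE_PPC_UIC);

    /* Properties listed in the interface comment above. */
    object_property_set_link(OBJECT(uic), "cpu", OBJECT(cpu), &error_fatal);
    qdev_prop_set_uint32(uic, "dcr-base", 0x30);   /* the documented default */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(uic), &error_fatal);

    /* Sysbus outputs 0 and 1 are the INT and CINT lines to the CPU. */
    sysbus_connect_irq(SYS_BUS_DEVICE(uic), PPCUIC_OUTPUT_INT, cpu_int_line);
    sysbus_connect_irq(SYS_BUS_DEVICE(uic), PPCUIC_OUTPUT_CINT, cpu_cint_line);

    /* Device interrupt outputs feed the unnamed GPIO inputs 0..UIC_MAX_IRQ-1. */
    qemu_irq uart0_irq = qdev_get_gpio_in(uic, 0);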
diff --git a/include/hw/intc/realview_gic.h b/include/hw/intc/realview_gic.h
index 1783ea11b9..f37339dc0b 100644
--- a/include/hw/intc/realview_gic.h
+++ b/include/hw/intc/realview_gic.h
@@ -12,17 +12,17 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gic.h"
+#include "qom/object.h"
#define TYPE_REALVIEW_GIC "realview_gic"
-#define REALVIEW_GIC(obj) \
- OBJECT_CHECK(RealViewGICState, (obj), TYPE_REALVIEW_GIC)
+OBJECT_DECLARE_SIMPLE_TYPE(RealViewGICState, REALVIEW_GIC)
-typedef struct RealViewGICState {
+struct RealViewGICState {
SysBusDevice parent_obj;
MemoryRegion container;
GICState gic;
-} RealViewGICState;
+};
#endif
diff --git a/include/hw/intc/riscv_aclint.h b/include/hw/intc/riscv_aclint.h
new file mode 100644
index 0000000000..693415eb6d
--- /dev/null
+++ b/include/hw/intc/riscv_aclint.h
@@ -0,0 +1,83 @@
+/*
+ * RISC-V ACLINT (Advanced Core Local Interruptor) interface
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017 SiFive, Inc.
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_ACLINT_H
+#define HW_RISCV_ACLINT_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_RISCV_ACLINT_MTIMER "riscv.aclint.mtimer"
+
+#define RISCV_ACLINT_MTIMER(obj) \
+ OBJECT_CHECK(RISCVAclintMTimerState, (obj), TYPE_RISCV_ACLINT_MTIMER)
+
+typedef struct RISCVAclintMTimerState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ uint64_t time_delta;
+ uint64_t *timecmp;
+ QEMUTimer **timers;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t hartid_base;
+ uint32_t num_harts;
+ uint32_t timecmp_base;
+ uint32_t time_base;
+ uint32_t aperture_size;
+ uint32_t timebase_freq;
+ qemu_irq *timer_irqs;
+} RISCVAclintMTimerState;
+
+DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size,
+ uint32_t hartid_base, uint32_t num_harts,
+ uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq,
+ bool provide_rdtime);
+
+#define TYPE_RISCV_ACLINT_SWI "riscv.aclint.swi"
+
+#define RISCV_ACLINT_SWI(obj) \
+ OBJECT_CHECK(RISCVAclintSwiState, (obj), TYPE_RISCV_ACLINT_SWI)
+
+typedef struct RISCVAclintSwiState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t hartid_base;
+ uint32_t num_harts;
+ uint32_t sswi;
+ qemu_irq *soft_irqs;
+} RISCVAclintSwiState;
+
+DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base,
+ uint32_t num_harts, bool sswi);
+
+enum {
+ RISCV_ACLINT_DEFAULT_MTIMECMP = 0x0,
+ RISCV_ACLINT_DEFAULT_MTIME = 0x7ff8,
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE = 0x8000,
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ = 10000000,
+ RISCV_ACLINT_MAX_HARTS = 4095,
+ RISCV_ACLINT_SWI_SIZE = 0x4000
+};
+
+#endif
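Together with the default constants in the enum above, the two create() helpers are enough to lay out a CLINT-compatible block. A minimal sketch with made-up base address and hart count, placing the SWI block at the start of the aperture and the MTIMER behind it, mirroring the address constants above:

    /* Hypothetical machine code (sketch): M-mode SWI block followed by MTIMER. */
    hwaddr aclint_base = 0x2000000;              /* placeholder base address */
    uint32_t num_harts = 4;                      /* placeholder hart count */

    riscv_aclint_swi_create(aclint_base, 0 /* hartid_base */, num_harts,
                            false /* sswi */);
    riscv_aclint_mtimer_create(aclint_base + RISCV_ACLINT_SWI_SIZE,
                               RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                               0 /* hartid_base */, num_harts,
                               RISCV_ACLINT_DEFAULT_MTIMECMP,
                               RISCV_ACLINT_DEFAULT_MTIME,
                               RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ,
                               true /* provide_rdtime */);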
diff --git a/include/hw/intc/riscv_aplic.h b/include/hw/intc/riscv_aplic.h
new file mode 100644
index 0000000000..de8532fbc3
--- /dev/null
+++ b/include/hw/intc/riscv_aplic.h
@@ -0,0 +1,79 @@
+/*
+ * RISC-V APLIC (Advanced Platform Level Interrupt Controller) interface
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_APLIC_H
+#define HW_RISCV_APLIC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_RISCV_APLIC "riscv.aplic"
+
+typedef struct RISCVAPLICState RISCVAPLICState;
+DECLARE_INSTANCE_CHECKER(RISCVAPLICState, RISCV_APLIC, TYPE_RISCV_APLIC)
+
+#define APLIC_MIN_SIZE 0x4000
+#define APLIC_SIZE_ALIGN(__x) (((__x) + (APLIC_MIN_SIZE - 1)) & \
+ ~(APLIC_MIN_SIZE - 1))
+#define APLIC_SIZE(__num_harts) (APLIC_MIN_SIZE + \
+ APLIC_SIZE_ALIGN(32 * (__num_harts)))
+
+struct RISCVAPLICState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ qemu_irq *external_irqs;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t bitfield_words;
+ uint32_t domaincfg;
+ uint32_t mmsicfgaddr;
+ uint32_t mmsicfgaddrH;
+ uint32_t smsicfgaddr;
+ uint32_t smsicfgaddrH;
+ uint32_t genmsi;
+ uint32_t *sourcecfg;
+ uint32_t *state;
+ uint32_t *target;
+ uint32_t *idelivery;
+ uint32_t *iforce;
+ uint32_t *ithreshold;
+
+ /* topology */
+#define QEMU_APLIC_MAX_CHILDREN 16
+ struct RISCVAPLICState *parent;
+ struct RISCVAPLICState *children[QEMU_APLIC_MAX_CHILDREN];
+ uint16_t num_children;
+
+ /* config */
+ uint32_t aperture_size;
+ uint32_t hartid_base;
+ uint32_t num_harts;
+ uint32_t iprio_mask;
+ uint32_t num_irqs;
+ bool msimode;
+ bool mmode;
+};
+
+void riscv_aplic_add_child(DeviceState *parent, DeviceState *child);
+
+DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
+ uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources,
+ uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent);
+
+#endif
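The APLIC_SIZE() macro gives a suitable aperture for a given hart count, and riscv_aplic_create() takes the whole geometry plus an optional parent domain. A sketch of a two-level hierarchy with placeholder addresses and counts; whether the create helper registers the child itself or the caller must also call riscv_aplic_add_child() is left to the implementation:

    /* Hypothetical M-mode root domain and S-mode child domain (sketch). */
    uint32_t num_harts = 2, num_sources = 96, iprio_bits = 3;  /* placeholders */
    hwaddr m_base = 0xc000000, s_base = 0xd000000;             /* placeholders */

    DeviceState *aplic_m = riscv_aplic_create(m_base, APLIC_SIZE(num_harts),
                                              0 /* hartid_base */, num_harts,
                                              num_sources, iprio_bits,
                                              false /* msimode */,
                                              true /* mmode */, NULL);
    riscv_aplic_create(s_base, APLIC_SIZE(num_harts), 0, num_harts,
                       num_sources, iprio_bits, false, false, aplic_m);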
diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h
new file mode 100644
index 0000000000..58c2aaa8dc
--- /dev/null
+++ b/include/hw/intc/riscv_imsic.h
@@ -0,0 +1,68 @@
+/*
+ * RISC-V IMSIC (Incoming Message Signal Interrupt Controller) interface
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_IMSIC_H
+#define HW_RISCV_IMSIC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_RISCV_IMSIC "riscv.imsic"
+
+typedef struct RISCVIMSICState RISCVIMSICState;
+DECLARE_INSTANCE_CHECKER(RISCVIMSICState, RISCV_IMSIC, TYPE_RISCV_IMSIC)
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_SIZE(__num_pages) ((__num_pages) * IMSIC_MMIO_PAGE_SZ)
+
+#define IMSIC_MMIO_HART_GUEST_MAX_BTIS 6
+#define IMSIC_MMIO_GROUP_MIN_SHIFT 24
+
+#define IMSIC_HART_NUM_GUESTS(__guest_bits) \
+ (1U << (__guest_bits))
+#define IMSIC_HART_SIZE(__guest_bits) \
+ (IMSIC_HART_NUM_GUESTS(__guest_bits) * IMSIC_MMIO_PAGE_SZ)
+#define IMSIC_GROUP_NUM_HARTS(__hart_bits) \
+ (1U << (__hart_bits))
+#define IMSIC_GROUP_SIZE(__hart_bits, __guest_bits) \
+ (IMSIC_GROUP_NUM_HARTS(__hart_bits) * IMSIC_HART_SIZE(__guest_bits))
+
+struct RISCVIMSICState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ qemu_irq *external_irqs;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t num_eistate;
+ uint32_t *eidelivery;
+ uint32_t *eithreshold;
+ uint32_t *eistate;
+
+ /* config */
+ bool mmode;
+ uint32_t hartid;
+ uint32_t num_pages;
+ uint32_t num_irqs;
+};
+
+DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
+ uint32_t num_pages, uint32_t num_ids);
+
+#endif
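The geometry macros encode the MMIO layout: one 4 KiB page per interrupt file, IMSIC_HART_SIZE() bytes per hart and IMSIC_GROUP_SIZE() per group. An M-level file has no guest interrupt files, so a per-hart M-mode instance occupies a single page. A sketch with placeholder addresses and counts:

    /* Hypothetical M-level IMSICs, one per hart (sketch). */
    uint32_t num_harts = 4, num_ids = 255;       /* placeholder values */
    hwaddr m_base = 0x24000000;                  /* placeholder base address */

    for (uint32_t i = 0; i < num_harts; i++) {
        /* IMSIC_HART_SIZE(0) == IMSIC_MMIO_PAGE_SZ: no guest files in M-mode. */
        riscv_imsic_create(m_base + i * IMSIC_HART_SIZE(0), i,
                           true /* mmode */, 1 /* num_pages */, num_ids);
    }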
diff --git a/include/hw/intc/rx_icu.h b/include/hw/intc/rx_icu.h
new file mode 100644
index 0000000000..b23504f3dd
--- /dev/null
+++ b/include/hw/intc/rx_icu.h
@@ -0,0 +1,76 @@
+/*
+ * RX Interrupt Control Unit
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_INTC_RX_ICU_H
+#define HW_INTC_RX_ICU_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+enum TRG_MODE {
+ TRG_LEVEL = 0,
+ TRG_NEDGE = 1, /* Falling */
+ TRG_PEDGE = 2, /* Rising */
+ TRG_BEDGE = 3, /* Both */
+};
+
+struct IRQSource {
+ enum TRG_MODE sense;
+ int level;
+};
+
+enum {
+ /* Software interrupt request */
+ SWI = 27,
+ NR_IRQS = 256
+};
+
+struct RXICUState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion memory;
+ struct IRQSource src[NR_IRQS];
+ uint32_t nr_irqs;
+ uint8_t *map;
+ uint32_t nr_sense;
+ uint8_t *init_sense;
+
+ uint8_t ir[NR_IRQS];
+ uint8_t dtcer[NR_IRQS];
+ uint8_t ier[NR_IRQS / 8];
+ uint8_t ipr[142];
+ uint8_t dmasr[4];
+ uint16_t fir;
+ uint8_t nmisr;
+ uint8_t nmier;
+ uint8_t nmiclr;
+ uint8_t nmicr;
+ int16_t req_irq;
+ qemu_irq _irq;
+ qemu_irq _fir;
+ qemu_irq _swi;
+};
+
+#define TYPE_RX_ICU "rx-icu"
+OBJECT_DECLARE_SIMPLE_TYPE(RXICUState, RX_ICU)
+
+#endif /* HW_INTC_RX_ICU_H */
diff --git a/include/hw/riscv/sifive_plic.h b/include/hw/intc/sifive_plic.h
index 688cd97f82..d3f45ec248 100644
--- a/include/hw/riscv/sifive_plic.h
+++ b/include/hw/intc/sifive_plic.h
@@ -21,17 +21,18 @@
#ifndef HW_SIFIVE_PLIC_H
#define HW_SIFIVE_PLIC_H
-#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_SIFIVE_PLIC "riscv.sifive.plic"
-#define SIFIVE_PLIC(obj) \
- OBJECT_CHECK(SiFivePLICState, (obj), TYPE_SIFIVE_PLIC)
+typedef struct SiFivePLICState SiFivePLICState;
+DECLARE_INSTANCE_CHECKER(SiFivePLICState, SIFIVE_PLIC,
+ TYPE_SIFIVE_PLIC)
typedef enum PLICMode {
PLICMode_U,
PLICMode_S,
- PLICMode_H,
PLICMode_M
} PLICMode;
@@ -41,14 +42,16 @@ typedef struct PLICAddr {
PLICMode mode;
} PLICAddr;
-typedef struct SiFivePLICState {
+struct SiFivePLICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion mmio;
uint32_t num_addrs;
+ uint32_t num_harts;
uint32_t bitfield_words;
+ uint32_t num_enables;
PLICAddr *addr_config;
uint32_t *source_priority;
uint32_t *target_priority;
@@ -58,6 +61,7 @@ typedef struct SiFivePLICState {
/* config */
char *hart_config;
+ uint32_t hartid_base;
uint32_t num_sources;
uint32_t num_priorities;
uint32_t priority_base;
@@ -67,17 +71,17 @@ typedef struct SiFivePLICState {
uint32_t context_base;
uint32_t context_stride;
uint32_t aperture_size;
-} SiFivePLICState;
-void sifive_plic_raise_irq(SiFivePLICState *plic, uint32_t irq);
-void sifive_plic_lower_irq(SiFivePLICState *plic, uint32_t irq);
+ qemu_irq *m_external_irqs;
+ qemu_irq *s_external_irqs;
+};
DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
- uint32_t num_sources, uint32_t num_priorities,
- uint32_t priority_base, uint32_t pending_base,
- uint32_t enable_base, uint32_t enable_stride,
- uint32_t context_base, uint32_t context_stride,
- uint32_t aperture_size);
+ uint32_t num_harts,
+ uint32_t hartid_base, uint32_t num_sources,
+ uint32_t num_priorities, uint32_t priority_base,
+ uint32_t pending_base, uint32_t enable_base,
+ uint32_t enable_stride, uint32_t context_base,
+ uint32_t context_stride, uint32_t aperture_size);
#endif
-
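The widened sifive_plic_create() prototype now takes the hart topology (num_harts, hartid_base) alongside the register layout. A call then looks roughly like the sketch below; the hart_config string encodes the privilege contexts per hart ("MS,MS" here meaning two harts, each with an M and an S context), and every layout number is a placeholder rather than the map of any particular SoC:

    DeviceState *plic =
        sifive_plic_create(0xc000000 /* addr */, (char *)"MS,MS",
                           2 /* num_harts */, 0 /* hartid_base */,
                           127 /* num_sources */, 7 /* num_priorities */,
                           0x04 /* priority_base */, 0x1000 /* pending_base */,
                           0x2000 /* enable_base */, 0x80 /* enable_stride */,
                           0x200000 /* context_base */,
                           0x1000 /* context_stride */,
                           0x4000000 /* aperture_size */);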
diff --git a/include/hw/intc/xlnx-pmu-iomod-intc.h b/include/hw/intc/xlnx-pmu-iomod-intc.h
index 01c9d040b8..ccc8bd272a 100644
--- a/include/hw/intc/xlnx-pmu-iomod-intc.h
+++ b/include/hw/intc/xlnx-pmu-iomod-intc.h
@@ -22,21 +22,21 @@
* THE SOFTWARE.
*/
-#ifndef XLNX_PMU_IO_INTC_H
-#define XLNX_PMU_IO_INTC_H
+#ifndef HW_INTC_XLNX_PMU_IOMOD_INTC_H
+#define HW_INTC_XLNX_PMU_IOMOD_INTC_H
#include "hw/sysbus.h"
#include "hw/register.h"
+#include "qom/object.h"
#define TYPE_XLNX_PMU_IO_INTC "xlnx.pmu_io_intc"
-#define XLNX_PMU_IO_INTC(obj) \
- OBJECT_CHECK(XlnxPMUIOIntc, (obj), TYPE_XLNX_PMU_IO_INTC)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxPMUIOIntc, XLNX_PMU_IO_INTC)
/* This is R_PIT3_CONTROL + 1 */
#define XLNXPMUIOINTC_R_MAX (0x78 + 1)
-typedef struct XlnxPMUIOIntc {
+struct XlnxPMUIOIntc {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -52,6 +52,6 @@ typedef struct XlnxPMUIOIntc {
uint32_t regs[XLNXPMUIOINTC_R_MAX];
RegisterInfo regs_info[XLNXPMUIOINTC_R_MAX];
-} XlnxPMUIOIntc;
+};
-#endif /* XLNX_PMU_IO_INTC_H */
+#endif /* HW_INTC_XLNX_PMU_IOMOD_INTC_H */
diff --git a/include/hw/intc/xlnx-zynqmp-ipi.h b/include/hw/intc/xlnx-zynqmp-ipi.h
index 866c719c6f..33eff1d4f6 100644
--- a/include/hw/intc/xlnx-zynqmp-ipi.h
+++ b/include/hw/intc/xlnx-zynqmp-ipi.h
@@ -27,18 +27,18 @@
#include "hw/sysbus.h"
#include "hw/register.h"
+#include "qom/object.h"
#define TYPE_XLNX_ZYNQMP_IPI "xlnx.zynqmp_ipi"
-#define XLNX_ZYNQMP_IPI(obj) \
- OBJECT_CHECK(XlnxZynqMPIPI, (obj), TYPE_XLNX_ZYNQMP_IPI)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPIPI, XLNX_ZYNQMP_IPI)
/* This is R_IPI_IDR + 1 */
#define R_XLNX_ZYNQMP_IPI_MAX ((0x1c / 4) + 1)
#define NUM_IPIS 11
-typedef struct XlnxZynqMPIPI {
+struct XlnxZynqMPIPI {
/* Private */
SysBusDevice parent_obj;
@@ -51,6 +51,6 @@ typedef struct XlnxZynqMPIPI {
uint32_t regs[R_XLNX_ZYNQMP_IPI_MAX];
RegisterInfo regs_info[R_XLNX_ZYNQMP_IPI_MAX];
-} XlnxZynqMPIPI;
+};
#endif /* XLNX_ZYNQMP_IPI_H */
diff --git a/include/hw/ipack/ipack.h b/include/hw/ipack/ipack.h
index e33e032ced..cbcdda509d 100644
--- a/include/hw/ipack/ipack.h
+++ b/include/hw/ipack/ipack.h
@@ -11,12 +11,12 @@
#ifndef QEMU_IPACK_H
#define QEMU_IPACK_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct IPackBus IPackBus;
#define TYPE_IPACK_BUS "IndustryPack"
-#define IPACK_BUS(obj) OBJECT_CHECK(IPackBus, (obj), TYPE_IPACK_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(IPackBus, IPACK_BUS)
struct IPackBus {
/*< private >*/
@@ -28,16 +28,10 @@ struct IPackBus {
qemu_irq_handler set_irq;
};
-typedef struct IPackDevice IPackDevice;
-typedef struct IPackDeviceClass IPackDeviceClass;
#define TYPE_IPACK_DEVICE "ipack-device"
-#define IPACK_DEVICE(obj) \
- OBJECT_CHECK(IPackDevice, (obj), TYPE_IPACK_DEVICE)
-#define IPACK_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(IPackDeviceClass, (klass), TYPE_IPACK_DEVICE)
-#define IPACK_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IPackDeviceClass, (obj), TYPE_IPACK_DEVICE)
+OBJECT_DECLARE_TYPE(IPackDevice, IPackDeviceClass,
+ IPACK_DEVICE)
struct IPackDeviceClass {
/*< private >*/
@@ -79,9 +73,9 @@ extern const VMStateDescription vmstate_ipack_device;
VMSTATE_STRUCT(_field, _state, 1, vmstate_ipack_device, IPackDevice)
IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot);
-void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size,
- DeviceState *parent,
- const char *name, uint8_t n_slots,
- qemu_irq_handler handler);
+void ipack_bus_init(IPackBus *bus, size_t bus_size,
+ DeviceState *parent,
+ uint8_t n_slots,
+ qemu_irq_handler handler);
#endif
diff --git a/include/hw/ipmi/ipmi.h b/include/hw/ipmi/ipmi.h
index 99661d2bf0..77a7213ed9 100644
--- a/include/hw/ipmi/ipmi.h
+++ b/include/hw/ipmi/ipmi.h
@@ -26,8 +26,8 @@
#define HW_IPMI_H
#include "exec/memory.h"
-#include "qemu-common.h"
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
#define MAX_IPMI_MSG_SIZE 300
@@ -54,8 +54,10 @@ enum ipmi_op {
#define IPMI_CC_INVALID_DATA_FIELD 0xcc
#define IPMI_CC_BMC_INIT_IN_PROGRESS 0xd2
#define IPMI_CC_COMMAND_NOT_SUPPORTED 0xd5
+#define IPMI_CC_UNSPECIFIED 0xff
#define IPMI_NETFN_APP 0x06
+#define IPMI_NETFN_OEM 0x3a
#define IPMI_DEBUG 1
@@ -109,17 +111,21 @@ uint32_t ipmi_next_uuid(void);
#define TYPE_IPMI_INTERFACE "ipmi-interface"
#define IPMI_INTERFACE(obj) \
INTERFACE_CHECK(IPMIInterface, (obj), TYPE_IPMI_INTERFACE)
-#define IPMI_INTERFACE_CLASS(class) \
- OBJECT_CLASS_CHECK(IPMIInterfaceClass, (class), TYPE_IPMI_INTERFACE)
-#define IPMI_INTERFACE_GET_CLASS(class) \
- OBJECT_GET_CLASS(IPMIInterfaceClass, (class), TYPE_IPMI_INTERFACE)
+typedef struct IPMIInterfaceClass IPMIInterfaceClass;
+DECLARE_CLASS_CHECKERS(IPMIInterfaceClass, IPMI_INTERFACE,
+ TYPE_IPMI_INTERFACE)
typedef struct IPMIInterface IPMIInterface;
-typedef struct IPMIInterfaceClass {
+struct IPMIInterfaceClass {
InterfaceClass parent;
- void (*init)(struct IPMIInterface *s, Error **errp);
+ /*
+ * min_size is the requested I/O size and must be a power of 2.
+ * This is so PCI (or other busses) can request a bigger range.
+ * Use 0 for the default.
+ */
+ void (*init)(struct IPMIInterface *s, unsigned int min_size, Error **errp);
/*
* Perform various operations on the hardware. If checkonly is
@@ -164,28 +170,24 @@ typedef struct IPMIInterfaceClass {
* Return the firmware info for a device.
*/
void (*get_fwinfo)(struct IPMIInterface *s, IPMIFwInfo *info);
-} IPMIInterfaceClass;
+};
/*
* Define a BMC simulator (or perhaps a connection to a real BMC)
*/
#define TYPE_IPMI_BMC "ipmi-bmc"
-#define IPMI_BMC(obj) \
- OBJECT_CHECK(IPMIBmc, (obj), TYPE_IPMI_BMC)
-#define IPMI_BMC_CLASS(obj_class) \
- OBJECT_CLASS_CHECK(IPMIBmcClass, (obj_class), TYPE_IPMI_BMC)
-#define IPMI_BMC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IPMIBmcClass, (obj), TYPE_IPMI_BMC)
-
-typedef struct IPMIBmc {
+OBJECT_DECLARE_TYPE(IPMIBmc, IPMIBmcClass,
+ IPMI_BMC)
+
+struct IPMIBmc {
DeviceState parent;
uint8_t slave_addr;
IPMIInterface *intf;
-} IPMIBmc;
+};
-typedef struct IPMIBmcClass {
+struct IPMIBmcClass {
DeviceClass parent;
/* Called when the system resets to report to the bmc. */
@@ -198,7 +200,7 @@ typedef struct IPMIBmcClass {
uint8_t *cmd, unsigned int cmd_len,
unsigned int max_cmd_len,
uint8_t msg_id);
-} IPMIBmcClass;
+};
/*
* Add a link property to obj that points to a BMC.
@@ -261,4 +263,43 @@ int ipmi_bmc_sdr_find(IPMIBmc *b, uint16_t recid,
const struct ipmi_sdr_compact **sdr, uint16_t *nextrec);
void ipmi_bmc_gen_event(IPMIBmc *b, uint8_t *evt, bool log);
+#define TYPE_IPMI_BMC_SIMULATOR "ipmi-bmc-sim"
+OBJECT_DECLARE_SIMPLE_TYPE(IPMIBmcSim, IPMI_BMC_SIMULATOR)
+
+
+typedef struct RspBuffer {
+ uint8_t buffer[MAX_IPMI_MSG_SIZE];
+ unsigned int len;
+} RspBuffer;
+
+static inline void rsp_buffer_set_error(RspBuffer *rsp, uint8_t byte)
+{
+ rsp->buffer[2] = byte;
+}
+
+/* Add a byte to the response. */
+static inline void rsp_buffer_push(RspBuffer *rsp, uint8_t byte)
+{
+ if (rsp->len >= sizeof(rsp->buffer)) {
+ rsp_buffer_set_error(rsp, IPMI_CC_REQUEST_DATA_TRUNCATED);
+ return;
+ }
+ rsp->buffer[rsp->len++] = byte;
+}
+
+typedef struct IPMICmdHandler {
+ void (*cmd_handler)(IPMIBmcSim *s,
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp);
+ unsigned int cmd_len_min;
+} IPMICmdHandler;
+
+typedef struct IPMINetfn {
+ unsigned int cmd_nums;
+ const IPMICmdHandler *cmd_handlers;
+} IPMINetfn;
+
+int ipmi_sim_register_netfn(IPMIBmcSim *s, unsigned int netfn,
+ const IPMINetfn *netfnd);
+
#endif
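The new RspBuffer, IPMICmdHandler and IPMINetfn types plus ipmi_sim_register_netfn() let code outside the simulator hook additional command sets into it. A hedged sketch of registering a hypothetical OEM command; the handler name, command number and minimum command length are all made up:

    static void oem_get_magic(IPMIBmcSim *s, uint8_t *cmd,
                              unsigned int cmd_len, RspBuffer *rsp)
    {
        rsp_buffer_push(rsp, 0x42);      /* arbitrary payload byte */
    }

    static const IPMICmdHandler oem_cmds[] = {
        [0x01] = { oem_get_magic, 2 /* assumed cmd_len_min */ },
    };

    static const IPMINetfn oem_netfn = {
        .cmd_nums = ARRAY_SIZE(oem_cmds),
        .cmd_handlers = oem_cmds,
    };

    /* Somewhere with access to the simulator instance: */
    ipmi_sim_register_netfn(bmc_sim, IPMI_NETFN_OEM, &oem_netfn);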
diff --git a/include/hw/ipmi/ipmi_bt.h b/include/hw/ipmi/ipmi_bt.h
new file mode 100644
index 0000000000..8a4316ea7c
--- /dev/null
+++ b/include/hw/ipmi/ipmi_bt.h
@@ -0,0 +1,73 @@
+/*
+ * QEMU IPMI BT emulation
+ *
+ * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_IPMI_BT_H
+#define HW_IPMI_BT_H
+
+#include "hw/ipmi/ipmi.h"
+
+typedef struct IPMIBT {
+ IPMIBmc *bmc;
+
+ bool do_wake;
+
+ bool obf_irq_set;
+ bool atn_irq_set;
+ bool irqs_enabled;
+
+ uint8_t outmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t outpos;
+ uint32_t outlen;
+
+ uint8_t inmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t inlen;
+
+ uint8_t control_reg;
+ uint8_t mask_reg;
+
+ /*
+ * This is a response number that we send with the command to make
+ * sure that the response matches the command.
+ */
+ uint8_t waiting_rsp;
+ uint8_t waiting_seq;
+
+ uint32_t io_base;
+ unsigned long io_length;
+ MemoryRegion io;
+ unsigned long size_mask;
+
+ void (*raise_irq)(struct IPMIBT *ib);
+ void (*lower_irq)(struct IPMIBT *ib);
+ void *opaque;
+
+ bool use_irq;
+} IPMIBT;
+
+void ipmi_bt_get_fwinfo(IPMIBT *ik, IPMIFwInfo *info);
+void ipmi_bt_class_init(IPMIInterfaceClass *iic);
+extern const VMStateDescription vmstate_IPMIBT;
+int ipmi_bt_vmstate_post_load(void *opaque, int version);
+
+#endif /* HW_IPMI_BT_H */
diff --git a/include/hw/ipmi/ipmi_kcs.h b/include/hw/ipmi/ipmi_kcs.h
new file mode 100644
index 0000000000..6e6ef4c539
--- /dev/null
+++ b/include/hw/ipmi/ipmi_kcs.h
@@ -0,0 +1,76 @@
+/*
+ * QEMU IPMI KCS emulation
+ *
+ * Copyright (c) 2015,2017 Corey Minyard, MontaVista Software, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_IPMI_KCS_H
+#define HW_IPMI_KCS_H
+
+#include "hw/ipmi/ipmi.h"
+
+typedef struct IPMIKCS {
+ IPMIBmc *bmc;
+
+ bool do_wake;
+
+ bool obf_irq_set;
+ bool atn_irq_set;
+ bool irqs_enabled;
+
+ uint8_t outmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t outpos;
+ uint32_t outlen;
+
+ uint8_t inmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t inlen;
+ bool write_end;
+
+ uint8_t status_reg;
+ uint8_t data_out_reg;
+
+ int16_t data_in_reg; /* -1 means not written */
+ int16_t cmd_reg;
+
+ /*
+ * This is a response number that we send with the command to make
+ * sure that the response matches the command.
+ */
+ uint8_t waiting_rsp;
+
+ uint32_t io_base;
+ unsigned long io_length;
+ MemoryRegion io;
+ unsigned long size_mask;
+
+ void (*raise_irq)(struct IPMIKCS *ik);
+ void (*lower_irq)(struct IPMIKCS *ik);
+ void *opaque;
+
+ bool use_irq;
+} IPMIKCS;
+
+void ipmi_kcs_get_fwinfo(IPMIKCS *ik, IPMIFwInfo *info);
+void ipmi_kcs_class_init(IPMIInterfaceClass *iic);
+extern const VMStateDescription vmstate_IPMIKCS;
+int ipmi_kcs_vmstate_post_load(void *opaque, int version);
+
+#endif /* HW_IPMI_KCS_H */
diff --git a/include/hw/irq.h b/include/hw/irq.h
index 7a40e3ed26..645b73d251 100644
--- a/include/hw/irq.h
+++ b/include/hw/irq.h
@@ -5,10 +5,6 @@
#define TYPE_IRQ "irq"
-typedef struct IRQState *qemu_irq;
-
-typedef void (*qemu_irq_handler)(void *opaque, int n, int level);
-
void qemu_set_irq(qemu_irq irq, int level);
static inline void qemu_irq_raise(qemu_irq irq)
@@ -50,18 +46,26 @@ void qemu_free_irq(qemu_irq irq);
/* Returns a new IRQ with opposite polarity. */
qemu_irq qemu_irq_invert(qemu_irq irq);
-/* Returns a new IRQ which feeds into both the passed IRQs.
- * It's probably better to use the TYPE_SPLIT_IRQ device instead.
- */
-qemu_irq qemu_irq_split(qemu_irq irq1, qemu_irq irq2);
-
-/* Returns a new IRQ set which connects 1:1 to another IRQ set, which
- * may be set later.
- */
-qemu_irq *qemu_irq_proxy(qemu_irq **target, int n);
-
/* For internal use in qtest. Similar to qemu_irq_split, but operating
on an existing vector of qemu_irq. */
void qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n);
+/**
+ * qemu_irq_is_connected: Return true if IRQ line is wired up
+ *
+ * If a qemu_irq has a device on the other (receiving) end of it,
+ * return true; otherwise return false.
+ *
+ * Usually device models don't need to care whether the machine model
+ * has wired up their outbound qemu_irq lines, because functions like
+ * qemu_set_irq() silently do nothing if there is nothing on the other
+ * end of the line. However occasionally a device model will want to
+ * provide default behaviour if its output is left floating, and
+ * it can use this function to identify when that is the case.
+ */
+static inline bool qemu_irq_is_connected(qemu_irq irq)
+{
+ return irq != NULL;
+}
+
#endif
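A typical consumer of qemu_irq_is_connected() is a device model that wants a fallback when the board leaves its output floating. A minimal sketch with made-up type and helper names:

    static void mydev_update_output(MyDeviceState *s)    /* hypothetical type */
    {
        if (!qemu_irq_is_connected(s->out_irq)) {
            mydev_handle_internally(s);                  /* hypothetical helper */
            return;
        }
        qemu_set_irq(s->out_irq, s->output_level);
    }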
diff --git a/include/hw/isa/apm.h b/include/hw/isa/apm.h
index b7098bf7ca..b6e070c00e 100644
--- a/include/hw/isa/apm.h
+++ b/include/hw/isa/apm.h
@@ -1,8 +1,6 @@
#ifndef APM_H
#define APM_H
-#include "qemu-common.h"
-#include "hw/hw.h"
#include "exec/memory.h"
#define APM_CNT_IOPORT 0xb2
diff --git a/include/hw/isa/i8259_internal.h b/include/hw/isa/i8259_internal.h
index f742c2a726..f9dcc4163e 100644
--- a/include/hw/isa/i8259_internal.h
+++ b/include/hw/isa/i8259_internal.h
@@ -25,28 +25,21 @@
#ifndef QEMU_I8259_INTERNAL_H
#define QEMU_I8259_INTERNAL_H
-#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "hw/intc/intc.h"
+#include "hw/intc/i8259.h"
+#include "qom/object.h"
-typedef struct PICCommonState PICCommonState;
#define TYPE_PIC_COMMON "pic-common"
-#define PIC_COMMON(obj) \
- OBJECT_CHECK(PICCommonState, (obj), TYPE_PIC_COMMON)
-#define PIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(PICCommonClass, (klass), TYPE_PIC_COMMON)
-#define PIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PICCommonClass, (obj), TYPE_PIC_COMMON)
+OBJECT_DECLARE_TYPE(PICCommonState, PICCommonClass, PIC_COMMON)
-typedef struct PICCommonClass
-{
- ISADeviceClass parent_class;
+struct PICCommonClass {
+ DeviceClass parent_class;
void (*pre_save)(PICCommonState *s);
void (*post_load)(PICCommonState *s);
-} PICCommonClass;
+};
struct PICCommonState {
ISADevice parent_obj;
@@ -68,6 +61,7 @@ struct PICCommonState {
uint8_t single_mode; /* true if slave pic is not initialized */
uint8_t elcr; /* PIIX edge/trigger selection*/
uint8_t elcr_mask;
+ uint8_t ltim; /* Edge/Level Bank Select (pre-PIIX, chip-wide) */
qemu_irq int_out[1];
uint32_t master; /* reflects /SP input pin */
uint32_t iobase;
@@ -79,8 +73,5 @@ struct PICCommonState {
void pic_reset_common(PICCommonState *s);
ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master);
void pic_stat_update_irq(int irq, int level);
-bool pic_get_statistics(InterruptStatsProvider *obj,
- uint64_t **irq_counts, unsigned int *nb_irqs);
-void pic_print_info(InterruptStatsProvider *obj, Monitor *mon);
#endif /* QEMU_I8259_INTERNAL_H */
diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h
index e62ac91c19..40d6224a4e 100644
--- a/include/hw/isa/isa.h
+++ b/include/hw/isa/isa.h
@@ -5,41 +5,22 @@
#include "exec/memory.h"
#include "exec/ioport.h"
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
#define ISA_NUM_IRQS 16
#define TYPE_ISA_DEVICE "isa-device"
-#define ISA_DEVICE(obj) \
- OBJECT_CHECK(ISADevice, (obj), TYPE_ISA_DEVICE)
-#define ISA_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(ISADeviceClass, (klass), TYPE_ISA_DEVICE)
-#define ISA_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ISADeviceClass, (obj), TYPE_ISA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(ISADevice, ISA_DEVICE)
#define TYPE_ISA_BUS "ISA"
-#define ISA_BUS(obj) OBJECT_CHECK(ISABus, (obj), TYPE_ISA_BUS)
-
-#define TYPE_APPLE_SMC "isa-applesmc"
-#define APPLESMC_MAX_DATA_LENGTH 32
-#define APPLESMC_PROP_IO_BASE "iobase"
-
-static inline uint16_t applesmc_port(void)
-{
- Object *obj = object_resolve_path_type("", TYPE_APPLE_SMC, NULL);
-
- if (obj) {
- return object_property_get_uint(obj, APPLESMC_PROP_IO_BASE, NULL);
- }
- return 0;
-}
+OBJECT_DECLARE_SIMPLE_TYPE(ISABus, ISA_BUS)
#define TYPE_ISADMA "isa-dma"
-#define ISADMA_CLASS(klass) \
- OBJECT_CLASS_CHECK(IsaDmaClass, (klass), TYPE_ISADMA)
-#define ISADMA_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IsaDmaClass, (obj), TYPE_ISADMA)
+typedef struct IsaDmaClass IsaDmaClass;
+DECLARE_CLASS_CHECKERS(IsaDmaClass, ISADMA,
+ TYPE_ISADMA)
#define ISADMA(obj) \
INTERFACE_CHECK(IsaDma, (obj), TYPE_ISADMA)
@@ -53,10 +34,9 @@ typedef enum {
typedef int (*IsaDmaTransferHandler)(void *opaque, int nchan, int pos,
int size);
-typedef struct IsaDmaClass {
+struct IsaDmaClass {
InterfaceClass parent;
- IsaDmaTransferMode (*get_transfer_mode)(IsaDma *obj, int nchan);
bool (*has_autoinitialization)(IsaDma *obj, int nchan);
int (*read_memory)(IsaDma *obj, int nchan, void *buf, int pos, int len);
int (*write_memory)(IsaDma *obj, int nchan, void *buf, int pos, int len);
@@ -66,11 +46,7 @@ typedef struct IsaDmaClass {
void (*register_channel)(IsaDma *obj, int nchan,
IsaDmaTransferHandler transfer_handler,
void *opaque);
-} IsaDmaClass;
-
-typedef struct ISADeviceClass {
- DeviceClass parent_class;
-} ISADeviceClass;
+};
struct ISABus {
/*< private >*/
@@ -79,7 +55,7 @@ struct ISABus {
MemoryRegion *address_space;
MemoryRegion *address_space_io;
- qemu_irq *irqs;
+ qemu_irq *irqs_in;
IsaDma *dma[2];
};
@@ -88,27 +64,35 @@ struct ISADevice {
DeviceState parent_obj;
/*< public >*/
- uint32_t isairq[2];
- int nirqs;
int ioport_id;
};
ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space,
MemoryRegion *address_space_io, Error **errp);
-void isa_bus_irqs(ISABus *bus, qemu_irq *irqs);
-qemu_irq isa_get_irq(ISADevice *dev, int isairq);
-void isa_init_irq(ISADevice *dev, qemu_irq *p, int isairq);
-void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, int isairq);
+void isa_bus_register_input_irqs(ISABus *bus, qemu_irq *irqs_in);
void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16);
-IsaDma *isa_get_dma(ISABus *bus, int nchan);
-MemoryRegion *isa_address_space(ISADevice *dev);
-MemoryRegion *isa_address_space_io(ISADevice *dev);
-ISADevice *isa_create(ISABus *bus, const char *name);
-ISADevice *isa_try_create(ISABus *bus, const char *name);
+IsaDma *isa_bus_get_dma(ISABus *bus, int nchan);
+/**
+ * isa_bus_get_irq: Return input IRQ on ISA bus.
+ * @bus: the #ISABus to plug ISA devices on.
+ * @irqnum: the ISA IRQ number.
+ *
+ * Return IRQ @irqnum from the PIC associated on ISA @bus.
+ */
+qemu_irq isa_bus_get_irq(ISABus *bus, unsigned irqnum);
+ISADevice *isa_new(const char *name);
+ISADevice *isa_try_new(const char *name);
+bool isa_realize_and_unref(ISADevice *dev, ISABus *bus, Error **errp);
ISADevice *isa_create_simple(ISABus *bus, const char *name);
ISADevice *isa_vga_init(ISABus *bus);
+qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq);
+void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq);
+MemoryRegion *isa_address_space(ISADevice *dev);
+MemoryRegion *isa_address_space_io(ISADevice *dev);
+ISABus *isa_bus_from_device(ISADevice *dev);
+
/**
* isa_register_ioport: Install an I/O port region on the ISA bus.
*
@@ -135,16 +119,14 @@ void isa_register_ioport(ISADevice *dev, MemoryRegion *io, uint16_t start);
* @portio: the ports, sorted by offset.
* @opaque: passed into the portio callbacks.
* @name: passed into memory_region_init_io.
+ *
+ * Returns: 0 on success, negative error code otherwise (e.g. if the
+ * ISA bus is not available)
*/
-void isa_register_portio_list(ISADevice *dev,
- PortioList *piolist,
- uint16_t start,
- const MemoryRegionPortio *portio,
- void *opaque, const char *name);
-
-static inline ISABus *isa_bus_from_device(ISADevice *d)
-{
- return ISA_BUS(qdev_get_parent_bus(DEVICE(d)));
-}
+int isa_register_portio_list(ISADevice *dev,
+ PortioList *piolist,
+ uint16_t start,
+ const MemoryRegionPortio *portio,
+ void *opaque, const char *name);
#endif
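Because isa_register_portio_list() now reports failure instead of returning void, device realize functions are expected to propagate the error. A sketch with a hypothetical device type and port list:

    static void mydev_realizefn(DeviceState *dev, Error **errp)
    {
        MyISADevice *s = MY_ISA_DEVICE(dev);             /* hypothetical type */

        if (isa_register_portio_list(ISA_DEVICE(dev), &s->portio_list,
                                     s->iobase, mydev_portio /* hypothetical */,
                                     s, "mydev") < 0) {
            error_setg(errp, "%s: no ISA bus available",
                       object_get_typename(OBJECT(dev)));
        }
    }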
diff --git a/include/hw/isa/pc87312.h b/include/hw/isa/pc87312.h
index e16263d4b1..edaf723f4d 100644
--- a/include/hw/isa/pc87312.h
+++ b/include/hw/isa/pc87312.h
@@ -26,12 +26,13 @@
#define QEMU_PC87312_H
#include "hw/isa/superio.h"
+#include "qom/object.h"
-#define TYPE_PC87312_SUPERIO "pc87312"
-#define PC87312(obj) OBJECT_CHECK(PC87312State, (obj), TYPE_PC87312_SUPERIO)
+#define TYPE_PC87312 "pc87312"
+OBJECT_DECLARE_SIMPLE_TYPE(PC87312State, PC87312)
-typedef struct PC87312State {
+struct PC87312State {
/*< private >*/
ISASuperIODevice parent_dev;
/*< public >*/
@@ -49,7 +50,7 @@ typedef struct PC87312State {
uint8_t selected_index;
uint8_t regs[3];
-} PC87312State;
+};
#endif
diff --git a/include/hw/isa/superio.h b/include/hw/isa/superio.h
index 345f006081..0dc45104d4 100644
--- a/include/hw/isa/superio.h
+++ b/include/hw/isa/superio.h
@@ -3,28 +3,26 @@
*
* Copyright (c) 2018 Philippe Mathieu-Daudé
*
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef HW_ISA_SUPERIO_H
#define HW_ISA_SUPERIO_H
-#include "qemu-common.h"
#include "sysemu/sysemu.h"
#include "hw/isa/isa.h"
+#include "qom/object.h"
#define TYPE_ISA_SUPERIO "isa-superio"
-#define ISA_SUPERIO(obj) \
- OBJECT_CHECK(ISASuperIODevice, (obj), TYPE_ISA_SUPERIO)
-#define ISA_SUPERIO_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ISASuperIOClass, (obj), TYPE_ISA_SUPERIO)
-#define ISA_SUPERIO_CLASS(klass) \
- OBJECT_CLASS_CHECK(ISASuperIOClass, (klass), TYPE_ISA_SUPERIO)
+typedef struct ISASuperIOClass ISASuperIOClass;
+typedef struct ISASuperIODevice ISASuperIODevice;
+DECLARE_OBJ_CHECKERS(ISASuperIODevice, ISASuperIOClass,
+ ISA_SUPERIO, TYPE_ISA_SUPERIO)
#define SUPERIO_MAX_SERIAL_PORTS 4
-typedef struct ISASuperIODevice {
+struct ISASuperIODevice {
/*< private >*/
ISADevice parent_obj;
/*< public >*/
@@ -34,7 +32,7 @@ typedef struct ISASuperIODevice {
ISADevice *floppy;
ISADevice *kbc;
ISADevice *ide;
-} ISASuperIODevice;
+};
typedef struct ISASuperIOFuncs {
size_t count;
@@ -44,9 +42,9 @@ typedef struct ISASuperIOFuncs {
unsigned int (*get_dma)(ISASuperIODevice *sio, uint8_t index);
} ISASuperIOFuncs;
-typedef struct ISASuperIOClass {
+struct ISASuperIOClass {
/*< private >*/
- ISADeviceClass parent_class;
+ DeviceClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
@@ -54,7 +52,7 @@ typedef struct ISASuperIOClass {
ISASuperIOFuncs serial;
ISASuperIOFuncs floppy;
ISASuperIOFuncs ide;
-} ISASuperIOClass;
+};
#define TYPE_FDC37M81X_SUPERIO "fdc37m81x-superio"
#define TYPE_SMC37C669_SUPERIO "smc37c669-superio"
diff --git a/include/hw/isa/vt82c686.h b/include/hw/isa/vt82c686.h
index c3c2b6e786..da1722daf2 100644
--- a/include/hw/isa/vt82c686.h
+++ b/include/hw/isa/vt82c686.h
@@ -1,13 +1,39 @@
#ifndef HW_VT82C686_H
#define HW_VT82C686_H
-#define TYPE_VT82C686B_SUPERIO "vt82c686b-superio"
-
-/* vt82c686.c */
-ISABus *vt82c686b_isa_init(PCIBus * bus, int devfn);
-void vt82c686b_ac97_init(PCIBus *bus, int devfn);
-void vt82c686b_mc97_init(PCIBus *bus, int devfn);
-I2CBus *vt82c686b_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
- qemu_irq sci_irq);
+#include "hw/pci/pci_device.h"
+#include "audio/audio.h"
+
+#define TYPE_VT82C686B_ISA "vt82c686b-isa"
+#define TYPE_VT82C686B_USB_UHCI "vt82c686b-usb-uhci"
+#define TYPE_VT8231_ISA "vt8231-isa"
+#define TYPE_VIA_AC97 "via-ac97"
+#define TYPE_VIA_IDE "via-ide"
+#define TYPE_VIA_MC97 "via-mc97"
+
+typedef struct {
+ uint8_t stat;
+ uint8_t type;
+ uint32_t base;
+ uint32_t curr;
+ uint32_t addr;
+ uint32_t clen;
+} ViaAC97SGDChannel;
+
+OBJECT_DECLARE_SIMPLE_TYPE(ViaAC97State, VIA_AC97);
+
+struct ViaAC97State {
+ PCIDevice dev;
+ QEMUSoundCard card;
+ MemoryRegion sgd;
+ MemoryRegion fm;
+ MemoryRegion midi;
+ SWVoiceOut *vo;
+ ViaAC97SGDChannel aur;
+ uint16_t codec_regs[128];
+ uint32_t ac97_cmd;
+};
+
+void via_isa_set_irq(PCIDevice *d, int n, int level);
#endif
diff --git a/include/hw/kvm/clock.h b/include/hw/kvm/clock.h
deleted file mode 100644
index 252ea13461..0000000000
--- a/include/hw/kvm/clock.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * QEMU KVM support, paravirtual clock device
- *
- * Copyright (C) 2011 Siemens AG
- *
- * Authors:
- * Jan Kiszka <jan.kiszka@siemens.com>
- *
- * This work is licensed under the terms of the GNU GPL version 2.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifdef CONFIG_KVM
-
-void kvmclock_create(void);
-
-#else /* CONFIG_KVM */
-
-static inline void kvmclock_create(void)
-{
-}
-
-#endif /* !CONFIG_KVM */
diff --git a/include/hw/lm32/lm32_pic.h b/include/hw/lm32/lm32_pic.h
deleted file mode 100644
index e6479b8f63..0000000000
--- a/include/hw/lm32/lm32_pic.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef QEMU_HW_LM32_PIC_H
-#define QEMU_HW_LM32_PIC_H
-
-#include "qemu-common.h"
-
-uint32_t lm32_pic_get_ip(DeviceState *d);
-uint32_t lm32_pic_get_im(DeviceState *d);
-void lm32_pic_set_ip(DeviceState *d, uint32_t ip);
-void lm32_pic_set_im(DeviceState *d, uint32_t im);
-
-#endif /* QEMU_HW_LM32_PIC_H */
diff --git a/include/hw/loader-fit.h b/include/hw/loader-fit.h
index 0284c3e02c..0832e379dc 100644
--- a/include/hw/loader-fit.h
+++ b/include/hw/loader-fit.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/hw/loader.h b/include/hw/loader.h
index de8a29603b..8685e27334 100644
--- a/include/hw/loader.h
+++ b/include/hw/loader.h
@@ -40,8 +40,8 @@ ssize_t load_image_size(const char *filename, void *addr, size_t size);
*
* Returns the size of the loaded image on success, -1 otherwise.
*/
-int load_image_targphys_as(const char *filename,
- hwaddr addr, uint64_t max_sz, AddressSpace *as);
+ssize_t load_image_targphys_as(const char *filename,
+ hwaddr addr, uint64_t max_sz, AddressSpace *as);
/**load_targphys_hex_as:
* @filename: Path to the .hex file
@@ -53,14 +53,15 @@ int load_image_targphys_as(const char *filename,
*
* Returns the size of the loaded .hex file on success, -1 otherwise.
*/
-int load_targphys_hex_as(const char *filename, hwaddr *entry, AddressSpace *as);
+ssize_t load_targphys_hex_as(const char *filename, hwaddr *entry,
+ AddressSpace *as);
/** load_image_targphys:
* Same as load_image_targphys_as(), but doesn't allow the caller to specify
* an AddressSpace.
*/
-int load_image_targphys(const char *filename, hwaddr,
- uint64_t max_sz);
+ssize_t load_image_targphys(const char *filename, hwaddr,
+ uint64_t max_sz);
/**
* load_image_mr: load an image into a memory region
@@ -73,7 +74,7 @@ int load_image_targphys(const char *filename, hwaddr,
* If the file is larger than the memory region's size the call will fail.
* Returns -1 on failure, or the size of the file.
*/
-int load_image_mr(const char *filename, MemoryRegion *mr);
+ssize_t load_image_mr(const char *filename, MemoryRegion *mr);
/* This is the limit on the maximum uncompressed image size that
* load_image_gzipped_buffer() and load_image_gzipped() will read. It prevents
@@ -81,23 +82,46 @@ int load_image_mr(const char *filename, MemoryRegion *mr);
*/
#define LOAD_IMAGE_MAX_GUNZIP_BYTES (256 << 20)
-int load_image_gzipped_buffer(const char *filename, uint64_t max_sz,
- uint8_t **buffer);
-int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz);
+ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz,
+ uint8_t **buffer);
+ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz);
+
+/**
+ * unpack_efi_zboot_image:
+ * @buffer: pointer to a variable holding the address of a buffer containing the
+ * image
+ * @size: pointer to a variable holding the size of the buffer
+ *
+ * Check whether the buffer contains an EFI zboot image, and if it does, extract
+ * the compressed payload and decompress it into a new buffer. If successful,
+ * the old buffer is freed, and the *buffer and size variables pointed to by the
+ * function arguments are updated to refer to the newly populated buffer.
+ *
+ * Returns 0 if the image could not be identified as an EFI zboot image.
+ * Returns -1 if the buffer contents were identified as an EFI zboot image, but
+ * unpacking failed for any reason.
+ * Returns the size of the decompressed payload if decompression was performed
+ * successfully.
+ */
+ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size);
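/*
 * Illustrative caller (a sketch, not code from the tree; buf, size and the
 * error handling are hypothetical): after reading an image into a heap
 * buffer, a loader can transparently unwrap an EFI zboot wrapper first.
 *
 *     ssize_t unpacked = unpack_efi_zboot_image(&buf, &size);
 *     if (unpacked < 0) {
 *         error_report("EFI zboot image found but unpacking failed");
 *         g_free(buf);
 *         return -1;
 *     }
 *     // unpacked == 0 means "not a zboot image"; buf and size are left
 *     // untouched and the caller proceeds with the original buffer.
 */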
#define ELF_LOAD_FAILED -1
#define ELF_LOAD_NOT_ELF -2
#define ELF_LOAD_WRONG_ARCH -3
#define ELF_LOAD_WRONG_ENDIAN -4
-const char *load_elf_strerror(int error);
+#define ELF_LOAD_TOO_BIG -5
+const char *load_elf_strerror(ssize_t error);
/** load_elf_ram_sym:
* @filename: Path of ELF file
+ * @elf_note_fn: optional function to parse ELF Note type
+ * passed via @translate_opaque
* @translate_fn: optional function to translate load addresses
* @translate_opaque: opaque data passed to @translate_fn
* @pentry: Populated with program entry point. Ignored if NULL.
* @lowaddr: Populated with lowest loaded address. Ignored if NULL.
* @highaddr: Populated with highest loaded address. Ignored if NULL.
+ * @pflags: Populated with ELF processor-specific flags. Ignored if NULL.
* @bigendian: Expected ELF endianness. 0 for LE otherwise BE
* @elf_machine: Expected ELF machine type
* @clear_lsb: Set to mask off LSB of addresses (Some architectures use
@@ -124,41 +148,48 @@ const char *load_elf_strerror(int error);
typedef void (*symbol_fn_t)(const char *st_name, int st_info,
uint64_t st_value, uint64_t st_size);
-int load_elf_ram_sym(const char *filename,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr, int big_endian,
- int elf_machine, int clear_lsb, int data_swab,
- AddressSpace *as, bool load_rom, symbol_fn_t sym_cb);
+ssize_t load_elf_ram_sym(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr,
+ uint32_t *pflags, int big_endian, int elf_machine,
+ int clear_lsb, int data_swab,
+ AddressSpace *as, bool load_rom, symbol_fn_t sym_cb);
/** load_elf_ram:
* Same as load_elf_ram_sym(), but doesn't allow the caller to specify a
* symbol callback function
*/
-int load_elf_ram(const char *filename,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, int big_endian, int elf_machine,
- int clear_lsb, int data_swab, AddressSpace *as,
- bool load_rom);
+ssize_t load_elf_ram(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags,
+ int big_endian, int elf_machine, int clear_lsb,
+ int data_swab, AddressSpace *as, bool load_rom);
/** load_elf_as:
* Same as load_elf_ram(), but always loads the elf as ROM
*/
-int load_elf_as(const char *filename,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, int big_endian, int elf_machine,
- int clear_lsb, int data_swab, AddressSpace *as);
+ssize_t load_elf_as(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
+ uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ int elf_machine, int clear_lsb, int data_swab,
+ AddressSpace *as);
/** load_elf:
* Same as load_elf_as(), but doesn't allow the caller to specify an
* AddressSpace.
*/
-int load_elf(const char *filename, uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, int big_endian, int elf_machine,
- int clear_lsb, int data_swab);
+ssize_t load_elf(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
+ uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ int elf_machine, int clear_lsb, int data_swab);
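A sketch of how an existing board caller might adapt to the widened ssize_t return type and the two new optional parameters (elf_note_fn and pflags); kernel_filename, EM_MIPS and the endianness flag stand in for whatever the board actually uses:

    uint64_t entry, lowaddr, highaddr;
    ssize_t kernel_size;

    kernel_size = load_elf(kernel_filename,
                           NULL,                /* no ELF note parsing */
                           NULL, NULL,          /* no address translation */
                           &entry, &lowaddr, &highaddr,
                           NULL,                /* e_flags not needed */
                           1 /* big endian */, EM_MIPS,
                           0 /* clear_lsb */, 0 /* data_swab */);
    if (kernel_size < 0) {
        error_report("could not load '%s': %s", kernel_filename,
                     load_elf_strerror(kernel_size));
        exit(1);
    }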
/** load_elf_hdr:
* @filename: Path of ELF file
@@ -172,8 +203,8 @@ int load_elf(const char *filename, uint64_t (*translate_fn)(void *, uint64_t),
*/
void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp);
-int load_aout(const char *filename, hwaddr addr, int max_sz,
- int bswap_needed, hwaddr target_page_size);
+ssize_t load_aout(const char *filename, hwaddr addr, int max_sz,
+ int bswap_needed, hwaddr target_page_size);
#define LOAD_UIMAGE_LOADADDR_INVALID (-1)
@@ -194,19 +225,19 @@ int load_aout(const char *filename, hwaddr addr, int max_sz,
*
* Returns the size of the loaded image on success, -1 otherwise.
*/
-int load_uimage_as(const char *filename, hwaddr *ep,
- hwaddr *loadaddr, int *is_linux,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, AddressSpace *as);
+ssize_t load_uimage_as(const char *filename, hwaddr *ep,
+ hwaddr *loadaddr, int *is_linux,
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, AddressSpace *as);
/** load_uimage:
* Same as load_uimage_as(), but doesn't allow the caller to specify an
* AddressSpace.
*/
-int load_uimage(const char *filename, hwaddr *ep,
- hwaddr *loadaddr, int *is_linux,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque);
+ssize_t load_uimage(const char *filename, hwaddr *ep,
+ hwaddr *loadaddr, int *is_linux,
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque);
/**
* load_ramdisk_as:
@@ -221,15 +252,15 @@ int load_uimage(const char *filename, hwaddr *ep,
*
* Returns the size of the loaded image on success, -1 otherwise.
*/
-int load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz,
- AddressSpace *as);
+ssize_t load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz,
+ AddressSpace *as);
/**
* load_ramdisk:
* Same as load_ramdisk_as(), but doesn't allow the caller to specify
* an AddressSpace.
*/
-int load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz);
+ssize_t load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz);
ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen);
@@ -239,20 +270,18 @@ void pstrcpy_targphys(const char *name,
hwaddr dest, int buf_size,
const char *source);
-extern bool option_rom_has_mr;
-extern bool rom_file_has_mr;
-
-int rom_add_file(const char *file, const char *fw_dir,
- hwaddr addr, int32_t bootindex,
- bool option_rom, MemoryRegion *mr, AddressSpace *as);
+ssize_t rom_add_file(const char *file, const char *fw_dir,
+ hwaddr addr, int32_t bootindex,
+ bool has_option_rom, MemoryRegion *mr, AddressSpace *as);
MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
size_t max_len, hwaddr addr,
const char *fw_file_name,
FWCfgCallback fw_callback,
void *callback_opaque, AddressSpace *as,
bool read_only);
-int rom_add_elf_program(const char *name, void *data, size_t datasize,
- size_t romsize, hwaddr addr, AddressSpace *as);
+int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data,
+ size_t datasize, size_t romsize, hwaddr addr,
+ AddressSpace *as);
int rom_check_and_register_reset(void);
void rom_set_fw(FWCfgState *f);
void rom_set_order_override(int order);
@@ -278,6 +307,37 @@ void rom_transaction_end(bool commit);
int rom_copy(uint8_t *dest, hwaddr addr, size_t size);
void *rom_ptr(hwaddr addr, size_t size);
+/**
+ * rom_ptr_for_as: Return a pointer to ROM blob data for the address
+ * @as: AddressSpace to look for the ROM blob in
+ * @addr: Address within @as
+ * @size: size of data required in bytes
+ *
+ * Returns: pointer into the data which backs the matching ROM blob,
+ * or NULL if no blob covers the address range.
+ *
+ * This function looks for a ROM blob which covers the specified range
+ * of bytes of length @size starting at @addr within the address space
+ * @as. This is useful for code which runs as part of board
+ * initialization or CPU reset which wants to read data that is part
+ * of a user-supplied guest image or other guest memory contents, but
+ * which runs before the ROM loader's reset function has copied the
+ * blobs into guest memory.
+ *
+ * rom_ptr_for_as() will look not just for blobs loaded directly to
+ * the specified address, but also for blobs which were loaded to an
+ * alias of the region at a different location in the AddressSpace.
+ * In other words, if a machine model has RAM at address 0x0000_0000
+ * which is aliased to also appear at 0x1000_0000, rom_ptr_for_as()
+ * will return the correct data whether the guest image was linked and
+ * loaded at 0x0000_0000 or 0x1000_0000. Contrast rom_ptr(), which
+ * will only return data if the image load address is an exact match
+ * with the queried address.
+ *
+ * New code should prefer to use rom_ptr_for_as() instead of
+ * rom_ptr().
+ */
+void *rom_ptr_for_as(AddressSpace *as, hwaddr addr, size_t size);
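A hedged usage sketch of the scenario described above (a hypothetical reset hook; 'as', 'vector_table_addr', 'initial_sp' and 'initial_pc' stand in for board-specific state, and ldl_p() reads a 32-bit value in target endianness):

    /* Before the ROM loader's reset has copied the blobs into guest memory: */
    uint32_t *vecs = rom_ptr_for_as(as, vector_table_addr, 8);

    if (vecs) {
        initial_sp = ldl_p(&vecs[0]);
        initial_pc = ldl_p(&vecs[1]);
    } else {
        /* no ROM blob covers that range; fall back to reading guest RAM
         * once it has been populated */
    }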
void hmp_info_roms(Monitor *mon, const QDict *qdict);
#define rom_add_file_fixed(_f, _a, _i) \
@@ -293,17 +353,25 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict);
#define rom_add_blob_fixed_as(_f, _b, _l, _a, _as) \
rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as, true)
-#define PC_ROM_MIN_VGA 0xc0000
-#define PC_ROM_MIN_OPTION 0xc8000
-#define PC_ROM_MAX 0xe0000
-#define PC_ROM_ALIGN 0x800
-#define PC_ROM_SIZE (PC_ROM_MAX - PC_ROM_MIN_VGA)
-
-int rom_add_vga(const char *file);
-int rom_add_option(const char *file, int32_t bootindex);
+ssize_t rom_add_vga(const char *file);
+ssize_t rom_add_option(const char *file, int32_t bootindex);
/* This is the usual maximum in uboot, so if a uImage overflows this, it would
* overflow on real hardware too. */
#define UBOOT_MAX_GUNZIP_BYTES (64 << 20)
+typedef struct RomGap {
+ hwaddr base;
+ size_t size;
+} RomGap;
+
+/**
+ * rom_find_largest_gap_between: return largest gap between ROMs in given range
+ *
+ * Given a range of addresses, this function finds the largest
+ * contiguous subrange which has no ROMs loaded to it. That is,
+ * it finds the biggest gap which is free for use for other things.
+ */
+RomGap rom_find_largest_gap_between(hwaddr base, size_t size);
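A hedged sketch of the intended use (ram_base, ram_size and dtb_size are placeholders for board-specific values):

    RomGap gap = rom_find_largest_gap_between(ram_base, ram_size);

    if (gap.size < dtb_size) {
        error_report("RAM is too crowded to place the device tree blob");
        exit(1);
    }
    /* load the DTB at gap.base, clear of every ROM blob registered so far */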
+
#endif
diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h
new file mode 100644
index 0000000000..252f7df7f4
--- /dev/null
+++ b/include/hw/loongarch/virt.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for LoongArch board emulation.
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LOONGARCH_H
+#define HW_LOONGARCH_H
+
+#include "target/loongarch/cpu.h"
+#include "hw/boards.h"
+#include "qemu/queue.h"
+#include "hw/intc/loongarch_ipi.h"
+#include "hw/block/flash.h"
+
+#define LOONGARCH_MAX_CPUS 256
+
+#define VIRT_FWCFG_BASE 0x1e020000UL
+#define VIRT_BIOS_BASE 0x1c000000UL
+#define VIRT_BIOS_SIZE (16 * MiB)
+#define VIRT_FLASH_SECTOR_SIZE (128 * KiB)
+#define VIRT_FLASH0_BASE VIRT_BIOS_BASE
+#define VIRT_FLASH0_SIZE VIRT_BIOS_SIZE
+#define VIRT_FLASH1_BASE 0x1d000000UL
+#define VIRT_FLASH1_SIZE (16 * MiB)
+
+#define VIRT_LOWMEM_BASE 0
+#define VIRT_LOWMEM_SIZE 0x10000000
+#define VIRT_HIGHMEM_BASE 0x80000000
+#define VIRT_GED_EVT_ADDR 0x100e0000
+#define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN)
+#define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN)
+
+struct LoongArchMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ MemoryRegion lowmem;
+ MemoryRegion highmem;
+ MemoryRegion bios;
+ bool bios_loaded;
+ /* State for other subsystems/APIs: */
+ FWCfgState *fw_cfg;
+ Notifier machine_done;
+ Notifier powerdown_notifier;
+ OnOffAuto acpi;
+ char *oem_id;
+ char *oem_table_id;
+ DeviceState *acpi_ged;
+ int fdt_size;
+ DeviceState *platform_bus_dev;
+ PCIBus *pci_bus;
+ PFlashCFI01 *flash[2];
+ MemoryRegion system_iocsr;
+ MemoryRegion iocsr_mem;
+ AddressSpace as_iocsr;
+};
+
+#define TYPE_LOONGARCH_MACHINE MACHINE_TYPE_NAME("virt")
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchMachineState, LOONGARCH_MACHINE)
+bool loongarch_is_acpi_enabled(LoongArchMachineState *lams);
+void loongarch_acpi_setup(LoongArchMachineState *lams);
+#endif
diff --git a/include/hw/m68k/mcf.h b/include/hw/m68k/mcf.h
index 0db49c5e60..5d9f876ffe 100644
--- a/include/hw/m68k/mcf.h
+++ b/include/hw/m68k/mcf.h
@@ -2,6 +2,7 @@
#define HW_MCF_H
/* Motorola ColdFire device prototypes. */
+#include "exec/hwaddr.h"
#include "target/m68k/cpu-qom.h"
/* mcf_uart.c */
@@ -9,8 +10,8 @@ uint64_t mcf_uart_read(void *opaque, hwaddr addr,
unsigned size);
void mcf_uart_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size);
-void *mcf_uart_init(qemu_irq irq, Chardev *chr);
-void mcf_uart_mm_init(hwaddr base, qemu_irq irq, Chardev *chr);
+DeviceState *mcf_uart_create(qemu_irq irq, Chardev *chr);
+DeviceState *mcf_uart_create_mmap(hwaddr base, qemu_irq irq, Chardev *chr);
/* mcf_intc.c */
qemu_irq *mcf_intc_init(struct MemoryRegion *sysmem,
@@ -18,7 +19,6 @@ qemu_irq *mcf_intc_init(struct MemoryRegion *sysmem,
M68kCPU *cpu);
/* mcf5206.c */
-qemu_irq *mcf5206_init(struct MemoryRegion *sysmem,
- uint32_t base, M68kCPU *cpu);
+#define TYPE_MCF5206_MBAR "mcf5206-mbar"
#endif
diff --git a/include/hw/m68k/mcf_fec.h b/include/hw/m68k/mcf_fec.h
index 7f029f7b59..80d4f651ba 100644
--- a/include/hw/m68k/mcf_fec.h
+++ b/include/hw/m68k/mcf_fec.h
@@ -7,7 +7,13 @@
* (at your option) any later version.
*/
+#ifndef HW_M68K_MCF_FEC_H
+#define HW_M68K_MCF_FEC_H
+#include "qom/object.h"
+
#define TYPE_MCF_FEC_NET "mcf-fec"
-#define MCF_FEC_NET(obj) OBJECT_CHECK(mcf_fec_state, (obj), TYPE_MCF_FEC_NET)
+OBJECT_DECLARE_SIMPLE_TYPE(mcf_fec_state, MCF_FEC_NET)
#define FEC_NUM_IRQ 13
+
+#endif
diff --git a/include/hw/m68k/next-cube.h b/include/hw/m68k/next-cube.h
new file mode 100644
index 0000000000..43577282d1
--- /dev/null
+++ b/include/hw/m68k/next-cube.h
@@ -0,0 +1,56 @@
+/*
+ * NeXT Cube
+ *
+ * Copyright (c) 2011 Bryce Lanham
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#ifndef NEXT_CUBE_H
+#define NEXT_CUBE_H
+
+#define TYPE_NEXTFB "next-fb"
+
+#define TYPE_NEXTKBD "next-kbd"
+
+enum next_dma_chan {
+ NEXTDMA_FD,
+ NEXTDMA_ENRX,
+ NEXTDMA_ENTX,
+ NEXTDMA_SCSI,
+ NEXTDMA_SCC,
+ NEXTDMA_SND
+};
+
+#define DMA_ENABLE 0x01000000
+#define DMA_SUPDATE 0x02000000
+#define DMA_COMPLETE 0x08000000
+
+#define DMA_M2DEV 0x0
+#define DMA_SETENABLE 0x00010000
+#define DMA_SETSUPDATE 0x00020000
+#define DMA_DEV2M 0x00040000
+#define DMA_CLRCOMPLETE 0x00080000
+#define DMA_RESET 0x00100000
+
+enum next_irqs {
+ NEXT_FD_I,
+ NEXT_KBD_I,
+ NEXT_PWR_I,
+ NEXT_ENRX_I,
+ NEXT_ENTX_I,
+ NEXT_SCSI_I,
+ NEXT_CLK_I,
+ NEXT_SCC_I,
+ NEXT_ENTX_DMA_I,
+ NEXT_ENRX_DMA_I,
+ NEXT_SCSI_DMA_I,
+ NEXT_SCC_DMA_I,
+ NEXT_SND_I,
+ NEXT_NUM_IRQS
+};
+
+#endif /* NEXT_CUBE_H */
diff --git a/include/hw/m68k/q800-glue.h b/include/hw/m68k/q800-glue.h
new file mode 100644
index 0000000000..04fac25f6c
--- /dev/null
+++ b/include/hw/m68k/q800-glue.h
@@ -0,0 +1,51 @@
+/*
+ * QEMU q800 logic GLUE (General Logic Unit)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_Q800_GLUE_H
+#define HW_Q800_GLUE_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_GLUE "q800-glue"
+OBJECT_DECLARE_SIMPLE_TYPE(GLUEState, GLUE)
+
+struct GLUEState {
+ SysBusDevice parent_obj;
+
+ M68kCPU *cpu;
+ uint8_t ipr;
+ uint8_t auxmode;
+ qemu_irq irqs[2];
+ QEMUTimer *nmi_release;
+};
+
+#define GLUE_IRQ_IN_VIA1 0
+#define GLUE_IRQ_IN_VIA2 1
+#define GLUE_IRQ_IN_SONIC 2
+#define GLUE_IRQ_IN_ESCC 3
+#define GLUE_IRQ_IN_NMI 4
+#define GLUE_IRQ_IN_ASC 5
+
+#define GLUE_IRQ_NUBUS_9 0
+#define GLUE_IRQ_ASC 1
+
+#endif
diff --git a/include/hw/m68k/q800.h b/include/hw/m68k/q800.h
new file mode 100644
index 0000000000..34365c9860
--- /dev/null
+++ b/include/hw/m68k/q800.h
@@ -0,0 +1,78 @@
+/*
+ * QEMU Motorola 680x0 Macintosh hardware system emulator
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_Q800_H
+#define HW_Q800_H
+
+#include "hw/boards.h"
+#include "qom/object.h"
+#include "target/m68k/cpu-qom.h"
+#include "exec/memory.h"
+#include "hw/m68k/q800-glue.h"
+#include "hw/misc/mac_via.h"
+#include "hw/net/dp8393x.h"
+#include "hw/char/escc.h"
+#include "hw/or-irq.h"
+#include "hw/scsi/esp.h"
+#include "hw/block/swim.h"
+#include "hw/nubus/mac-nubus-bridge.h"
+#include "hw/display/macfb.h"
+#include "hw/misc/djmemc.h"
+#include "hw/misc/iosb.h"
+#include "hw/audio/asc.h"
+
+/*
+ * The main Q800 machine
+ */
+
+struct Q800MachineState {
+ MachineState parent_obj;
+
+ bool easc;
+ M68kCPU cpu;
+ MemoryRegion rom;
+ MemoryRegion rom_alias;
+ GLUEState glue;
+ MOS6522Q800VIA1State via1;
+ MOS6522Q800VIA2State via2;
+ dp8393xState dp8393x;
+ MemoryRegion dp8393x_prom;
+ ESCCState escc;
+ OrIRQState escc_orgate;
+ SysBusESPState esp;
+ Swim swim;
+ MacNubusBridge mac_nubus_bridge;
+ MacfbNubusState macfb;
+ DJMEMCState djmemc;
+ IOSBState iosb;
+ ASCState asc;
+ MemoryRegion ramio;
+ MemoryRegion macio;
+ MemoryRegion macio_alias;
+ MemoryRegion machine_id;
+ MemoryRegion escc_alias;
+};
+
+#define TYPE_Q800_MACHINE MACHINE_TYPE_NAME("q800")
+OBJECT_DECLARE_SIMPLE_TYPE(Q800MachineState, Q800_MACHINE)
+
+#endif
diff --git a/include/hw/mem/memory-device.h b/include/hw/mem/memory-device.h
index 0293a96abb..e0571c8a31 100644
--- a/include/hw/mem/memory-device.h
+++ b/include/hw/mem/memory-device.h
@@ -13,15 +13,15 @@
#ifndef MEMORY_DEVICE_H
#define MEMORY_DEVICE_H
+#include "hw/qdev-core.h"
+#include "qapi/qapi-types-machine.h"
#include "qom/object.h"
-#include "hw/qdev.h"
#define TYPE_MEMORY_DEVICE "memory-device"
-#define MEMORY_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(MemoryDeviceClass, (klass), TYPE_MEMORY_DEVICE)
-#define MEMORY_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MemoryDeviceClass, (obj), TYPE_MEMORY_DEVICE)
+typedef struct MemoryDeviceClass MemoryDeviceClass;
+DECLARE_CLASS_CHECKERS(MemoryDeviceClass, MEMORY_DEVICE,
+ TYPE_MEMORY_DEVICE)
#define MEMORY_DEVICE(obj) \
INTERFACE_CHECK(MemoryDeviceState, (obj), TYPE_MEMORY_DEVICE)
@@ -37,12 +37,27 @@ typedef struct MemoryDeviceState MemoryDeviceState;
* address in guest physical memory can either be specified explicitly
* or get assigned automatically.
*
+ * Some memory devices might not own a memory region in certain device
+ * configurations. Such devices can logically get (un)plugged; however,
+ * empty memory devices are mostly ignored by the memory device code.
+ *
* Conceptually, memory devices only span one memory region. If multiple
* successive memory regions are used, a covering memory region has to
* be provided. Scattered memory regions are not supported for single
* devices.
+ *
+ * The device memory region returned via @get_memory_region may either be a
+ * single RAM memory region or a memory region container with subregions
+ * that are RAM memory regions or aliases to RAM memory regions. Other
+ * memory regions or subregions are not supported.
+ *
+ * If the device memory region returned via @get_memory_region is a
+ * memory region container, subregions may be dynamically (un)mapped as
+ * long as the number of memslots returned by @get_memslots() is not
+ * exceeded and as long as all memory regions are of the same kind (e.g.,
+ * all RAM or all ROM).
*/
-typedef struct MemoryDeviceClass {
+struct MemoryDeviceClass {
/* private */
InterfaceClass parent_class;
@@ -79,7 +94,8 @@ typedef struct MemoryDeviceClass {
uint64_t (*get_plugged_size)(const MemoryDeviceState *md, Error **errp);
/*
- * Return the memory region of the memory device.
+ * Return the memory region of the memory device. If the device is
+ * completely empty, returns NULL without an error.
*
* Called when (un)plugging the memory device, to (un)map the
* memory region in guest physical memory, but also to detect the
@@ -89,14 +105,69 @@ typedef struct MemoryDeviceClass {
MemoryRegion *(*get_memory_region)(MemoryDeviceState *md, Error **errp);
/*
+ * Optional: Instruct the memory device to decide how many memory slots
+ * it requires, not exceeding the given limit.
+ *
+ * Called exactly once when pre-plugging the memory device, before
+ * querying the number of memslots using @get_memslots the first time.
+ */
+ void (*decide_memslots)(MemoryDeviceState *md, unsigned int limit);
+
+ /*
+ * Optional for memory devices that require only a single memslot,
+ * required for all other memory devices: Return the number of memslots
+ * (distinct RAM memory regions in the device memory region) that are
+ * required by the device.
+ *
+ * If this function is not implemented, the assumption is "1".
+ *
+ * Called when (un)plugging the memory device, to check if the requirements
+ * can be satisfied, and to do proper accounting.
+ */
+ unsigned int (*get_memslots)(MemoryDeviceState *md);
+
+ /*
+ * Optional: Return the desired minimum alignment of the device in guest
+ * physical address space. The final alignment is computed based on this
+ * alignment and the alignment requirements of the memory region.
+ *
+ * Called when plugging the memory device to detect the required alignment
+ * during address assignment.
+ */
+ uint64_t (*get_min_alignment)(const MemoryDeviceState *md);
+
+ /*
* Translate the memory device into #MemoryDeviceInfo.
*/
void (*fill_device_info)(const MemoryDeviceState *md,
MemoryDeviceInfo *info);
-} MemoryDeviceClass;
+};
+
+/*
+ * Traditionally, KVM/vhost in many setups supported 509 memslots, whereby
+ * 253 memslots were "reserved" for boot memory and other devices (such
+ * as PCI BARs, which can get mapped dynamically) and 256 memslots were
+ * dedicated for DIMMs. These magic numbers worked reliably in the past.
+ *
+ * Further, using many memslots can negatively affect performance, so setting
+ * the soft-limit of memslots used by memory devices to the traditional
+ * DIMM limit of 256 sounds reasonable.
+ *
+ * If we have fewer than 509 memslots, we will instruct memory devices that
+ * support automatically deciding how many memslots to use to only use a single
+ * one.
+ *
+ * Hotplugging vhost devices with at least 509 memslots is not expected to
+ * cause problems, not even when devices automatically decide how many
+ * memslots to use.
+ */
+#define MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT 256
+#define MEMORY_DEVICES_SAFE_MAX_MEMSLOTS 509
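To illustrate the decide_memslots()/get_memslots() contract documented in the class comments above, a hypothetical multi-region memory device might wire up the hooks roughly as follows (MyMemDevice, MY_MEM_DEVICE and its fields are invented for this sketch):

    static void my_md_decide_memslots(MemoryDeviceState *md, unsigned int limit)
    {
        MyMemDevice *dev = MY_MEM_DEVICE(md);

        /* called exactly once at pre-plug time, before get_memslots() */
        dev->nr_memslots = MIN(dev->nr_backing_regions, limit);
    }

    static unsigned int my_md_get_memslots(MemoryDeviceState *md)
    {
        /* number of distinct RAM regions this device will map */
        return MY_MEM_DEVICE(md)->nr_memslots;
    }

Single-memslot devices can leave both hooks unset and rely on the documented default of one.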
MemoryDeviceInfoList *qmp_memory_device_list(void);
uint64_t get_plugged_memory_size(void);
+unsigned int memory_devices_get_reserved_memslots(void);
+bool memory_devices_memslot_auto_decision_active(void);
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
const uint64_t *legacy_align, Error **errp);
void memory_device_plug(MemoryDeviceState *md, MachineState *ms);
diff --git a/include/hw/mem/npcm7xx_mc.h b/include/hw/mem/npcm7xx_mc.h
new file mode 100644
index 0000000000..7ed38be243
--- /dev/null
+++ b/include/hw/mem/npcm7xx_mc.h
@@ -0,0 +1,36 @@
+/*
+ * Nuvoton NPCM7xx Memory Controller stub
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_MC_H
+#define NPCM7XX_MC_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/**
+ * struct NPCM7xxMCState - Device state for the memory controller.
+ * @parent: System bus device.
+ * @mmio: Memory region through which registers are accessed.
+ */
+typedef struct NPCM7xxMCState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+} NPCM7xxMCState;
+
+#define TYPE_NPCM7XX_MC "npcm7xx-mc"
+#define NPCM7XX_MC(obj) OBJECT_CHECK(NPCM7xxMCState, (obj), TYPE_NPCM7XX_MC)
+
+#endif /* NPCM7XX_MC_H */
diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
index c5c9b3c7f8..d3b763453a 100644
--- a/include/hw/mem/nvdimm.h
+++ b/include/hw/mem/nvdimm.h
@@ -25,14 +25,9 @@
#include "hw/mem/pc-dimm.h"
#include "hw/acpi/bios-linker-loader.h"
-
-#define NVDIMM_DEBUG 0
-#define nvdimm_debug(fmt, ...) \
- do { \
- if (NVDIMM_DEBUG) { \
- fprintf(stderr, "nvdimm: " fmt, ## __VA_ARGS__); \
- } \
- } while (0)
+#include "qemu/uuid.h"
+#include "hw/acpi/aml-build.h"
+#include "qom/object.h"
/*
* The minimum label data size is required by NVDIMM Namespace
@@ -43,12 +38,10 @@
#define MIN_NAMESPACE_LABEL_SIZE (128UL << 10)
#define TYPE_NVDIMM "nvdimm"
-#define NVDIMM(obj) OBJECT_CHECK(NVDIMMDevice, (obj), TYPE_NVDIMM)
-#define NVDIMM_CLASS(oc) OBJECT_CLASS_CHECK(NVDIMMClass, (oc), TYPE_NVDIMM)
-#define NVDIMM_GET_CLASS(obj) OBJECT_GET_CLASS(NVDIMMClass, (obj), \
- TYPE_NVDIMM)
+OBJECT_DECLARE_TYPE(NVDIMMDevice, NVDIMMClass, NVDIMM)
#define NVDIMM_LABEL_SIZE_PROP "label-size"
+#define NVDIMM_UUID_PROP "uuid"
#define NVDIMM_UNARMED_PROP "unarmed"
struct NVDIMMDevice {
@@ -83,8 +76,18 @@ struct NVDIMMDevice {
* the guest write persistence.
*/
bool unarmed;
+
+ /*
+ * Whether our DIMM is backed by ROM, and even label data cannot be
+ * written. If set, implies that "unarmed" is also set.
+ */
+ bool readonly;
+
+ /*
+ * The PPC64 spapr machine requires each nvdimm device to have a uuid.
+ */
+ QemuUUID uuid;
};
-typedef struct NVDIMMDevice NVDIMMDevice;
struct NVDIMMClass {
/* private */
@@ -98,8 +101,9 @@ struct NVDIMMClass {
/* write @size bytes from @buf to NVDIMM label data at @offset. */
void (*write_label_data)(NVDIMMDevice *nvdimm, const void *buf,
uint64_t size, uint64_t offset);
+ void (*realize)(NVDIMMDevice *nvdimm, Error **errp);
+ void (*unrealize)(NVDIMMDevice *nvdimm);
};
-typedef struct NVDIMMClass NVDIMMClass;
#define NVDIMM_DSM_MEM_FILE "etc/acpi/nvdimm-mem"
@@ -123,7 +127,7 @@ struct NvdimmFitBuffer {
};
typedef struct NvdimmFitBuffer NvdimmFitBuffer;
-struct AcpiNVDIMMState {
+struct NVDIMMState {
/* detect if NVDIMM support is enabled. */
bool is_enabled;
@@ -140,14 +144,18 @@ struct AcpiNVDIMMState {
*/
int32_t persistence;
char *persistence_string;
+ struct AcpiGenericAddress dsm_io;
};
-typedef struct AcpiNVDIMMState AcpiNVDIMMState;
+typedef struct NVDIMMState NVDIMMState;
-void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
+void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
+ struct AcpiGenericAddress dsm_io,
FWCfgState *fw_cfg, Object *owner);
+void nvdimm_build_srat(GArray *table_data);
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
- BIOSLinker *linker, AcpiNVDIMMState *state,
- uint32_t ram_slots);
-void nvdimm_plug(AcpiNVDIMMState *state);
+ BIOSLinker *linker, NVDIMMState *state,
+ uint32_t ram_slots, const char *oem_id,
+ const char *oem_table_id);
+void nvdimm_plug(NVDIMMState *state);
void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev);
#endif
diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h
index 01436b9f50..322bebe555 100644
--- a/include/hw/mem/pc-dimm.h
+++ b/include/hw/mem/pc-dimm.h
@@ -17,17 +17,12 @@
#define QEMU_PC_DIMM_H
#include "exec/memory.h"
-#include "sysemu/hostmem.h"
-#include "hw/qdev.h"
-#include "hw/boards.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
#define TYPE_PC_DIMM "pc-dimm"
-#define PC_DIMM(obj) \
- OBJECT_CHECK(PCDIMMDevice, (obj), TYPE_PC_DIMM)
-#define PC_DIMM_CLASS(oc) \
- OBJECT_CLASS_CHECK(PCDIMMDeviceClass, (oc), TYPE_PC_DIMM)
-#define PC_DIMM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCDIMMDeviceClass, (obj), TYPE_PC_DIMM)
+OBJECT_DECLARE_TYPE(PCDIMMDevice, PCDIMMDeviceClass,
+ PC_DIMM)
#define PC_DIMM_ADDR_PROP "addr"
#define PC_DIMM_SLOT_PROP "slot"
@@ -46,7 +41,7 @@
* Default value: -1, means that slot is auto-allocated.
* @hostmem: host memory backend providing memory for @PCDIMMDevice
*/
-typedef struct PCDIMMDevice {
+struct PCDIMMDevice {
/* private */
DeviceState parent_obj;
@@ -55,28 +50,24 @@ typedef struct PCDIMMDevice {
uint32_t node;
int32_t slot;
HostMemoryBackend *hostmem;
-} PCDIMMDevice;
+};
/**
* PCDIMMDeviceClass:
* @realize: called after common dimm is realized so that the dimm based
* devices get the chance to do specified operations.
- * @get_vmstate_memory_region: returns #MemoryRegion which indicates the
- * memory of @dimm should be kept during live migration. Will not fail
- * after the device was realized.
*/
-typedef struct PCDIMMDeviceClass {
+struct PCDIMMDeviceClass {
/* private */
DeviceClass parent_class;
/* public */
void (*realize)(PCDIMMDevice *dimm, Error **errp);
- MemoryRegion *(*get_vmstate_memory_region)(PCDIMMDevice *dimm,
- Error **errp);
-} PCDIMMDeviceClass;
+ void (*unrealize)(PCDIMMDevice *dimm);
+};
void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,
const uint64_t *legacy_align, Error **errp);
-void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine, Error **errp);
+void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine);
void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine);
#endif
diff --git a/include/hw/mem/sparse-mem.h b/include/hw/mem/sparse-mem.h
new file mode 100644
index 0000000000..f9863b154b
--- /dev/null
+++ b/include/hw/mem/sparse-mem.h
@@ -0,0 +1,19 @@
+/*
+ * A sparse memory device. Useful for fuzzing.
+ *
+ * Copyright Red Hat Inc., 2021
+ *
+ * Authors:
+ * Alexander Bulekov <alxndr@bu.edu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SPARSE_MEM_H
+#define SPARSE_MEM_H
+#define TYPE_SPARSE_MEM "sparse-mem"
+
+MemoryRegion *sparse_mem_init(uint64_t addr, uint64_t length);
+
+#endif
diff --git a/include/hw/mips/bios.h b/include/hw/mips/bios.h
deleted file mode 100644
index d67ef33e83..0000000000
--- a/include/hw/mips/bios.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#include "qemu/units.h"
-#include "cpu.h"
-
-#define BIOS_SIZE (4 * MiB)
-#ifdef TARGET_WORDS_BIGENDIAN
-#define BIOS_FILENAME "mips_bios.bin"
-#else
-#define BIOS_FILENAME "mipsel_bios.bin"
-#endif
diff --git a/include/hw/mips/bootloader.h b/include/hw/mips/bootloader.h
new file mode 100644
index 0000000000..c32f6c2835
--- /dev/null
+++ b/include/hw/mips/bootloader.h
@@ -0,0 +1,26 @@
+/*
+ * Utility for QEMU MIPS to generate its simple bootloader
+ *
+ * Copyright (C) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MIPS_BOOTLOADER_H
+#define HW_MIPS_BOOTLOADER_H
+
+#include "exec/cpu-defs.h"
+
+void bl_gen_jump_to(void **ptr, target_ulong jump_addr);
+void bl_gen_jump_kernel(void **ptr,
+ bool set_sp, target_ulong sp,
+ bool set_a0, target_ulong a0,
+ bool set_a1, target_ulong a1,
+ bool set_a2, target_ulong a2,
+ bool set_a3, target_ulong a3,
+ target_ulong kernel_addr);
+void bl_gen_write_ulong(void **ptr, target_ulong addr, target_ulong val);
+void bl_gen_write_u32(void **ptr, target_ulong addr, uint32_t val);
+void bl_gen_write_u64(void **ptr, target_ulong addr, uint64_t val);
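A hedged sketch of how a MIPS board might drive these generators (bootloader_host_ptr, sp_addr and kernel_entry are placeholders; the void ** argument suggests the write pointer is advanced past each emitted instruction, which is treated as an assumption here):

    void *p = bootloader_host_ptr;   /* host pointer into the boot ROM area */

    bl_gen_jump_kernel(&p,
                       true,  sp_addr,    /* set $sp */
                       true,  0,          /* $a0 */
                       false, 0,          /* leave $a1 untouched */
                       false, 0,          /* leave $a2 untouched */
                       false, 0,          /* leave $a3 untouched */
                       kernel_entry);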
+
+#endif
diff --git a/include/hw/mips/cps.h b/include/hw/mips/cps.h
index aab1af926d..04d636246a 100644
--- a/include/hw/mips/cps.h
+++ b/include/hw/mips/cps.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,15 +21,18 @@
#define MIPS_CPS_H
#include "hw/sysbus.h"
+#include "hw/clock.h"
#include "hw/misc/mips_cmgcr.h"
#include "hw/intc/mips_gic.h"
#include "hw/misc/mips_cpc.h"
#include "hw/misc/mips_itu.h"
+#include "target/mips/cpu.h"
+#include "qom/object.h"
#define TYPE_MIPS_CPS "mips-cps"
-#define MIPS_CPS(obj) OBJECT_CHECK(MIPSCPSState, (obj), TYPE_MIPS_CPS)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSCPSState, MIPS_CPS)
-typedef struct MIPSCPSState {
+struct MIPSCPSState {
SysBusDevice parent_obj;
uint32_t num_vp;
@@ -41,7 +44,8 @@ typedef struct MIPSCPSState {
MIPSGICState gic;
MIPSCPCState cpc;
MIPSITUState itu;
-} MIPSCPSState;
+ Clock *clock;
+};
qemu_irq get_cps_irq(MIPSCPSState *cps, int pin_number);
diff --git a/include/hw/mips/cpudevs.h b/include/hw/mips/cpudevs.h
deleted file mode 100644
index 291f59281a..0000000000
--- a/include/hw/mips/cpudevs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef HW_MIPS_CPUDEVS_H
-#define HW_MIPS_CPUDEVS_H
-
-#include "target/mips/cpu-qom.h"
-
-/* Definitions for MIPS CPU internal devices. */
-
-/* addr.c */
-uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr);
-uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr);
-uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr);
-bool mips_um_ksegs_enabled(void);
-void mips_um_ksegs_enable(void);
-
-/* mips_int.c */
-void cpu_mips_irq_init_cpu(MIPSCPU *cpu);
-
-/* mips_timer.c */
-void cpu_mips_clock_init(MIPSCPU *cpu);
-
-#endif
diff --git a/include/hw/mips/mips.h b/include/hw/mips/mips.h
index 2f6774d540..101799f7d3 100644
--- a/include/hw/mips/mips.h
+++ b/include/hw/mips/mips.h
@@ -2,14 +2,12 @@
#define HW_MIPS_H
/* Definitions for mips board emulation. */
+#include "qemu/units.h"
+
/* Kernels can be configured with 64KB pages */
-#define INITRD_PAGE_MASK (~((1 << 16) - 1))
+#define INITRD_PAGE_SIZE (64 * KiB)
#include "exec/memory.h"
-#include "hw/irq.h"
-
-/* gt64xxx.c */
-PCIBus *gt64120_register(qemu_irq *pic);
/* bonito.c */
PCIBus *bonito_init(qemu_irq *pic);
diff --git a/include/hw/misc/a9scu.h b/include/hw/misc/a9scu.h
index efb0c305c2..c3759fb8c8 100644
--- a/include/hw/misc/a9scu.h
+++ b/include/hw/misc/a9scu.h
@@ -11,10 +11,11 @@
#define HW_MISC_A9SCU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
/* A9MP private memory region. */
-typedef struct A9SCUState {
+struct A9SCUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -23,9 +24,9 @@ typedef struct A9SCUState {
uint32_t control;
uint32_t status;
uint32_t num_cpu;
-} A9SCUState;
+};
#define TYPE_A9_SCU "a9-scu"
-#define A9_SCU(obj) OBJECT_CHECK(A9SCUState, (obj), TYPE_A9_SCU)
+OBJECT_DECLARE_SIMPLE_TYPE(A9SCUState, A9_SCU)
#endif
diff --git a/include/hw/misc/allwinner-a10-ccm.h b/include/hw/misc/allwinner-a10-ccm.h
new file mode 100644
index 0000000000..7f22532efa
--- /dev/null
+++ b/include/hw/misc/allwinner-a10-ccm.h
@@ -0,0 +1,67 @@
+/*
+ * Allwinner A10 Clock Control Module emulation
+ *
+ * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from Allwinner H3 CCU,
+ * by Niek Linnenbank.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_A10_CCM_H
+#define HW_MISC_ALLWINNER_A10_CCM_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Size of register I/O address space used by CCM device */
+#define AW_A10_CCM_IOSIZE (0x400)
+
+/** Total number of known registers */
+#define AW_A10_CCM_REGS_NUM (AW_A10_CCM_IOSIZE / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_A10_CCM "allwinner-a10-ccm"
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10ClockCtlState, AW_A10_CCM)
+
+/** @} */
+
+/**
+ * Allwinner A10 CCM object instance state.
+ */
+struct AwA10ClockCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_A10_CCM_REGS_NUM];
+};
+
+#endif /* HW_MISC_ALLWINNER_A10_CCM_H */
diff --git a/include/hw/misc/allwinner-a10-dramc.h b/include/hw/misc/allwinner-a10-dramc.h
new file mode 100644
index 0000000000..b61fbecbe7
--- /dev/null
+++ b/include/hw/misc/allwinner-a10-dramc.h
@@ -0,0 +1,68 @@
+/*
+ * Allwinner A10 DRAM Controller emulation
+ *
+ * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from Allwinner H3 DRAMC,
+ * by Niek Linnenbank.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_A10_DRAMC_H
+#define HW_MISC_ALLWINNER_A10_DRAMC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Size of register I/O address space used by DRAMC device */
+#define AW_A10_DRAMC_IOSIZE (0x1000)
+
+/** Total number of known registers */
+#define AW_A10_DRAMC_REGS_NUM (AW_A10_DRAMC_IOSIZE / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_A10_DRAMC "allwinner-a10-dramc"
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10DramControllerState, AW_A10_DRAMC)
+
+/** @} */
+
+/**
+ * Allwinner A10 DRAMC object instance state.
+ */
+struct AwA10DramControllerState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_A10_DRAMC_REGS_NUM];
+};
+
+#endif /* HW_MISC_ALLWINNER_A10_DRAMC_H */
diff --git a/include/hw/misc/allwinner-cpucfg.h b/include/hw/misc/allwinner-cpucfg.h
new file mode 100644
index 0000000000..a717b47299
--- /dev/null
+++ b/include/hw/misc/allwinner-cpucfg.h
@@ -0,0 +1,51 @@
+/*
+ * Allwinner CPU Configuration Module emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_CPUCFG_H
+#define HW_MISC_ALLWINNER_CPUCFG_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * Object model
+ * @{
+ */
+
+#define TYPE_AW_CPUCFG "allwinner-cpucfg"
+OBJECT_DECLARE_SIMPLE_TYPE(AwCpuCfgState, AW_CPUCFG)
+
+/** @} */
+
+/**
+ * Allwinner CPU Configuration Module instance state
+ */
+struct AwCpuCfgState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion iomem;
+ uint32_t gen_ctrl;
+ uint32_t super_standby;
+ uint32_t entry_addr;
+
+};
+
+#endif /* HW_MISC_ALLWINNER_CPUCFG_H */
diff --git a/include/hw/misc/allwinner-h3-ccu.h b/include/hw/misc/allwinner-h3-ccu.h
new file mode 100644
index 0000000000..a04875bfca
--- /dev/null
+++ b/include/hw/misc/allwinner-h3-ccu.h
@@ -0,0 +1,65 @@
+/*
+ * Allwinner H3 Clock Control Unit emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_H3_CCU_H
+#define HW_MISC_ALLWINNER_H3_CCU_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Size of register I/O address space used by CCU device */
+#define AW_H3_CCU_IOSIZE (0x400)
+
+/** Total number of known registers */
+#define AW_H3_CCU_REGS_NUM (AW_H3_CCU_IOSIZE / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_H3_CCU "allwinner-h3-ccu"
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3ClockCtlState, AW_H3_CCU)
+
+/** @} */
+
+/**
+ * Allwinner H3 CCU object instance state.
+ */
+struct AwH3ClockCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_H3_CCU_REGS_NUM];
+
+};
+
+#endif /* HW_MISC_ALLWINNER_H3_CCU_H */
diff --git a/include/hw/misc/allwinner-h3-dramc.h b/include/hw/misc/allwinner-h3-dramc.h
new file mode 100644
index 0000000000..0b6c877ef7
--- /dev/null
+++ b/include/hw/misc/allwinner-h3-dramc.h
@@ -0,0 +1,105 @@
+/*
+ * Allwinner H3 SDRAM Controller emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_H3_DRAMC_H
+#define HW_MISC_ALLWINNER_H3_DRAMC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "exec/hwaddr.h"
+
+/**
+ * Constants
+ * @{
+ */
+
+/** Highest register address used by DRAMCOM module */
+#define AW_H3_DRAMCOM_REGS_MAXADDR (0x804)
+
+/** Total number of known DRAMCOM registers */
+#define AW_H3_DRAMCOM_REGS_NUM (AW_H3_DRAMCOM_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** Highest register address used by DRAMCTL module */
+#define AW_H3_DRAMCTL_REGS_MAXADDR (0x88c)
+
+/** Total number of known DRAMCTL registers */
+#define AW_H3_DRAMCTL_REGS_NUM (AW_H3_DRAMCTL_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** Highest register address used by DRAMPHY module */
+#define AW_H3_DRAMPHY_REGS_MAXADDR (0x4)
+
+/** Total number of known DRAMPHY registers */
+#define AW_H3_DRAMPHY_REGS_NUM (AW_H3_DRAMPHY_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * Object model
+ * @{
+ */
+
+#define TYPE_AW_H3_DRAMC "allwinner-h3-dramc"
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3DramCtlState, AW_H3_DRAMC)
+
+/** @} */
+
+/**
+ * Allwinner H3 SDRAM Controller object instance state.
+ */
+struct AwH3DramCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Physical base address for start of RAM */
+ hwaddr ram_addr;
+
+ /** Total RAM size in megabytes */
+ uint32_t ram_size;
+
+ /**
+ * @name Memory Regions
+ * @{
+ */
+
+ MemoryRegion row_mirror; /**< Simulates rows for RAM size detection */
+ MemoryRegion row_mirror_alias; /**< Alias of the row which is mirrored */
+ MemoryRegion dramcom_iomem; /**< DRAMCOM module I/O registers */
+ MemoryRegion dramctl_iomem; /**< DRAMCTL module I/O registers */
+ MemoryRegion dramphy_iomem; /**< DRAMPHY module I/O registers */
+
+ /** @} */
+
+ /**
+ * @name Hardware Registers
+ * @{
+ */
+
+ uint32_t dramcom[AW_H3_DRAMCOM_REGS_NUM]; /**< Array of DRAMCOM registers */
+ uint32_t dramctl[AW_H3_DRAMCTL_REGS_NUM]; /**< Array of DRAMCTL registers */
+ uint32_t dramphy[AW_H3_DRAMPHY_REGS_NUM]; /**< Array of DRAMPHY registers */
+
+ /** @} */
+
+};
+
+#endif /* HW_MISC_ALLWINNER_H3_DRAMC_H */
diff --git a/include/hw/misc/allwinner-h3-sysctrl.h b/include/hw/misc/allwinner-h3-sysctrl.h
new file mode 100644
index 0000000000..ec1c220535
--- /dev/null
+++ b/include/hw/misc/allwinner-h3-sysctrl.h
@@ -0,0 +1,66 @@
+/*
+ * Allwinner H3 System Control emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_H3_SYSCTRL_H
+#define HW_MISC_ALLWINNER_H3_SYSCTRL_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Highest register address used by System Control device */
+#define AW_H3_SYSCTRL_REGS_MAXADDR (0x30)
+
+/** Total number of known registers */
+#define AW_H3_SYSCTRL_REGS_NUM ((AW_H3_SYSCTRL_REGS_MAXADDR / \
+ sizeof(uint32_t)) + 1)
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_H3_SYSCTRL "allwinner-h3-sysctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3SysCtrlState, AW_H3_SYSCTRL)
+
+/** @} */
+
+/**
+ * Allwinner H3 System Control object instance state
+ */
+struct AwH3SysCtrlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_H3_SYSCTRL_REGS_NUM];
+
+};
+
+#endif /* HW_MISC_ALLWINNER_H3_SYSCTRL_H */
diff --git a/include/hw/misc/allwinner-r40-ccu.h b/include/hw/misc/allwinner-r40-ccu.h
new file mode 100644
index 0000000000..ceb74eff92
--- /dev/null
+++ b/include/hw/misc/allwinner-r40-ccu.h
@@ -0,0 +1,65 @@
+/*
+ * Allwinner R40 Clock Control Unit emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_R40_CCU_H
+#define HW_MISC_ALLWINNER_R40_CCU_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Size of register I/O address space used by CCU device */
+#define AW_R40_CCU_IOSIZE (0x400)
+
+/** Total number of known registers */
+#define AW_R40_CCU_REGS_NUM (AW_R40_CCU_IOSIZE / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_R40_CCU "allwinner-r40-ccu"
+OBJECT_DECLARE_SIMPLE_TYPE(AwR40ClockCtlState, AW_R40_CCU)
+
+/** @} */
+
+/**
+ * Allwinner R40 CCU object instance state.
+ */
+struct AwR40ClockCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_R40_CCU_REGS_NUM];
+
+};
+
+#endif /* HW_MISC_ALLWINNER_R40_CCU_H */
diff --git a/include/hw/misc/allwinner-r40-dramc.h b/include/hw/misc/allwinner-r40-dramc.h
new file mode 100644
index 0000000000..6a1a3a7893
--- /dev/null
+++ b/include/hw/misc/allwinner-r40-dramc.h
@@ -0,0 +1,108 @@
+/*
+ * Allwinner R40 SDRAM Controller emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_R40_DRAMC_H
+#define HW_MISC_ALLWINNER_R40_DRAMC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "exec/hwaddr.h"
+
+/**
+ * Constants
+ * @{
+ */
+
+/** Highest register address used by DRAMCOM module */
+#define AW_R40_DRAMCOM_REGS_MAXADDR (0x804)
+
+/** Total number of known DRAMCOM registers */
+#define AW_R40_DRAMCOM_REGS_NUM (AW_R40_DRAMCOM_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** Highest register address used by DRAMCTL module */
+#define AW_R40_DRAMCTL_REGS_MAXADDR (0x88c)
+
+/** Total number of known DRAMCTL registers */
+#define AW_R40_DRAMCTL_REGS_NUM (AW_R40_DRAMCTL_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** Highest register address used by DRAMPHY module */
+#define AW_R40_DRAMPHY_REGS_MAXADDR (0x4)
+
+/** Total number of known DRAMPHY registers */
+#define AW_R40_DRAMPHY_REGS_NUM (AW_R40_DRAMPHY_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * Object model
+ * @{
+ */
+
+#define TYPE_AW_R40_DRAMC "allwinner-r40-dramc"
+OBJECT_DECLARE_SIMPLE_TYPE(AwR40DramCtlState, AW_R40_DRAMC)
+
+/** @} */
+
+/**
+ * Allwinner R40 SDRAM Controller object instance state.
+ */
+struct AwR40DramCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Physical base address for start of RAM */
+ hwaddr ram_addr;
+
+ /** Total RAM size in megabytes */
+ uint32_t ram_size;
+
+ uint8_t set_row_bits;
+ uint8_t set_bank_bits;
+ uint8_t set_col_bits;
+
+ /**
+ * @name Memory Regions
+ * @{
+ */
+ MemoryRegion dramcom_iomem; /**< DRAMCOM module I/O registers */
+ MemoryRegion dramctl_iomem; /**< DRAMCTL module I/O registers */
+ MemoryRegion dramphy_iomem; /**< DRAMPHY module I/O registers */
+ MemoryRegion dram_high; /**< The high 1G dram for dualrank detect */
+ MemoryRegion detect_cells; /**< DRAM memory cells for auto detect */
+
+ /** @} */
+
+ /**
+ * @name Hardware Registers
+ * @{
+ */
+
+ uint32_t dramcom[AW_R40_DRAMCOM_REGS_NUM]; /**< DRAMCOM registers */
+ uint32_t dramctl[AW_R40_DRAMCTL_REGS_NUM]; /**< DRAMCTL registers */
+ uint32_t dramphy[AW_R40_DRAMPHY_REGS_NUM]; /**< DRAMPHY registers */
+
+ /** @} */
+
+};
+
+#endif /* HW_MISC_ALLWINNER_R40_DRAMC_H */
diff --git a/include/hw/misc/allwinner-sid.h b/include/hw/misc/allwinner-sid.h
new file mode 100644
index 0000000000..3bfa887a96
--- /dev/null
+++ b/include/hw/misc/allwinner-sid.h
@@ -0,0 +1,59 @@
+/*
+ * Allwinner Security ID emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_SID_H
+#define HW_MISC_ALLWINNER_SID_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "qemu/uuid.h"
+
+/**
+ * Object model
+ * @{
+ */
+
+#define TYPE_AW_SID "allwinner-sid"
+OBJECT_DECLARE_SIMPLE_TYPE(AwSidState, AW_SID)
+
+/** @} */
+
+/**
+ * Allwinner Security ID object instance state
+ */
+struct AwSidState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Control register defines how and what to read */
+ uint32_t control;
+
+ /** RdKey register contains the data retrieved by the device */
+ uint32_t rdkey;
+
+ /** Stores the emulated device identifier */
+ QemuUUID identifier;
+
+};
+
+#endif /* HW_MISC_ALLWINNER_SID_H */
diff --git a/include/hw/misc/allwinner-sramc.h b/include/hw/misc/allwinner-sramc.h
new file mode 100644
index 0000000000..66b01b8d04
--- /dev/null
+++ b/include/hw/misc/allwinner-sramc.h
@@ -0,0 +1,69 @@
+/*
+ * Allwinner SRAM controller emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_SRAMC_H
+#define HW_MISC_ALLWINNER_SRAMC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "qemu/uuid.h"
+
+/**
+ * Object model
+ * @{
+ */
+#define TYPE_AW_SRAMC "allwinner-sramc"
+#define TYPE_AW_SRAMC_SUN8I_R40 TYPE_AW_SRAMC "-sun8i-r40"
+OBJECT_DECLARE_TYPE(AwSRAMCState, AwSRAMCClass, AW_SRAMC)
+
+/** @} */
+
+/**
+ * Allwinner SRAMC object instance state
+ */
+struct AwSRAMCState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /* registers */
+ uint32_t sram_ctl1;
+ uint32_t sram_ver;
+ uint32_t sram_soft_entry_reg0;
+};
+
+/**
+ * Allwinner SRAM Controller class-level struct.
+ *
+ * This struct is filled in by the device-specific code for each sunxi
+ * SoC, so that the generic code can use it to support all devices.
+ */
+struct AwSRAMCClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+
+ uint32_t sram_version_code;
+};
+
+#endif /* HW_MISC_ALLWINNER_SRAMC_H */
diff --git a/include/hw/misc/arm11scu.h b/include/hw/misc/arm11scu.h
index 5ad0f3d339..e5c0282aec 100644
--- a/include/hw/misc/arm11scu.h
+++ b/include/hw/misc/arm11scu.h
@@ -12,11 +12,12 @@
#define HW_MISC_ARM11SCU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ARM11_SCU "arm11-scu"
-#define ARM11_SCU(obj) OBJECT_CHECK(ARM11SCUState, (obj), TYPE_ARM11_SCU)
+OBJECT_DECLARE_SIMPLE_TYPE(ARM11SCUState, ARM11_SCU)
-typedef struct ARM11SCUState {
+struct ARM11SCUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -24,6 +25,6 @@ typedef struct ARM11SCUState {
uint32_t control;
uint32_t num_cpu;
MemoryRegion iomem;
-} ARM11SCUState;
+};
#endif
diff --git a/include/hw/misc/arm_integrator_debug.h b/include/hw/misc/arm_integrator_debug.h
index 0077dacb44..798b082164 100644
--- a/include/hw/misc/arm_integrator_debug.h
+++ b/include/hw/misc/arm_integrator_debug.h
@@ -3,7 +3,7 @@
*
* Browse the data sheet:
*
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0159b/Babbfijf.html
+ * https://developer.arm.com/documentation/dui0159/b/peripherals-and-interfaces/debug-leds-and-dip-switch-interface
*
* Copyright (c) 2013 Alex Bennée <alex@bennee.com>
*
diff --git a/include/hw/misc/armsse-cpu-pwrctrl.h b/include/hw/misc/armsse-cpu-pwrctrl.h
new file mode 100644
index 0000000000..51d45ede7d
--- /dev/null
+++ b/include/hw/misc/armsse-cpu-pwrctrl.h
@@ -0,0 +1,40 @@
+/*
+ * ARM SSE CPU PWRCTRL register block
+ *
+ * Copyright (c) 2021 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "CPU<N>_PWRCTRL block" which is part of the
+ * Arm Corstone SSE-300 Example Subsystem and documented in
+ * https://developer.arm.com/documentation/101773/0000
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the register bank
+ */
+
+#ifndef HW_MISC_ARMSSE_CPU_PWRCTRL_H
+#define HW_MISC_ARMSSE_CPU_PWRCTRL_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_ARMSSE_CPU_PWRCTRL "armsse-cpu-pwrctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMSSECPUPwrCtrl, ARMSSE_CPU_PWRCTRL)
+
+struct ARMSSECPUPwrCtrl {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ uint32_t cpupwrcfg;
+};
+
+#endif
diff --git a/include/hw/misc/armsse-cpuid.h b/include/hw/misc/armsse-cpuid.h
new file mode 100644
index 0000000000..9c0926322c
--- /dev/null
+++ b/include/hw/misc/armsse-cpuid.h
@@ -0,0 +1,42 @@
+/*
+ * ARM SSE-200 CPU_IDENTITY register block
+ *
+ * Copyright (c) 2019 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "CPU_IDENTITY" register block which is part of the
+ * Arm SSE-200 and documented in
+ * https://developer.arm.com/documentation/101104/latest/
+ *
+ * QEMU interface:
+ * + QOM property "CPUID": the value to use for the CPUID register
+ * + sysbus MMIO region 0: the CPU_IDENTITY register bank
+ */
+
+#ifndef HW_MISC_ARMSSE_CPUID_H
+#define HW_MISC_ARMSSE_CPUID_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_ARMSSE_CPUID "armsse-cpuid"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMSSECPUID, ARMSSE_CPUID)
+
+struct ARMSSECPUID {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ /* Properties */
+ uint32_t cpuid;
+};
+
+#endif
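Following the interface comment above, a container SoC model could instantiate the block roughly like this (a sketch; the CPUID value and base address are made up):

static void example_add_cpuid(Object *parent)
{
    ARMSSECPUID *s = ARMSSE_CPUID(object_new(TYPE_ARMSSE_CPUID));

    object_property_add_child(parent, "cpuid", OBJECT(s));
    qdev_prop_set_uint32(DEVICE(s), "CPUID", 0x41043b42); /* made-up value */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(s), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, 0x4001f000);    /* made-up base */
}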
diff --git a/include/hw/misc/armsse-mhu.h b/include/hw/misc/armsse-mhu.h
new file mode 100644
index 0000000000..41925ded89
--- /dev/null
+++ b/include/hw/misc/armsse-mhu.h
@@ -0,0 +1,45 @@
+/*
+ * ARM SSE-200 Message Handling Unit (MHU)
+ *
+ * Copyright (c) 2019 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the Message Handling Unit (MHU) which is part of the
+ * Arm SSE-200 and documented in
+ * https://developer.arm.com/documentation/101104/latest/
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the MHU register bank
+ * + sysbus IRQ 0: interrupt for CPU 0
+ * + sysbus IRQ 1: interrupt for CPU 1
+ */
+
+#ifndef HW_MISC_ARMSSE_MHU_H
+#define HW_MISC_ARMSSE_MHU_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_ARMSSE_MHU "armsse-mhu"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMSSEMHU, ARMSSE_MHU)
+
+struct ARMSSEMHU {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq cpu0irq;
+ qemu_irq cpu1irq;
+
+ uint32_t cpu0intr;
+ uint32_t cpu1intr;
+};
+
+#endif
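Per the interface comment above, the MMIO region and the two per-CPU interrupts would be wired up roughly as follows (a sketch; the base address is made up):

static void example_wire_mhu(SysBusDevice *mhu, qemu_irq cpu0_irq,
                             qemu_irq cpu1_irq)
{
    sysbus_mmio_map(mhu, 0, 0x40003000);   /* made-up base address */
    sysbus_connect_irq(mhu, 0, cpu0_irq);  /* sysbus IRQ 0 -> CPU 0 */
    sysbus_connect_irq(mhu, 1, cpu1_irq);  /* sysbus IRQ 1 -> CPU 1 */
}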
diff --git a/include/hw/misc/armv7m_ras.h b/include/hw/misc/armv7m_ras.h
new file mode 100644
index 0000000000..ba6daccf3f
--- /dev/null
+++ b/include/hw/misc/armv7m_ras.h
@@ -0,0 +1,37 @@
+/*
+ * Arm M-profile RAS (Reliability, Availability and Serviceability) block
+ *
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the RAS register block of an M-profile CPU
+ * (the registers starting at 0xE0005000 with ERRFRn).
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the register bank
+ *
+ * The QEMU implementation currently provides "minimal RAS" only.
+ */
+
+#ifndef HW_MISC_ARMV7M_RAS_H
+#define HW_MISC_ARMV7M_RAS_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ARMV7M_RAS "armv7m-ras"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MRAS, ARMV7M_RAS)
+
+struct ARMv7MRAS {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+};
+
+#endif
diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
new file mode 100644
index 0000000000..ecb1b67de8
--- /dev/null
+++ b/include/hw/misc/aspeed_hace.h
@@ -0,0 +1,50 @@
+/*
+ * ASPEED Hash and Crypto Engine
+ *
+ * Copyright (C) 2021 IBM Corp.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ASPEED_HACE_H
+#define ASPEED_HACE_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_HACE "aspeed.hace"
+#define TYPE_ASPEED_AST2400_HACE TYPE_ASPEED_HACE "-ast2400"
+#define TYPE_ASPEED_AST2500_HACE TYPE_ASPEED_HACE "-ast2500"
+#define TYPE_ASPEED_AST2600_HACE TYPE_ASPEED_HACE "-ast2600"
+#define TYPE_ASPEED_AST1030_HACE TYPE_ASPEED_HACE "-ast1030"
+
+OBJECT_DECLARE_TYPE(AspeedHACEState, AspeedHACEClass, ASPEED_HACE)
+
+#define ASPEED_HACE_NR_REGS (0x64 >> 2)
+#define ASPEED_HACE_MAX_SG 256 /* max number of entries */
+
+struct AspeedHACEState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ struct iovec iov_cache[ASPEED_HACE_MAX_SG];
+ uint32_t regs[ASPEED_HACE_NR_REGS];
+ uint32_t total_req_len;
+ uint32_t iov_count;
+
+ MemoryRegion *dram_mr;
+ AddressSpace dram_as;
+};
+
+
+struct AspeedHACEClass {
+ SysBusDeviceClass parent_class;
+
+ uint32_t src_mask;
+ uint32_t dest_mask;
+ uint32_t key_mask;
+ uint32_t hash_mask;
+};
+
+#endif /* ASPEED_HACE_H */
diff --git a/include/hw/misc/aspeed_i3c.h b/include/hw/misc/aspeed_i3c.h
new file mode 100644
index 0000000000..39679dfa1a
--- /dev/null
+++ b/include/hw/misc/aspeed_i3c.h
@@ -0,0 +1,48 @@
+/*
+ * ASPEED I3C Controller
+ *
+ * Copyright (C) 2021 ASPEED Technology Inc.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef ASPEED_I3C_H
+#define ASPEED_I3C_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_I3C "aspeed.i3c"
+#define TYPE_ASPEED_I3C_DEVICE "aspeed.i3c.device"
+OBJECT_DECLARE_TYPE(AspeedI3CState, AspeedI3CClass, ASPEED_I3C)
+
+#define ASPEED_I3C_NR_REGS (0x70 >> 2)
+#define ASPEED_I3C_DEVICE_NR_REGS (0x300 >> 2)
+#define ASPEED_I3C_NR_DEVICES 6
+
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedI3CDevice, ASPEED_I3C_DEVICE)
+typedef struct AspeedI3CDevice {
+ /* <private> */
+ SysBusDevice parent;
+
+ /* <public> */
+ MemoryRegion mr;
+ qemu_irq irq;
+
+ uint8_t id;
+ uint32_t regs[ASPEED_I3C_DEVICE_NR_REGS];
+} AspeedI3CDevice;
+
+typedef struct AspeedI3CState {
+ /* <private> */
+ SysBusDevice parent;
+
+ /* <public> */
+ MemoryRegion iomem;
+ MemoryRegion iomem_container;
+ qemu_irq irq;
+
+ uint32_t regs[ASPEED_I3C_NR_REGS];
+ AspeedI3CDevice devices[ASPEED_I3C_NR_DEVICES];
+} AspeedI3CState;
+#endif /* ASPEED_I3C_H */
diff --git a/include/hw/misc/aspeed_lpc.h b/include/hw/misc/aspeed_lpc.h
new file mode 100644
index 0000000000..fa398959af
--- /dev/null
+++ b/include/hw/misc/aspeed_lpc.h
@@ -0,0 +1,45 @@
+/*
+ * ASPEED LPC Controller
+ *
+ * Copyright (C) 2017-2018 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef ASPEED_LPC_H
+#define ASPEED_LPC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_LPC "aspeed.lpc"
+#define ASPEED_LPC(obj) OBJECT_CHECK(AspeedLPCState, (obj), TYPE_ASPEED_LPC)
+
+#define ASPEED_LPC_NR_REGS (0x260 >> 2)
+
+enum aspeed_lpc_subdevice {
+ aspeed_lpc_kcs_1 = 0,
+ aspeed_lpc_kcs_2,
+ aspeed_lpc_kcs_3,
+ aspeed_lpc_kcs_4,
+ aspeed_lpc_ibt,
+};
+
+#define ASPEED_LPC_NR_SUBDEVS 5
+
+typedef struct AspeedLPCState {
+ /* <private> */
+ SysBusDevice parent;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ qemu_irq subdevice_irqs[ASPEED_LPC_NR_SUBDEVS];
+ uint32_t subdevice_irqs_pending;
+
+ uint32_t regs[ASPEED_LPC_NR_REGS];
+ uint32_t hicr7;
+} AspeedLPCState;
+
+#endif /* ASPEED_LPC_H */
diff --git a/include/hw/misc/aspeed_peci.h b/include/hw/misc/aspeed_peci.h
new file mode 100644
index 0000000000..8382707d9f
--- /dev/null
+++ b/include/hw/misc/aspeed_peci.h
@@ -0,0 +1,29 @@
+/*
+ * Aspeed PECI Controller
+ *
+ * Copyright (c) Meta Platforms, Inc. and affiliates. (http://www.meta.com)
+ *
+ * This code is licensed under the GPL version 2 or later. See the COPYING
+ * file in the top-level directory.
+ */
+
+#ifndef ASPEED_PECI_H
+#define ASPEED_PECI_H
+
+#include "hw/sysbus.h"
+
+#define ASPEED_PECI_NR_REGS ((0xFC + 4) >> 2)
+#define TYPE_ASPEED_PECI "aspeed.peci"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedPECIState, ASPEED_PECI);
+
+struct AspeedPECIState {
+ /* <private> */
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+
+ uint32_t regs[ASPEED_PECI_NR_REGS];
+};
+
+#endif
diff --git a/include/hw/misc/aspeed_sbc.h b/include/hw/misc/aspeed_sbc.h
new file mode 100644
index 0000000000..405e6782b9
--- /dev/null
+++ b/include/hw/misc/aspeed_sbc.h
@@ -0,0 +1,45 @@
+/*
+ * ASPEED Secure Boot Controller
+ *
+ * Copyright (C) 2021-2022 IBM Corp.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ASPEED_SBC_H
+#define ASPEED_SBC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_SBC "aspeed.sbc"
+#define TYPE_ASPEED_AST2600_SBC TYPE_ASPEED_SBC "-ast2600"
+OBJECT_DECLARE_TYPE(AspeedSBCState, AspeedSBCClass, ASPEED_SBC)
+
+#define ASPEED_SBC_NR_REGS (0x93c >> 2)
+
+#define QSR_AES BIT(27)
+#define QSR_RSA1024 (0x0 << 12)
+#define QSR_RSA2048 (0x1 << 12)
+#define QSR_RSA3072 (0x2 << 12)
+#define QSR_RSA4096 (0x3 << 12)
+#define QSR_SHA224 (0x0 << 10)
+#define QSR_SHA256 (0x1 << 10)
+#define QSR_SHA384 (0x2 << 10)
+#define QSR_SHA512 (0x3 << 10)
+
+struct AspeedSBCState {
+ SysBusDevice parent;
+
+ bool emmc_abr;
+ uint32_t signing_settings;
+
+ MemoryRegion iomem;
+
+ uint32_t regs[ASPEED_SBC_NR_REGS];
+};
+
+struct AspeedSBCClass {
+ SysBusDeviceClass parent_class;
+};
+
+#endif /* ASPEED_SBC_H */
diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h
index 38996adc59..7cb6018dbc 100644
--- a/include/hw/misc/aspeed_scu.h
+++ b/include/hw/misc/aspeed_scu.h
@@ -12,41 +12,64 @@
#define ASPEED_SCU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_SCU "aspeed.scu"
-#define ASPEED_SCU(obj) OBJECT_CHECK(AspeedSCUState, (obj), TYPE_ASPEED_SCU)
+OBJECT_DECLARE_TYPE(AspeedSCUState, AspeedSCUClass, ASPEED_SCU)
+#define TYPE_ASPEED_2400_SCU TYPE_ASPEED_SCU "-ast2400"
+#define TYPE_ASPEED_2500_SCU TYPE_ASPEED_SCU "-ast2500"
+#define TYPE_ASPEED_2600_SCU TYPE_ASPEED_SCU "-ast2600"
+#define TYPE_ASPEED_1030_SCU TYPE_ASPEED_SCU "-ast1030"
#define ASPEED_SCU_NR_REGS (0x1A8 >> 2)
+#define ASPEED_AST2600_SCU_NR_REGS (0xE20 >> 2)
-typedef struct AspeedSCUState {
+struct AspeedSCUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
- uint32_t regs[ASPEED_SCU_NR_REGS];
+ uint32_t regs[ASPEED_AST2600_SCU_NR_REGS];
uint32_t silicon_rev;
uint32_t hw_strap1;
uint32_t hw_strap2;
uint32_t hw_prot_key;
-
- uint32_t clkin;
- uint32_t hpll;
- uint32_t apb_freq;
-} AspeedSCUState;
+};
#define AST2400_A0_SILICON_REV 0x02000303U
#define AST2400_A1_SILICON_REV 0x02010303U
#define AST2500_A0_SILICON_REV 0x04000303U
#define AST2500_A1_SILICON_REV 0x04010303U
+#define AST2600_A0_SILICON_REV 0x05000303U
+#define AST2600_A1_SILICON_REV 0x05010303U
+#define AST2600_A2_SILICON_REV 0x05020303U
+#define AST2600_A3_SILICON_REV 0x05030303U
+#define AST1030_A0_SILICON_REV 0x80000000U
+#define AST1030_A1_SILICON_REV 0x80010000U
#define ASPEED_IS_AST2500(si_rev) ((((si_rev) >> 24) & 0xff) == 0x04)
-extern bool is_supported_silicon_rev(uint32_t silicon_rev);
+bool is_supported_silicon_rev(uint32_t silicon_rev);
+
+
+struct AspeedSCUClass {
+ SysBusDeviceClass parent_class;
+
+ const uint32_t *resets;
+ uint32_t (*calc_hpll)(AspeedSCUState *s, uint32_t hpll_reg);
+ uint32_t (*get_apb)(AspeedSCUState *s);
+ uint32_t apb_divider;
+ uint32_t nr_regs;
+ bool clkin_25Mhz;
+ const MemoryRegionOps *ops;
+};
#define ASPEED_SCU_PROT_KEY 0x1688A8A8
+uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s);
+
/*
* Extracted from Aspeed SDK v00.03.21. Fixes and extra definitions
* were added.
@@ -267,6 +290,7 @@ extern bool is_supported_silicon_rev(uint32_t silicon_rev);
#define SCU_AST2500_HW_STRAP_ESPI_FLASH_ENABLE (0x1 << 26)
#define SCU_AST2500_HW_STRAP_ESPI_ENABLE (0x1 << 25)
#define SCU_AST2500_HW_STRAP_DDR4_ENABLE (0x1 << 24)
+#define SCU_AST2500_HW_STRAP_25HZ_CLOCK_MODE (0x1 << 23)
#define SCU_AST2500_HW_STRAP_ACPI_ENABLE (0x1 << 19)
#define SCU_AST2500_HW_STRAP_USBCKI_FREQ (0x1 << 18)
@@ -297,4 +321,44 @@ extern bool is_supported_silicon_rev(uint32_t silicon_rev);
SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \
SCU_AST2500_HW_STRAP_RESERVED1)
+/*
+ * SCU200 H-PLL Parameter Register (for Aspeed AST2600 SOC)
+ *
+ * 28:26 H-PLL Parameters
+ * 25 Enable H-PLL reset
+ * 24 Enable H-PLL bypass mode
+ * 23 Turn off H-PLL
+ * 22:19 H-PLL Post Divider (P)
+ * 18:13 H-PLL Numerator (M)
+ * 12:0 H-PLL Denumerator (N)
+ *
+ * (Output frequency) = CLKIN(25MHz) * [(M+1) / (N+1)] / (P+1)
+ *
+ * The default frequency is 1200MHz when CLKIN = 25MHz
+ */
+#define SCU_AST2600_H_PLL_BYPASS_EN (0x1 << 24)
+#define SCU_AST2600_H_PLL_OFF (0x1 << 23)
+
+/*
+ * SCU310 Clock Selection Register Set 4 (for Aspeed AST1030 SOC)
+ *
+ * 31 I3C Clock Source selection
+ * 30:28 I3C clock divider selection
+ * 26:24 MAC AHB clock divider selection
+ * 22:20 RGMII 125MHz clock divider ratio
+ * 19:16 RGMII 50MHz clock divider ratio
+ * 15 LHCLK clock generation/output enable control
+ * 14:12 LHCLK divider selection
+ * 11:8 APB Bus PCLK divider selection
+ * 7 Select PECI clock source
+ * 6 Select UART debug port clock source
+ * 5 Select UART6 clock source
+ * 4 Select UART5 clock source
+ * 3 Select UART4 clock source
+ * 2 Select UART3 clock source
+ * 1 Select UART2 clock source
+ * 0 Select UART1 clock source
+ */
+#define SCU_AST1030_CLK_GET_PCLK_DIV(x) (((x) >> 8) & 0xf)
+
#endif /* ASPEED_SCU_H */
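The SCU200 comment above gives the H-PLL output-frequency formula; written out literally it is roughly the following (a sketch only, not the calc_hpll hook the model actually installs):

static uint32_t ast2600_hpll_freq(uint32_t hpll_reg)
{
    uint32_t p = (hpll_reg >> 19) & 0xf;    /* 22:19 post divider */
    uint32_t m = (hpll_reg >> 13) & 0x3f;   /* 18:13 numerator    */
    uint32_t n = hpll_reg & 0x1fff;         /* 12:0  denominator  */

    /* CLKIN is 25MHz; the defaults give 25MHz * 48 / 1 / 1 = 1200MHz */
    return 25000000u * (m + 1) / (n + 1) / (p + 1);
}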
diff --git a/include/hw/misc/aspeed_sdmc.h b/include/hw/misc/aspeed_sdmc.h
index b3c926acae..ec2d59a14f 100644
--- a/include/hw/misc/aspeed_sdmc.h
+++ b/include/hw/misc/aspeed_sdmc.h
@@ -10,13 +10,28 @@
#define ASPEED_SDMC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_SDMC "aspeed.sdmc"
-#define ASPEED_SDMC(obj) OBJECT_CHECK(AspeedSDMCState, (obj), TYPE_ASPEED_SDMC)
+OBJECT_DECLARE_TYPE(AspeedSDMCState, AspeedSDMCClass, ASPEED_SDMC)
+#define TYPE_ASPEED_2400_SDMC TYPE_ASPEED_SDMC "-ast2400"
+#define TYPE_ASPEED_2500_SDMC TYPE_ASPEED_SDMC "-ast2500"
+#define TYPE_ASPEED_2600_SDMC TYPE_ASPEED_SDMC "-ast2600"
-#define ASPEED_SDMC_NR_REGS (0x174 >> 2)
+/*
+ * SDMC has 174 documented registers. In addition the u-boot device tree
+ * describes the following regions:
+ * - PHY status regs at offset 0x400, length 0x200
+ * - PHY setting regs at offset 0x100, length 0x300
+ *
+ * There are two sets of MRS (Mode Registers) configuration in the ast2600
+ * memory system: one is in the SDRAM MC (memory controller), which is used
+ * at run time, and the other is in the DDR-PHY IP, which is used during
+ * DDR-PHY training.
+ */
+#define ASPEED_SDMC_NR_REGS (0x500 >> 2)
-typedef struct AspeedSDMCState {
+struct AspeedSDMCState {
/*< private >*/
SysBusDevice parent_obj;
@@ -24,12 +39,18 @@ typedef struct AspeedSDMCState {
MemoryRegion iomem;
uint32_t regs[ASPEED_SDMC_NR_REGS];
- uint32_t silicon_rev;
- uint32_t ram_bits;
uint64_t ram_size;
uint64_t max_ram_size;
- uint32_t fixed_conf;
+};
-} AspeedSDMCState;
+
+struct AspeedSDMCClass {
+ SysBusDeviceClass parent_class;
+
+ uint64_t max_ram_size;
+ const uint64_t *valid_ram_sizes;
+ uint32_t (*compute_conf)(AspeedSDMCState *s, uint32_t data);
+ void (*write)(AspeedSDMCState *s, uint32_t reg, uint32_t data);
+};
#endif /* ASPEED_SDMC_H */
diff --git a/include/hw/misc/aspeed_xdma.h b/include/hw/misc/aspeed_xdma.h
new file mode 100644
index 0000000000..b1478fd1c6
--- /dev/null
+++ b/include/hw/misc/aspeed_xdma.h
@@ -0,0 +1,46 @@
+/*
+ * ASPEED XDMA Controller
+ * Eddie James <eajames@linux.ibm.com>
+ *
+ * Copyright (C) 2019 IBM Corp.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ASPEED_XDMA_H
+#define ASPEED_XDMA_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_ASPEED_XDMA "aspeed.xdma"
+#define TYPE_ASPEED_2400_XDMA TYPE_ASPEED_XDMA "-ast2400"
+#define TYPE_ASPEED_2500_XDMA TYPE_ASPEED_XDMA "-ast2500"
+#define TYPE_ASPEED_2600_XDMA TYPE_ASPEED_XDMA "-ast2600"
+OBJECT_DECLARE_TYPE(AspeedXDMAState, AspeedXDMAClass, ASPEED_XDMA)
+
+#define ASPEED_XDMA_NUM_REGS (ASPEED_XDMA_REG_SIZE / sizeof(uint32_t))
+#define ASPEED_XDMA_REG_SIZE 0x7C
+
+struct AspeedXDMAState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ char bmc_cmdq_readp_set;
+ uint32_t regs[ASPEED_XDMA_NUM_REGS];
+};
+
+struct AspeedXDMAClass {
+ SysBusDeviceClass parent_class;
+
+ uint8_t cmdq_endp;
+ uint8_t cmdq_wrp;
+ uint8_t cmdq_rdp;
+ uint8_t intr_ctrl;
+ uint32_t intr_ctrl_mask;
+ uint8_t intr_status;
+ uint32_t intr_complete;
+};
+
+#endif /* ASPEED_XDMA_H */
diff --git a/include/hw/misc/auxbus.h b/include/hw/misc/auxbus.h
index c15b444748..03cacdee42 100644
--- a/include/hw/misc/auxbus.h
+++ b/include/hw/misc/auxbus.h
@@ -25,13 +25,16 @@
#ifndef HW_MISC_AUXBUS_H
#define HW_MISC_AUXBUS_H
-#include "hw/qdev.h"
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct AUXBus AUXBus;
typedef struct AUXSlave AUXSlave;
typedef enum AUXCommand AUXCommand;
typedef enum AUXReply AUXReply;
-typedef struct AUXTOI2CState AUXTOI2CState;
+
+#define TYPE_AUXTOI2C "aux-to-i2c-bridge"
+OBJECT_DECLARE_SIMPLE_TYPE(AUXTOI2CState, AUXTOI2C)
enum AUXCommand {
WRITE_I2C = 0,
@@ -52,7 +55,7 @@ enum AUXReply {
};
#define TYPE_AUX_BUS "aux-bus"
-#define AUX_BUS(obj) OBJECT_CHECK(AUXBus, (obj), TYPE_AUX_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(AUXBus, AUX_BUS)
struct AUXBus {
/* < private > */
@@ -71,8 +74,7 @@ struct AUXBus {
};
#define TYPE_AUX_SLAVE "aux-slave"
-#define AUX_SLAVE(obj) \
- OBJECT_CHECK(AUXSlave, (obj), TYPE_AUX_SLAVE)
+OBJECT_DECLARE_SIMPLE_TYPE(AUXSlave, AUX_SLAVE)
struct AUXSlave {
/* < private > */
@@ -83,21 +85,28 @@ struct AUXSlave {
};
/**
- * aux_init_bus: Initialize an AUX bus.
+ * aux_bus_init: Initialize an AUX bus.
*
* Returns the new AUX bus created.
*
* @parent The device where this bus is located.
* @name The name of the bus.
*/
-AUXBus *aux_init_bus(DeviceState *parent, const char *name);
+AUXBus *aux_bus_init(DeviceState *parent, const char *name);
+
+/**
+ * aux_bus_realize: Realize an AUX bus.
+ *
+ * @bus: The AUX bus.
+ */
+void aux_bus_realize(AUXBus *bus);
/*
* aux_request: Make a request on the bus.
*
* Returns the reply of the request.
*
- * @bus Ths bus where the request happen.
+ * @bus The bus where the request happens.
* @cmd The command requested.
* @address The 20bits address of the slave.
* @len The length of the read or write.
@@ -123,13 +132,6 @@ I2CBus *aux_get_i2c_bus(AUXBus *bus);
*/
void aux_init_mmio(AUXSlave *aux_slave, MemoryRegion *mmio);
-/* aux_create_slave: Create a new device on an AUX bus
- *
- * @bus The AUX bus for the new device.
- * @name The type of the device to be created.
- */
-DeviceState *aux_create_slave(AUXBus *bus, const char *name);
-
/* aux_map_slave: Map the mmio for an AUX slave on the bus.
*
* @dev The AUX slave.
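A minimal usage sketch for the renamed helpers documented above (the bus name is made up):

static void example_create_aux_bus(DeviceState *parent)
{
    AUXBus *bus = aux_bus_init(parent, "aux-bus.0");

    /* ... create AUX slaves and map them with aux_map_slave() here ... */

    aux_bus_realize(bus);
}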
diff --git a/include/hw/misc/avr_power.h b/include/hw/misc/avr_power.h
new file mode 100644
index 0000000000..388e421aa7
--- /dev/null
+++ b/include/hw/misc/avr_power.h
@@ -0,0 +1,46 @@
+/*
+ * AVR Power Reduction Management
+ *
+ * Copyright (c) 2019-2020 Michael Rolnik
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_MISC_AVR_POWER_H
+#define HW_MISC_AVR_POWER_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+
+#define TYPE_AVR_MASK "avr-power"
+OBJECT_DECLARE_SIMPLE_TYPE(AVRMaskState, AVR_MASK)
+
+struct AVRMaskState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+
+ uint8_t val;
+ qemu_irq irq[8];
+};
+
+#endif /* HW_MISC_AVR_POWER_H */
diff --git a/include/hw/misc/bcm2835_cprman.h b/include/hw/misc/bcm2835_cprman.h
new file mode 100644
index 0000000000..0d38036728
--- /dev/null
+++ b/include/hw/misc/bcm2835_cprman.h
@@ -0,0 +1,210 @@
+/*
+ * BCM2835 CPRMAN clock manager
+ *
+ * Copyright (c) 2020 Luc Michel <luc@lmichel.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_BCM2835_CPRMAN_H
+#define HW_MISC_BCM2835_CPRMAN_H
+
+#include "hw/sysbus.h"
+#include "hw/qdev-clock.h"
+
+#define TYPE_BCM2835_CPRMAN "bcm2835-cprman"
+
+typedef struct BCM2835CprmanState BCM2835CprmanState;
+
+DECLARE_INSTANCE_CHECKER(BCM2835CprmanState, CPRMAN,
+ TYPE_BCM2835_CPRMAN)
+
+#define CPRMAN_NUM_REGS (0x2000 / sizeof(uint32_t))
+
+typedef enum CprmanPll {
+ CPRMAN_PLLA = 0,
+ CPRMAN_PLLC,
+ CPRMAN_PLLD,
+ CPRMAN_PLLH,
+ CPRMAN_PLLB,
+
+ CPRMAN_NUM_PLL
+} CprmanPll;
+
+typedef enum CprmanPllChannel {
+ CPRMAN_PLLA_CHANNEL_DSI0 = 0,
+ CPRMAN_PLLA_CHANNEL_CORE,
+ CPRMAN_PLLA_CHANNEL_PER,
+ CPRMAN_PLLA_CHANNEL_CCP2,
+
+ CPRMAN_PLLC_CHANNEL_CORE2,
+ CPRMAN_PLLC_CHANNEL_CORE1,
+ CPRMAN_PLLC_CHANNEL_PER,
+ CPRMAN_PLLC_CHANNEL_CORE0,
+
+ CPRMAN_PLLD_CHANNEL_DSI0,
+ CPRMAN_PLLD_CHANNEL_CORE,
+ CPRMAN_PLLD_CHANNEL_PER,
+ CPRMAN_PLLD_CHANNEL_DSI1,
+
+ CPRMAN_PLLH_CHANNEL_AUX,
+ CPRMAN_PLLH_CHANNEL_RCAL,
+ CPRMAN_PLLH_CHANNEL_PIX,
+
+ CPRMAN_PLLB_CHANNEL_ARM,
+
+ CPRMAN_NUM_PLL_CHANNEL,
+
+ /* Special values used when connecting clock sources to clocks */
+ CPRMAN_CLOCK_SRC_NORMAL = -1,
+ CPRMAN_CLOCK_SRC_FORCE_GROUND = -2,
+ CPRMAN_CLOCK_SRC_DSI0HSCK = -3,
+} CprmanPllChannel;
+
+typedef enum CprmanClockMux {
+ CPRMAN_CLOCK_GNRIC,
+ CPRMAN_CLOCK_VPU,
+ CPRMAN_CLOCK_SYS,
+ CPRMAN_CLOCK_PERIA,
+ CPRMAN_CLOCK_PERII,
+ CPRMAN_CLOCK_H264,
+ CPRMAN_CLOCK_ISP,
+ CPRMAN_CLOCK_V3D,
+ CPRMAN_CLOCK_CAM0,
+ CPRMAN_CLOCK_CAM1,
+ CPRMAN_CLOCK_CCP2,
+ CPRMAN_CLOCK_DSI0E,
+ CPRMAN_CLOCK_DSI0P,
+ CPRMAN_CLOCK_DPI,
+ CPRMAN_CLOCK_GP0,
+ CPRMAN_CLOCK_GP1,
+ CPRMAN_CLOCK_GP2,
+ CPRMAN_CLOCK_HSM,
+ CPRMAN_CLOCK_OTP,
+ CPRMAN_CLOCK_PCM,
+ CPRMAN_CLOCK_PWM,
+ CPRMAN_CLOCK_SLIM,
+ CPRMAN_CLOCK_SMI,
+ CPRMAN_CLOCK_TEC,
+ CPRMAN_CLOCK_TD0,
+ CPRMAN_CLOCK_TD1,
+ CPRMAN_CLOCK_TSENS,
+ CPRMAN_CLOCK_TIMER,
+ CPRMAN_CLOCK_UART,
+ CPRMAN_CLOCK_VEC,
+ CPRMAN_CLOCK_PULSE,
+ CPRMAN_CLOCK_SDC,
+ CPRMAN_CLOCK_ARM,
+ CPRMAN_CLOCK_AVEO,
+ CPRMAN_CLOCK_EMMC,
+ CPRMAN_CLOCK_EMMC2,
+
+ CPRMAN_NUM_CLOCK_MUX
+} CprmanClockMux;
+
+typedef enum CprmanClockMuxSource {
+ CPRMAN_CLOCK_SRC_GND = 0,
+ CPRMAN_CLOCK_SRC_XOSC,
+ CPRMAN_CLOCK_SRC_TD0,
+ CPRMAN_CLOCK_SRC_TD1,
+ CPRMAN_CLOCK_SRC_PLLA,
+ CPRMAN_CLOCK_SRC_PLLC,
+ CPRMAN_CLOCK_SRC_PLLD,
+ CPRMAN_CLOCK_SRC_PLLH,
+ CPRMAN_CLOCK_SRC_PLLC_CORE1,
+ CPRMAN_CLOCK_SRC_PLLC_CORE2,
+
+ CPRMAN_NUM_CLOCK_MUX_SRC
+} CprmanClockMuxSource;
+
+typedef struct CprmanPllState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanPll id;
+
+ uint32_t *reg_cm;
+ uint32_t *reg_a2w_ctrl;
+ uint32_t *reg_a2w_ana; /* ANA[0] .. ANA[3] */
+ uint32_t prediv_mask; /* prediv bit in ana[1] */
+ uint32_t *reg_a2w_frac;
+
+ Clock *xosc_in;
+ Clock *out;
+} CprmanPllState;
+
+typedef struct CprmanPllChannelState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanPllChannel id;
+ CprmanPll parent;
+
+ uint32_t *reg_cm;
+ uint32_t hold_mask;
+ uint32_t load_mask;
+ uint32_t *reg_a2w_ctrl;
+ int fixed_divider;
+
+ Clock *pll_in;
+ Clock *out;
+} CprmanPllChannelState;
+
+typedef struct CprmanClockMuxState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanClockMux id;
+
+ uint32_t *reg_ctl;
+ uint32_t *reg_div;
+ int int_bits;
+ int frac_bits;
+
+ Clock *srcs[CPRMAN_NUM_CLOCK_MUX_SRC];
+ Clock *out;
+
+ /*
+ * Used by clock srcs update callback to retrieve both the clock and the
+ * source number.
+ */
+ struct CprmanClockMuxState *backref[CPRMAN_NUM_CLOCK_MUX_SRC];
+} CprmanClockMuxState;
+
+typedef struct CprmanDsi0HsckMuxState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanClockMux id;
+
+ uint32_t *reg_cm;
+
+ Clock *plla_in;
+ Clock *plld_in;
+ Clock *out;
+} CprmanDsi0HsckMuxState;
+
+struct BCM2835CprmanState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ CprmanPllState plls[CPRMAN_NUM_PLL];
+ CprmanPllChannelState channels[CPRMAN_NUM_PLL_CHANNEL];
+ CprmanClockMuxState clock_muxes[CPRMAN_NUM_CLOCK_MUX];
+ CprmanDsi0HsckMuxState dsi0hsck_mux;
+
+ uint32_t regs[CPRMAN_NUM_REGS];
+ uint32_t xosc_freq;
+
+ Clock *xosc;
+ Clock *gnd;
+};
+
+#endif
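The backref field of CprmanClockMuxState exists so that a per-source clock callback can recover both the mux and the source index from a single opaque pointer; the pattern looks roughly like this (a sketch, callback name hypothetical):

static void example_clock_mux_src_update(void *opaque, ClockEvent event)
{
    CprmanClockMuxState **backref = opaque;
    CprmanClockMuxState *mux = *backref;
    CprmanClockMuxSource src = backref - mux->backref; /* which source fired */

    /* ... recompute and propagate mux->out for source 'src' ... */
}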
diff --git a/include/hw/misc/bcm2835_cprman_internals.h b/include/hw/misc/bcm2835_cprman_internals.h
new file mode 100644
index 0000000000..7617aff96f
--- /dev/null
+++ b/include/hw/misc/bcm2835_cprman_internals.h
@@ -0,0 +1,1019 @@
+/*
+ * BCM2835 CPRMAN clock manager
+ *
+ * Copyright (c) 2020 Luc Michel <luc@lmichel.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_BCM2835_CPRMAN_INTERNALS_H
+#define HW_MISC_BCM2835_CPRMAN_INTERNALS_H
+
+#include "hw/registerfields.h"
+#include "hw/misc/bcm2835_cprman.h"
+
+#define TYPE_CPRMAN_PLL "bcm2835-cprman-pll"
+#define TYPE_CPRMAN_PLL_CHANNEL "bcm2835-cprman-pll-channel"
+#define TYPE_CPRMAN_CLOCK_MUX "bcm2835-cprman-clock-mux"
+#define TYPE_CPRMAN_DSI0HSCK_MUX "bcm2835-cprman-dsi0hsck-mux"
+
+DECLARE_INSTANCE_CHECKER(CprmanPllState, CPRMAN_PLL,
+ TYPE_CPRMAN_PLL)
+DECLARE_INSTANCE_CHECKER(CprmanPllChannelState, CPRMAN_PLL_CHANNEL,
+ TYPE_CPRMAN_PLL_CHANNEL)
+DECLARE_INSTANCE_CHECKER(CprmanClockMuxState, CPRMAN_CLOCK_MUX,
+ TYPE_CPRMAN_CLOCK_MUX)
+DECLARE_INSTANCE_CHECKER(CprmanDsi0HsckMuxState, CPRMAN_DSI0HSCK_MUX,
+ TYPE_CPRMAN_DSI0HSCK_MUX)
+
+/* Register map */
+
+/* PLLs */
+REG32(CM_PLLA, 0x104)
+ FIELD(CM_PLLA, LOADDSI0, 0, 1)
+ FIELD(CM_PLLA, HOLDDSI0, 1, 1)
+ FIELD(CM_PLLA, LOADCCP2, 2, 1)
+ FIELD(CM_PLLA, HOLDCCP2, 3, 1)
+ FIELD(CM_PLLA, LOADCORE, 4, 1)
+ FIELD(CM_PLLA, HOLDCORE, 5, 1)
+ FIELD(CM_PLLA, LOADPER, 6, 1)
+ FIELD(CM_PLLA, HOLDPER, 7, 1)
+ FIELD(CM_PLLx, ANARST, 8, 1)
+REG32(CM_PLLC, 0x108)
+ FIELD(CM_PLLC, LOADCORE0, 0, 1)
+ FIELD(CM_PLLC, HOLDCORE0, 1, 1)
+ FIELD(CM_PLLC, LOADCORE1, 2, 1)
+ FIELD(CM_PLLC, HOLDCORE1, 3, 1)
+ FIELD(CM_PLLC, LOADCORE2, 4, 1)
+ FIELD(CM_PLLC, HOLDCORE2, 5, 1)
+ FIELD(CM_PLLC, LOADPER, 6, 1)
+ FIELD(CM_PLLC, HOLDPER, 7, 1)
+REG32(CM_PLLD, 0x10c)
+ FIELD(CM_PLLD, LOADDSI0, 0, 1)
+ FIELD(CM_PLLD, HOLDDSI0, 1, 1)
+ FIELD(CM_PLLD, LOADDSI1, 2, 1)
+ FIELD(CM_PLLD, HOLDDSI1, 3, 1)
+ FIELD(CM_PLLD, LOADCORE, 4, 1)
+ FIELD(CM_PLLD, HOLDCORE, 5, 1)
+ FIELD(CM_PLLD, LOADPER, 6, 1)
+ FIELD(CM_PLLD, HOLDPER, 7, 1)
+REG32(CM_PLLH, 0x110)
+ FIELD(CM_PLLH, LOADPIX, 0, 1)
+ FIELD(CM_PLLH, LOADAUX, 1, 1)
+ FIELD(CM_PLLH, LOADRCAL, 2, 1)
+REG32(CM_PLLB, 0x170)
+ FIELD(CM_PLLB, LOADARM, 0, 1)
+ FIELD(CM_PLLB, HOLDARM, 1, 1)
+
+REG32(A2W_PLLA_CTRL, 0x1100)
+ FIELD(A2W_PLLx_CTRL, NDIV, 0, 10)
+ FIELD(A2W_PLLx_CTRL, PDIV, 12, 3)
+ FIELD(A2W_PLLx_CTRL, PWRDN, 16, 1)
+ FIELD(A2W_PLLx_CTRL, PRST_DISABLE, 17, 1)
+REG32(A2W_PLLC_CTRL, 0x1120)
+REG32(A2W_PLLD_CTRL, 0x1140)
+REG32(A2W_PLLH_CTRL, 0x1160)
+REG32(A2W_PLLB_CTRL, 0x11e0)
+
+REG32(A2W_PLLA_ANA0, 0x1010)
+REG32(A2W_PLLA_ANA1, 0x1014)
+ FIELD(A2W_PLLx_ANA1, FB_PREDIV, 14, 1)
+REG32(A2W_PLLA_ANA2, 0x1018)
+REG32(A2W_PLLA_ANA3, 0x101c)
+
+REG32(A2W_PLLC_ANA0, 0x1030)
+REG32(A2W_PLLC_ANA1, 0x1034)
+REG32(A2W_PLLC_ANA2, 0x1038)
+REG32(A2W_PLLC_ANA3, 0x103c)
+
+REG32(A2W_PLLD_ANA0, 0x1050)
+REG32(A2W_PLLD_ANA1, 0x1054)
+REG32(A2W_PLLD_ANA2, 0x1058)
+REG32(A2W_PLLD_ANA3, 0x105c)
+
+REG32(A2W_PLLH_ANA0, 0x1070)
+REG32(A2W_PLLH_ANA1, 0x1074)
+ FIELD(A2W_PLLH_ANA1, FB_PREDIV, 11, 1)
+REG32(A2W_PLLH_ANA2, 0x1078)
+REG32(A2W_PLLH_ANA3, 0x107c)
+
+REG32(A2W_PLLB_ANA0, 0x10f0)
+REG32(A2W_PLLB_ANA1, 0x10f4)
+REG32(A2W_PLLB_ANA2, 0x10f8)
+REG32(A2W_PLLB_ANA3, 0x10fc)
+
+REG32(A2W_PLLA_FRAC, 0x1200)
+ FIELD(A2W_PLLx_FRAC, FRAC, 0, 20)
+REG32(A2W_PLLC_FRAC, 0x1220)
+REG32(A2W_PLLD_FRAC, 0x1240)
+REG32(A2W_PLLH_FRAC, 0x1260)
+REG32(A2W_PLLB_FRAC, 0x12e0)
+
+/* PLL channels */
+REG32(A2W_PLLA_DSI0, 0x1300)
+ FIELD(A2W_PLLx_CHANNELy, DIV, 0, 8)
+ FIELD(A2W_PLLx_CHANNELy, DISABLE, 8, 1)
+REG32(A2W_PLLA_CORE, 0x1400)
+REG32(A2W_PLLA_PER, 0x1500)
+REG32(A2W_PLLA_CCP2, 0x1600)
+
+REG32(A2W_PLLC_CORE2, 0x1320)
+REG32(A2W_PLLC_CORE1, 0x1420)
+REG32(A2W_PLLC_PER, 0x1520)
+REG32(A2W_PLLC_CORE0, 0x1620)
+
+REG32(A2W_PLLD_DSI0, 0x1340)
+REG32(A2W_PLLD_CORE, 0x1440)
+REG32(A2W_PLLD_PER, 0x1540)
+REG32(A2W_PLLD_DSI1, 0x1640)
+
+REG32(A2W_PLLH_AUX, 0x1360)
+REG32(A2W_PLLH_RCAL, 0x1460)
+REG32(A2W_PLLH_PIX, 0x1560)
+REG32(A2W_PLLH_STS, 0x1660)
+
+REG32(A2W_PLLB_ARM, 0x13e0)
+
+/* Clock muxes */
+REG32(CM_GNRICCTL, 0x000)
+ FIELD(CM_CLOCKx_CTL, SRC, 0, 4)
+ FIELD(CM_CLOCKx_CTL, ENABLE, 4, 1)
+ FIELD(CM_CLOCKx_CTL, KILL, 5, 1)
+ FIELD(CM_CLOCKx_CTL, GATE, 6, 1)
+ FIELD(CM_CLOCKx_CTL, BUSY, 7, 1)
+ FIELD(CM_CLOCKx_CTL, BUSYD, 8, 1)
+ FIELD(CM_CLOCKx_CTL, MASH, 9, 2)
+ FIELD(CM_CLOCKx_CTL, FLIP, 11, 1)
+REG32(CM_GNRICDIV, 0x004)
+ FIELD(CM_CLOCKx_DIV, FRAC, 0, 12)
+REG32(CM_VPUCTL, 0x008)
+REG32(CM_VPUDIV, 0x00c)
+REG32(CM_SYSCTL, 0x010)
+REG32(CM_SYSDIV, 0x014)
+REG32(CM_PERIACTL, 0x018)
+REG32(CM_PERIADIV, 0x01c)
+REG32(CM_PERIICTL, 0x020)
+REG32(CM_PERIIDIV, 0x024)
+REG32(CM_H264CTL, 0x028)
+REG32(CM_H264DIV, 0x02c)
+REG32(CM_ISPCTL, 0x030)
+REG32(CM_ISPDIV, 0x034)
+REG32(CM_V3DCTL, 0x038)
+REG32(CM_V3DDIV, 0x03c)
+REG32(CM_CAM0CTL, 0x040)
+REG32(CM_CAM0DIV, 0x044)
+REG32(CM_CAM1CTL, 0x048)
+REG32(CM_CAM1DIV, 0x04c)
+REG32(CM_CCP2CTL, 0x050)
+REG32(CM_CCP2DIV, 0x054)
+REG32(CM_DSI0ECTL, 0x058)
+REG32(CM_DSI0EDIV, 0x05c)
+REG32(CM_DSI0PCTL, 0x060)
+REG32(CM_DSI0PDIV, 0x064)
+REG32(CM_DPICTL, 0x068)
+REG32(CM_DPIDIV, 0x06c)
+REG32(CM_GP0CTL, 0x070)
+REG32(CM_GP0DIV, 0x074)
+REG32(CM_GP1CTL, 0x078)
+REG32(CM_GP1DIV, 0x07c)
+REG32(CM_GP2CTL, 0x080)
+REG32(CM_GP2DIV, 0x084)
+REG32(CM_HSMCTL, 0x088)
+REG32(CM_HSMDIV, 0x08c)
+REG32(CM_OTPCTL, 0x090)
+REG32(CM_OTPDIV, 0x094)
+REG32(CM_PCMCTL, 0x098)
+REG32(CM_PCMDIV, 0x09c)
+REG32(CM_PWMCTL, 0x0a0)
+REG32(CM_PWMDIV, 0x0a4)
+REG32(CM_SLIMCTL, 0x0a8)
+REG32(CM_SLIMDIV, 0x0ac)
+REG32(CM_SMICTL, 0x0b0)
+REG32(CM_SMIDIV, 0x0b4)
+REG32(CM_TCNTCTL, 0x0c0)
+REG32(CM_TCNTCNT, 0x0c4)
+REG32(CM_TECCTL, 0x0c8)
+REG32(CM_TECDIV, 0x0cc)
+REG32(CM_TD0CTL, 0x0d0)
+REG32(CM_TD0DIV, 0x0d4)
+REG32(CM_TD1CTL, 0x0d8)
+REG32(CM_TD1DIV, 0x0dc)
+REG32(CM_TSENSCTL, 0x0e0)
+REG32(CM_TSENSDIV, 0x0e4)
+REG32(CM_TIMERCTL, 0x0e8)
+REG32(CM_TIMERDIV, 0x0ec)
+REG32(CM_UARTCTL, 0x0f0)
+REG32(CM_UARTDIV, 0x0f4)
+REG32(CM_VECCTL, 0x0f8)
+REG32(CM_VECDIV, 0x0fc)
+REG32(CM_PULSECTL, 0x190)
+REG32(CM_PULSEDIV, 0x194)
+REG32(CM_SDCCTL, 0x1a8)
+REG32(CM_SDCDIV, 0x1ac)
+REG32(CM_ARMCTL, 0x1b0)
+REG32(CM_AVEOCTL, 0x1b8)
+REG32(CM_AVEODIV, 0x1bc)
+REG32(CM_EMMCCTL, 0x1c0)
+REG32(CM_EMMCDIV, 0x1c4)
+REG32(CM_EMMC2CTL, 0x1d0)
+REG32(CM_EMMC2DIV, 0x1d4)
+
+/* misc registers */
+REG32(CM_LOCK, 0x114)
+ FIELD(CM_LOCK, FLOCKH, 12, 1)
+ FIELD(CM_LOCK, FLOCKD, 11, 1)
+ FIELD(CM_LOCK, FLOCKC, 10, 1)
+ FIELD(CM_LOCK, FLOCKB, 9, 1)
+ FIELD(CM_LOCK, FLOCKA, 8, 1)
+
+REG32(CM_DSI0HSCK, 0x120)
+ FIELD(CM_DSI0HSCK, SELPLLD, 0, 1)
+
+/*
+ * This field is common to all registers. Each register write value must match
+ * the CPRMAN_PASSWORD magic value in its 8 MSB.
+ */
+FIELD(CPRMAN, PASSWORD, 24, 8)
+#define CPRMAN_PASSWORD 0x5a
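As the comment notes, a register write is only honoured when its top byte carries the 0x5a password; a guest-side helper forming such a value might look like this (a sketch, not part of the patch):

static inline uint32_t cprman_write_value(uint32_t val)
{
    /* Writes whose bits 31:24 are not the password are ignored */
    return (val & 0x00ffffff) | (CPRMAN_PASSWORD << R_CPRMAN_PASSWORD_SHIFT);
}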
+
+/* PLL init info */
+typedef struct PLLInitInfo {
+ const char *name;
+ size_t cm_offset;
+ size_t a2w_ctrl_offset;
+ size_t a2w_ana_offset;
+ uint32_t prediv_mask; /* Prediv bit in ana[1] */
+ size_t a2w_frac_offset;
+} PLLInitInfo;
+
+#define FILL_PLL_INIT_INFO(pll_) \
+ .cm_offset = R_CM_ ## pll_, \
+ .a2w_ctrl_offset = R_A2W_ ## pll_ ## _CTRL, \
+ .a2w_ana_offset = R_A2W_ ## pll_ ## _ANA0, \
+ .a2w_frac_offset = R_A2W_ ## pll_ ## _FRAC
+
+static const PLLInitInfo PLL_INIT_INFO[] = {
+ [CPRMAN_PLLA] = {
+ .name = "plla",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLA),
+ },
+ [CPRMAN_PLLC] = {
+ .name = "pllc",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLC),
+ },
+ [CPRMAN_PLLD] = {
+ .name = "plld",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLD),
+ },
+ [CPRMAN_PLLH] = {
+ .name = "pllh",
+ .prediv_mask = R_A2W_PLLH_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLH),
+ },
+ [CPRMAN_PLLB] = {
+ .name = "pllb",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLB),
+ },
+};
+
+#undef FILL_PLL_CHANNEL_INIT_INFO
+
+static inline void set_pll_init_info(BCM2835CprmanState *s,
+ CprmanPllState *pll,
+ CprmanPll id)
+{
+ pll->id = id;
+ pll->reg_cm = &s->regs[PLL_INIT_INFO[id].cm_offset];
+ pll->reg_a2w_ctrl = &s->regs[PLL_INIT_INFO[id].a2w_ctrl_offset];
+ pll->reg_a2w_ana = &s->regs[PLL_INIT_INFO[id].a2w_ana_offset];
+ pll->prediv_mask = PLL_INIT_INFO[id].prediv_mask;
+ pll->reg_a2w_frac = &s->regs[PLL_INIT_INFO[id].a2w_frac_offset];
+}
+
+
+/* PLL channel init info */
+typedef struct PLLChannelInitInfo {
+ const char *name;
+ CprmanPll parent;
+ size_t cm_offset;
+ uint32_t cm_hold_mask;
+ uint32_t cm_load_mask;
+ size_t a2w_ctrl_offset;
+ unsigned int fixed_divider;
+} PLLChannelInitInfo;
+
+#define FILL_PLL_CHANNEL_INIT_INFO_common(pll_, channel_) \
+ .parent = CPRMAN_ ## pll_, \
+ .cm_offset = R_CM_ ## pll_, \
+ .cm_load_mask = R_CM_ ## pll_ ## _ ## LOAD ## channel_ ## _MASK, \
+ .a2w_ctrl_offset = R_A2W_ ## pll_ ## _ ## channel_
+
+#define FILL_PLL_CHANNEL_INIT_INFO(pll_, channel_) \
+ FILL_PLL_CHANNEL_INIT_INFO_common(pll_, channel_), \
+ .cm_hold_mask = R_CM_ ## pll_ ## _ ## HOLD ## channel_ ## _MASK, \
+ .fixed_divider = 1
+
+#define FILL_PLL_CHANNEL_INIT_INFO_nohold(pll_, channel_) \
+ FILL_PLL_CHANNEL_INIT_INFO_common(pll_, channel_), \
+ .cm_hold_mask = 0
+
+static PLLChannelInitInfo PLL_CHANNEL_INIT_INFO[] = {
+ [CPRMAN_PLLA_CHANNEL_DSI0] = {
+ .name = "plla-dsi0",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, DSI0),
+ },
+ [CPRMAN_PLLA_CHANNEL_CORE] = {
+ .name = "plla-core",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, CORE),
+ },
+ [CPRMAN_PLLA_CHANNEL_PER] = {
+ .name = "plla-per",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, PER),
+ },
+ [CPRMAN_PLLA_CHANNEL_CCP2] = {
+ .name = "plla-ccp2",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, CCP2),
+ },
+
+ [CPRMAN_PLLC_CHANNEL_CORE2] = {
+ .name = "pllc-core2",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, CORE2),
+ },
+ [CPRMAN_PLLC_CHANNEL_CORE1] = {
+ .name = "pllc-core1",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, CORE1),
+ },
+ [CPRMAN_PLLC_CHANNEL_PER] = {
+ .name = "pllc-per",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, PER),
+ },
+ [CPRMAN_PLLC_CHANNEL_CORE0] = {
+ .name = "pllc-core0",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, CORE0),
+ },
+
+ [CPRMAN_PLLD_CHANNEL_DSI0] = {
+ .name = "plld-dsi0",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, DSI0),
+ },
+ [CPRMAN_PLLD_CHANNEL_CORE] = {
+ .name = "plld-core",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, CORE),
+ },
+ [CPRMAN_PLLD_CHANNEL_PER] = {
+ .name = "plld-per",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, PER),
+ },
+ [CPRMAN_PLLD_CHANNEL_DSI1] = {
+ .name = "plld-dsi1",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, DSI1),
+ },
+
+ [CPRMAN_PLLH_CHANNEL_AUX] = {
+ .name = "pllh-aux",
+ .fixed_divider = 1,
+ FILL_PLL_CHANNEL_INIT_INFO_nohold(PLLH, AUX),
+ },
+ [CPRMAN_PLLH_CHANNEL_RCAL] = {
+ .name = "pllh-rcal",
+ .fixed_divider = 10,
+ FILL_PLL_CHANNEL_INIT_INFO_nohold(PLLH, RCAL),
+ },
+ [CPRMAN_PLLH_CHANNEL_PIX] = {
+ .name = "pllh-pix",
+ .fixed_divider = 10,
+ FILL_PLL_CHANNEL_INIT_INFO_nohold(PLLH, PIX),
+ },
+
+ [CPRMAN_PLLB_CHANNEL_ARM] = {
+ .name = "pllb-arm",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLB, ARM),
+ },
+};
+
+#undef FILL_PLL_CHANNEL_INIT_INFO_nohold
+#undef FILL_PLL_CHANNEL_INIT_INFO
+#undef FILL_PLL_CHANNEL_INIT_INFO_common
+
+static inline void set_pll_channel_init_info(BCM2835CprmanState *s,
+ CprmanPllChannelState *channel,
+ CprmanPllChannel id)
+{
+ channel->id = id;
+ channel->parent = PLL_CHANNEL_INIT_INFO[id].parent;
+ channel->reg_cm = &s->regs[PLL_CHANNEL_INIT_INFO[id].cm_offset];
+ channel->hold_mask = PLL_CHANNEL_INIT_INFO[id].cm_hold_mask;
+ channel->load_mask = PLL_CHANNEL_INIT_INFO[id].cm_load_mask;
+ channel->reg_a2w_ctrl = &s->regs[PLL_CHANNEL_INIT_INFO[id].a2w_ctrl_offset];
+ channel->fixed_divider = PLL_CHANNEL_INIT_INFO[id].fixed_divider;
+}
+
+/* Clock mux init info */
+typedef struct ClockMuxInitInfo {
+ const char *name;
+ size_t cm_offset; /* cm_offset[0]->CM_CTL, cm_offset[1]->CM_DIV */
+ int int_bits;
+ int frac_bits;
+
+ CprmanPllChannel src_mapping[CPRMAN_NUM_CLOCK_MUX_SRC];
+} ClockMuxInitInfo;
+
+/*
+ * Each clock mux can have up to 10 sources. Sources 0 to 3 are always the
+ * same (ground, xosc, td0, td1). Sources 4 to 9 are mux specific, and are not
+ * always populated. The following macros catch all those cases.
+ */
+
+/* Unknown mapping. Connect everything to ground */
+#define SRC_MAPPING_INFO_unknown \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* gnd */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* xosc */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* test debug 0 */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* test debug 1 */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll a */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll c */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll d */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll h */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll c, core1 */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll c, core2 */ \
+ }
+
+/* Only the oscillator and the two test debug clocks */
+#define SRC_MAPPING_INFO_xosc \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+/* All the PLL "core" channels */
+#define SRC_MAPPING_INFO_core \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_PLLA_CHANNEL_CORE, \
+ CPRMAN_PLLC_CHANNEL_CORE0, \
+ CPRMAN_PLLD_CHANNEL_CORE, \
+ CPRMAN_PLLH_CHANNEL_AUX, \
+ CPRMAN_PLLC_CHANNEL_CORE1, \
+ CPRMAN_PLLC_CHANNEL_CORE2, \
+ }
+
+/* All the PLL "per" channels */
+#define SRC_MAPPING_INFO_periph \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_PLLA_CHANNEL_PER, \
+ CPRMAN_PLLC_CHANNEL_PER, \
+ CPRMAN_PLLD_CHANNEL_PER, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+/*
+ * The DSI0 channels. These have an intermediate mux between the PLL
+ * channels and the clock input.
+ */
+#define SRC_MAPPING_INFO_dsi0 \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_DSI0HSCK, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+/* The DSI1 channel */
+#define SRC_MAPPING_INFO_dsi1 \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_PLLD_CHANNEL_DSI1, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+#define FILL_CLOCK_MUX_SRC_MAPPING_INIT_INFO(kind_) \
+ SRC_MAPPING_INFO_ ## kind_
+
+#define FILL_CLOCK_MUX_INIT_INFO(clock_, kind_) \
+ .cm_offset = R_CM_ ## clock_ ## CTL, \
+ FILL_CLOCK_MUX_SRC_MAPPING_INIT_INFO(kind_)
+
+static ClockMuxInitInfo CLOCK_MUX_INIT_INFO[] = {
+ [CPRMAN_CLOCK_GNRIC] = {
+ .name = "gnric",
+ FILL_CLOCK_MUX_INIT_INFO(GNRIC, unknown),
+ },
+ [CPRMAN_CLOCK_VPU] = {
+ .name = "vpu",
+ .int_bits = 12,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(VPU, core),
+ },
+ [CPRMAN_CLOCK_SYS] = {
+ .name = "sys",
+ FILL_CLOCK_MUX_INIT_INFO(SYS, unknown),
+ },
+ [CPRMAN_CLOCK_PERIA] = {
+ .name = "peria",
+ FILL_CLOCK_MUX_INIT_INFO(PERIA, unknown),
+ },
+ [CPRMAN_CLOCK_PERII] = {
+ .name = "perii",
+ FILL_CLOCK_MUX_INIT_INFO(PERII, unknown),
+ },
+ [CPRMAN_CLOCK_H264] = {
+ .name = "h264",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(H264, core),
+ },
+ [CPRMAN_CLOCK_ISP] = {
+ .name = "isp",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(ISP, core),
+ },
+ [CPRMAN_CLOCK_V3D] = {
+ .name = "v3d",
+ FILL_CLOCK_MUX_INIT_INFO(V3D, core),
+ },
+ [CPRMAN_CLOCK_CAM0] = {
+ .name = "cam0",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(CAM0, periph),
+ },
+ [CPRMAN_CLOCK_CAM1] = {
+ .name = "cam1",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(CAM1, periph),
+ },
+ [CPRMAN_CLOCK_CCP2] = {
+ .name = "ccp2",
+ FILL_CLOCK_MUX_INIT_INFO(CCP2, unknown),
+ },
+ [CPRMAN_CLOCK_DSI0E] = {
+ .name = "dsi0e",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(DSI0E, dsi0),
+ },
+ [CPRMAN_CLOCK_DSI0P] = {
+ .name = "dsi0p",
+ .int_bits = 0,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(DSI0P, dsi0),
+ },
+ [CPRMAN_CLOCK_DPI] = {
+ .name = "dpi",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(DPI, periph),
+ },
+ [CPRMAN_CLOCK_GP0] = {
+ .name = "gp0",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(GP0, periph),
+ },
+ [CPRMAN_CLOCK_GP1] = {
+ .name = "gp1",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(GP1, periph),
+ },
+ [CPRMAN_CLOCK_GP2] = {
+ .name = "gp2",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(GP2, periph),
+ },
+ [CPRMAN_CLOCK_HSM] = {
+ .name = "hsm",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(HSM, periph),
+ },
+ [CPRMAN_CLOCK_OTP] = {
+ .name = "otp",
+ .int_bits = 4,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(OTP, xosc),
+ },
+ [CPRMAN_CLOCK_PCM] = {
+ .name = "pcm",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(PCM, periph),
+ },
+ [CPRMAN_CLOCK_PWM] = {
+ .name = "pwm",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(PWM, periph),
+ },
+ [CPRMAN_CLOCK_SLIM] = {
+ .name = "slim",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(SLIM, periph),
+ },
+ [CPRMAN_CLOCK_SMI] = {
+ .name = "smi",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(SMI, periph),
+ },
+ [CPRMAN_CLOCK_TEC] = {
+ .name = "tec",
+ .int_bits = 6,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(TEC, xosc),
+ },
+ [CPRMAN_CLOCK_TD0] = {
+ .name = "td0",
+ FILL_CLOCK_MUX_INIT_INFO(TD0, unknown),
+ },
+ [CPRMAN_CLOCK_TD1] = {
+ .name = "td1",
+ FILL_CLOCK_MUX_INIT_INFO(TD1, unknown),
+ },
+ [CPRMAN_CLOCK_TSENS] = {
+ .name = "tsens",
+ .int_bits = 5,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(TSENS, xosc),
+ },
+ [CPRMAN_CLOCK_TIMER] = {
+ .name = "timer",
+ .int_bits = 6,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(TIMER, xosc),
+ },
+ [CPRMAN_CLOCK_UART] = {
+ .name = "uart",
+ .int_bits = 10,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(UART, periph),
+ },
+ [CPRMAN_CLOCK_VEC] = {
+ .name = "vec",
+ .int_bits = 4,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(VEC, periph),
+ },
+ [CPRMAN_CLOCK_PULSE] = {
+ .name = "pulse",
+ FILL_CLOCK_MUX_INIT_INFO(PULSE, xosc),
+ },
+ [CPRMAN_CLOCK_SDC] = {
+ .name = "sdram",
+ .int_bits = 6,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(SDC, core),
+ },
+ [CPRMAN_CLOCK_ARM] = {
+ .name = "arm",
+ FILL_CLOCK_MUX_INIT_INFO(ARM, unknown),
+ },
+ [CPRMAN_CLOCK_AVEO] = {
+ .name = "aveo",
+ .int_bits = 4,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(AVEO, periph),
+ },
+ [CPRMAN_CLOCK_EMMC] = {
+ .name = "emmc",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(EMMC, periph),
+ },
+ [CPRMAN_CLOCK_EMMC2] = {
+ .name = "emmc2",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(EMMC2, unknown),
+ },
+};
+
+#undef FILL_CLOCK_MUX_INIT_INFO
+#undef FILL_CLOCK_MUX_SRC_MAPPING_INIT_INFO
+#undef SRC_MAPPING_INFO_dsi1
+#undef SRC_MAPPING_INFO_dsi0
+#undef SRC_MAPPING_INFO_periph
+#undef SRC_MAPPING_INFO_core
+#undef SRC_MAPPING_INFO_xosc
+#undef SRC_MAPPING_INFO_unknown
+
+static inline void set_clock_mux_init_info(BCM2835CprmanState *s,
+ CprmanClockMuxState *mux,
+ CprmanClockMux id)
+{
+ mux->id = id;
+ mux->reg_ctl = &s->regs[CLOCK_MUX_INIT_INFO[id].cm_offset];
+ mux->reg_div = &s->regs[CLOCK_MUX_INIT_INFO[id].cm_offset + 1];
+ mux->int_bits = CLOCK_MUX_INIT_INFO[id].int_bits;
+ mux->frac_bits = CLOCK_MUX_INIT_INFO[id].frac_bits;
+}
+
+
+/*
+ * Object reset info
+ * Those values have been dumped from a Raspberry Pi 3 Model B v1.2 using the
+ * clk debugfs interface in Linux.
+ */
+typedef struct PLLResetInfo {
+ uint32_t cm;
+ uint32_t a2w_ctrl;
+ uint32_t a2w_ana[4];
+ uint32_t a2w_frac;
+} PLLResetInfo;
+
+static const PLLResetInfo PLL_RESET_INFO[] = {
+ [CPRMAN_PLLA] = {
+ .cm = 0x0000008a,
+ .a2w_ctrl = 0x0002103a,
+ .a2w_frac = 0x00098000,
+ .a2w_ana = { 0x00000000, 0x00144000, 0x00000000, 0x00000100 }
+ },
+
+ [CPRMAN_PLLC] = {
+ .cm = 0x00000228,
+ .a2w_ctrl = 0x0002103e,
+ .a2w_frac = 0x00080000,
+ .a2w_ana = { 0x00000000, 0x00144000, 0x00000000, 0x00000100 }
+ },
+
+ [CPRMAN_PLLD] = {
+ .cm = 0x0000020a,
+ .a2w_ctrl = 0x00021034,
+ .a2w_frac = 0x00015556,
+ .a2w_ana = { 0x00000000, 0x00144000, 0x00000000, 0x00000100 }
+ },
+
+ [CPRMAN_PLLH] = {
+ .cm = 0x00000000,
+ .a2w_ctrl = 0x0002102d,
+ .a2w_frac = 0x00000000,
+ .a2w_ana = { 0x00900000, 0x0000000c, 0x00000000, 0x00000000 }
+ },
+
+ [CPRMAN_PLLB] = {
+ /* unknown */
+ .cm = 0x00000000,
+ .a2w_ctrl = 0x00000000,
+ .a2w_frac = 0x00000000,
+ .a2w_ana = { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }
+ }
+};
+
+typedef struct PLLChannelResetInfo {
+ /*
+ * Even though a PLL channel has a CM register, it shares it with its
+ * parent PLL. The parent already takes care of the reset value.
+ */
+ uint32_t a2w_ctrl;
+} PLLChannelResetInfo;
+
+static const PLLChannelResetInfo PLL_CHANNEL_RESET_INFO[] = {
+ [CPRMAN_PLLA_CHANNEL_DSI0] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLA_CHANNEL_CORE] = { .a2w_ctrl = 0x00000003 },
+ [CPRMAN_PLLA_CHANNEL_PER] = { .a2w_ctrl = 0x00000000 }, /* unknown */
+ [CPRMAN_PLLA_CHANNEL_CCP2] = { .a2w_ctrl = 0x00000100 },
+
+ [CPRMAN_PLLC_CHANNEL_CORE2] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLC_CHANNEL_CORE1] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLC_CHANNEL_PER] = { .a2w_ctrl = 0x00000002 },
+ [CPRMAN_PLLC_CHANNEL_CORE0] = { .a2w_ctrl = 0x00000002 },
+
+ [CPRMAN_PLLD_CHANNEL_DSI0] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLD_CHANNEL_CORE] = { .a2w_ctrl = 0x00000004 },
+ [CPRMAN_PLLD_CHANNEL_PER] = { .a2w_ctrl = 0x00000004 },
+ [CPRMAN_PLLD_CHANNEL_DSI1] = { .a2w_ctrl = 0x00000100 },
+
+ [CPRMAN_PLLH_CHANNEL_AUX] = { .a2w_ctrl = 0x00000004 },
+ [CPRMAN_PLLH_CHANNEL_RCAL] = { .a2w_ctrl = 0x00000000 },
+ [CPRMAN_PLLH_CHANNEL_PIX] = { .a2w_ctrl = 0x00000000 },
+
+ [CPRMAN_PLLB_CHANNEL_ARM] = { .a2w_ctrl = 0x00000000 }, /* unknown */
+};
+
+typedef struct ClockMuxResetInfo {
+ uint32_t cm_ctl;
+ uint32_t cm_div;
+} ClockMuxResetInfo;
+
+static const ClockMuxResetInfo CLOCK_MUX_RESET_INFO[] = {
+ [CPRMAN_CLOCK_GNRIC] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_VPU] = {
+ .cm_ctl = 0x00000245,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_SYS] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_PERIA] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_PERII] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_H264] = {
+ .cm_ctl = 0x00000244,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_ISP] = {
+ .cm_ctl = 0x00000244,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_V3D] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_CAM0] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_CAM1] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_CCP2] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_DSI0E] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_DSI0P] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_DPI] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_GP0] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_GP1] = {
+ .cm_ctl = 0x00000096,
+ .cm_div = 0x00014000,
+ },
+
+ [CPRMAN_CLOCK_GP2] = {
+ .cm_ctl = 0x00000291,
+ .cm_div = 0x00249f00,
+ },
+
+ [CPRMAN_CLOCK_HSM] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_OTP] = {
+ .cm_ctl = 0x00000091,
+ .cm_div = 0x00004000,
+ },
+
+ [CPRMAN_CLOCK_PCM] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_PWM] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_SLIM] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_SMI] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_TEC] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_TD0] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_TD1] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_TSENS] = {
+ .cm_ctl = 0x00000091,
+ .cm_div = 0x0000a000,
+ },
+
+ [CPRMAN_CLOCK_TIMER] = {
+ .cm_ctl = 0x00000291,
+ .cm_div = 0x00013333,
+ },
+
+ [CPRMAN_CLOCK_UART] = {
+ .cm_ctl = 0x00000296,
+ .cm_div = 0x0000a6ab,
+ },
+
+ [CPRMAN_CLOCK_VEC] = {
+ .cm_ctl = 0x00000097,
+ .cm_div = 0x00002000,
+ },
+
+ [CPRMAN_CLOCK_PULSE] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_SDC] = {
+ .cm_ctl = 0x00004006,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_ARM] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_AVEO] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_EMMC] = {
+ .cm_ctl = 0x00000295,
+ .cm_div = 0x00006000,
+ },
+
+ [CPRMAN_CLOCK_EMMC2] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+};
+
+#endif
diff --git a/include/hw/misc/bcm2835_mbox.h b/include/hw/misc/bcm2835_mbox.h
index 7e8f3ce86d..ade27af25d 100644
--- a/include/hw/misc/bcm2835_mbox.h
+++ b/include/hw/misc/bcm2835_mbox.h
@@ -1,6 +1,8 @@
/*
* Raspberry Pi emulation (c) 2012 Gregory Estrade
- * This code is licensed under the GNU GPLv2 and later.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_MBOX_H
@@ -8,10 +10,10 @@
#include "bcm2835_mbox_defs.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_BCM2835_MBOX "bcm2835-mbox"
-#define BCM2835_MBOX(obj) \
- OBJECT_CHECK(BCM2835MboxState, (obj), TYPE_BCM2835_MBOX)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835MboxState, BCM2835_MBOX)
typedef struct {
uint32_t reg[MBOX_SIZE];
@@ -20,7 +22,7 @@ typedef struct {
uint32_t config;
} BCM2835Mbox;
-typedef struct {
+struct BCM2835MboxState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -32,6 +34,6 @@ typedef struct {
bool mbox_irq_disabled;
bool available[MBOX_CHAN_COUNT];
BCM2835Mbox mbox[2];
-} BCM2835MboxState;
+};
#endif
diff --git a/include/hw/misc/bcm2835_mbox_defs.h b/include/hw/misc/bcm2835_mbox_defs.h
index a18e520b22..9670bf33a0 100644
--- a/include/hw/misc/bcm2835_mbox_defs.h
+++ b/include/hw/misc/bcm2835_mbox_defs.h
@@ -1,6 +1,8 @@
/*
* Raspberry Pi emulation (c) 2012 Gregory Estrade
- * This code is licensed under the GNU GPLv2 and later.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_MBOX_DEFS_H
diff --git a/include/hw/misc/bcm2835_mphi.h b/include/hw/misc/bcm2835_mphi.h
new file mode 100644
index 0000000000..751363f496
--- /dev/null
+++ b/include/hw/misc/bcm2835_mphi.h
@@ -0,0 +1,44 @@
+/*
+ * BCM2835 SOC MPHI state definitions
+ *
+ * Copyright (c) 2020 Paul Zimmerman <pauldzim@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef HW_MISC_BCM2835_MPHI_H
+#define HW_MISC_BCM2835_MPHI_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define MPHI_MMIO_SIZE 0x1000
+
+typedef struct BCM2835MphiState BCM2835MphiState;
+
+struct BCM2835MphiState {
+ SysBusDevice parent_obj;
+ qemu_irq irq;
+ MemoryRegion iomem;
+
+ uint32_t outdda;
+ uint32_t outddb;
+ uint32_t ctrl;
+ uint32_t intstat;
+ uint32_t swirq;
+};
+
+#define TYPE_BCM2835_MPHI "bcm2835-mphi"
+
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835MphiState, BCM2835_MPHI)
+
+#endif
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
new file mode 100644
index 0000000000..303b9a6f68
--- /dev/null
+++ b/include/hw/misc/bcm2835_powermgt.h
@@ -0,0 +1,29 @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BCM2835_POWERMGT_H
+#define BCM2835_POWERMGT_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
+
+struct BCM2835PowerMgtState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ uint32_t rstc;
+ uint32_t rsts;
+ uint32_t wdog;
+};
+
+#endif
diff --git a/include/hw/misc/bcm2835_property.h b/include/hw/misc/bcm2835_property.h
index 11be0dbeac..ba8896610c 100644
--- a/include/hw/misc/bcm2835_property.h
+++ b/include/hw/misc/bcm2835_property.h
@@ -1,6 +1,8 @@
/*
* Raspberry Pi emulation (c) 2012 Gregory Estrade
- * This code is licensed under the GNU GPLv2 and later.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#ifndef BCM2835_PROPERTY_H
@@ -9,12 +11,12 @@
#include "hw/sysbus.h"
#include "net/net.h"
#include "hw/display/bcm2835_fb.h"
+#include "qom/object.h"
#define TYPE_BCM2835_PROPERTY "bcm2835-property"
-#define BCM2835_PROPERTY(obj) \
- OBJECT_CHECK(BCM2835PropertyState, (obj), TYPE_BCM2835_PROPERTY)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PropertyState, BCM2835_PROPERTY)
-typedef struct {
+struct BCM2835PropertyState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -28,7 +30,8 @@ typedef struct {
MACAddr macaddr;
uint32_t board_rev;
uint32_t addr;
+ char *command_line;
bool pending;
-} BCM2835PropertyState;
+};
#endif
diff --git a/include/hw/misc/bcm2835_rng.h b/include/hw/misc/bcm2835_rng.h
index 41a531bce7..7c1fb3ef40 100644
--- a/include/hw/misc/bcm2835_rng.h
+++ b/include/hw/misc/bcm2835_rng.h
@@ -11,17 +11,17 @@
#define BCM2835_RNG_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_BCM2835_RNG "bcm2835-rng"
-#define BCM2835_RNG(obj) \
- OBJECT_CHECK(BCM2835RngState, (obj), TYPE_BCM2835_RNG)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835RngState, BCM2835_RNG)
-typedef struct {
+struct BCM2835RngState {
SysBusDevice busdev;
MemoryRegion iomem;
uint32_t rng_ctrl;
uint32_t rng_status;
-} BCM2835RngState;
+};
#endif
diff --git a/include/hw/misc/bcm2835_thermal.h b/include/hw/misc/bcm2835_thermal.h
new file mode 100644
index 0000000000..f90f9e487c
--- /dev/null
+++ b/include/hw/misc/bcm2835_thermal.h
@@ -0,0 +1,27 @@
+/*
+ * BCM2835 dummy thermal sensor
+ *
+ * Copyright (C) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_BCM2835_THERMAL_H
+#define HW_MISC_BCM2835_THERMAL_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_THERMAL "bcm2835-thermal"
+
+OBJECT_DECLARE_SIMPLE_TYPE(Bcm2835ThermalState, BCM2835_THERMAL)
+
+struct Bcm2835ThermalState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+ MemoryRegion iomem;
+ uint32_t ctl;
+};
+
+#endif
diff --git a/include/hw/misc/cbus.h b/include/hw/misc/cbus.h
new file mode 100644
index 0000000000..5334984020
--- /dev/null
+++ b/include/hw/misc/cbus.h
@@ -0,0 +1,31 @@
+/*
+ * CBUS three-pin bus and the Retu / Betty / Tahvo / Vilma / Avilma /
+ * Hinku / Vinku / Ahne / Pihi chips used in various Nokia platforms.
+ * Based on reverse-engineering of a linux driver.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_MISC_CBUS_H
+#define HW_MISC_CBUS_H
+
+
+typedef struct {
+ qemu_irq clk;
+ qemu_irq dat;
+ qemu_irq sel;
+} CBus;
+
+CBus *cbus_init(qemu_irq dat_out);
+void cbus_attach(CBus *bus, void *slave_opaque);
+
+void *retu_init(qemu_irq irq, int vilma);
+void *tahvo_init(qemu_irq irq, int betty);
+
+void retu_key_event(void *retu, int state);
+
+#endif
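
The declarations above are enough to sketch how a Nokia-style board would wire the bus; a minimal sketch under the assumption that the caller already owns the IRQ lines (the function name is hypothetical).

#include "hw/irq.h"
#include "hw/misc/cbus.h"

static void board_wire_cbus(qemu_irq dat_out, qemu_irq retu_irq,
                            qemu_irq tahvo_irq)
{
    CBus *cbus = cbus_init(dat_out);

    /* Attach a Retu with Vilma extensions and a Tahvo in Betty mode. */
    cbus_attach(cbus, retu_init(retu_irq, 1));
    cbus_attach(cbus, tahvo_init(tahvo_irq, 1));
}
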
diff --git a/include/hw/misc/djmemc.h b/include/hw/misc/djmemc.h
new file mode 100644
index 0000000000..82d4e4a2fe
--- /dev/null
+++ b/include/hw/misc/djmemc.h
@@ -0,0 +1,30 @@
+/*
+ * djMEMC, macintosh memory and interrupt controller
+ * (Quadra 610/650/800 & Centris 610/650)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_DJMEMC_H
+#define HW_MISC_DJMEMC_H
+
+#include "hw/sysbus.h"
+
+#define DJMEMC_SIZE 0x2000
+#define DJMEMC_NUM_REGS (0x38 / sizeof(uint32_t))
+
+#define DJMEMC_MAXBANKS 10
+
+struct DJMEMCState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mem_regs;
+
+ /* Memory controller */
+ uint32_t regs[DJMEMC_NUM_REGS];
+};
+
+#define TYPE_DJMEMC "djMEMC"
+OBJECT_DECLARE_SIMPLE_TYPE(DJMEMCState, DJMEMC);
+
+#endif
diff --git a/include/hw/misc/empty_slot.h b/include/hw/misc/empty_slot.h
new file mode 100644
index 0000000000..dec56e56ae
--- /dev/null
+++ b/include/hw/misc/empty_slot.h
@@ -0,0 +1,19 @@
+/*
+ * QEMU Empty Slot
+ *
+ * The empty_slot device emulates devices known to a bus but not connected.
REMOVED - invalid edit operation
+ *
+ * Copyright (c) 2010 Artyom Tarasenko
+ *
+ * This code is licensed under the GNU GPL v2 or (at your option) any later
+ * version.
+ */
+
+#ifndef HW_EMPTY_SLOT_H
+#define HW_EMPTY_SLOT_H
+
+#include "exec/hwaddr.h"
+
+void empty_slot_init(const char *name, hwaddr addr, uint64_t slot_size);
+
+#endif
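
For reference, a minimal usage sketch of the function declared above; the region name, base address and size are placeholders.

#include "hw/misc/empty_slot.h"

static void board_cover_unimplemented_hole(void)
{
    /* Claim a 4 KiB hole so stray guest accesses hit a harmless dummy. */
    empty_slot_init("board.empty", 0x40000000, 0x1000);
}
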
diff --git a/include/hw/misc/grlib_ahb_apb_pnp.h b/include/hw/misc/grlib_ahb_apb_pnp.h
new file mode 100644
index 0000000000..bab0b5f47f
--- /dev/null
+++ b/include/hw/misc/grlib_ahb_apb_pnp.h
@@ -0,0 +1,57 @@
+/*
+ * GRLIB AHB APB PNP
+ *
+ * Copyright (C) 2019 AdaCore
+ *
+ * Developed by :
+ * Frederic Konrad <frederic.konrad@adacore.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef GRLIB_AHB_APB_PNP_H
+#define GRLIB_AHB_APB_PNP_H
+#include "qom/object.h"
+
+#define TYPE_GRLIB_AHB_PNP "grlib-ahbpnp"
+OBJECT_DECLARE_SIMPLE_TYPE(AHBPnp, GRLIB_AHB_PNP)
+
+#define TYPE_GRLIB_APB_PNP "grlib-apbpnp"
+OBJECT_DECLARE_SIMPLE_TYPE(APBPnp, GRLIB_APB_PNP)
+
+void grlib_ahb_pnp_add_entry(AHBPnp *dev, uint32_t address, uint32_t mask,
+ uint8_t vendor, uint16_t device, int slave,
+ int type);
+void grlib_apb_pnp_add_entry(APBPnp *dev, uint32_t address, uint32_t mask,
+ uint8_t vendor, uint16_t device, uint8_t version,
+ uint8_t irq, int type);
+
+/* VENDORS */
+#define GRLIB_VENDOR_GAISLER (0x01)
+/* DEVICES */
+#define GRLIB_LEON3_DEV (0x03)
+#define GRLIB_APBMST_DEV (0x06)
+#define GRLIB_APBUART_DEV (0x0C)
+#define GRLIB_IRQMP_DEV (0x0D)
+#define GRLIB_GPTIMER_DEV (0x11)
+/* TYPE */
+#define GRLIB_CPU_AREA (0x00)
+#define GRLIB_APBIO_AREA (0x01)
+#define GRLIB_AHBMEM_AREA (0x02)
+
+#define GRLIB_AHB_MASTER (0x00)
+#define GRLIB_AHB_SLAVE (0x01)
+
+#endif /* GRLIB_AHB_APB_PNP_H */
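
A minimal sketch of registering one plug-and-play entry with the constants defined above; the address, mask, version and IRQ values are placeholders.

#include "hw/misc/grlib_ahb_apb_pnp.h"

static void board_add_uart_pnp_entry(APBPnp *apb_pnp)
{
    grlib_apb_pnp_add_entry(apb_pnp, 0x80000100, 0xFFF,
                            GRLIB_VENDOR_GAISLER, GRLIB_APBUART_DEV,
                            1 /* version */, 2 /* irq */,
                            GRLIB_APBIO_AREA);
}
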
diff --git a/include/hw/misc/imx25_ccm.h b/include/hw/misc/imx25_ccm.h
index 296321c612..c3b89018c6 100644
--- a/include/hw/misc/imx25_ccm.h
+++ b/include/hw/misc/imx25_ccm.h
@@ -12,6 +12,7 @@
#define IMX25_CCM_H
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
#define IMX25_CCM_MPCTL_REG 0
#define IMX25_CCM_UPCTL_REG 1
@@ -63,9 +64,9 @@
CCTL_##name##_SHIFT)
#define TYPE_IMX25_CCM "imx25.ccm"
-#define IMX25_CCM(obj) OBJECT_CHECK(IMX25CCMState, (obj), TYPE_IMX25_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX25CCMState, IMX25_CCM)
-typedef struct IMX25CCMState {
+struct IMX25CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -74,6 +75,6 @@ typedef struct IMX25CCMState {
uint32_t reg[IMX25_CCM_MAX_REG];
-} IMX25CCMState;
+};
#endif /* IMX25_CCM_H */
diff --git a/include/hw/misc/imx2_wdt.h b/include/hw/misc/imx2_wdt.h
deleted file mode 100644
index 8afc99a10e..0000000000
--- a/include/hw/misc/imx2_wdt.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2017, Impinj, Inc.
- *
- * i.MX2 Watchdog IP block
- *
- * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef IMX2_WDT_H
-#define IMX2_WDT_H
-
-#include "hw/sysbus.h"
-
-#define TYPE_IMX2_WDT "imx2.wdt"
-#define IMX2_WDT(obj) OBJECT_CHECK(IMX2WdtState, (obj), TYPE_IMX2_WDT)
-
-enum IMX2WdtRegisters {
- IMX2_WDT_WCR = 0x0000,
- IMX2_WDT_REG_NUM = 0x0008 / sizeof(uint16_t) + 1,
-};
-
-
-typedef struct IMX2WdtState {
- /* <private> */
- SysBusDevice parent_obj;
-
- MemoryRegion mmio;
-} IMX2WdtState;
-
-#endif /* IMX7_SNVS_H */
diff --git a/include/hw/misc/imx31_ccm.h b/include/hw/misc/imx31_ccm.h
index c376fad14c..18e08ee84f 100644
--- a/include/hw/misc/imx31_ccm.h
+++ b/include/hw/misc/imx31_ccm.h
@@ -12,6 +12,7 @@
#define IMX31_CCM_H
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
#define IMX31_CCM_CCMR_REG 0
#define IMX31_CCM_PDR0_REG 1
@@ -72,9 +73,9 @@
PDR0_##name##_PODF_SHIFT)
#define TYPE_IMX31_CCM "imx31.ccm"
-#define IMX31_CCM(obj) OBJECT_CHECK(IMX31CCMState, (obj), TYPE_IMX31_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX31CCMState, IMX31_CCM)
-typedef struct IMX31CCMState {
+struct IMX31CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -83,6 +84,6 @@ typedef struct IMX31CCMState {
uint32_t reg[IMX31_CCM_MAX_REG];
-} IMX31CCMState;
+};
#endif /* IMX31_CCM_H */
diff --git a/include/hw/misc/imx6_ccm.h b/include/hw/misc/imx6_ccm.h
index 80505809b4..ccf46d7353 100644
--- a/include/hw/misc/imx6_ccm.h
+++ b/include/hw/misc/imx6_ccm.h
@@ -13,6 +13,7 @@
#include "hw/misc/imx_ccm.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
#define CCM_CCR 0
#define CCM_CCDR 1
@@ -178,9 +179,9 @@
#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
#define TYPE_IMX6_CCM "imx6.ccm"
-#define IMX6_CCM(obj) OBJECT_CHECK(IMX6CCMState, (obj), TYPE_IMX6_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX6CCMState, IMX6_CCM)
-typedef struct IMX6CCMState {
+struct IMX6CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -192,6 +193,6 @@ typedef struct IMX6CCMState {
uint32_t ccm[CCM_MAX];
uint32_t analog[CCM_ANALOG_MAX];
-} IMX6CCMState;
+};
#endif /* IMX6_CCM_H */
diff --git a/include/hw/misc/imx6_src.h b/include/hw/misc/imx6_src.h
index eb3640732e..f380da3810 100644
--- a/include/hw/misc/imx6_src.h
+++ b/include/hw/misc/imx6_src.h
@@ -13,6 +13,7 @@
#include "hw/sysbus.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
#define SRC_SCR 0
#define SRC_SBMR1 1
@@ -57,9 +58,9 @@
#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
#define TYPE_IMX6_SRC "imx6.src"
-#define IMX6_SRC(obj) OBJECT_CHECK(IMX6SRCState, (obj), TYPE_IMX6_SRC)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX6SRCState, IMX6_SRC)
-typedef struct IMX6SRCState {
+struct IMX6SRCState {
/* <private> */
SysBusDevice parent_obj;
@@ -68,6 +69,6 @@ typedef struct IMX6SRCState {
uint32_t regs[SRC_MAX];
-} IMX6SRCState;
+};
#endif /* IMX6_SRC_H */
diff --git a/include/hw/misc/imx6ul_ccm.h b/include/hw/misc/imx6ul_ccm.h
index 377ddca244..edb5f784d5 100644
--- a/include/hw/misc/imx6ul_ccm.h
+++ b/include/hw/misc/imx6ul_ccm.h
@@ -12,6 +12,7 @@
#include "hw/misc/imx_ccm.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
#define CCM_CCR 0
#define CCM_CCDR 1
@@ -207,9 +208,9 @@
#define CCM_ANALOG_PLL_LOCK (1 << 31);
#define TYPE_IMX6UL_CCM "imx6ul.ccm"
-#define IMX6UL_CCM(obj) OBJECT_CHECK(IMX6ULCCMState, (obj), TYPE_IMX6UL_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX6ULCCMState, IMX6UL_CCM)
-typedef struct IMX6ULCCMState {
+struct IMX6ULCCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -221,6 +222,6 @@ typedef struct IMX6ULCCMState {
uint32_t ccm[CCM_MAX];
uint32_t analog[CCM_ANALOG_MAX];
-} IMX6ULCCMState;
+};
#endif /* IMX6UL_CCM_H */
diff --git a/include/hw/misc/imx7_ccm.h b/include/hw/misc/imx7_ccm.h
index 9538f37d98..dcaebfb4ef 100644
--- a/include/hw/misc/imx7_ccm.h
+++ b/include/hw/misc/imx7_ccm.h
@@ -14,6 +14,7 @@
#include "hw/misc/imx_ccm.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
enum IMX7AnalogRegisters {
ANALOG_PLL_ARM,
@@ -104,9 +105,9 @@ enum IMX7PMURegisters {
};
#define TYPE_IMX7_CCM "imx7.ccm"
-#define IMX7_CCM(obj) OBJECT_CHECK(IMX7CCMState, (obj), TYPE_IMX7_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7CCMState, IMX7_CCM)
-typedef struct IMX7CCMState {
+struct IMX7CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -114,13 +115,13 @@ typedef struct IMX7CCMState {
MemoryRegion iomem;
uint32_t ccm[CCM_MAX];
-} IMX7CCMState;
+};
#define TYPE_IMX7_ANALOG "imx7.analog"
-#define IMX7_ANALOG(obj) OBJECT_CHECK(IMX7AnalogState, (obj), TYPE_IMX7_ANALOG)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7AnalogState, IMX7_ANALOG)
-typedef struct IMX7AnalogState {
+struct IMX7AnalogState {
/* <private> */
IMXCCMState parent_obj;
@@ -134,6 +135,6 @@ typedef struct IMX7AnalogState {
uint32_t analog[ANALOG_MAX];
uint32_t pmu[PMU_MAX];
-} IMX7AnalogState;
+};
#endif /* IMX7_CCM_H */
diff --git a/include/hw/misc/imx7_gpr.h b/include/hw/misc/imx7_gpr.h
index e19373d274..df364bd8f0 100644
--- a/include/hw/misc/imx7_gpr.h
+++ b/include/hw/misc/imx7_gpr.h
@@ -14,15 +14,16 @@
#include "qemu/bitops.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX7_GPR "imx7.gpr"
-#define IMX7_GPR(obj) OBJECT_CHECK(IMX7GPRState, (obj), TYPE_IMX7_GPR)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7GPRState, IMX7_GPR)
-typedef struct IMX7GPRState {
+struct IMX7GPRState {
/* <private> */
SysBusDevice parent_obj;
MemoryRegion mmio;
-} IMX7GPRState;
+};
#endif /* IMX7_GPR_H */
diff --git a/include/hw/misc/imx7_snvs.h b/include/hw/misc/imx7_snvs.h
index 255f8f26f9..1272076086 100644
--- a/include/hw/misc/imx7_snvs.h
+++ b/include/hw/misc/imx7_snvs.h
@@ -14,22 +14,28 @@
#include "qemu/bitops.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
enum IMX7SNVSRegisters {
SNVS_LPCR = 0x38,
SNVS_LPCR_TOP = BIT(6),
- SNVS_LPCR_DP_EN = BIT(5)
+ SNVS_LPCR_DP_EN = BIT(5),
+ SNVS_LPSRTCMR = 0x050, /* Secure Real Time Counter MSB Register */
+ SNVS_LPSRTCLR = 0x054, /* Secure Real Time Counter LSB Register */
};
#define TYPE_IMX7_SNVS "imx7.snvs"
-#define IMX7_SNVS(obj) OBJECT_CHECK(IMX7SNVSState, (obj), TYPE_IMX7_SNVS)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7SNVSState, IMX7_SNVS)
-typedef struct IMX7SNVSState {
+struct IMX7SNVSState {
/* <private> */
SysBusDevice parent_obj;
MemoryRegion mmio;
-} IMX7SNVSState;
+
+ uint64_t tick_offset;
+ uint64_t lpcr;
+};
#endif /* IMX7_SNVS_H */
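
The new tick_offset field and the LPSRTCMR/LPSRTCLR offsets suggest the secure RTC counter is exposed as an MSB/LSB pair; below is a minimal sketch under that assumption (the helper and its semantics are illustrative, not taken from the actual device model).

#include "qemu/timer.h"

static void snvs_srtc_read(uint64_t tick_offset, uint32_t *mr, uint32_t *lr)
{
    /* Assumed: tick_offset carries the guest RTC base time in seconds. */
    uint64_t count = tick_offset +
        qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / NANOSECONDS_PER_SECOND;

    *mr = count >> 32;          /* SNVS_LPSRTCMR */
    *lr = count & 0xffffffff;   /* SNVS_LPSRTCLR */
}
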
diff --git a/include/hw/misc/imx7_src.h b/include/hw/misc/imx7_src.h
new file mode 100644
index 0000000000..b4b97dcb1c
--- /dev/null
+++ b/include/hw/misc/imx7_src.h
@@ -0,0 +1,66 @@
+/*
+ * IMX7 System Reset Controller
+ *
+ * Copyright (C) 2023 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef IMX7_SRC_H
+#define IMX7_SRC_H
+
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+#include "qom/object.h"
+
+#define SRC_SCR 0
+#define SRC_A7RCR0 1
+#define SRC_A7RCR1 2
+#define SRC_M4RCR 3
+#define SRC_ERCR 5
+#define SRC_HSICPHY_RCR 7
+#define SRC_USBOPHY1_RCR 8
+#define SRC_USBOPHY2_RCR 9
+#define SRC_MPIPHY_RCR 10
+#define SRC_PCIEPHY_RCR 11
+#define SRC_SBMR1 22
+#define SRC_SRSR 23
+#define SRC_SISR 26
+#define SRC_SIMR 27
+#define SRC_SBMR2 28
+#define SRC_GPR1 29
+#define SRC_GPR2 30
+#define SRC_GPR3 31
+#define SRC_GPR4 32
+#define SRC_GPR5 33
+#define SRC_GPR6 34
+#define SRC_GPR7 35
+#define SRC_GPR8 36
+#define SRC_GPR9 37
+#define SRC_GPR10 38
+#define SRC_MAX 39
+
+/* SRC_A7SCR1 */
+#define R_CORE1_ENABLE_SHIFT 1
+#define R_CORE1_ENABLE_LENGTH 1
+/* SRC_A7SCR0 */
+#define R_CORE1_RST_SHIFT 5
+#define R_CORE1_RST_LENGTH 1
+#define R_CORE0_RST_SHIFT 4
+#define R_CORE0_RST_LENGTH 1
+
+#define TYPE_IMX7_SRC "imx7.src"
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7SRCState, IMX7_SRC)
+
+struct IMX7SRCState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+
+ uint32_t regs[SRC_MAX];
+};
+
+#endif /* IMX7_SRC_H */
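
The R_*_SHIFT/LENGTH pairs above are laid out for the usual bitops helpers; a minimal sketch reading the core 1 reset bit out of SRC_A7RCR0 (the helper name is hypothetical).

#include "qemu/bitops.h"
#include "hw/misc/imx7_src.h"

static bool imx7_src_core1_in_reset(const uint32_t *regs)
{
    return extract32(regs[SRC_A7RCR0], R_CORE1_RST_SHIFT,
                     R_CORE1_RST_LENGTH) != 0;
}
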
diff --git a/include/hw/misc/imx_ccm.h b/include/hw/misc/imx_ccm.h
index 33cbc09952..7e5678e972 100644
--- a/include/hw/misc/imx_ccm.h
+++ b/include/hw/misc/imx_ccm.h
@@ -12,6 +12,7 @@
#define IMX_CCM_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define CKIL_FREQ 32768 /* nominal 32khz clock */
@@ -27,20 +28,15 @@
#define PLL_MFN(x) (((x) & 0x3ff) << 0)
#define TYPE_IMX_CCM "imx.ccm"
-#define IMX_CCM(obj) \
- OBJECT_CHECK(IMXCCMState, (obj), TYPE_IMX_CCM)
-#define IMX_CCM_CLASS(klass) \
- OBJECT_CLASS_CHECK(IMXCCMClass, (klass), TYPE_IMX_CCM)
-#define IMX_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IMXCCMClass, (obj), TYPE_IMX_CCM)
+OBJECT_DECLARE_TYPE(IMXCCMState, IMXCCMClass, IMX_CCM)
-typedef struct IMXCCMState {
+struct IMXCCMState {
/* <private> */
SysBusDevice parent_obj;
/* <public> */
-} IMXCCMState;
+};
typedef enum {
CLK_NONE,
@@ -52,13 +48,13 @@ typedef enum {
CLK_HIGH,
} IMXClk;
-typedef struct IMXCCMClass {
+struct IMXCCMClass {
/* <private> */
SysBusDeviceClass parent_class;
/* <public> */
uint32_t (*get_clock_frequency)(IMXCCMState *s, IMXClk clk);
-} IMXCCMClass;
+};
uint32_t imx_ccm_calc_pll(uint32_t pllreg, uint32_t base_freq);
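
OBJECT_DECLARE_TYPE() generates IMX_CCM_GET_CLASS(), so the per-SoC get_clock_frequency hook declared in IMXCCMClass is dispatched as in this sketch (the wrapper name is hypothetical).

#include "hw/misc/imx_ccm.h"

static uint32_t ccm_clock_hz(IMXCCMState *s, IMXClk clk)
{
    IMXCCMClass *klass = IMX_CCM_GET_CLASS(s);

    return klass->get_clock_frequency(s, clk);
}
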
diff --git a/include/hw/misc/imx_rngc.h b/include/hw/misc/imx_rngc.h
new file mode 100644
index 0000000000..34ad699225
--- /dev/null
+++ b/include/hw/misc/imx_rngc.h
@@ -0,0 +1,36 @@
+/*
+ * Freescale i.MX RNGC emulation
+ *
+ * Copyright (C) 2020 Martin Kaiser <martin@kaiser.cx>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef IMX_RNGC_H
+#define IMX_RNGC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_IMX_RNGC "imx.rngc"
+OBJECT_DECLARE_SIMPLE_TYPE(IMXRNGCState, IMX_RNGC)
+
+struct IMXRNGCState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ uint8_t op_self_test;
+ uint8_t op_seed;
+ uint8_t mask;
+ bool auto_seed;
+
+ QEMUBH *self_test_bh;
+ QEMUBH *seed_bh;
+ qemu_irq irq;
+};
+
+#endif /* IMX_RNGC_H */
diff --git a/include/hw/misc/iosb.h b/include/hw/misc/iosb.h
new file mode 100644
index 0000000000..377f8ca7e2
--- /dev/null
+++ b/include/hw/misc/iosb.h
@@ -0,0 +1,25 @@
+/*
+ * QEMU IOSB emulation
+ *
+ * Copyright (c) 2019 Laurent Vivier
+ * Copyright (c) 2022 Mark Cave-Ayland
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MEM_IOSB_H
+#define HW_MEM_IOSB_H
+
+#define IOSB_REGS 7
+
+struct IOSBState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mem_regs;
+ uint32_t regs[IOSB_REGS];
+};
+
+#define TYPE_IOSB "IOSB"
+OBJECT_DECLARE_SIMPLE_TYPE(IOSBState, IOSB);
+
+#endif
diff --git a/include/hw/misc/iotkit-secctl.h b/include/hw/misc/iotkit-secctl.h
index 1a193b306f..79a3628320 100644
--- a/include/hw/misc/iotkit-secctl.h
+++ b/include/hw/misc/iotkit-secctl.h
@@ -11,7 +11,7 @@
/* This is a model of the security controller which is part of the
* Arm IoT Kit and documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
+ * https://developer.arm.com/documentation/ecm0601256/latest
*
* QEMU interface:
* + sysbus MMIO region 0 is the "secure privilege control block" registers
@@ -40,8 +40,8 @@
* + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_enable
* + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_clear
* + named GPIO inputs ahb_ppcexp{0,1,2,3}_irq_status
- * Controlling the MPC in the IoTKit:
- * + named GPIO input mpc_status
+ * Controlling the (up to) 4 MPCs in the IoTKit/SSE:
+ * + named GPIO inputs mpc_status[0..3]
* Controlling each of the 16 expansion MPCs which a system using the IoTKit
* might provide:
* + named GPIO inputs mpcexp_status[0..15]
@@ -56,9 +56,10 @@
#define IOTKIT_SECCTL_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IOTKIT_SECCTL "iotkit-secctl"
-#define IOTKIT_SECCTL(obj) OBJECT_CHECK(IoTKitSecCtl, (obj), TYPE_IOTKIT_SECCTL)
+OBJECT_DECLARE_SIMPLE_TYPE(IoTKitSecCtl, IOTKIT_SECCTL)
#define IOTS_APB_PPC0_NUM_PORTS 3
#define IOTS_APB_PPC1_NUM_PORTS 1
@@ -67,10 +68,9 @@
#define IOTS_NUM_APB_EXP_PPC 4
#define IOTS_NUM_AHB_EXP_PPC 4
#define IOTS_NUM_EXP_MPC 16
-#define IOTS_NUM_MPC 1
+#define IOTS_NUM_MPC 4
#define IOTS_NUM_EXP_MSC 16
-typedef struct IoTKitSecCtl IoTKitSecCtl;
/* State and IRQ lines relating to a PPC. For the
* PPCs in the IoTKit not all the IRQ lines are used.
@@ -120,6 +120,8 @@ struct IoTKitSecCtl {
IoTKitSecCtlPPC apb[IOTS_NUM_APB_PPC];
IoTKitSecCtlPPC apbexp[IOTS_NUM_APB_EXP_PPC];
IoTKitSecCtlPPC ahbexp[IOTS_NUM_APB_EXP_PPC];
+
+ uint32_t sse_version;
};
#endif
diff --git a/include/hw/misc/iotkit-sysctl.h b/include/hw/misc/iotkit-sysctl.h
index e36613cb5e..481e27f4db 100644
--- a/include/hw/misc/iotkit-sysctl.h
+++ b/include/hw/misc/iotkit-sysctl.h
@@ -12,11 +12,13 @@
/*
* This is a model of the "system control element" which is part of the
* Arm IoTKit and documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
+ * https://developer.arm.com/documentation/ecm0601256/latest
* Specifically, it implements the "system information block" and
* "system control register" blocks.
*
* QEMU interface:
+ * + QOM property "sse-version": indicates which SSE version this is part of
+ * (used to identify whether to provide SSE-200-only registers, etc)
* + sysbus MMIO region 0: the system information register bank
* + sysbus MMIO region 1: the system control register bank
*/
@@ -25,12 +27,12 @@
#define HW_MISC_IOTKIT_SYSCTL_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IOTKIT_SYSCTL "iotkit-sysctl"
-#define IOTKIT_SYSCTL(obj) OBJECT_CHECK(IoTKitSysCtl, (obj), \
- TYPE_IOTKIT_SYSCTL)
+OBJECT_DECLARE_SIMPLE_TYPE(IoTKitSysCtl, IOTKIT_SYSCTL)
-typedef struct IoTKitSysCtl {
+struct IoTKitSysCtl {
/*< private >*/
SysBusDevice parent_obj;
@@ -41,9 +43,31 @@ typedef struct IoTKitSysCtl {
uint32_t reset_syndrome;
uint32_t reset_mask;
uint32_t gretreg;
- uint32_t initsvrtor0;
+ uint32_t initsvtor0;
uint32_t cpuwait;
uint32_t wicctrl;
-} IoTKitSysCtl;
+ uint32_t scsecctrl;
+ uint32_t fclk_div;
+ uint32_t sysclk_div;
+ uint32_t clock_force;
+ uint32_t initsvtor1;
+ uint32_t nmi_enable;
+ uint32_t ewctrl;
+ uint32_t pwrctrl;
+ uint32_t pdcm_pd_sys_sense;
+ uint32_t pdcm_pd_sram0_sense;
+ uint32_t pdcm_pd_sram1_sense;
+ uint32_t pdcm_pd_sram2_sense;
+ uint32_t pdcm_pd_sram3_sense;
+ uint32_t pdcm_pd_cpu0_sense;
+ uint32_t pdcm_pd_vmr0_sense;
+ uint32_t pdcm_pd_vmr1_sense;
+
+ /* Properties */
+ uint32_t sse_version;
+ uint32_t cpuwait_rst;
+ uint32_t initsvtor0_rst;
+ uint32_t initsvtor1_rst;
+};
#endif
diff --git a/include/hw/misc/iotkit-sysinfo.h b/include/hw/misc/iotkit-sysinfo.h
index 7b2e1a5e48..91c23f90d2 100644
--- a/include/hw/misc/iotkit-sysinfo.h
+++ b/include/hw/misc/iotkit-sysinfo.h
@@ -12,8 +12,10 @@
/*
* This is a model of the "system information block" which is part of the
* Arm IoTKit and documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
+ * https://developer.arm.com/documentation/ecm0601256/latest
* QEMU interface:
+ * + QOM property "SYS_VERSION": value to use for SYS_VERSION register
+ * + QOM property "SYS_CONFIG": value to use for SYS_CONFIG register
* + sysbus MMIO region 0: the system information register bank
*/
@@ -21,17 +23,23 @@
#define HW_MISC_IOTKIT_SYSINFO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IOTKIT_SYSINFO "iotkit-sysinfo"
-#define IOTKIT_SYSINFO(obj) OBJECT_CHECK(IoTKitSysInfo, (obj), \
- TYPE_IOTKIT_SYSINFO)
+OBJECT_DECLARE_SIMPLE_TYPE(IoTKitSysInfo, IOTKIT_SYSINFO)
-typedef struct IoTKitSysInfo {
+struct IoTKitSysInfo {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
-} IoTKitSysInfo;
+
+ /* Properties */
+ uint32_t sys_version;
+ uint32_t sys_config;
+ uint32_t sse_version;
+ uint32_t iidr;
+};
#endif
diff --git a/include/hw/misc/lasi.h b/include/hw/misc/lasi.h
new file mode 100644
index 0000000000..f01c0f680a
--- /dev/null
+++ b/include/hw/misc/lasi.h
@@ -0,0 +1,79 @@
+/*
+ * HP-PARISC Lasi chipset emulation.
+ *
+ * (C) 2019 by Helge Deller <deller@gmx.de>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ * Documentation available at:
+ * https://parisc.wiki.kernel.org/images-parisc/7/79/Lasi_ers.pdf
+ */
+
+#ifndef LASI_H
+#define LASI_H
+
+#include "exec/address-spaces.h"
+#include "hw/pci/pci_host.h"
+#include "hw/boards.h"
+
+#define TYPE_LASI_CHIP "lasi-chip"
+OBJECT_DECLARE_SIMPLE_TYPE(LasiState, LASI_CHIP)
+
+#define LASI_IRR 0x00 /* RO */
+#define LASI_IMR 0x04
+#define LASI_IPR 0x08
+#define LASI_ICR 0x0c
+#define LASI_IAR 0x10
+
+#define LASI_LPT 0x02000
+#define LASI_AUDIO 0x04000
+#define LASI_UART 0x05000
+#define LASI_LAN 0x07000
+#define LASI_RTC 0x09000
+#define LASI_FDC 0x0A000
+
+#define LASI_PCR 0x0C000 /* LASI Power Control register */
+#define LASI_ERRLOG 0x0C004 /* LASI Error Logging register */
+#define LASI_VER 0x0C008 /* LASI Version Control register */
+#define LASI_IORESET 0x0C00C /* LASI I/O Reset register */
+#define LASI_AMR 0x0C010 /* LASI Arbitration Mask register */
+#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */
+#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */
+
+#define LASI_BIT(x) (1ul << (x))
+#define LASI_IRQ_BITS (LASI_BIT(5) | LASI_BIT(7) | LASI_BIT(8) | LASI_BIT(9) \
+ | LASI_BIT(13) | LASI_BIT(14) | LASI_BIT(16) | LASI_BIT(17) \
+ | LASI_BIT(18) | LASI_BIT(19) | LASI_BIT(20) | LASI_BIT(21) \
+ | LASI_BIT(26))
+
+#define ICR_BUS_ERROR_BIT LASI_BIT(8) /* bit 8 in ICR */
+#define ICR_TOC_BIT LASI_BIT(1) /* bit 1 in ICR */
+
+#define LASI_IRQS 27
+
+#define LASI_IRQ_HPA 14
+#define LASI_IRQ_UART_HPA 5
+#define LASI_IRQ_LPT_HPA 7
+#define LASI_IRQ_LAN_HPA 8
+#define LASI_IRQ_SCSI_HPA 9
+#define LASI_IRQ_AUDIO_HPA 13
+#define LASI_IRQ_PS2KBD_HPA 26
+#define LASI_IRQ_PS2MOU_HPA 26
+
+struct LasiState {
+ PCIHostState parent_obj;
+
+ uint32_t irr;
+ uint32_t imr;
+ uint32_t ipr;
+ uint32_t icr;
+ uint32_t iar;
+
+ uint32_t errlog;
+ uint32_t amr;
+ uint32_t rtc_ref;
+
+ MemoryRegion this_mem;
+};
+
+#endif
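
A minimal sketch of how the LASI_BIT()/LASI_IRQ_BITS definitions combine; the helper is hypothetical and only checks that an interrupt line number is one the chip actually wires up.

#include "hw/misc/lasi.h"

static bool lasi_irq_line_is_valid(int irq)
{
    return irq >= 0 && irq < LASI_IRQS &&
           (LASI_BIT(irq) & LASI_IRQ_BITS) != 0;
}
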
diff --git a/include/hw/misc/led.h b/include/hw/misc/led.h
new file mode 100644
index 0000000000..29c0879570
--- /dev/null
+++ b/include/hw/misc/led.h
@@ -0,0 +1,98 @@
+/*
+ * QEMU single LED device
+ *
+ * Copyright (C) 2020 Philippe Mathieu-Daudé <f4bug@amsat.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MISC_LED_H
+#define HW_MISC_LED_H
+
+#include "qom/object.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_LED "led"
+
+/**
+ * LEDColor: Color of a LED
+ *
+ * This set is restricted to physically available LED colors.
+ *
+ * LED colors from 'Table 1. Product performance of LUXEON Rebel Color
+ * Line' of the 'DS68 LUXEON Rebel Color Line' datasheet available at:
+ * https://www.lumileds.com/products/color-leds/luxeon-rebel-color/
+ */
+typedef enum { /* Coarse wavelength range */
+ LED_COLOR_VIOLET, /* 425 nm */
+ LED_COLOR_BLUE, /* 475 nm */
+ LED_COLOR_CYAN, /* 500 nm */
+ LED_COLOR_GREEN, /* 535 nm */
+ LED_COLOR_YELLOW, /* 567 nm */
+ LED_COLOR_AMBER, /* 590 nm */
+ LED_COLOR_ORANGE, /* 615 nm */
+ LED_COLOR_RED, /* 630 nm */
+} LEDColor;
+
+struct LEDState {
+ /* Private */
+ DeviceState parent_obj;
+ /* Public */
+
+ uint8_t intensity_percent;
+ qemu_irq irq;
+
+ /* Properties */
+ char *description;
+ char *color;
+ /*
+ * Determines whether a GPIO is using a positive (active-high)
+ * logic (when used with GPIO, the intensity at reset is related
+ * to the GPIO polarity).
+ */
+ bool gpio_active_high;
+};
+typedef struct LEDState LEDState;
+DECLARE_INSTANCE_CHECKER(LEDState, LED, TYPE_LED)
+
+/**
+ * led_set_intensity: Set the intensity of a LED device
+ * @s: the LED object
+ * @intensity_percent: intensity as percentage in range 0 to 100.
+ */
+void led_set_intensity(LEDState *s, unsigned intensity_percent);
+
+/**
+ * led_get_intensity:
+ * @s: the LED object
+ *
+ * Returns: The LED intensity as percentage in range 0 to 100.
+ */
+unsigned led_get_intensity(LEDState *s);
+
+/**
+ * led_set_state: Set the state of a LED device
+ * @s: the LED object
+ * @is_emitting: boolean indicating whether the LED is emitting
+ *
+ * This utility is meant for LEDs connected to GPIO lines.
+ */
+void led_set_state(LEDState *s, bool is_emitting);
+
+/**
+ * led_create_simple: Create and realize a LED device
+ * @parentobj: the parent object
+ * @gpio_polarity: GPIO polarity
+ * @color: color of the LED
+ * @description: description of the LED (optional)
+ *
+ * Create the device state structure, initialize it, and
+ * drop the reference to it (the device is realized).
+ *
+ * Returns: The newly allocated and instantiated LED object.
+ */
+LEDState *led_create_simple(Object *parentobj,
+ GpioPolarity gpio_polarity,
+ LEDColor color,
+ const char *description);
+
+#endif /* HW_MISC_LED_H */
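
A minimal usage sketch for the API documented above, assuming GPIO_POLARITY_ACTIVE_HIGH comes from "hw/qdev-core.h"; the parent object and description are placeholders.

#include "hw/misc/led.h"

static void board_add_power_led(Object *parent)
{
    LEDState *led = led_create_simple(parent, GPIO_POLARITY_ACTIVE_HIGH,
                                      LED_COLOR_GREEN, "power");

    led_set_state(led, true);   /* or drive it via its qdev GPIO input */
}
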
diff --git a/include/hw/misc/mac_via.h b/include/hw/misc/mac_via.h
new file mode 100644
index 0000000000..63cdcf7c69
--- /dev/null
+++ b/include/hw/misc/mac_via.h
@@ -0,0 +1,115 @@
+/*
+ *
+ * Copyright (c) 2011-2018 Laurent Vivier
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_MISC_MAC_VIA_H
+#define HW_MISC_MAC_VIA_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+#include "hw/misc/mos6522.h"
+#include "hw/input/adb.h"
+#include "qom/object.h"
+
+
+#define VIA_SIZE 0x2000
+
+/* VIA 1 */
+#define VIA1_IRQ_ONE_SECOND_BIT CA2_INT_BIT
+#define VIA1_IRQ_60HZ_BIT CA1_INT_BIT
+#define VIA1_IRQ_ADB_READY_BIT SR_INT_BIT
+#define VIA1_IRQ_ADB_DATA_BIT CB2_INT_BIT
+#define VIA1_IRQ_ADB_CLOCK_BIT CB1_INT_BIT
+
+#define VIA1_IRQ_ONE_SECOND BIT(VIA1_IRQ_ONE_SECOND_BIT)
+#define VIA1_IRQ_60HZ BIT(VIA1_IRQ_60HZ_BIT)
+#define VIA1_IRQ_ADB_READY BIT(VIA1_IRQ_ADB_READY_BIT)
+#define VIA1_IRQ_ADB_DATA BIT(VIA1_IRQ_ADB_DATA_BIT)
+#define VIA1_IRQ_ADB_CLOCK BIT(VIA1_IRQ_ADB_CLOCK_BIT)
+
+
+#define TYPE_MOS6522_Q800_VIA1 "mos6522-q800-via1"
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522Q800VIA1State, MOS6522_Q800_VIA1)
+
+struct MOS6522Q800VIA1State {
+ /*< private >*/
+ MOS6522State parent_obj;
+
+ MemoryRegion via_mem;
+
+ qemu_irq auxmode_irq;
+ uint8_t last_b;
+
+ /* RTC */
+ uint8_t PRAM[256];
+ BlockBackend *blk;
+ VMChangeStateEntry *vmstate;
+
+ uint32_t tick_offset;
+
+ uint8_t data_out;
+ int data_out_cnt;
+ uint8_t data_in;
+ uint8_t data_in_cnt;
+ uint8_t cmd;
+ int wprotect;
+ int alt;
+
+ /* ADB */
+ ADBBusState adb_bus;
+ qemu_irq adb_data_ready;
+ int adb_data_in_size;
+ int adb_data_in_index;
+ int adb_data_out_index;
+ uint8_t adb_data_in[128];
+ uint8_t adb_data_out[16];
+ uint8_t adb_autopoll_cmd;
+
+ /* external timers */
+ QEMUTimer *one_second_timer;
+ int64_t next_second;
+ QEMUTimer *sixty_hz_timer;
+ int64_t next_sixty_hz;
+
+ /* SETUPTIMEK hack */
+ int timer_hack_state;
+};
+
+
+/* VIA 2 */
+#define VIA2_IRQ_SCSI_DATA_BIT CA2_INT_BIT
+#define VIA2_IRQ_NUBUS_BIT CA1_INT_BIT
+#define VIA2_IRQ_SCSI_BIT CB2_INT_BIT
+#define VIA2_IRQ_ASC_BIT CB1_INT_BIT
+
+#define VIA2_IRQ_SCSI_DATA BIT(VIA2_IRQ_SCSI_DATA_BIT)
+#define VIA2_IRQ_NUBUS BIT(VIA2_IRQ_NUBUS_BIT)
+#define VIA2_IRQ_UNUSED BIT(SR_INT_BIT)
+#define VIA2_IRQ_SCSI BIT(VIA2_IRQ_SCSI_BIT)
+#define VIA2_IRQ_ASC BIT(VIA2_IRQ_ASC_BIT)
+
+#define VIA2_NUBUS_IRQ_NB 7
+
+#define VIA2_NUBUS_IRQ_9 0
+#define VIA2_NUBUS_IRQ_A 1
+#define VIA2_NUBUS_IRQ_B 2
+#define VIA2_NUBUS_IRQ_C 3
+#define VIA2_NUBUS_IRQ_D 4
+#define VIA2_NUBUS_IRQ_E 5
+#define VIA2_NUBUS_IRQ_INTVIDEO 6
+
+#define TYPE_MOS6522_Q800_VIA2 "mos6522-q800-via2"
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522Q800VIA2State, MOS6522_Q800_VIA2)
+
+struct MOS6522Q800VIA2State {
+ /*< private >*/
+ MOS6522State parent_obj;
+
+ MemoryRegion via_mem;
+};
+
+#endif
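
The *_BIT/BIT() pairs above are meant to compose into IFR masks; a minimal sketch, assuming the MOS6522() cast from "hw/misc/mos6522.h" (the helper is hypothetical).

#include "hw/misc/mac_via.h"

static bool via1_adb_irq_pending(MOS6522Q800VIA1State *v1s)
{
    MOS6522State *ms = MOS6522(v1s);

    return (ms->ifr & VIA1_IRQ_ADB_READY) != 0;
}
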
diff --git a/include/hw/misc/macio/cuda.h b/include/hw/misc/macio/cuda.h
index 7dad469142..8a6678c749 100644
--- a/include/hw/misc/macio/cuda.h
+++ b/include/hw/misc/macio/cuda.h
@@ -26,6 +26,10 @@
#ifndef CUDA_H
#define CUDA_H
+#include "hw/input/adb.h"
+#include "hw/misc/mos6522.h"
+#include "qom/object.h"
+
/* CUDA commands (2nd byte) */
#define CUDA_WARM_START 0x0
#define CUDA_AUTOPOLL 0x1
@@ -56,20 +60,19 @@
/* MOS6522 CUDA */
-typedef struct MOS6522CUDAState {
+struct MOS6522CUDAState {
/*< private >*/
MOS6522State parent_obj;
-} MOS6522CUDAState;
+};
#define TYPE_MOS6522_CUDA "mos6522-cuda"
-#define MOS6522_CUDA(obj) OBJECT_CHECK(MOS6522CUDAState, (obj), \
- TYPE_MOS6522_CUDA)
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522CUDAState, MOS6522_CUDA)
/* Cuda */
#define TYPE_CUDA "cuda"
-#define CUDA(obj) OBJECT_CHECK(CUDAState, (obj), TYPE_CUDA)
+OBJECT_DECLARE_SIMPLE_TYPE(CUDAState, CUDA)
-typedef struct CUDAState {
+struct CUDAState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -93,12 +96,8 @@ typedef struct CUDAState {
int data_out_index;
qemu_irq irq;
- uint16_t adb_poll_mask;
- uint8_t autopoll_rate_ms;
- uint8_t autopoll;
uint8_t data_in[128];
uint8_t data_out[16];
- QEMUTimer *adb_poll_timer;
-} CUDAState;
+};
#endif /* CUDA_H */
diff --git a/include/hw/misc/macio/gpio.h b/include/hw/misc/macio/gpio.h
index 2838ae5fde..7d2aa886c2 100644
--- a/include/hw/misc/macio/gpio.h
+++ b/include/hw/misc/macio/gpio.h
@@ -26,21 +26,23 @@
#ifndef MACIO_GPIO_H
#define MACIO_GPIO_H
+#include "hw/ppc/openpic.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
#define TYPE_MACIO_GPIO "macio-gpio"
-#define MACIO_GPIO(obj) OBJECT_CHECK(MacIOGPIOState, (obj), TYPE_MACIO_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(MacIOGPIOState, MACIO_GPIO)
-typedef struct MacIOGPIOState {
+struct MacIOGPIOState {
/*< private >*/
SysBusDevice parent;
/*< public >*/
- OpenPICState *pic;
-
MemoryRegion gpiomem;
qemu_irq gpio_extirqs[10];
uint8_t gpio_levels[8];
uint8_t gpio_regs[36]; /* XXX Check count */
-} MacIOGPIOState;
+};
void macio_set_gpio(MacIOGPIOState *s, uint32_t gpio, bool state);
diff --git a/include/hw/misc/macio/macio.h b/include/hw/misc/macio/macio.h
index 970058b6ed..2b54da6b31 100644
--- a/include/hw/misc/macio/macio.h
+++ b/include/hw/misc/macio/macio.h
@@ -27,27 +27,52 @@
#define MACIO_H
#include "hw/char/escc.h"
+#include "hw/pci/pci_device.h"
+#include "hw/ide/ide-bus.h"
#include "hw/intc/heathrow_pic.h"
#include "hw/misc/macio/cuda.h"
#include "hw/misc/macio/gpio.h"
#include "hw/misc/macio/pmu.h"
+#include "hw/nvram/mac_nvram.h"
#include "hw/ppc/mac_dbdma.h"
#include "hw/ppc/openpic.h"
+#include "qom/object.h"
+
+/* Old World IRQs */
+#define OLDWORLD_CUDA_IRQ 0x12
+#define OLDWORLD_ESCCB_IRQ 0x10
+#define OLDWORLD_ESCCA_IRQ 0xf
+#define OLDWORLD_IDE0_IRQ 0xd
+#define OLDWORLD_IDE0_DMA_IRQ 0x2
+#define OLDWORLD_IDE1_IRQ 0xe
+#define OLDWORLD_IDE1_DMA_IRQ 0x3
+
+/* New World IRQs */
+#define NEWWORLD_CUDA_IRQ 0x19
+#define NEWWORLD_PMU_IRQ 0x19
+#define NEWWORLD_ESCCB_IRQ 0x24
+#define NEWWORLD_ESCCA_IRQ 0x25
+#define NEWWORLD_IDE0_IRQ 0xd
+#define NEWWORLD_IDE0_DMA_IRQ 0x2
+#define NEWWORLD_IDE1_IRQ 0xe
+#define NEWWORLD_IDE1_DMA_IRQ 0x3
+#define NEWWORLD_EXTING_GPIO1 0x2f
+#define NEWWORLD_EXTING_GPIO9 0x37
/* MacIO virtual bus */
#define TYPE_MACIO_BUS "macio-bus"
-#define MACIO_BUS(obj) OBJECT_CHECK(MacIOBusState, (obj), TYPE_MACIO_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(MacIOBusState, MACIO_BUS)
-typedef struct MacIOBusState {
+struct MacIOBusState {
/*< private >*/
BusState parent_obj;
-} MacIOBusState;
+};
/* MacIO IDE */
#define TYPE_MACIO_IDE "macio-ide"
-#define MACIO_IDE(obj) OBJECT_CHECK(MACIOIDEState, (obj), TYPE_MACIO_IDE)
+OBJECT_DECLARE_SIMPLE_TYPE(MACIOIDEState, MACIO_IDE)
-typedef struct MACIOIDEState {
+struct MACIOIDEState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -65,15 +90,15 @@ typedef struct MACIOIDEState {
bool dma_active;
uint32_t timing_reg;
uint32_t irq_reg;
-} MACIOIDEState;
+};
void macio_ide_init_drives(MACIOIDEState *ide, DriveInfo **hd_table);
void macio_ide_register_dma(MACIOIDEState *ide);
#define TYPE_MACIO "macio"
-#define MACIO(obj) OBJECT_CHECK(MacIOState, (obj), TYPE_MACIO)
+OBJECT_DECLARE_SIMPLE_TYPE(MacIOState, MACIO)
-typedef struct MacIOState {
+struct MacIOState {
/*< private >*/
PCIDevice parent;
/*< public >*/
@@ -85,37 +110,35 @@ typedef struct MacIOState {
DBDMAState dbdma;
ESCCState escc;
uint64_t frequency;
-} MacIOState;
+};
#define TYPE_OLDWORLD_MACIO "macio-oldworld"
-#define OLDWORLD_MACIO(obj) \
- OBJECT_CHECK(OldWorldMacIOState, (obj), TYPE_OLDWORLD_MACIO)
+OBJECT_DECLARE_SIMPLE_TYPE(OldWorldMacIOState, OLDWORLD_MACIO)
-typedef struct OldWorldMacIOState {
+struct OldWorldMacIOState {
/*< private >*/
MacIOState parent_obj;
/*< public >*/
- HeathrowState *pic;
+ HeathrowState pic;
MacIONVRAMState nvram;
MACIOIDEState ide[2];
-} OldWorldMacIOState;
+};
#define TYPE_NEWWORLD_MACIO "macio-newworld"
-#define NEWWORLD_MACIO(obj) \
- OBJECT_CHECK(NewWorldMacIOState, (obj), TYPE_NEWWORLD_MACIO)
+OBJECT_DECLARE_SIMPLE_TYPE(NewWorldMacIOState, NEWWORLD_MACIO)
-typedef struct NewWorldMacIOState {
+struct NewWorldMacIOState {
/*< private >*/
MacIOState parent_obj;
/*< public >*/
bool has_pmu;
bool has_adb;
- OpenPICState *pic;
+ OpenPICState pic;
MACIOIDEState ide[2];
MacIOGPIOState gpio;
-} NewWorldMacIOState;
+};
#endif /* MACIO_H */
diff --git a/include/hw/misc/macio/pmu.h b/include/hw/misc/macio/pmu.h
index d10895ba5f..ceb12082ae 100644
--- a/include/hw/misc/macio/pmu.h
+++ b/include/hw/misc/macio/pmu.h
@@ -10,6 +10,11 @@
#ifndef PMU_H
#define PMU_H
+#include "hw/input/adb.h"
+#include "hw/misc/mos6522.h"
+#include "hw/misc/macio/gpio.h"
+#include "qom/object.h"
+
/*
* PMU commands
*/
@@ -71,7 +76,7 @@
#define PMU_INT_WAITING_CHARGER 0x01 /* ??? */
#define PMU_INT_AUTO_SRQ_POLL 0x02 /* ??? */
-/* Bits in the environement message (either obtained via PMU_GET_COVER,
+/* Bits in the environment message (either obtained via PMU_GET_COVER,
* or via PMU_INT_ENVIRONMENT on core99 */
#define PMU_ENV_LID_CLOSED 0x01 /* The lid is closed */
@@ -170,28 +175,25 @@ typedef enum {
} PMUCmdState;
/* MOS6522 PMU */
-typedef struct MOS6522PMUState {
+struct MOS6522PMUState {
/*< private >*/
MOS6522State parent_obj;
-} MOS6522PMUState;
+};
#define TYPE_MOS6522_PMU "mos6522-pmu"
-#define MOS6522_PMU(obj) OBJECT_CHECK(MOS6522PMUState, (obj), \
- TYPE_MOS6522_PMU)
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522PMUState, MOS6522_PMU)
/**
* PMUState:
* @last_b: last value of B register
*/
-typedef struct PMUState {
+struct PMUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion mem;
uint64_t frequency;
- qemu_irq via_irq;
- bool via_irq_state;
/* PMU state */
MOS6522PMUState mos6522_pmu;
@@ -215,10 +217,6 @@ typedef struct PMUState {
/* ADB */
bool has_adb;
ADBBusState adb_bus;
- uint16_t adb_poll_mask;
- uint8_t autopoll_rate_ms;
- uint8_t autopoll_mask;
- QEMUTimer *adb_poll_timer;
uint8_t adb_reply_size;
uint8_t adb_reply[ADB_MAX_OUT_LEN];
@@ -229,9 +227,9 @@ typedef struct PMUState {
/* GPIO */
MacIOGPIOState *gpio;
-} PMUState;
+};
#define TYPE_VIA_PMU "via-pmu"
-#define VIA_PMU(obj) OBJECT_CHECK(PMUState, (obj), TYPE_VIA_PMU)
+OBJECT_DECLARE_SIMPLE_TYPE(PMUState, VIA_PMU)
#endif /* PMU_H */
diff --git a/include/hw/misc/mchp_pfsoc_dmc.h b/include/hw/misc/mchp_pfsoc_dmc.h
new file mode 100644
index 0000000000..3bc1581e0f
--- /dev/null
+++ b/include/hw/misc/mchp_pfsoc_dmc.h
@@ -0,0 +1,58 @@
+/*
+ * Microchip PolarFire SoC DDR Memory Controller module emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MCHP_PFSOC_DMC_H
+#define MCHP_PFSOC_DMC_H
+
+#include "hw/sysbus.h"
+
+/* DDR SGMII PHY module */
+
+#define MCHP_PFSOC_DDR_SGMII_PHY_REG_SIZE 0x1000
+
+typedef struct MchpPfSoCDdrSgmiiPhyState {
+ SysBusDevice parent;
+ MemoryRegion sgmii_phy;
+} MchpPfSoCDdrSgmiiPhyState;
+
+#define TYPE_MCHP_PFSOC_DDR_SGMII_PHY "mchp.pfsoc.ddr_sgmii_phy"
+
+#define MCHP_PFSOC_DDR_SGMII_PHY(obj) \
+ OBJECT_CHECK(MchpPfSoCDdrSgmiiPhyState, (obj), \
+ TYPE_MCHP_PFSOC_DDR_SGMII_PHY)
+
+/* DDR CFG module */
+
+#define MCHP_PFSOC_DDR_CFG_REG_SIZE 0x40000
+
+typedef struct MchpPfSoCDdrCfgState {
+ SysBusDevice parent;
+ MemoryRegion cfg;
+} MchpPfSoCDdrCfgState;
+
+#define TYPE_MCHP_PFSOC_DDR_CFG "mchp.pfsoc.ddr_cfg"
+
+#define MCHP_PFSOC_DDR_CFG(obj) \
+ OBJECT_CHECK(MchpPfSoCDdrCfgState, (obj), \
+ TYPE_MCHP_PFSOC_DDR_CFG)
+
+#endif /* MCHP_PFSOC_DMC_H */
diff --git a/include/hw/misc/mchp_pfsoc_ioscb.h b/include/hw/misc/mchp_pfsoc_ioscb.h
new file mode 100644
index 0000000000..3fd3e74966
--- /dev/null
+++ b/include/hw/misc/mchp_pfsoc_ioscb.h
@@ -0,0 +1,56 @@
+/*
+ * Microchip PolarFire SoC IOSCB module emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MCHP_PFSOC_IOSCB_H
+#define MCHP_PFSOC_IOSCB_H
+
+#include "hw/sysbus.h"
+
+typedef struct MchpPfSoCIoscbState {
+ SysBusDevice parent;
+ MemoryRegion container;
+ MemoryRegion lane01;
+ MemoryRegion lane23;
+ MemoryRegion ctrl;
+ MemoryRegion qspixip;
+ MemoryRegion mailbox;
+ MemoryRegion cfg;
+ MemoryRegion ccc;
+ MemoryRegion pll_mss;
+ MemoryRegion cfm_mss;
+ MemoryRegion pll_ddr;
+ MemoryRegion bc_ddr;
+ MemoryRegion io_calib_ddr;
+ MemoryRegion pll_sgmii;
+ MemoryRegion dll_sgmii;
+ MemoryRegion cfm_sgmii;
+ MemoryRegion bc_sgmii;
+ MemoryRegion io_calib_sgmii;
+ qemu_irq irq;
+} MchpPfSoCIoscbState;
+
+#define TYPE_MCHP_PFSOC_IOSCB "mchp.pfsoc.ioscb"
+
+#define MCHP_PFSOC_IOSCB(obj) \
+ OBJECT_CHECK(MchpPfSoCIoscbState, (obj), TYPE_MCHP_PFSOC_IOSCB)
+
+#endif /* MCHP_PFSOC_IOSCB_H */
diff --git a/include/hw/misc/mchp_pfsoc_sysreg.h b/include/hw/misc/mchp_pfsoc_sysreg.h
new file mode 100644
index 0000000000..c2232bd28d
--- /dev/null
+++ b/include/hw/misc/mchp_pfsoc_sysreg.h
@@ -0,0 +1,42 @@
+/*
+ * Microchip PolarFire SoC SYSREG module emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MCHP_PFSOC_SYSREG_H
+#define MCHP_PFSOC_SYSREG_H
+
+#include "hw/sysbus.h"
+
+#define MCHP_PFSOC_SYSREG_REG_SIZE 0x2000
+
+typedef struct MchpPfSoCSysregState {
+ SysBusDevice parent;
+ MemoryRegion sysreg;
+ qemu_irq irq;
+} MchpPfSoCSysregState;
+
+#define TYPE_MCHP_PFSOC_SYSREG "mchp.pfsoc.sysreg"
+
+#define MCHP_PFSOC_SYSREG(obj) \
+ OBJECT_CHECK(MchpPfSoCSysregState, (obj), \
+ TYPE_MCHP_PFSOC_SYSREG)
+
+#endif /* MCHP_PFSOC_SYSREG_H */
diff --git a/include/hw/misc/mips_cmgcr.h b/include/hw/misc/mips_cmgcr.h
index c9dfcb4b84..db4bf5f449 100644
--- a/include/hw/misc/mips_cmgcr.h
+++ b/include/hw/misc/mips_cmgcr.h
@@ -10,8 +10,11 @@
#ifndef MIPS_CMGCR_H
#define MIPS_CMGCR_H
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
#define TYPE_MIPS_GCR "mips-gcr"
-#define MIPS_GCR(obj) OBJECT_CHECK(MIPSGCRState, (obj), TYPE_MIPS_GCR)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSGCRState, MIPS_GCR)
#define GCR_BASE_ADDR 0x1fbf8000ULL
#define GCR_ADDRSPACE_SZ 0x8000
@@ -68,12 +71,11 @@ struct MIPSGCRVPState {
uint64_t reset_base;
};
-typedef struct MIPSGCRState MIPSGCRState;
struct MIPSGCRState {
SysBusDevice parent_obj;
int32_t gcr_rev;
- int32_t num_vps;
+ uint32_t num_vps;
hwaddr gcr_base;
MemoryRegion iomem;
MemoryRegion *cpc_mr;
diff --git a/include/hw/misc/mips_cpc.h b/include/hw/misc/mips_cpc.h
index 72c834e039..fcafbd5e00 100644
--- a/include/hw/misc/mips_cpc.h
+++ b/include/hw/misc/mips_cpc.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,6 +20,9 @@
#ifndef MIPS_CPC_H
#define MIPS_CPC_H
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
#define CPC_ADDRSPACE_SZ 0x6000
/* CPC blocks offsets relative to base address */
@@ -32,9 +35,9 @@
#define CPC_VP_RUNNING_OFS 0x30
#define TYPE_MIPS_CPC "mips-cpc"
-#define MIPS_CPC(obj) OBJECT_CHECK(MIPSCPCState, (obj), TYPE_MIPS_CPC)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSCPCState, MIPS_CPC)
-typedef struct MIPSCPCState {
+struct MIPSCPCState {
SysBusDevice parent_obj;
uint32_t num_vp;
@@ -42,6 +45,6 @@ typedef struct MIPSCPCState {
MemoryRegion mr;
uint64_t vp_running; /* Indicates which VPs are in the run state */
-} MIPSCPCState;
+};
#endif /* MIPS_CPC_H */
diff --git a/include/hw/misc/mips_itu.h b/include/hw/misc/mips_itu.h
index 030eb4ac62..27c9a1090d 100644
--- a/include/hw/misc/mips_itu.h
+++ b/include/hw/misc/mips_itu.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,9 +21,10 @@
#define MIPS_ITU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_MIPS_ITU "mips-itu"
-#define MIPS_ITU(obj) OBJECT_CHECK(MIPSITUState, (obj), TYPE_MIPS_ITU)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSITUState, MIPS_ITU)
#define ITC_CELL_DEPTH_SHIFT 2
#define ITC_CELL_DEPTH (1u << ITC_CELL_DEPTH_SHIFT)
@@ -51,13 +52,13 @@ typedef struct ITCStorageCell {
#define ITC_ADDRESSMAP_NUM 2
-typedef struct MIPSITUState {
+struct MIPSITUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
- int32_t num_fifo;
- int32_t num_semaphores;
+ uint32_t num_fifo;
+ uint32_t num_semaphores;
/* ITC Storage */
ITCStorageCell *cell;
@@ -66,7 +67,10 @@ typedef struct MIPSITUState {
/* ITC Configuration Tags */
uint64_t ITCAddressMap[ITC_ADDRESSMAP_NUM];
MemoryRegion tag_io;
-} MIPSITUState;
+
+ /* ITU Control Register */
+ uint64_t icr0;
+};
/* Get ITC Configuration Tag memory region. */
MemoryRegion *mips_itu_get_tag_region(MIPSITUState *itu);
diff --git a/include/hw/misc/mos6522.h b/include/hw/misc/mos6522.h
index 03d9f0c059..fba45668ab 100644
--- a/include/hw/misc/mos6522.h
+++ b/include/hw/misc/mos6522.h
@@ -27,10 +27,11 @@
#ifndef MOS6522_H
#define MOS6522_H
-#include "exec/memory.h"
+#include "exec/hwaddr.h"
#include "hw/sysbus.h"
-#include "hw/ide/internal.h"
-#include "hw/input/adb.h"
+#include "qom/object.h"
+
+#define MOS6522_NUM_REGS 16
/* Bits in ACR */
#define SR_CTRL 0x1c /* Shift register control bits */
@@ -41,18 +42,43 @@
#define IER_SET 0x80 /* set bits in IER */
#define IER_CLR 0 /* clear bits in IER */
-#define CA2_INT 0x01
-#define CA1_INT 0x02
-#define SR_INT 0x04 /* Shift register full/empty */
-#define CB2_INT 0x08
-#define CB1_INT 0x10
-#define T2_INT 0x20 /* Timer 2 interrupt */
-#define T1_INT 0x40 /* Timer 1 interrupt */
+#define CA2_INT_BIT 0
+#define CA1_INT_BIT 1
+#define SR_INT_BIT 2 /* Shift register full/empty */
+#define CB2_INT_BIT 3
+#define CB1_INT_BIT 4
+#define T2_INT_BIT 5 /* Timer 2 interrupt */
+#define T1_INT_BIT 6 /* Timer 1 interrupt */
+
+#define CA2_INT BIT(CA2_INT_BIT)
+#define CA1_INT BIT(CA1_INT_BIT)
+#define SR_INT BIT(SR_INT_BIT)
+#define CB2_INT BIT(CB2_INT_BIT)
+#define CB1_INT BIT(CB1_INT_BIT)
+#define T2_INT BIT(T2_INT_BIT)
+#define T1_INT BIT(T1_INT_BIT)
+
+#define VIA_NUM_INTS 5
/* Bits in ACR */
#define T1MODE 0xc0 /* Timer 1 mode */
#define T1MODE_CONT 0x40 /* continuous interrupts */
+/* Bits in PCR */
+#define CB2_CTRL_MASK 0xe0
+#define CB2_CTRL_SHIFT 5
+#define CB1_CTRL_MASK 0x10
+#define CB1_CTRL_SHIFT 4
+#define CA2_CTRL_MASK 0x0e
+#define CA2_CTRL_SHIFT 1
+#define CA1_CTRL_MASK 0x1
+#define CA1_CTRL_SHIFT 0
+
+#define C2_POS 0x2
+#define C2_IND 0x1
+
+#define C1_POS 0x1
+
/* VIA registers */
#define VIA_REG_B 0x00
#define VIA_REG_A 0x01
@@ -100,7 +126,7 @@ typedef struct MOS6522Timer {
* @last_b: last value of B register
* @last_acr: last value of ACR register
*/
-typedef struct MOS6522State {
+struct MOS6522State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -116,40 +142,36 @@ typedef struct MOS6522State {
uint8_t pcr;
uint8_t ifr;
uint8_t ier;
- uint8_t anh;
MOS6522Timer timers[2];
uint64_t frequency;
qemu_irq irq;
-} MOS6522State;
+ uint8_t last_irq_levels;
+};
#define TYPE_MOS6522 "mos6522"
-#define MOS6522(obj) OBJECT_CHECK(MOS6522State, (obj), TYPE_MOS6522)
+OBJECT_DECLARE_TYPE(MOS6522State, MOS6522DeviceClass, MOS6522)
-typedef struct MOS6522DeviceClass {
+struct MOS6522DeviceClass {
DeviceClass parent_class;
- DeviceReset parent_reset;
- void (*set_sr_int)(MOS6522State *dev);
+ ResettablePhases parent_phases;
void (*portB_write)(MOS6522State *dev);
void (*portA_write)(MOS6522State *dev);
- void (*update_irq)(MOS6522State *dev);
/* These are used to influence the CUDA MacOS timebase calibration */
uint64_t (*get_timer1_counter_value)(MOS6522State *dev, MOS6522Timer *ti);
uint64_t (*get_timer2_counter_value)(MOS6522State *dev, MOS6522Timer *ti);
uint64_t (*get_timer1_load_time)(MOS6522State *dev, MOS6522Timer *ti);
uint64_t (*get_timer2_load_time)(MOS6522State *dev, MOS6522Timer *ti);
-} MOS6522DeviceClass;
+};
-#define MOS6522_DEVICE_CLASS(cls) \
- OBJECT_CLASS_CHECK(MOS6522DeviceClass, (cls), TYPE_MOS6522)
-#define MOS6522_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MOS6522DeviceClass, (obj), TYPE_MOS6522)
extern const VMStateDescription vmstate_mos6522;
uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size);
void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size);
+void hmp_info_via(Monitor *mon, const QDict *qdict);
+
#endif /* MOS6522_H */
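
With update_irq/set_sr_int dropped and reset moved to ResettablePhases, a subclass now mostly hooks the port callbacks; a minimal sketch for a hypothetical "my-via" device, assuming the MOS6522_CLASS() checker generated by OBJECT_DECLARE_TYPE().

#include "hw/misc/mos6522.h"

static void my_via_portB_write(MOS6522State *ms)
{
    /* React to guest writes to port B (device-specific). */
}

static void my_via_class_init(ObjectClass *oc, void *data)
{
    MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);

    mdc->portB_write = my_via_portB_write;
}
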
diff --git a/include/hw/misc/mps2-fpgaio.h b/include/hw/misc/mps2-fpgaio.h
index 69e265cd4b..7b8bd604de 100644
--- a/include/hw/misc/mps2-fpgaio.h
+++ b/include/hw/misc/mps2-fpgaio.h
@@ -12,7 +12,7 @@
/* This is a model of the FPGAIO register block in the AN505
* FPGA image for the MPS2 dev board; it is documented in the
* application note:
- * http://infocenter.arm.com/help/topic/com.arm.doc.dai0505b/index.html
+ * https://developer.arm.com/documentation/dai0505/latest/
*
* QEMU interface:
* + sysbus MMIO region 0: the register bank
@@ -22,20 +22,29 @@
#define MPS2_FPGAIO_H
#include "hw/sysbus.h"
+#include "hw/misc/led.h"
+#include "qom/object.h"
#define TYPE_MPS2_FPGAIO "mps2-fpgaio"
-#define MPS2_FPGAIO(obj) OBJECT_CHECK(MPS2FPGAIO, (obj), TYPE_MPS2_FPGAIO)
+OBJECT_DECLARE_SIMPLE_TYPE(MPS2FPGAIO, MPS2_FPGAIO)
-typedef struct {
+#define MPS2FPGAIO_MAX_LEDS 32
+
+struct MPS2FPGAIO {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
+ LEDState *led[MPS2FPGAIO_MAX_LEDS];
+ uint32_t num_leds;
+ bool has_switches;
+ bool has_dbgctrl;
uint32_t led0;
uint32_t prescale;
uint32_t misc;
+ uint32_t dbgctrl;
/* QEMU_CLOCK_VIRTUAL time at which counter and pscntr were last synced */
int64_t pscntr_sync_ticks;
@@ -48,6 +57,6 @@ typedef struct {
/* These hold the CLOCK_VIRTUAL ns tick when the CLK1HZ/CLK100HZ was zero */
int64_t clk1hz_tick_offset;
int64_t clk100hz_tick_offset;
-} MPS2FPGAIO;
+};
#endif
diff --git a/include/hw/misc/mps2-scc.h b/include/hw/misc/mps2-scc.h
index 7045473788..8ff188c06b 100644
--- a/include/hw/misc/mps2-scc.h
+++ b/include/hw/misc/mps2-scc.h
@@ -9,26 +9,49 @@
* (at your option) any later version.
*/
+/*
+ * This is a model of the Serial Communication Controller (SCC)
+ * block found in most MPS FPGA images.
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the register bank
+ * + QOM property "scc-cfg4": value of the read-only CFG4 register
+ * + QOM property "scc-aid": value of the read-only SCC_AID register
+ * + QOM property "scc-id": value of the read-only SCC_ID register
+ * + QOM property "scc-cfg0": reset value of the CFG0 register
+ * + QOM property array "oscclk": reset values of the OSCCLK registers
+ * (which are accessed via the SYS_CFG channel provided by this device)
+ * + named GPIO output "remap": this tracks the value of CFG0 register
+ * bit 0. Boards where this bit controls memory remapping should
+ * connect this GPIO line to a function performing that mapping.
+ * Boards where bit 0 has no special function should leave the GPIO
+ * output disconnected.
+ */
#ifndef MPS2_SCC_H
#define MPS2_SCC_H
#include "hw/sysbus.h"
+#include "hw/misc/led.h"
+#include "qom/object.h"
#define TYPE_MPS2_SCC "mps2-scc"
-#define MPS2_SCC(obj) OBJECT_CHECK(MPS2SCC, (obj), TYPE_MPS2_SCC)
+OBJECT_DECLARE_SIMPLE_TYPE(MPS2SCC, MPS2_SCC)
-#define NUM_OSCCLK 3
-
-typedef struct {
+struct MPS2SCC {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
+ LEDState *led[8];
uint32_t cfg0;
uint32_t cfg1;
+ uint32_t cfg2;
uint32_t cfg4;
+ uint32_t cfg5;
+ uint32_t cfg6;
+ uint32_t cfg7;
uint32_t cfgdata_rtn;
uint32_t cfgdata_out;
uint32_t cfgctrl;
@@ -36,8 +59,12 @@ typedef struct {
uint32_t dll;
uint32_t aid;
uint32_t id;
- uint32_t oscclk[NUM_OSCCLK];
- uint32_t oscclk_reset[NUM_OSCCLK];
-} MPS2SCC;
+ uint32_t num_oscclk;
+ uint32_t *oscclk;
+ uint32_t *oscclk_reset;
+ uint32_t cfg0_reset;
+
+ qemu_irq remap;
+};
#endif
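
The new QOM properties listed in the comment block above are set by the board before realize; a minimal sketch using qdev_prop_set_uint32() with placeholder values.

#include "hw/qdev-properties.h"

static void board_configure_scc(DeviceState *sccdev)
{
    /* Placeholder ID/config values; each board image supplies its own. */
    qdev_prop_set_uint32(sccdev, "scc-cfg4", 0x2);
    qdev_prop_set_uint32(sccdev, "scc-aid", 0x00200008);
    qdev_prop_set_uint32(sccdev, "scc-id", 0x41104510);
}
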
diff --git a/include/hw/misc/msf2-sysreg.h b/include/hw/misc/msf2-sysreg.h
index 5993f67b4e..fc1890e710 100644
--- a/include/hw/misc/msf2-sysreg.h
+++ b/include/hw/misc/msf2-sysreg.h
@@ -26,6 +26,7 @@
#define HW_MSF2_SYSREG_H
#include "hw/sysbus.h"
+#include "qom/object.h"
enum {
ESRAM_CR = 0x00 / 4,
@@ -61,9 +62,9 @@ enum {
#define MSF2_SYSREG_MMIO_SIZE 0x300
#define TYPE_MSF2_SYSREG "msf2-sysreg"
-#define MSF2_SYSREG(obj) OBJECT_CHECK(MSF2SysregState, (obj), TYPE_MSF2_SYSREG)
+OBJECT_DECLARE_SIMPLE_TYPE(MSF2SysregState, MSF2_SYSREG)
-typedef struct MSF2SysregState {
+struct MSF2SysregState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -72,6 +73,6 @@ typedef struct MSF2SysregState {
uint8_t apb1div;
uint32_t regs[MSF2_SYSREG_MMIO_SIZE / 4];
-} MSF2SysregState;
+};
#endif /* HW_MSF2_SYSREG_H */
diff --git a/include/hw/misc/npcm7xx_clk.h b/include/hw/misc/npcm7xx_clk.h
new file mode 100644
index 0000000000..5ed4a4672b
--- /dev/null
+++ b/include/hw/misc/npcm7xx_clk.h
@@ -0,0 +1,180 @@
+/*
+ * Nuvoton NPCM7xx Clock Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_CLK_H
+#define NPCM7XX_CLK_H
+
+#include "exec/memory.h"
+#include "hw/clock.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_CLK_NR_REGS (0x70 / sizeof(uint32_t))
+
+#define NPCM7XX_WATCHDOG_RESET_GPIO_IN "npcm7xx-clk-watchdog-reset-gpio-in"
+
+/* Maximum number of clock inputs in a SEL module. */
+#define NPCM7XX_CLK_SEL_MAX_INPUT 5
+
+/* PLLs in CLK module. */
+typedef enum NPCM7xxClockPLL {
+ NPCM7XX_CLOCK_PLL0,
+ NPCM7XX_CLOCK_PLL1,
+ NPCM7XX_CLOCK_PLL2,
+ NPCM7XX_CLOCK_PLLG,
+ NPCM7XX_CLOCK_NR_PLLS,
+} NPCM7xxClockPLL;
+
+/* SEL/MUX in CLK module. */
+typedef enum NPCM7xxClockSEL {
+ NPCM7XX_CLOCK_PIXCKSEL,
+ NPCM7XX_CLOCK_MCCKSEL,
+ NPCM7XX_CLOCK_CPUCKSEL,
+ NPCM7XX_CLOCK_CLKOUTSEL,
+ NPCM7XX_CLOCK_UARTCKSEL,
+ NPCM7XX_CLOCK_TIMCKSEL,
+ NPCM7XX_CLOCK_SDCKSEL,
+ NPCM7XX_CLOCK_GFXMSEL,
+ NPCM7XX_CLOCK_SUCKSEL,
+ NPCM7XX_CLOCK_NR_SELS,
+} NPCM7xxClockSEL;
+
+/* Dividers in CLK module. */
+typedef enum NPCM7xxClockDivider {
+ NPCM7XX_CLOCK_PLL1D2, /* PLL1/2 */
+ NPCM7XX_CLOCK_PLL2D2, /* PLL2/2 */
+ NPCM7XX_CLOCK_MC_DIVIDER,
+ NPCM7XX_CLOCK_AXI_DIVIDER,
+ NPCM7XX_CLOCK_AHB_DIVIDER,
+ NPCM7XX_CLOCK_AHB3_DIVIDER,
+ NPCM7XX_CLOCK_SPI0_DIVIDER,
+ NPCM7XX_CLOCK_SPIX_DIVIDER,
+ NPCM7XX_CLOCK_APB1_DIVIDER,
+ NPCM7XX_CLOCK_APB2_DIVIDER,
+ NPCM7XX_CLOCK_APB3_DIVIDER,
+ NPCM7XX_CLOCK_APB4_DIVIDER,
+ NPCM7XX_CLOCK_APB5_DIVIDER,
+ NPCM7XX_CLOCK_CLKOUT_DIVIDER,
+ NPCM7XX_CLOCK_UART_DIVIDER,
+ NPCM7XX_CLOCK_TIMER_DIVIDER,
+ NPCM7XX_CLOCK_ADC_DIVIDER,
+ NPCM7XX_CLOCK_MMC_DIVIDER,
+ NPCM7XX_CLOCK_SDHC_DIVIDER,
+ NPCM7XX_CLOCK_GFXM_DIVIDER, /* divide by 3 */
+ NPCM7XX_CLOCK_UTMI_DIVIDER,
+ NPCM7XX_CLOCK_NR_DIVIDERS,
+} NPCM7xxClockConverter;
+
+typedef struct NPCM7xxCLKState NPCM7xxCLKState;
+
+/**
+ * struct NPCM7xxClockPLLState - A PLL module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @clock_in: The input clock of this module.
+ * @clock_out: The output clock of this module.
+ * @reg: The control registers for this PLL module.
+ */
+typedef struct NPCM7xxClockPLLState {
+ DeviceState parent;
+
+ const char *name;
+ NPCM7xxCLKState *clk;
+ Clock *clock_in;
+ Clock *clock_out;
+
+ int reg;
+} NPCM7xxClockPLLState;
+
+/**
+ * struct NPCM7xxClockSELState - A SEL module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @input_size: The size of inputs of this module.
+ * @clock_in: The input clocks of this module.
+ * @clock_out: The output clocks of this module.
+ * @offset: The offset of this module in the control register.
+ * @len: The length of this module in the control register.
+ */
+typedef struct NPCM7xxClockSELState {
+ DeviceState parent;
+
+ const char *name;
+ NPCM7xxCLKState *clk;
+ uint8_t input_size;
+ Clock *clock_in[NPCM7XX_CLK_SEL_MAX_INPUT];
+ Clock *clock_out;
+
+ int offset;
+ int len;
+} NPCM7xxClockSELState;
+
+/**
+ * struct NPCM7xxClockDividerState - A Divider module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @clock_in: The input clock of this module.
+ * @clock_out: The output clock of this module.
+ * @divide: The function the divider uses to divide the input.
+ * @reg: The index of the control register that contains the divisor.
+ * @offset: The offset of the divisor in the control register.
+ * @len: The length of the divisor in the control register.
+ * @divisor: The divisor value, for dividers with a constant ratio.
+ */
+typedef struct NPCM7xxClockDividerState {
+ DeviceState parent;
+
+ const char *name;
+ NPCM7xxCLKState *clk;
+ Clock *clock_in;
+ Clock *clock_out;
+
+ uint32_t (*divide)(struct NPCM7xxClockDividerState *s);
+ union {
+ struct {
+ int reg;
+ int offset;
+ int len;
+ };
+ int divisor;
+ };
+} NPCM7xxClockDividerState;
+
+struct NPCM7xxCLKState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ /* Clock converters */
+ NPCM7xxClockPLLState plls[NPCM7XX_CLOCK_NR_PLLS];
+ NPCM7xxClockSELState sels[NPCM7XX_CLOCK_NR_SELS];
+ NPCM7xxClockDividerState dividers[NPCM7XX_CLOCK_NR_DIVIDERS];
+
+ uint32_t regs[NPCM7XX_CLK_NR_REGS];
+
+ /* Time reference for SECCNT and CNTR25M, initialized by power on reset */
+ int64_t ref_ns;
+
+ /* The incoming reference clock. */
+ Clock *clkref;
+};
+
+#define TYPE_NPCM7XX_CLK "npcm7xx-clk"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxCLKState, NPCM7XX_CLK)
+
+#endif /* NPCM7XX_CLK_H */
diff --git a/include/hw/misc/npcm7xx_gcr.h b/include/hw/misc/npcm7xx_gcr.h
new file mode 100644
index 0000000000..c0bbdda77e
--- /dev/null
+++ b/include/hw/misc/npcm7xx_gcr.h
@@ -0,0 +1,73 @@
+/*
+ * Nuvoton NPCM7xx System Global Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_GCR_H
+#define NPCM7XX_GCR_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/*
+ * NPCM7XX PWRON STRAP bit fields
+ * 12: SPI0 powered by VSBV3 at 1.8V
+ * 11: System flash attached to BMC
+ * 10: BSP alternative pins.
+ * 9:8: Flash UART command route enabled.
+ * 7: Security enabled.
+ * 6: HI-Z state control.
+ * 5: ECC disabled.
+ * 4: Reserved
+ * 3: JTAG2 enabled.
+ * 2:0: CPU and DRAM clock frequency.
+ */
+#define NPCM7XX_PWRON_STRAP_SPI0F18 BIT(12)
+#define NPCM7XX_PWRON_STRAP_SFAB BIT(11)
+#define NPCM7XX_PWRON_STRAP_BSPA BIT(10)
+#define NPCM7XX_PWRON_STRAP_FUP(x) ((x) << 8)
+#define FUP_NORM_UART2 3
+#define FUP_PROG_UART3 2
+#define FUP_PROG_UART2 1
+#define FUP_NORM_UART3 0
+#define NPCM7XX_PWRON_STRAP_SECEN BIT(7)
+#define NPCM7XX_PWRON_STRAP_HIZ BIT(6)
+#define NPCM7XX_PWRON_STRAP_ECC BIT(5)
+#define NPCM7XX_PWRON_STRAP_RESERVE1 BIT(4)
+#define NPCM7XX_PWRON_STRAP_J2EN BIT(3)
+#define NPCM7XX_PWRON_STRAP_CKFRQ(x) (x)
+#define CKFRQ_SKIPINIT 0x000
+#define CKFRQ_DEFAULT 0x111
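+
+/*
+ * Illustrative example (editorial sketch, not part of this header): a
+ * power-on strap value can be composed from the fields above, e.g.
+ *
+ *     uint32_t straps = NPCM7XX_PWRON_STRAP_SPI0F18 |
+ *                       NPCM7XX_PWRON_STRAP_SFAB |
+ *                       NPCM7XX_PWRON_STRAP_FUP(FUP_NORM_UART2);
+ *
+ * FUP_NORM_UART2 (3) lands in bits 9:8 via the FUP(x) shift.
+ */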
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_GCR_NR_REGS (0x148 / sizeof(uint32_t))
+
+struct NPCM7xxGCRState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint32_t regs[NPCM7XX_GCR_NR_REGS];
+
+ uint32_t reset_pwron;
+ uint32_t reset_mdlr;
+ uint32_t reset_intcr3;
+};
+
+#define TYPE_NPCM7XX_GCR "npcm7xx-gcr"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxGCRState, NPCM7XX_GCR)
+
+#endif /* NPCM7XX_GCR_H */
diff --git a/include/hw/misc/npcm7xx_mft.h b/include/hw/misc/npcm7xx_mft.h
new file mode 100644
index 0000000000..d6384382ce
--- /dev/null
+++ b/include/hw/misc/npcm7xx_mft.h
@@ -0,0 +1,69 @@
+/*
+ * Nuvoton NPCM7xx MFT Module
+ *
+ * Copyright 2021 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_MFT_H
+#define NPCM7XX_MFT_H
+
+#include "exec/memory.h"
+#include "hw/clock.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+/* Max Fan input number. */
+#define NPCM7XX_MFT_MAX_FAN_INPUT 19
+
+/*
+ * Number of registers in one MFT module. Don't change this without increasing
+ * the version_id in vmstate.
+ */
+#define NPCM7XX_MFT_NR_REGS (0x20 / sizeof(uint16_t))
+
+/*
+ * The MFT can take up to 4 inputs: A0, B0, A1, B1. It can measure one A and
+ * one B input simultaneously. NPCM7XX_MFT_INASEL and NPCM7XX_MFT_INBSEL
+ * select which A and which B input are used.
+ */
+#define NPCM7XX_MFT_FANIN_COUNT 4
+
+/**
+ * struct NPCM7xxMFTState - Multi Functional Tachometer device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @clock_in: The input clock for MFT from CLK module.
+ * @clock_{1,2}: The counter clocks for NPCM7XX_MFT_CNT{1,2}
+ * @irq: The IRQ for this MFT state.
+ * @regs: The MMIO registers.
+ * @max_rpm: The maximum rpm for fans. Order: A0, B0, A1, B1.
+ * @duty: The duty cycles for fans, relative to NPCM7XX_PWM_MAX_DUTY.
+ */
+struct NPCM7xxMFTState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ Clock *clock_in;
+ Clock *clock_1, *clock_2;
+ qemu_irq irq;
+ uint16_t regs[NPCM7XX_MFT_NR_REGS];
+
+ uint32_t max_rpm[NPCM7XX_MFT_FANIN_COUNT];
+ uint32_t duty[NPCM7XX_MFT_FANIN_COUNT];
+};
+
+#define TYPE_NPCM7XX_MFT "npcm7xx-mft"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxMFTState, NPCM7XX_MFT)
+
+#endif /* NPCM7XX_MFT_H */
diff --git a/include/hw/misc/npcm7xx_pwm.h b/include/hw/misc/npcm7xx_pwm.h
new file mode 100644
index 0000000000..bf953440ac
--- /dev/null
+++ b/include/hw/misc/npcm7xx_pwm.h
@@ -0,0 +1,106 @@
+/*
+ * Nuvoton NPCM7xx PWM Module
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_PWM_H
+#define NPCM7XX_PWM_H
+
+#include "hw/clock.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+
+/* Each PWM module holds 4 PWM channels. */
+#define NPCM7XX_PWM_PER_MODULE 4
+
+/*
+ * Number of registers in one pwm module. Don't change this without increasing
+ * the version_id in vmstate.
+ */
+#define NPCM7XX_PWM_NR_REGS (0x54 / sizeof(uint32_t))
+
+/*
+ * The maximum duty value. Each duty unit represents 1/NPCM7XX_PWM_MAX_DUTY
+ * of a full cycle. For example, if NPCM7XX_PWM_MAX_DUTY = 1,000,000 and a PWM
+ * has a duty value of 100,000, the duty cycle for that PWM is 10%.
+ */
+#define NPCM7XX_PWM_MAX_DUTY 1000000
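+
+/*
+ * Purely illustrative helper (an editorial assumption, not part of the PWM
+ * device API): converts a duty value into a percentage, matching the worked
+ * example in the comment above.
+ */
+static inline uint32_t npcm7xx_pwm_duty_to_percent(uint32_t duty)
+{
+    /* e.g. duty = 100,000 with NPCM7XX_PWM_MAX_DUTY = 1,000,000 yields 10 */
+    return (uint64_t)duty * 100 / NPCM7XX_PWM_MAX_DUTY;
+}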
+
+typedef struct NPCM7xxPWMState NPCM7xxPWMState;
+
+/**
+ * struct NPCM7xxPWM - The state of a single PWM channel.
+ * @module: The PWM module that contains this channel.
+ * @irq: GIC interrupt line to fire on expiration if enabled.
+ * @running: Whether this PWM channel is generating output.
+ * @inverted: Whether this PWM channel is inverted.
+ * @index: The index of this PWM channel.
+ * @cnr: The counter register.
+ * @cmr: The comparator register.
+ * @pdr: The data register.
+ * @pwdr: The watchdog register.
+ * @freq: The frequency of this PWM channel.
+ * @duty: The duty cycle of this PWM channel. One unit represents
+ *        1/NPCM7XX_PWM_MAX_DUTY of a full cycle.
+ */
+typedef struct NPCM7xxPWM {
+ NPCM7xxPWMState *module;
+
+ qemu_irq irq;
+
+ bool running;
+ bool inverted;
+
+ uint8_t index;
+ uint32_t cnr;
+ uint32_t cmr;
+ uint32_t pdr;
+ uint32_t pwdr;
+
+ uint32_t freq;
+ uint32_t duty;
+} NPCM7xxPWM;
+
+/**
+ * struct NPCM7xxPWMState - Pulse Width Modulation device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @clock: The PWM clock.
+ * @pwm: The PWM channels owned by this module.
+ * @duty_gpio_out: The duty cycle of each PWM channel as an output GPIO.
+ * @ppr: The prescaler register.
+ * @csr: The clock selector register.
+ * @pcr: The control register.
+ * @pier: The interrupt enable register.
+ * @piir: The interrupt indication register.
+ */
+struct NPCM7xxPWMState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ Clock *clock;
+ NPCM7xxPWM pwm[NPCM7XX_PWM_PER_MODULE];
+ qemu_irq duty_gpio_out[NPCM7XX_PWM_PER_MODULE];
+
+ uint32_t ppr;
+ uint32_t csr;
+ uint32_t pcr;
+ uint32_t pier;
+ uint32_t piir;
+};
+
+#define TYPE_NPCM7XX_PWM "npcm7xx-pwm"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxPWMState, NPCM7XX_PWM)
+
+#endif /* NPCM7XX_PWM_H */
diff --git a/include/hw/misc/npcm7xx_rng.h b/include/hw/misc/npcm7xx_rng.h
new file mode 100644
index 0000000000..650375dc2c
--- /dev/null
+++ b/include/hw/misc/npcm7xx_rng.h
@@ -0,0 +1,34 @@
+/*
+ * Nuvoton NPCM7xx Random Number Generator.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_RNG_H
+#define NPCM7XX_RNG_H
+
+#include "hw/sysbus.h"
+
+struct NPCM7xxRNGState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint8_t rngcs;
+ uint8_t rngd;
+ uint8_t rngmode;
+};
+
+#define TYPE_NPCM7XX_RNG "npcm7xx-rng"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxRNGState, NPCM7XX_RNG)
+
+#endif /* NPCM7XX_RNG_H */
diff --git a/include/hw/misc/nrf51_rng.h b/include/hw/misc/nrf51_rng.h
index 3d6bf79997..9aff9a76f8 100644
--- a/include/hw/misc/nrf51_rng.h
+++ b/include/hw/misc/nrf51_rng.h
@@ -30,13 +30,15 @@
* the COPYING file in the top-level directory.
*
*/
+
#ifndef NRF51_RNG_H
#define NRF51_RNG_H
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define TYPE_NRF51_RNG "nrf51_soc.rng"
-#define NRF51_RNG(obj) OBJECT_CHECK(NRF51RNGState, (obj), TYPE_NRF51_RNG)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51RNGState, NRF51_RNG)
#define NRF51_RNG_SIZE 0x1000
@@ -53,7 +55,7 @@
#define NRF51_RNG_REG_CONFIG_DECEN 0
#define NRF51_RNG_REG_VALUE 0x508
-typedef struct {
+struct NRF51RNGState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -77,7 +79,7 @@ typedef struct {
uint32_t interrupt_enabled;
uint32_t filter_enabled;
-} NRF51RNGState;
+};
-#endif /* NRF51_RNG_H_ */
+#endif /* NRF51_RNG_H */
diff --git a/include/hw/misc/pca9552.h b/include/hw/misc/pca9552.h
deleted file mode 100644
index ebb43c63fe..0000000000
--- a/include/hw/misc/pca9552.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * PCA9552 I2C LED blinker
- *
- * Copyright (c) 2017-2018, IBM Corporation.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- */
-#ifndef PCA9552_H
-#define PCA9552_H
-
-#include "hw/i2c/i2c.h"
-
-#define TYPE_PCA9552 "pca9552"
-#define PCA9552(obj) OBJECT_CHECK(PCA9552State, (obj), TYPE_PCA9552)
-
-#define PCA9552_NR_REGS 10
-
-typedef struct PCA9552State {
- /*< private >*/
- I2CSlave i2c;
- /*< public >*/
-
- uint8_t len;
- uint8_t pointer;
-
- uint8_t regs[PCA9552_NR_REGS];
- uint8_t max_reg;
- uint8_t nr_leds;
-} PCA9552State;
-
-#endif
diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h
index 1ee071a703..fab94165d0 100644
--- a/include/hw/misc/pvpanic.h
+++ b/include/hw/misc/pvpanic.h
@@ -11,20 +11,27 @@
* See the COPYING file in the top-level directory.
*
*/
+
#ifndef HW_MISC_PVPANIC_H
#define HW_MISC_PVPANIC_H
-#define TYPE_PVPANIC "pvpanic"
+#include "exec/memory.h"
+#include "qom/object.h"
+
+#define TYPE_PVPANIC_ISA_DEVICE "pvpanic"
+#define TYPE_PVPANIC_PCI_DEVICE "pvpanic-pci"
#define PVPANIC_IOPORT_PROP "ioport"
-static inline uint16_t pvpanic_port(void)
-{
- Object *o = object_resolve_path_type("", TYPE_PVPANIC, NULL);
- if (!o) {
- return 0;
- }
- return object_property_get_uint(o, PVPANIC_IOPORT_PROP, NULL);
-}
+/*
+ * PVPanicState for any device type
+ */
+typedef struct PVPanicState PVPanicState;
+struct PVPanicState {
+ MemoryRegion mr;
+ uint8_t events;
+};
+
+void pvpanic_setup_io(PVPanicState *s, DeviceState *dev, unsigned size);
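+
+/*
+ * Illustrative usage sketch (editorial, not upstream documentation): a
+ * hypothetical device that embeds a PVPanicState would typically call
+ *
+ *     pvpanic_setup_io(&s->pvpanic, DEVICE(s), 1);
+ *
+ * from its realize function, where 's' is the embedding device state and the
+ * final argument is the size of the I/O region in bytes.
+ */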
#endif
diff --git a/include/hw/misc/sifive_e_aon.h b/include/hw/misc/sifive_e_aon.h
new file mode 100644
index 0000000000..2ae1c4139c
--- /dev/null
+++ b/include/hw/misc/sifive_e_aon.h
@@ -0,0 +1,60 @@
+/*
+ * SiFive HiFive1 AON (Always On Domain) interface.
+ *
+ * Copyright (c) 2022 SiFive, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_AON_H
+#define HW_SIFIVE_AON_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_SIFIVE_E_AON "riscv.sifive.e.aon"
+OBJECT_DECLARE_SIMPLE_TYPE(SiFiveEAONState, SIFIVE_E_AON)
+
+#define SIFIVE_E_AON_WDOGKEY (0x51F15E)
+#define SIFIVE_E_AON_WDOGFEED (0xD09F00D)
+#define SIFIVE_E_LFCLK_DEFAULT_FREQ (32768)
+
+enum {
+ SIFIVE_E_AON_WDT = 0x0,
+ SIFIVE_E_AON_RTC = 0x40,
+ SIFIVE_E_AON_LFROSC = 0x70,
+ SIFIVE_E_AON_BACKUP = 0x80,
+ SIFIVE_E_AON_PMU = 0x100,
+ SIFIVE_E_AON_MAX = 0x150
+};
+
+struct SiFiveEAONState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+
+ /*< watchdog timer >*/
+ QEMUTimer *wdog_timer;
+ qemu_irq wdog_irq;
+ uint64_t wdog_restart_time;
+ uint64_t wdogclk_freq;
+
+ uint32_t wdogcfg;
+ uint16_t wdogcmp0;
+ uint32_t wdogcount;
+ uint8_t wdogunlock;
+};
+
+#endif
diff --git a/include/hw/misc/sifive_e_prci.h b/include/hw/misc/sifive_e_prci.h
new file mode 100644
index 0000000000..6aa949e910
--- /dev/null
+++ b/include/hw/misc/sifive_e_prci.h
@@ -0,0 +1,74 @@
+/*
+ * QEMU SiFive E PRCI (Power, Reset, Clock, Interrupt) interface
+ *
+ * Copyright (c) 2017 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_E_PRCI_H
+#define HW_SIFIVE_E_PRCI_H
+
+#include "hw/sysbus.h"
+
+enum {
+ SIFIVE_E_PRCI_HFROSCCFG = 0x0,
+ SIFIVE_E_PRCI_HFXOSCCFG = 0x4,
+ SIFIVE_E_PRCI_PLLCFG = 0x8,
+ SIFIVE_E_PRCI_PLLOUTDIV = 0xC
+};
+
+enum {
+ SIFIVE_E_PRCI_HFROSCCFG_RDY = (1 << 31),
+ SIFIVE_E_PRCI_HFROSCCFG_EN = (1 << 30)
+};
+
+enum {
+ SIFIVE_E_PRCI_HFXOSCCFG_RDY = (1 << 31),
+ SIFIVE_E_PRCI_HFXOSCCFG_EN = (1 << 30)
+};
+
+enum {
+ SIFIVE_E_PRCI_PLLCFG_PLLSEL = (1 << 16),
+ SIFIVE_E_PRCI_PLLCFG_REFSEL = (1 << 17),
+ SIFIVE_E_PRCI_PLLCFG_BYPASS = (1 << 18),
+ SIFIVE_E_PRCI_PLLCFG_LOCK = (1 << 31)
+};
+
+enum {
+ SIFIVE_E_PRCI_PLLOUTDIV_DIV1 = (1 << 8)
+};
+
+#define SIFIVE_E_PRCI_REG_SIZE 0x1000
+
+#define TYPE_SIFIVE_E_PRCI "riscv.sifive.e.prci"
+
+typedef struct SiFiveEPRCIState SiFiveEPRCIState;
+DECLARE_INSTANCE_CHECKER(SiFiveEPRCIState, SIFIVE_E_PRCI,
+ TYPE_SIFIVE_E_PRCI)
+
+struct SiFiveEPRCIState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t hfrosccfg;
+ uint32_t hfxosccfg;
+ uint32_t pllcfg;
+ uint32_t plloutdiv;
+};
+
+DeviceState *sifive_e_prci_create(hwaddr addr);
+
+#endif
diff --git a/include/hw/riscv/sifive_test.h b/include/hw/misc/sifive_test.h
index 71d4c9fad7..88a38d00c5 100644
--- a/include/hw/riscv/sifive_test.h
+++ b/include/hw/misc/sifive_test.h
@@ -19,22 +19,27 @@
#ifndef HW_SIFIVE_TEST_H
#define HW_SIFIVE_TEST_H
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
#define TYPE_SIFIVE_TEST "riscv.sifive.test"
-#define SIFIVE_TEST(obj) \
- OBJECT_CHECK(SiFiveTestState, (obj), TYPE_SIFIVE_TEST)
+typedef struct SiFiveTestState SiFiveTestState;
+DECLARE_INSTANCE_CHECKER(SiFiveTestState, SIFIVE_TEST,
+ TYPE_SIFIVE_TEST)
-typedef struct SiFiveTestState {
+struct SiFiveTestState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion mmio;
-} SiFiveTestState;
+};
enum {
FINISHER_FAIL = 0x3333,
- FINISHER_PASS = 0x5555
+ FINISHER_PASS = 0x5555,
+ FINISHER_RESET = 0x7777
};
DeviceState *sifive_test_create(hwaddr addr);
diff --git a/include/hw/misc/sifive_u_otp.h b/include/hw/misc/sifive_u_otp.h
new file mode 100644
index 0000000000..170d2148f2
--- /dev/null
+++ b/include/hw/misc/sifive_u_otp.h
@@ -0,0 +1,88 @@
+/*
+ * QEMU SiFive U OTP (One-Time Programmable) Memory interface
+ *
+ * Copyright (c) 2019 Bin Meng <bmeng.cn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_U_OTP_H
+#define HW_SIFIVE_U_OTP_H
+
+#include "hw/sysbus.h"
+
+#define SIFIVE_U_OTP_PA 0x00
+#define SIFIVE_U_OTP_PAIO 0x04
+#define SIFIVE_U_OTP_PAS 0x08
+#define SIFIVE_U_OTP_PCE 0x0C
+#define SIFIVE_U_OTP_PCLK 0x10
+#define SIFIVE_U_OTP_PDIN 0x14
+#define SIFIVE_U_OTP_PDOUT 0x18
+#define SIFIVE_U_OTP_PDSTB 0x1C
+#define SIFIVE_U_OTP_PPROG 0x20
+#define SIFIVE_U_OTP_PTC 0x24
+#define SIFIVE_U_OTP_PTM 0x28
+#define SIFIVE_U_OTP_PTM_REP 0x2C
+#define SIFIVE_U_OTP_PTR 0x30
+#define SIFIVE_U_OTP_PTRIM 0x34
+#define SIFIVE_U_OTP_PWE 0x38
+
+#define SIFIVE_U_OTP_PWE_EN (1 << 0)
+
+#define SIFIVE_U_OTP_PCE_EN (1 << 0)
+
+#define SIFIVE_U_OTP_PDSTB_EN (1 << 0)
+
+#define SIFIVE_U_OTP_PTRIM_EN (1 << 0)
+
+#define SIFIVE_U_OTP_PA_MASK 0xfff
+#define SIFIVE_U_OTP_NUM_FUSES 0x1000
+#define SIFIVE_U_OTP_FUSE_WORD 4
+#define SIFIVE_U_OTP_SERIAL_ADDR 0xfc
+
+#define SIFIVE_U_OTP_REG_SIZE 0x1000
+
+#define TYPE_SIFIVE_U_OTP "riscv.sifive.u.otp"
+
+typedef struct SiFiveUOTPState SiFiveUOTPState;
+DECLARE_INSTANCE_CHECKER(SiFiveUOTPState, SIFIVE_U_OTP,
+ TYPE_SIFIVE_U_OTP)
+
+struct SiFiveUOTPState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t pa;
+ uint32_t paio;
+ uint32_t pas;
+ uint32_t pce;
+ uint32_t pclk;
+ uint32_t pdin;
+ uint32_t pdstb;
+ uint32_t pprog;
+ uint32_t ptc;
+ uint32_t ptm;
+ uint32_t ptm_rep;
+ uint32_t ptr;
+ uint32_t ptrim;
+ uint32_t pwe;
+ uint32_t fuse[SIFIVE_U_OTP_NUM_FUSES];
+ uint32_t fuse_wo[SIFIVE_U_OTP_NUM_FUSES];
+ /* config */
+ uint32_t serial;
+ BlockBackend *blk;
+};
+
+#endif /* HW_SIFIVE_U_OTP_H */
diff --git a/include/hw/misc/sifive_u_prci.h b/include/hw/misc/sifive_u_prci.h
new file mode 100644
index 0000000000..4d2491ad46
--- /dev/null
+++ b/include/hw/misc/sifive_u_prci.h
@@ -0,0 +1,94 @@
+/*
+ * QEMU SiFive U PRCI (Power, Reset, Clock, Interrupt) interface
+ *
+ * Copyright (c) 2019 Bin Meng <bmeng.cn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_U_PRCI_H
+#define HW_SIFIVE_U_PRCI_H
+
+#include "hw/sysbus.h"
+
+#define SIFIVE_U_PRCI_HFXOSCCFG 0x00
+#define SIFIVE_U_PRCI_COREPLLCFG0 0x04
+#define SIFIVE_U_PRCI_DDRPLLCFG0 0x0C
+#define SIFIVE_U_PRCI_DDRPLLCFG1 0x10
+#define SIFIVE_U_PRCI_GEMGXLPLLCFG0 0x1C
+#define SIFIVE_U_PRCI_GEMGXLPLLCFG1 0x20
+#define SIFIVE_U_PRCI_CORECLKSEL 0x24
+#define SIFIVE_U_PRCI_DEVICESRESET 0x28
+#define SIFIVE_U_PRCI_CLKMUXSTATUS 0x2C
+
+/*
+ * The current FU540-C000 manual says the ready bit is at bit 29, but the
+ * freedom-u540-c000-bootloader code (ux00prci.h) says it is at bit 31.
+ * We trust the code that is known to work.
+ *
+ * see https://github.com/sifive/freedom-u540-c000-bootloader
+ */
+
+#define SIFIVE_U_PRCI_HFXOSCCFG_EN (1 << 30)
+#define SIFIVE_U_PRCI_HFXOSCCFG_RDY (1 << 31)
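+
+/*
+ * Illustrative only (editorial sketch): following the bootloader convention
+ * referenced above, readiness is checked by testing bit 31, e.g.
+ *
+ *     bool ready = hfxosccfg & SIFIVE_U_PRCI_HFXOSCCFG_RDY;
+ *
+ * where 'hfxosccfg' is the current value of the HFXOSCCFG register.
+ */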
+
+/* xxxPLLCFG0 register bits */
+#define SIFIVE_U_PRCI_PLLCFG0_DIVR (1 << 0)
+#define SIFIVE_U_PRCI_PLLCFG0_DIVF (31 << 6)
+#define SIFIVE_U_PRCI_PLLCFG0_DIVQ (3 << 15)
+#define SIFIVE_U_PRCI_PLLCFG0_FSE (1 << 25)
+#define SIFIVE_U_PRCI_PLLCFG0_LOCK (1 << 31)
+
+/* xxxPLLCFG1 register bits */
+#define SIFIVE_U_PRCI_PLLCFG1_CKE (1 << 24)
+
+/* coreclksel register bits */
+#define SIFIVE_U_PRCI_CORECLKSEL_HFCLK (1 << 0)
+
+
+#define SIFIVE_U_PRCI_REG_SIZE 0x1000
+
+#define TYPE_SIFIVE_U_PRCI "riscv.sifive.u.prci"
+
+typedef struct SiFiveUPRCIState SiFiveUPRCIState;
+DECLARE_INSTANCE_CHECKER(SiFiveUPRCIState, SIFIVE_U_PRCI,
+ TYPE_SIFIVE_U_PRCI)
+
+struct SiFiveUPRCIState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t hfxosccfg;
+ uint32_t corepllcfg0;
+ uint32_t ddrpllcfg0;
+ uint32_t ddrpllcfg1;
+ uint32_t gemgxlpllcfg0;
+ uint32_t gemgxlpllcfg1;
+ uint32_t coreclksel;
+ uint32_t devicesreset;
+ uint32_t clkmuxstatus;
+};
+
+/*
+ * Clock indexes for use by Device Tree data and the PRCI driver.
+ *
+ * These values are from sifive-fu540-prci.h in the Linux kernel.
+ */
+#define PRCI_CLK_COREPLL 0
+#define PRCI_CLK_DDRPLL 1
+#define PRCI_CLK_GEMGXLPLL 2
+#define PRCI_CLK_TLCLK 3
+
+#endif /* HW_SIFIVE_U_PRCI_H */
diff --git a/include/hw/misc/stm32f2xx_syscfg.h b/include/hw/misc/stm32f2xx_syscfg.h
index 69e6a30fc5..8595a3b31b 100644
--- a/include/hw/misc/stm32f2xx_syscfg.h
+++ b/include/hw/misc/stm32f2xx_syscfg.h
@@ -26,7 +26,7 @@
#define HW_STM32F2XX_SYSCFG_H
#include "hw/sysbus.h"
-#include "hw/hw.h"
+#include "qom/object.h"
#define SYSCFG_MEMRMP 0x00
#define SYSCFG_PMC 0x04
@@ -37,10 +37,9 @@
#define SYSCFG_CMPCR 0x20
#define TYPE_STM32F2XX_SYSCFG "stm32f2xx-syscfg"
-#define STM32F2XX_SYSCFG(obj) \
- OBJECT_CHECK(STM32F2XXSyscfgState, (obj), TYPE_STM32F2XX_SYSCFG)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXSyscfgState, STM32F2XX_SYSCFG)
-typedef struct {
+struct STM32F2XXSyscfgState {
/* <private> */
SysBusDevice parent_obj;
@@ -54,8 +53,6 @@ typedef struct {
uint32_t syscfg_exticr3;
uint32_t syscfg_exticr4;
uint32_t syscfg_cmpcr;
-
- qemu_irq irq;
-} STM32F2XXSyscfgState;
+};
#endif /* HW_STM32F2XX_SYSCFG_H */
diff --git a/include/hw/misc/stm32f4xx_exti.h b/include/hw/misc/stm32f4xx_exti.h
new file mode 100644
index 0000000000..fc11c595fa
--- /dev/null
+++ b/include/hw/misc/stm32f4xx_exti.h
@@ -0,0 +1,59 @@
+/*
+ * STM32F4XX EXTI
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_STM32F4XX_EXTI_H
+#define HW_STM32F4XX_EXTI_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define EXTI_IMR 0x00
+#define EXTI_EMR 0x04
+#define EXTI_RTSR 0x08
+#define EXTI_FTSR 0x0C
+#define EXTI_SWIER 0x10
+#define EXTI_PR 0x14
+
+#define TYPE_STM32F4XX_EXTI "stm32f4xx-exti"
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F4xxExtiState, STM32F4XX_EXTI)
+
+#define NUM_GPIO_EVENT_IN_LINES 16
+#define NUM_INTERRUPT_OUT_LINES 16
+
+struct STM32F4xxExtiState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t exti_imr;
+ uint32_t exti_emr;
+ uint32_t exti_rtsr;
+ uint32_t exti_ftsr;
+ uint32_t exti_swier;
+ uint32_t exti_pr;
+
+ qemu_irq irq[NUM_INTERRUPT_OUT_LINES];
+};
+
+#endif
diff --git a/include/hw/misc/stm32f4xx_syscfg.h b/include/hw/misc/stm32f4xx_syscfg.h
new file mode 100644
index 0000000000..9fce67f4b4
--- /dev/null
+++ b/include/hw/misc/stm32f4xx_syscfg.h
@@ -0,0 +1,60 @@
+/*
+ * STM32F4xx SYSCFG
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_STM32F4XX_SYSCFG_H
+#define HW_STM32F4XX_SYSCFG_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define SYSCFG_MEMRMP 0x00
+#define SYSCFG_PMC 0x04
+#define SYSCFG_EXTICR1 0x08
+#define SYSCFG_EXTICR2 0x0C
+#define SYSCFG_EXTICR3 0x10
+#define SYSCFG_EXTICR4 0x14
+#define SYSCFG_CMPCR 0x20
+
+#define TYPE_STM32F4XX_SYSCFG "stm32f4xx-syscfg"
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F4xxSyscfgState, STM32F4XX_SYSCFG)
+
+#define SYSCFG_NUM_EXTICR 4
+
+struct STM32F4xxSyscfgState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+
+ uint32_t syscfg_memrmp;
+ uint32_t syscfg_pmc;
+ uint32_t syscfg_exticr[SYSCFG_NUM_EXTICR];
+ uint32_t syscfg_cmpcr;
+
+ qemu_irq irq;
+ qemu_irq gpio_out[16];
+};
+
+#endif
diff --git a/include/hw/misc/stm32l4x5_exti.h b/include/hw/misc/stm32l4x5_exti.h
new file mode 100644
index 0000000000..be961d2f01
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_exti.h
@@ -0,0 +1,51 @@
+/*
+ * STM32L4x5 EXTI (Extended interrupts and events controller)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This work is based on the stm32f4xx_exti by Alistair Francis.
+ * Original code is licensed under the MIT License:
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_STM32L4X5_EXTI_H
+#define HW_STM32L4X5_EXTI_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_EXTI "stm32l4x5-exti"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5ExtiState, STM32L4X5_EXTI)
+
+#define EXTI_NUM_INTERRUPT_OUT_LINES 40
+#define EXTI_NUM_REGISTER 2
+
+struct Stm32l4x5ExtiState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t imr[EXTI_NUM_REGISTER];
+ uint32_t emr[EXTI_NUM_REGISTER];
+ uint32_t rtsr[EXTI_NUM_REGISTER];
+ uint32_t ftsr[EXTI_NUM_REGISTER];
+ uint32_t swier[EXTI_NUM_REGISTER];
+ uint32_t pr[EXTI_NUM_REGISTER];
+
+ qemu_irq irq[EXTI_NUM_INTERRUPT_OUT_LINES];
+};
+
+#endif
diff --git a/include/hw/misc/stm32l4x5_rcc.h b/include/hw/misc/stm32l4x5_rcc.h
new file mode 100644
index 0000000000..0fbfba5c40
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_rcc.h
@@ -0,0 +1,239 @@
+/*
+ * STM32L4X5 RCC (Reset and clock control)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ *
+ * Inspired by the BCM2835 CPRMAN clock manager by Luc Michel.
+ */
+
+#ifndef HW_STM32L4X5_RCC_H
+#define HW_STM32L4X5_RCC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_RCC "stm32l4x5-rcc"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5RccState, STM32L4X5_RCC)
+
+/* In the Stm32l4x5 clock tree, muxes have at most 7 sources */
+#define RCC_NUM_CLOCK_MUX_SRC 7
+
+typedef enum PllCommonChannels {
+ RCC_PLL_COMMON_CHANNEL_P = 0,
+ RCC_PLL_COMMON_CHANNEL_Q = 1,
+ RCC_PLL_COMMON_CHANNEL_R = 2,
+
+ RCC_NUM_CHANNEL_PLL_OUT = 3
+} PllCommonChannels;
+
+/* NB: Prescalers are modeled as muxes with one source and one output */
+typedef enum RccClockMux {
+    /* Internal muxes that aren't exposed publicly to other peripherals */
+ RCC_CLOCK_MUX_SYSCLK,
+ RCC_CLOCK_MUX_PLL_INPUT,
+ RCC_CLOCK_MUX_HCLK,
+ RCC_CLOCK_MUX_PCLK1,
+ RCC_CLOCK_MUX_PCLK2,
+ RCC_CLOCK_MUX_HSE_OVER_32,
+ RCC_CLOCK_MUX_LCD_AND_RTC_COMMON,
+
+ /* Muxes with a publicly available output */
+ RCC_CLOCK_MUX_CORTEX_REFCLK,
+ RCC_CLOCK_MUX_USART1,
+ RCC_CLOCK_MUX_USART2,
+ RCC_CLOCK_MUX_USART3,
+ RCC_CLOCK_MUX_UART4,
+ RCC_CLOCK_MUX_UART5,
+ RCC_CLOCK_MUX_LPUART1,
+ RCC_CLOCK_MUX_I2C1,
+ RCC_CLOCK_MUX_I2C2,
+ RCC_CLOCK_MUX_I2C3,
+ RCC_CLOCK_MUX_LPTIM1,
+ RCC_CLOCK_MUX_LPTIM2,
+ RCC_CLOCK_MUX_SWPMI1,
+ RCC_CLOCK_MUX_MCO,
+ RCC_CLOCK_MUX_LSCO,
+ RCC_CLOCK_MUX_DFSDM1,
+ RCC_CLOCK_MUX_ADC,
+ RCC_CLOCK_MUX_CLK48,
+ RCC_CLOCK_MUX_SAI1,
+ RCC_CLOCK_MUX_SAI2,
+
+ /*
+     * Muxes that have only one input and one output, each assigned to a
+     * peripheral. They could be direct lines, but it is simpler to use the
+     * same logic for all outputs.
+ */
+ /* - AHB1 */
+ RCC_CLOCK_MUX_TSC,
+ RCC_CLOCK_MUX_CRC,
+ RCC_CLOCK_MUX_FLASH,
+ RCC_CLOCK_MUX_DMA2,
+ RCC_CLOCK_MUX_DMA1,
+
+ /* - AHB2 */
+ RCC_CLOCK_MUX_RNG,
+ RCC_CLOCK_MUX_AES,
+ RCC_CLOCK_MUX_OTGFS,
+ RCC_CLOCK_MUX_GPIOA,
+ RCC_CLOCK_MUX_GPIOB,
+ RCC_CLOCK_MUX_GPIOC,
+ RCC_CLOCK_MUX_GPIOD,
+ RCC_CLOCK_MUX_GPIOE,
+ RCC_CLOCK_MUX_GPIOF,
+ RCC_CLOCK_MUX_GPIOG,
+ RCC_CLOCK_MUX_GPIOH,
+
+ /* - AHB3 */
+ RCC_CLOCK_MUX_QSPI,
+ RCC_CLOCK_MUX_FMC,
+
+ /* - APB1 */
+ RCC_CLOCK_MUX_OPAMP,
+ RCC_CLOCK_MUX_DAC1,
+ RCC_CLOCK_MUX_PWR,
+ RCC_CLOCK_MUX_CAN1,
+ RCC_CLOCK_MUX_SPI3,
+ RCC_CLOCK_MUX_SPI2,
+ RCC_CLOCK_MUX_WWDG,
+ RCC_CLOCK_MUX_LCD,
+ RCC_CLOCK_MUX_TIM7,
+ RCC_CLOCK_MUX_TIM6,
+ RCC_CLOCK_MUX_TIM5,
+ RCC_CLOCK_MUX_TIM4,
+ RCC_CLOCK_MUX_TIM3,
+ RCC_CLOCK_MUX_TIM2,
+
+ /* - APB2 */
+ RCC_CLOCK_MUX_TIM17,
+ RCC_CLOCK_MUX_TIM16,
+ RCC_CLOCK_MUX_TIM15,
+ RCC_CLOCK_MUX_TIM8,
+ RCC_CLOCK_MUX_SPI1,
+ RCC_CLOCK_MUX_TIM1,
+ RCC_CLOCK_MUX_SDMMC1,
+ RCC_CLOCK_MUX_FW,
+ RCC_CLOCK_MUX_SYSCFG,
+
+ /* - BDCR */
+ RCC_CLOCK_MUX_RTC,
+
+ /* - OTHER */
+ RCC_CLOCK_MUX_CORTEX_FCLK,
+
+ RCC_NUM_CLOCK_MUX
+} RccClockMux;
+
+typedef enum RccPll {
+ RCC_PLL_PLL,
+ RCC_PLL_PLLSAI1,
+ RCC_PLL_PLLSAI2,
+
+ RCC_NUM_PLL
+} RccPll;
+
+typedef struct RccClockMuxState {
+ DeviceState parent_obj;
+
+ RccClockMux id;
+ Clock *srcs[RCC_NUM_CLOCK_MUX_SRC];
+ Clock *out;
+ bool enabled;
+ uint32_t src;
+ uint32_t multiplier;
+ uint32_t divider;
+
+ /*
+ * Used by clock srcs update callback to retrieve both the clock and the
+ * source number.
+ */
+ struct RccClockMuxState *backref[RCC_NUM_CLOCK_MUX_SRC];
+} RccClockMuxState;
+
+typedef struct RccPllState {
+ DeviceState parent_obj;
+
+ RccPll id;
+ Clock *in;
+ uint32_t vco_multiplier;
+ Clock *channels[RCC_NUM_CHANNEL_PLL_OUT];
+ /* Global pll enabled flag */
+ bool enabled;
+ /* 'enabled' refers to the runtime configuration */
+ bool channel_enabled[RCC_NUM_CHANNEL_PLL_OUT];
+ /*
+     * 'exists' refers to the physical configuration.
+     * It should only be set at PLL initialization,
+ * e.g. pllsai2 doesn't have a Q output.
+ */
+ bool channel_exists[RCC_NUM_CHANNEL_PLL_OUT];
+ uint32_t channel_divider[RCC_NUM_CHANNEL_PLL_OUT];
+} RccPllState;
+
+struct Stm32l4x5RccState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t cr;
+ uint32_t icscr;
+ uint32_t cfgr;
+ uint32_t pllcfgr;
+ uint32_t pllsai1cfgr;
+ uint32_t pllsai2cfgr;
+ uint32_t cier;
+ uint32_t cifr;
+ uint32_t ahb1rstr;
+ uint32_t ahb2rstr;
+ uint32_t ahb3rstr;
+ uint32_t apb1rstr1;
+ uint32_t apb1rstr2;
+ uint32_t apb2rstr;
+ uint32_t ahb1enr;
+ uint32_t ahb2enr;
+ uint32_t ahb3enr;
+ uint32_t apb1enr1;
+ uint32_t apb1enr2;
+ uint32_t apb2enr;
+ uint32_t ahb1smenr;
+ uint32_t ahb2smenr;
+ uint32_t ahb3smenr;
+ uint32_t apb1smenr1;
+ uint32_t apb1smenr2;
+ uint32_t apb2smenr;
+ uint32_t ccipr;
+ uint32_t bdcr;
+ uint32_t csr;
+
+ /* Clock sources */
+ Clock *gnd;
+ Clock *hsi16_rc;
+ Clock *msi_rc;
+ Clock *hse;
+ Clock *lsi_rc;
+ Clock *lse_crystal;
+ Clock *sai1_extclk;
+ Clock *sai2_extclk;
+
+ /* PLLs */
+ RccPllState plls[RCC_NUM_PLL];
+
+ /* Muxes ~= outputs */
+ RccClockMuxState clock_muxes[RCC_NUM_CLOCK_MUX];
+
+ qemu_irq irq;
+ uint64_t hse_frequency;
+ uint64_t sai1_extclk_frequency;
+ uint64_t sai2_extclk_frequency;
+};
+
+#endif /* HW_STM32L4X5_RCC_H */
diff --git a/include/hw/misc/stm32l4x5_rcc_internals.h b/include/hw/misc/stm32l4x5_rcc_internals.h
new file mode 100644
index 0000000000..ff1c834f69
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_rcc_internals.h
@@ -0,0 +1,1042 @@
+/*
+ * STM32L4X5 RCC (Reset and clock control)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ *
+ * Inspired by the BCM2835 CPRMAN clock manager implementation by Luc Michel.
+ */
+
+#ifndef HW_STM32L4X5_RCC_INTERNALS_H
+#define HW_STM32L4X5_RCC_INTERNALS_H
+
+#include "hw/registerfields.h"
+#include "hw/misc/stm32l4x5_rcc.h"
+
+#define TYPE_RCC_CLOCK_MUX "stm32l4x5-rcc-clock-mux"
+#define TYPE_RCC_PLL "stm32l4x5-rcc-pll"
+
+OBJECT_DECLARE_SIMPLE_TYPE(RccClockMuxState, RCC_CLOCK_MUX)
+OBJECT_DECLARE_SIMPLE_TYPE(RccPllState, RCC_PLL)
+
+/* Register map */
+REG32(CR, 0x00)
+ FIELD(CR, PLLSAI2RDY, 29, 1)
+ FIELD(CR, PLLSAI2ON, 28, 1)
+ FIELD(CR, PLLSAI1RDY, 27, 1)
+ FIELD(CR, PLLSAI1ON, 26, 1)
+ FIELD(CR, PLLRDY, 25, 1)
+ FIELD(CR, PLLON, 24, 1)
+ FIELD(CR, CSSON, 19, 1)
+ FIELD(CR, HSEBYP, 18, 1)
+ FIELD(CR, HSERDY, 17, 1)
+ FIELD(CR, HSEON, 16, 1)
+ FIELD(CR, HSIASFS, 11, 1)
+ FIELD(CR, HSIRDY, 10, 1)
+ FIELD(CR, HSIKERON, 9, 1)
+ FIELD(CR, HSION, 8, 1)
+ FIELD(CR, MSIRANGE, 4, 4)
+ FIELD(CR, MSIRGSEL, 3, 1)
+ FIELD(CR, MSIPLLEN, 2, 1)
+ FIELD(CR, MSIRDY, 1, 1)
+ FIELD(CR, MSION, 0, 1)
+REG32(ICSCR, 0x04)
+ FIELD(ICSCR, HSITRIM, 24, 7)
+ FIELD(ICSCR, HSICAL, 16, 8)
+ FIELD(ICSCR, MSITRIM, 8, 8)
+ FIELD(ICSCR, MSICAL, 0, 8)
+REG32(CFGR, 0x08)
+ FIELD(CFGR, MCOPRE, 28, 3)
+ /* MCOSEL[2:0] only for STM32L475xx/476xx/486xx devices */
+ FIELD(CFGR, MCOSEL, 24, 3)
+ FIELD(CFGR, STOPWUCK, 15, 1)
+ FIELD(CFGR, PPRE2, 11, 3)
+ FIELD(CFGR, PPRE1, 8, 3)
+ FIELD(CFGR, HPRE, 4, 4)
+ FIELD(CFGR, SWS, 2, 2)
+ FIELD(CFGR, SW, 0, 2)
+REG32(PLLCFGR, 0x0C)
+ FIELD(PLLCFGR, PLLPDIV, 27, 5)
+ FIELD(PLLCFGR, PLLR, 25, 2)
+ FIELD(PLLCFGR, PLLREN, 24, 1)
+ FIELD(PLLCFGR, PLLQ, 21, 2)
+ FIELD(PLLCFGR, PLLQEN, 20, 1)
+ FIELD(PLLCFGR, PLLP, 17, 1)
+ FIELD(PLLCFGR, PLLPEN, 16, 1)
+ FIELD(PLLCFGR, PLLN, 8, 7)
+ FIELD(PLLCFGR, PLLM, 4, 3)
+ FIELD(PLLCFGR, PLLSRC, 0, 2)
+REG32(PLLSAI1CFGR, 0x10)
+ FIELD(PLLSAI1CFGR, PLLSAI1PDIV, 27, 5)
+ FIELD(PLLSAI1CFGR, PLLSAI1R, 25, 2)
+ FIELD(PLLSAI1CFGR, PLLSAI1REN, 24, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1Q, 21, 2)
+ FIELD(PLLSAI1CFGR, PLLSAI1QEN, 20, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1P, 17, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1PEN, 16, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1N, 8, 7)
+REG32(PLLSAI2CFGR, 0x14)
+ FIELD(PLLSAI2CFGR, PLLSAI2PDIV, 27, 5)
+ FIELD(PLLSAI2CFGR, PLLSAI2R, 25, 2)
+ FIELD(PLLSAI2CFGR, PLLSAI2REN, 24, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2Q, 21, 2)
+ FIELD(PLLSAI2CFGR, PLLSAI2QEN, 20, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2P, 17, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2PEN, 16, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2N, 8, 7)
+REG32(CIER, 0x18)
+ /* HSI48RDYIE: only on STM32L496xx/4A6xx devices */
+ FIELD(CIER, LSECSSIE, 9, 1)
+ FIELD(CIER, PLLSAI2RDYIE, 7, 1)
+ FIELD(CIER, PLLSAI1RDYIE, 6, 1)
+ FIELD(CIER, PLLRDYIE, 5, 1)
+ FIELD(CIER, HSERDYIE, 4, 1)
+ FIELD(CIER, HSIRDYIE, 3, 1)
+ FIELD(CIER, MSIRDYIE, 2, 1)
+ FIELD(CIER, LSERDYIE, 1, 1)
+ FIELD(CIER, LSIRDYIE, 0, 1)
+REG32(CIFR, 0x1C)
+ /* HSI48RDYF: only on STM32L496xx/4A6xx devices */
+ FIELD(CIFR, LSECSSF, 9, 1)
+ FIELD(CIFR, CSSF, 8, 1)
+ FIELD(CIFR, PLLSAI2RDYF, 7, 1)
+ FIELD(CIFR, PLLSAI1RDYF, 6, 1)
+ FIELD(CIFR, PLLRDYF, 5, 1)
+ FIELD(CIFR, HSERDYF, 4, 1)
+ FIELD(CIFR, HSIRDYF, 3, 1)
+ FIELD(CIFR, MSIRDYF, 2, 1)
+ FIELD(CIFR, LSERDYF, 1, 1)
+ FIELD(CIFR, LSIRDYF, 0, 1)
+REG32(CICR, 0x20)
+ /* HSI48RDYC: only on STM32L496xx/4A6xx devices */
+ FIELD(CICR, LSECSSC, 9, 1)
+ FIELD(CICR, CSSC, 8, 1)
+ FIELD(CICR, PLLSAI2RDYC, 7, 1)
+ FIELD(CICR, PLLSAI1RDYC, 6, 1)
+ FIELD(CICR, PLLRDYC, 5, 1)
+ FIELD(CICR, HSERDYC, 4, 1)
+ FIELD(CICR, HSIRDYC, 3, 1)
+ FIELD(CICR, MSIRDYC, 2, 1)
+ FIELD(CICR, LSERDYC, 1, 1)
+ FIELD(CICR, LSIRDYC, 0, 1)
+REG32(AHB1RSTR, 0x28)
+REG32(AHB2RSTR, 0x2C)
+REG32(AHB3RSTR, 0x30)
+REG32(APB1RSTR1, 0x38)
+REG32(APB1RSTR2, 0x3C)
+REG32(APB2RSTR, 0x40)
+REG32(AHB1ENR, 0x48)
+ /* DMA2DEN: reserved for STM32L475xx */
+ FIELD(AHB1ENR, TSCEN, 16, 1)
+ FIELD(AHB1ENR, CRCEN, 12, 1)
+ FIELD(AHB1ENR, FLASHEN, 8, 1)
+ FIELD(AHB1ENR, DMA2EN, 1, 1)
+ FIELD(AHB1ENR, DMA1EN, 0, 1)
+REG32(AHB2ENR, 0x4C)
+ FIELD(AHB2ENR, RNGEN, 18, 1)
+ /* HASHEN: reserved for STM32L475xx */
+ FIELD(AHB2ENR, AESEN, 16, 1)
+ /* DCMIEN: reserved for STM32L475xx */
+ FIELD(AHB2ENR, ADCEN, 13, 1)
+ FIELD(AHB2ENR, OTGFSEN, 12, 1)
+ /* GPIOIEN: reserved for STM32L475xx */
+ FIELD(AHB2ENR, GPIOHEN, 7, 1)
+ FIELD(AHB2ENR, GPIOGEN, 6, 1)
+ FIELD(AHB2ENR, GPIOFEN, 5, 1)
+ FIELD(AHB2ENR, GPIOEEN, 4, 1)
+ FIELD(AHB2ENR, GPIODEN, 3, 1)
+ FIELD(AHB2ENR, GPIOCEN, 2, 1)
+ FIELD(AHB2ENR, GPIOBEN, 1, 1)
+ FIELD(AHB2ENR, GPIOAEN, 0, 1)
+REG32(AHB3ENR, 0x50)
+ FIELD(AHB3ENR, QSPIEN, 8, 1)
+ FIELD(AHB3ENR, FMCEN, 0, 1)
+REG32(APB1ENR1, 0x58)
+ FIELD(APB1ENR1, LPTIM1EN, 31, 1)
+ FIELD(APB1ENR1, OPAMPEN, 30, 1)
+ FIELD(APB1ENR1, DAC1EN, 29, 1)
+ FIELD(APB1ENR1, PWREN, 28, 1)
+ FIELD(APB1ENR1, CAN2EN, 26, 1)
+ FIELD(APB1ENR1, CAN1EN, 25, 1)
+ /* CRSEN: reserved for STM32L475xx */
+ FIELD(APB1ENR1, I2C3EN, 23, 1)
+ FIELD(APB1ENR1, I2C2EN, 22, 1)
+ FIELD(APB1ENR1, I2C1EN, 21, 1)
+ FIELD(APB1ENR1, UART5EN, 20, 1)
+ FIELD(APB1ENR1, UART4EN, 19, 1)
+ FIELD(APB1ENR1, USART3EN, 18, 1)
+ FIELD(APB1ENR1, USART2EN, 17, 1)
+ FIELD(APB1ENR1, SPI3EN, 15, 1)
+ FIELD(APB1ENR1, SPI2EN, 14, 1)
+ FIELD(APB1ENR1, WWDGEN, 11, 1)
+ /* RTCAPBEN: reserved for STM32L475xx */
+ FIELD(APB1ENR1, LCDEN, 9, 1)
+ FIELD(APB1ENR1, TIM7EN, 5, 1)
+ FIELD(APB1ENR1, TIM6EN, 4, 1)
+ FIELD(APB1ENR1, TIM5EN, 3, 1)
+ FIELD(APB1ENR1, TIM4EN, 2, 1)
+ FIELD(APB1ENR1, TIM3EN, 1, 1)
+ FIELD(APB1ENR1, TIM2EN, 0, 1)
+REG32(APB1ENR2, 0x5C)
+ FIELD(APB1ENR2, LPTIM2EN, 5, 1)
+ FIELD(APB1ENR2, SWPMI1EN, 2, 1)
+ /* I2C4EN: reserved for STM32L475xx */
+ FIELD(APB1ENR2, LPUART1EN, 0, 1)
+REG32(APB2ENR, 0x60)
+ FIELD(APB2ENR, DFSDM1EN, 24, 1)
+ FIELD(APB2ENR, SAI2EN, 22, 1)
+ FIELD(APB2ENR, SAI1EN, 21, 1)
+ FIELD(APB2ENR, TIM17EN, 18, 1)
+ FIELD(APB2ENR, TIM16EN, 17, 1)
+ FIELD(APB2ENR, TIM15EN, 16, 1)
+ FIELD(APB2ENR, USART1EN, 14, 1)
+ FIELD(APB2ENR, TIM8EN, 13, 1)
+ FIELD(APB2ENR, SPI1EN, 12, 1)
+ FIELD(APB2ENR, TIM1EN, 11, 1)
+ FIELD(APB2ENR, SDMMC1EN, 10, 1)
+ FIELD(APB2ENR, FWEN, 7, 1)
+ FIELD(APB2ENR, SYSCFGEN, 0, 1)
+REG32(AHB1SMENR, 0x68)
+REG32(AHB2SMENR, 0x6C)
+REG32(AHB3SMENR, 0x70)
+REG32(APB1SMENR1, 0x78)
+REG32(APB1SMENR2, 0x7C)
+REG32(APB2SMENR, 0x80)
+REG32(CCIPR, 0x88)
+ FIELD(CCIPR, DFSDM1SEL, 31, 1)
+ FIELD(CCIPR, SWPMI1SEL, 30, 1)
+ FIELD(CCIPR, ADCSEL, 28, 2)
+ FIELD(CCIPR, CLK48SEL, 26, 2)
+ FIELD(CCIPR, SAI2SEL, 24, 2)
+ FIELD(CCIPR, SAI1SEL, 22, 2)
+ FIELD(CCIPR, LPTIM2SEL, 20, 2)
+ FIELD(CCIPR, LPTIM1SEL, 18, 2)
+ FIELD(CCIPR, I2C3SEL, 16, 2)
+ FIELD(CCIPR, I2C2SEL, 14, 2)
+ FIELD(CCIPR, I2C1SEL, 12, 2)
+ FIELD(CCIPR, LPUART1SEL, 10, 2)
+ FIELD(CCIPR, UART5SEL, 8, 2)
+ FIELD(CCIPR, UART4SEL, 6, 2)
+ FIELD(CCIPR, USART3SEL, 4, 2)
+ FIELD(CCIPR, USART2SEL, 2, 2)
+ FIELD(CCIPR, USART1SEL, 0, 2)
+REG32(BDCR, 0x90)
+ FIELD(BDCR, LSCOSEL, 25, 1)
+ FIELD(BDCR, LSCOEN, 24, 1)
+ FIELD(BDCR, BDRST, 16, 1)
+ FIELD(BDCR, RTCEN, 15, 1)
+ FIELD(BDCR, RTCSEL, 8, 2)
+ FIELD(BDCR, LSECSSD, 6, 1)
+ FIELD(BDCR, LSECSSON, 5, 1)
+ FIELD(BDCR, LSEDRV, 3, 2)
+ FIELD(BDCR, LSEBYP, 2, 1)
+ FIELD(BDCR, LSERDY, 1, 1)
+ FIELD(BDCR, LSEON, 0, 1)
+REG32(CSR, 0x94)
+ FIELD(CSR, LPWRRSTF, 31, 1)
+ FIELD(CSR, WWDGRSTF, 30, 1)
+ FIELD(CSR, IWWGRSTF, 29, 1)
+ FIELD(CSR, SFTRSTF, 28, 1)
+ FIELD(CSR, BORRSTF, 27, 1)
+ FIELD(CSR, PINRSTF, 26, 1)
+ FIELD(CSR, OBLRSTF, 25, 1)
+ FIELD(CSR, FWRSTF, 24, 1)
+ FIELD(CSR, RMVF, 23, 1)
+ FIELD(CSR, MSISRANGE, 8, 4)
+ FIELD(CSR, LSIRDY, 1, 1)
+ FIELD(CSR, LSION, 0, 1)
+/* CRRCR and CCIPR2 registers are present on L496/L4A6 devices only. */
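+
+/*
+ * Illustrative only (editorial note): the REG32()/FIELD() definitions above
+ * are used with the standard hw/registerfields.h helpers, e.g.
+ *
+ *     uint32_t sw = FIELD_EX32(s->cfgr, CFGR, SW);
+ *     s->cfgr = FIELD_DP32(s->cfgr, CFGR, SWS, sw);
+ *
+ * to extract and deposit individual fields.
+ */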
+
+/* Read-only masks to prevent guest writes to read-only bits */
+#define CR_READ_ONLY_MASK (R_CR_PLLSAI2RDY_MASK | \
+ R_CR_PLLSAI1RDY_MASK | \
+ R_CR_PLLRDY_MASK | \
+ R_CR_HSERDY_MASK | \
+ R_CR_HSIRDY_MASK | \
+ R_CR_MSIRDY_MASK)
+#define CR_READ_SET_MASK (R_CR_CSSON_MASK | R_CR_MSIRGSEL_MASK)
+#define ICSCR_READ_ONLY_MASK (R_ICSCR_HSICAL_MASK | R_ICSCR_MSICAL_MASK)
+#define CFGR_READ_ONLY_MASK (R_CFGR_SWS_MASK)
+#define CIFR_READ_ONLY_MASK (R_CIFR_LSECSSF_MASK | \
+ R_CIFR_CSSF_MASK | \
+ R_CIFR_PLLSAI2RDYF_MASK | \
+ R_CIFR_PLLSAI1RDYF_MASK | \
+ R_CIFR_PLLRDYF_MASK | \
+ R_CIFR_HSERDYF_MASK | \
+ R_CIFR_HSIRDYF_MASK | \
+ R_CIFR_MSIRDYF_MASK | \
+ R_CIFR_LSERDYF_MASK | \
+ R_CIFR_LSIRDYF_MASK)
+#define CIFR_IRQ_MASK CIFR_READ_ONLY_MASK
+#define APB2ENR_READ_SET_MASK (R_APB2ENR_FWEN_MASK)
+#define BDCR_READ_ONLY_MASK (R_BDCR_LSECSSD_MASK | R_BDCR_LSERDY_MASK)
+#define CSR_READ_ONLY_MASK (R_CSR_LPWRRSTF_MASK | \
+ R_CSR_WWDGRSTF_MASK | \
+ R_CSR_IWWGRSTF_MASK | \
+ R_CSR_SFTRSTF_MASK | \
+ R_CSR_BORRSTF_MASK | \
+ R_CSR_PINRSTF_MASK | \
+ R_CSR_OBLRSTF_MASK | \
+ R_CSR_FWRSTF_MASK | \
+ R_CSR_LSIRDY_MASK)
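+
+/*
+ * Illustrative helper (an editorial sketch, not part of the RCC model): a
+ * read-only mask is typically applied on a guest register write so that the
+ * masked bits keep their current value and only the writable bits change.
+ */
+static inline uint32_t rcc_apply_read_only_mask(uint32_t old_val,
+                                                uint32_t new_val,
+                                                uint32_t ro_mask)
+{
+    /* e.g. rcc_apply_read_only_mask(s->cr, value, CR_READ_ONLY_MASK) */
+    return (old_val & ro_mask) | (new_val & ~ro_mask);
+}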
+
+/* Pll Channels */
+enum PllChannels {
+ RCC_PLL_CHANNEL_PLLSAI3CLK = 0,
+ RCC_PLL_CHANNEL_PLL48M1CLK = 1,
+ RCC_PLL_CHANNEL_PLLCLK = 2,
+};
+
+enum PllSai1Channels {
+ RCC_PLLSAI1_CHANNEL_PLLSAI1CLK = 0,
+ RCC_PLLSAI1_CHANNEL_PLL48M2CLK = 1,
+ RCC_PLLSAI1_CHANNEL_PLLADC1CLK = 2,
+};
+
+enum PllSai2Channels {
+ RCC_PLLSAI2_CHANNEL_PLLSAI2CLK = 0,
+ /* No Q channel */
+ RCC_PLLSAI2_CHANNEL_PLLADC2CLK = 2,
+};
+
+typedef enum RccClockMuxSource {
+ RCC_CLOCK_MUX_SRC_GND = 0,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ RCC_CLOCK_MUX_SRC_SAI1_EXTCLK,
+ RCC_CLOCK_MUX_SRC_SAI2_EXTCLK,
+ RCC_CLOCK_MUX_SRC_PLL,
+ RCC_CLOCK_MUX_SRC_PLLSAI1,
+ RCC_CLOCK_MUX_SRC_PLLSAI2,
+ RCC_CLOCK_MUX_SRC_PLLSAI3,
+ RCC_CLOCK_MUX_SRC_PLL48M1,
+ RCC_CLOCK_MUX_SRC_PLL48M2,
+ RCC_CLOCK_MUX_SRC_PLLADC1,
+ RCC_CLOCK_MUX_SRC_PLLADC2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HCLK,
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ RCC_CLOCK_MUX_SRC_HSE_OVER_32,
+ RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON,
+
+ RCC_CLOCK_MUX_SRC_NUMBER,
+} RccClockMuxSource;
+
+/* PLL init info */
+typedef struct PllInitInfo {
+ const char *name;
+
+ const char *channel_name[RCC_NUM_CHANNEL_PLL_OUT];
+ bool channel_exists[RCC_NUM_CHANNEL_PLL_OUT];
+ uint32_t default_channel_divider[RCC_NUM_CHANNEL_PLL_OUT];
+
+ RccClockMuxSource src_mapping[RCC_NUM_CLOCK_MUX_SRC];
+} PllInitInfo;
+
+static const PllInitInfo PLL_INIT_INFO[] = {
+ [RCC_PLL_PLL] = {
+ .name = "pll",
+ .channel_name = {
+ "pllsai3clk",
+ "pll48m1clk",
+ "pllclk"
+ },
+ .channel_exists = {
+ true, true, true
+ },
+ /* From PLLCFGR register documentation */
+ .default_channel_divider = {
+ 7, 2, 2
+ }
+ },
+ [RCC_PLL_PLLSAI1] = {
+ .name = "pllsai1",
+ .channel_name = {
+ "pllsai1clk",
+ "pll48m2clk",
+ "plladc1clk"
+ },
+ .channel_exists = {
+ true, true, true
+ },
+ /* From PLLSAI1CFGR register documentation */
+ .default_channel_divider = {
+ 7, 2, 2
+ }
+ },
+ [RCC_PLL_PLLSAI2] = {
+ .name = "pllsai2",
+ .channel_name = {
+ "pllsai2clk",
+ NULL,
+ "plladc2clk"
+ },
+ .channel_exists = {
+ true, false, true
+ },
+ /* From PLLSAI2CFGR register documentation */
+ .default_channel_divider = {
+ 7, 0, 2
+ }
+ }
+};
+
+static inline void set_pll_init_info(RccPllState *pll,
+ RccPll id)
+{
+ int i;
+
+ pll->id = id;
+ pll->vco_multiplier = 1;
+ for (i = 0; i < RCC_NUM_CHANNEL_PLL_OUT; i++) {
+ pll->channel_enabled[i] = false;
+ pll->channel_exists[i] = PLL_INIT_INFO[id].channel_exists[i];
+ pll->channel_divider[i] = PLL_INIT_INFO[id].default_channel_divider[i];
+ }
+}
+
+/* Clock mux init info */
+typedef struct ClockMuxInitInfo {
+ const char *name;
+
+ uint32_t multiplier;
+ uint32_t divider;
+ bool enabled;
+ /* If this is true, the clock will not be exposed outside of the device */
+ bool hidden;
+
+ RccClockMuxSource src_mapping[RCC_NUM_CLOCK_MUX_SRC];
+} ClockMuxInitInfo;
+
+#define FILL_DEFAULT_FACTOR \
+ .multiplier = 1, \
+ .divider = 1
+
+#define FILL_DEFAULT_INIT_ENABLED \
+ FILL_DEFAULT_FACTOR, \
+ .enabled = true
+
+#define FILL_DEFAULT_INIT_DISABLED \
+ FILL_DEFAULT_FACTOR, \
+ .enabled = false
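+
+/*
+ * For reference (editorial note): the helpers above expand to designated
+ * initializer fragments, e.g. FILL_DEFAULT_INIT_ENABLED becomes
+ *
+ *     .multiplier = 1, .divider = 1, .enabled = true
+ *
+ * so every CLOCK_MUX_INIT_INFO entry below starts out as a 1:1 factor.
+ */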
+
+
+static const ClockMuxInitInfo CLOCK_MUX_INIT_INFO[] = {
+ [RCC_CLOCK_MUX_SYSCLK] = {
+ .name = "sysclk",
+ /* Same mapping as: CFGR_SW */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ RCC_CLOCK_MUX_SRC_PLL,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_PLL_INPUT] = {
+ .name = "pll-input",
+ /* Same mapping as: PLLCFGR_PLLSRC */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_HCLK] = {
+ .name = "hclk",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_PCLK1] = {
+ .name = "pclk1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_PCLK2] = {
+ .name = "pclk2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_HSE_OVER_32] = {
+ .name = "hse-divided-by-32",
+ .multiplier = 1,
+ .divider = 32,
+ .enabled = true,
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HSE,
+ },
+ .hidden = true,
+ },
+ [RCC_CLOCK_MUX_LCD_AND_RTC_COMMON] = {
+ .name = "lcd-and-rtc-common-mux",
+ /* Same mapping as: BDCR_RTCSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_GND,
+ RCC_CLOCK_MUX_SRC_LSE,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_HSE_OVER_32,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ /* From now on, muxes with a publicly available output */
+ [RCC_CLOCK_MUX_CORTEX_REFCLK] = {
+ .name = "cortex-refclk",
+ .multiplier = 1,
+ /* REFCLK is always HCLK/8 */
+ .divider = 8,
+ .enabled = true,
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ }
+ },
+ [RCC_CLOCK_MUX_USART1] = {
+ .name = "usart1",
+ /* Same mapping as: CCIPR_USART1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_USART2] = {
+ .name = "usart2",
+ /* Same mapping as: CCIPR_USART2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_USART3] = {
+ .name = "usart3",
+ /* Same mapping as: CCIPR_USART3SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_UART4] = {
+ .name = "uart4",
+ /* Same mapping as: CCIPR_UART4SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_UART5] = {
+ .name = "uart5",
+ /* Same mapping as: CCIPR_UART5SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LPUART1] = {
+ .name = "lpuart1",
+ /* Same mapping as: CCIPR_LPUART1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_I2C1] = {
+ .name = "i2c1",
+ /* Same mapping as: CCIPR_I2C1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_I2C2] = {
+ .name = "i2c2",
+ /* Same mapping as: CCIPR_I2C2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_I2C3] = {
+ .name = "i2c3",
+ /* Same mapping as: CCIPR_I2C3SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LPTIM1] = {
+ .name = "lptim1",
+ /* Same mapping as: CCIPR_LPTIM1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LPTIM2] = {
+ .name = "lptim2",
+ /* Same mapping as: CCIPR_LPTIM2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SWPMI1] = {
+ .name = "swpmi1",
+ /* Same mapping as: CCIPR_SWPMI1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_MCO] = {
+ .name = "mco",
+ /* Same mapping as: CFGR_MCOSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ RCC_CLOCK_MUX_SRC_PLL,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LSCO] = {
+ .name = "lsco",
+ /* Same mapping as: BDCR_LSCOSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DFSDM1] = {
+ .name = "dfsdm1",
+ /* Same mapping as: CCIPR_DFSDM1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_ADC] = {
+ .name = "adc",
+ /* Same mapping as: CCIPR_ADCSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_GND,
+ RCC_CLOCK_MUX_SRC_PLLADC1,
+ RCC_CLOCK_MUX_SRC_PLLADC2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CLK48] = {
+ .name = "clk48",
+ /* Same mapping as: CCIPR_CLK48SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_GND,
+ RCC_CLOCK_MUX_SRC_PLL48M2,
+ RCC_CLOCK_MUX_SRC_PLL48M1,
+ RCC_CLOCK_MUX_SRC_MSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SAI2] = {
+ .name = "sai2",
+ /* Same mapping as: CCIPR_SAI2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PLLSAI1,
+ RCC_CLOCK_MUX_SRC_PLLSAI2,
+ RCC_CLOCK_MUX_SRC_PLLSAI3,
+ RCC_CLOCK_MUX_SRC_SAI2_EXTCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SAI1] = {
+ .name = "sai1",
+ /* Same mapping as: CCIPR_SAI1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PLLSAI1,
+ RCC_CLOCK_MUX_SRC_PLLSAI2,
+ RCC_CLOCK_MUX_SRC_PLLSAI3,
+ RCC_CLOCK_MUX_SRC_SAI1_EXTCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ /* From now on, these muxes only have one valid source */
+ [RCC_CLOCK_MUX_TSC] = {
+ .name = "tsc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CRC] = {
+ .name = "crc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_FLASH] = {
+ .name = "flash",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DMA2] = {
+ .name = "dma2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DMA1] = {
+ .name = "dma1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_RNG] = {
+ .name = "rng",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_AES] = {
+ .name = "aes",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_OTGFS] = {
+ .name = "otgfs",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOA] = {
+ .name = "gpioa",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOB] = {
+ .name = "gpiob",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOC] = {
+ .name = "gpioc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOD] = {
+ .name = "gpiod",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOE] = {
+ .name = "gpioe",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOF] = {
+ .name = "gpiof",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOG] = {
+ .name = "gpiog",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOH] = {
+ .name = "gpioh",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_QSPI] = {
+ .name = "qspi",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_FMC] = {
+ .name = "fmc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_OPAMP] = {
+ .name = "opamp",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DAC1] = {
+ .name = "dac1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_PWR] = {
+ .name = "pwr",
+ /*
+ * PWREN is in the APB1ENR1 register,
+ * but PWR uses SYSCLK according to the clock tree.
+ */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CAN1] = {
+ .name = "can1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SPI3] = {
+ .name = "spi3",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SPI2] = {
+ .name = "spi2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_WWDG] = {
+ .name = "wwdg",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LCD] = {
+ .name = "lcd",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM7] = {
+ .name = "tim7",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM6] = {
+ .name = "tim6",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM5] = {
+ .name = "tim5",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM4] = {
+ .name = "tim4",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM3] = {
+ .name = "tim3",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM2] = {
+ .name = "tim2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM17] = {
+ .name = "tim17",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM16] = {
+ .name = "tim16",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM15] = {
+ .name = "tim15",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM8] = {
+ .name = "tim8",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SPI1] = {
+ .name = "spi1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM1] = {
+ .name = "tim1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SDMMC1] = {
+ .name = "sdmmc1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_FW] = {
+ .name = "fw",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SYSCFG] = {
+ .name = "syscfg",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_RTC] = {
+ .name = "rtc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CORTEX_FCLK] = {
+ .name = "cortex-fclk",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ },
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+};
+
+static inline void set_clock_mux_init_info(RccClockMuxState *mux,
+ RccClockMux id)
+{
+ mux->id = id;
+ mux->multiplier = CLOCK_MUX_INIT_INFO[id].multiplier;
+ mux->divider = CLOCK_MUX_INIT_INFO[id].divider;
+ mux->enabled = CLOCK_MUX_INIT_INFO[id].enabled;
+    /*
+     * Every peripheral uses the first source in its source list as
+     * its default source.
+     */
+ mux->src = 0;
+}
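+
+/*
+ * A minimal usage sketch, not taken from the actual RCC implementation
+ * (the "clock_muxes" field name and the mux-count bound are illustrative):
+ *
+ *     for (int i = 0; i < RCC_NUM_CLOCK_MUX; i++) {
+ *         set_clock_mux_init_info(&s->clock_muxes[i], i);
+ *     }
+ *
+ * Each mux then starts with the multiplier, divider and enabled state from
+ * CLOCK_MUX_INIT_INFO[] and with entry 0 of src_mapping as its source.
+ */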
+
+#endif /* HW_STM32L4X5_RCC_INTERNALS_H */
diff --git a/include/hw/misc/stm32l4x5_syscfg.h b/include/hw/misc/stm32l4x5_syscfg.h
new file mode 100644
index 0000000000..23bb564150
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_syscfg.h
@@ -0,0 +1,53 @@
+/*
+ * STM32L4x5 SYSCFG (System Configuration Controller)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This work is based on the stm32f4xx_syscfg by Alistair Francis.
+ * Original code is licensed under the MIT License:
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_STM32L4X5_SYSCFG_H
+#define HW_STM32L4X5_SYSCFG_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "hw/gpio/stm32l4x5_gpio.h"
+
+#define TYPE_STM32L4X5_SYSCFG "stm32l4x5-syscfg"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5SyscfgState, STM32L4X5_SYSCFG)
+
+#define SYSCFG_NUM_EXTICR 4
+
+struct Stm32l4x5SyscfgState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t memrmp;
+ uint32_t cfgr1;
+ uint32_t exticr[SYSCFG_NUM_EXTICR];
+ uint32_t scsr;
+ uint32_t cfgr2;
+ uint32_t swpr;
+ uint32_t skr;
+ uint32_t swpr2;
+
+ qemu_irq gpio_out[GPIO_NUM_PINS];
+};
+
+#endif
diff --git a/include/hw/misc/tz-mpc.h b/include/hw/misc/tz-mpc.h
index 6f15945410..74d5d822cf 100644
--- a/include/hw/misc/tz-mpc.h
+++ b/include/hw/misc/tz-mpc.h
@@ -32,15 +32,15 @@
#define TZ_MPC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_TZ_MPC "tz-mpc"
-#define TZ_MPC(obj) OBJECT_CHECK(TZMPC, (obj), TYPE_TZ_MPC)
+OBJECT_DECLARE_SIMPLE_TYPE(TZMPC, TZ_MPC)
#define TZ_NUM_PORTS 16
#define TYPE_TZ_MPC_IOMMU_MEMORY_REGION "tz-mpc-iommu-memory-region"
-typedef struct TZMPC TZMPC;
struct TZMPC {
/*< private >*/
diff --git a/include/hw/misc/tz-msc.h b/include/hw/misc/tz-msc.h
index 116b96ae9b..77cc7f2404 100644
--- a/include/hw/misc/tz-msc.h
+++ b/include/hw/misc/tz-msc.h
@@ -52,11 +52,12 @@
#include "hw/sysbus.h"
#include "target/arm/idau.h"
+#include "qom/object.h"
#define TYPE_TZ_MSC "tz-msc"
-#define TZ_MSC(obj) OBJECT_CHECK(TZMSC, (obj), TYPE_TZ_MSC)
+OBJECT_DECLARE_SIMPLE_TYPE(TZMSC, TZ_MSC)
-typedef struct TZMSC {
+struct TZMSC {
/*< private >*/
SysBusDevice parent_obj;
@@ -74,6 +75,6 @@ typedef struct TZMSC {
AddressSpace downstream_as;
MemoryRegion upstream;
IDAUInterface *idau;
-} TZMSC;
+};
#endif
diff --git a/include/hw/misc/tz-ppc.h b/include/hw/misc/tz-ppc.h
index fc8b806e4d..021d671b29 100644
--- a/include/hw/misc/tz-ppc.h
+++ b/include/hw/misc/tz-ppc.h
@@ -38,7 +38,13 @@
*
* QEMU interface:
* + sysbus MMIO regions 0..15: MemoryRegions defining the upstream end
- * of each of the 16 ports of the PPC
+ *    of each of the 16 ports of the PPC. If a port at the end of the 0..15
+ *    range is unused (i.e. no downstream MemoryRegion is connected to it),
+ *    no sysbus MMIO region is created for its upstream. If an unused port
+ *    lies in the middle of the range with used ports at higher port numbers,
+ *    a dummy MMIO region is created to ensure that port N's upstream is
+ *    always sysbus MMIO region N. Dummy regions should not be mapped, and
+ *    they will assert if any access is made to them.
* + Property "port[0..15]": MemoryRegion defining the downstream device(s)
* for each of the 16 ports of the PPC
* + Named GPIO inputs "cfg_nonsec[0..15]": set to 1 if the port should be
@@ -60,13 +66,13 @@
#define TZ_PPC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_TZ_PPC "tz-ppc"
-#define TZ_PPC(obj) OBJECT_CHECK(TZPPC, (obj), TYPE_TZ_PPC)
+OBJECT_DECLARE_SIMPLE_TYPE(TZPPC, TZ_PPC)
#define TZ_NUM_PORTS 16
-typedef struct TZPPC TZPPC;
typedef struct TZPPCPort {
TZPPC *ppc;
diff --git a/include/hw/misc/unimp.h b/include/hw/misc/unimp.h
index 2a291ca42d..518d627dc5 100644
--- a/include/hw/misc/unimp.h
+++ b/include/hw/misc/unimp.h
@@ -8,19 +8,22 @@
#ifndef HW_MISC_UNIMP_H
#define HW_MISC_UNIMP_H
+#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
+#include "qapi/error.h"
+#include "qom/object.h"
#define TYPE_UNIMPLEMENTED_DEVICE "unimplemented-device"
-#define UNIMPLEMENTED_DEVICE(obj) \
- OBJECT_CHECK(UnimplementedDeviceState, (obj), TYPE_UNIMPLEMENTED_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(UnimplementedDeviceState, UNIMPLEMENTED_DEVICE)
-typedef struct {
+struct UnimplementedDeviceState {
SysBusDevice parent_obj;
MemoryRegion iomem;
+ unsigned offset_fmt_width;
char *name;
uint64_t size;
-} UnimplementedDeviceState;
+};
/**
* create_unimplemented_device: create and map a dummy device
@@ -39,11 +42,11 @@ static inline void create_unimplemented_device(const char *name,
hwaddr base,
hwaddr size)
{
- DeviceState *dev = qdev_create(NULL, TYPE_UNIMPLEMENTED_DEVICE);
+ DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE);
qdev_prop_set_string(dev, "name", name);
qdev_prop_set_uint64(dev, "size", size);
- qdev_init_nofail(dev);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map_overlap(SYS_BUS_DEVICE(dev), 0, base, -1000);
}
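+
+/*
+ * Minimal usage sketch (the name and address range below are made up for
+ * illustration):
+ *
+ *     create_unimplemented_device("mmio.unimplemented", 0x10000000, 0x1000);
+ *
+ * Guest accesses to the region are then logged (the "unimp" log mask) rather
+ * than faulting; reads return 0 and writes are ignored.
+ */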
diff --git a/include/hw/misc/virt_ctrl.h b/include/hw/misc/virt_ctrl.h
new file mode 100644
index 0000000000..81346cf017
--- /dev/null
+++ b/include/hw/misc/virt_ctrl.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Virt system Controller
+ */
+
+#ifndef VIRT_CTRL_H
+#define VIRT_CTRL_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_VIRT_CTRL "virt-ctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtCtrlState, VIRT_CTRL)
+
+struct VirtCtrlState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t irq_enabled;
+};
+
+#endif
diff --git a/include/hw/misc/vmcoreinfo.h b/include/hw/misc/vmcoreinfo.h
index 0d11578059..0b7b55d400 100644
--- a/include/hw/misc/vmcoreinfo.h
+++ b/include/hw/misc/vmcoreinfo.h
@@ -12,20 +12,23 @@
#ifndef VMCOREINFO_H
#define VMCOREINFO_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
#include "standard-headers/linux/qemu_fw_cfg.h"
+#include "qom/object.h"
#define VMCOREINFO_DEVICE "vmcoreinfo"
-#define VMCOREINFO(obj) OBJECT_CHECK(VMCoreInfoState, (obj), VMCOREINFO_DEVICE)
+typedef struct VMCoreInfoState VMCoreInfoState;
+DECLARE_INSTANCE_CHECKER(VMCoreInfoState, VMCOREINFO,
+ VMCOREINFO_DEVICE)
typedef struct fw_cfg_vmcoreinfo FWCfgVMCoreInfo;
-typedef struct VMCoreInfoState {
- DeviceClass parent_obj;
+struct VMCoreInfoState {
+ DeviceState parent_obj;
bool has_vmcoreinfo;
FWCfgVMCoreInfo vmcoreinfo;
-} VMCoreInfoState;
+};
/* returns NULL unless there is exactly one device */
static inline VMCoreInfoState *vmcoreinfo_find(void)
diff --git a/include/hw/misc/xlnx-cfi-if.h b/include/hw/misc/xlnx-cfi-if.h
new file mode 100644
index 0000000000..f9bd12292d
--- /dev/null
+++ b/include/hw/misc/xlnx-cfi-if.h
@@ -0,0 +1,59 @@
+/*
+ * Xilinx CFI interface
+ *
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef XLNX_CFI_IF_H
+#define XLNX_CFI_IF_H 1
+
+#include "qemu/help-texts.h"
+#include "hw/hw.h"
+#include "qom/object.h"
+
+#define TYPE_XLNX_CFI_IF "xlnx-cfi-if"
+typedef struct XlnxCfiIfClass XlnxCfiIfClass;
+DECLARE_CLASS_CHECKERS(XlnxCfiIfClass, XLNX_CFI_IF, TYPE_XLNX_CFI_IF)
+
+#define XLNX_CFI_IF(obj) \
+ INTERFACE_CHECK(XlnxCfiIf, (obj), TYPE_XLNX_CFI_IF)
+
+typedef enum {
+ PACKET_TYPE_CFU = 0x52,
+ PACKET_TYPE_CFRAME = 0xA1,
+} xlnx_cfi_packet_type;
+
+typedef enum {
+ CFRAME_FAR = 1,
+ CFRAME_SFR = 2,
+ CFRAME_FDRI = 4,
+ CFRAME_CMD = 6,
+} xlnx_cfi_reg_addr;
+
+typedef struct XlnxCfiPacket {
+ uint8_t reg_addr;
+ uint32_t data[4];
+} XlnxCfiPacket;
+
+typedef struct XlnxCfiIf {
+ Object Parent;
+} XlnxCfiIf;
+
+typedef struct XlnxCfiIfClass {
+ InterfaceClass parent;
+
+ void (*cfi_transfer_packet)(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
+} XlnxCfiIfClass;
+
+/**
+ * Transfer an XlnxCfiPacket.
+ *
+ * @cfi_if: the object implementing this interface
+ * @pkt: a pointer to the XlnxCfiPacket to transfer
+ */
+void xlnx_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
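+
+/*
+ * Minimal usage sketch (the data words w0..w3 are placeholders):
+ *
+ *     XlnxCfiPacket pkt = {
+ *         .reg_addr = CFRAME_FDRI,
+ *         .data = { w0, w1, w2, w3 },
+ *     };
+ *     xlnx_cfi_transfer_packet(cfi_if, &pkt);
+ *
+ * The helper is expected to dispatch to the cfi_transfer_packet hook of the
+ * object's XlnxCfiIfClass implementation.
+ */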
+
+#endif /* XLNX_CFI_IF_H */
diff --git a/include/hw/misc/xlnx-versal-cframe-reg.h b/include/hw/misc/xlnx-versal-cframe-reg.h
new file mode 100644
index 0000000000..83f6a07744
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-cframe-reg.h
@@ -0,0 +1,303 @@
+/*
+ * QEMU model of the Configuration Frame Control module
+ *
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * References:
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/CFRAME_REG-Module
+ */
+#ifndef HW_MISC_XLNX_VERSAL_CFRAME_REG_H
+#define HW_MISC_XLNX_VERSAL_CFRAME_REG_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/misc/xlnx-cfi-if.h"
+#include "hw/misc/xlnx-versal-cfu.h"
+#include "qemu/fifo32.h"
+
+#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx-cframe-reg"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameReg, XLNX_VERSAL_CFRAME_REG)
+
+#define TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "xlnx.cframe-bcast-reg"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameBcastReg,
+ XLNX_VERSAL_CFRAME_BCAST_REG)
+
+/*
+ * The registers in this module are 128 bits wide, but they can be written
+ * and read through 4 sequential 32-bit accesses (address[3:2] = 0, 1, 2, 3).
+ */
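+/*
+ * For example, a full 128-bit FAR value can be written as four 32-bit stores
+ * to offsets A_FAR0 (0x10), A_FAR1 (0x14), A_FAR2 (0x18) and A_FAR3 (0x1c),
+ * which REG32() generates from the definitions below.
+ */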
+REG32(CRC0, 0x0)
+ FIELD(CRC, CRC, 0, 32)
+REG32(CRC1, 0x4)
+REG32(CRC2, 0x8)
+REG32(CRC3, 0xc)
+REG32(FAR0, 0x10)
+ FIELD(FAR0, SEGMENT, 23, 2)
+ FIELD(FAR0, BLOCKTYPE, 20, 3)
+ FIELD(FAR0, FRAME_ADDR, 0, 20)
+REG32(FAR1, 0x14)
+REG32(FAR2, 0x18)
+REG32(FAR3, 0x1c)
+REG32(FAR_SFR0, 0x20)
+ FIELD(FAR_SFR0, BLOCKTYPE, 20, 3)
+ FIELD(FAR_SFR0, FRAME_ADDR, 0, 20)
+REG32(FAR_SFR1, 0x24)
+REG32(FAR_SFR2, 0x28)
+REG32(FAR_SFR3, 0x2c)
+REG32(FDRI0, 0x40)
+REG32(FDRI1, 0x44)
+REG32(FDRI2, 0x48)
+REG32(FDRI3, 0x4c)
+REG32(FRCNT0, 0x50)
+ FIELD(FRCNT0, FRCNT, 0, 32)
+REG32(FRCNT1, 0x54)
+REG32(FRCNT2, 0x58)
+REG32(FRCNT3, 0x5c)
+REG32(CMD0, 0x60)
+ FIELD(CMD0, CMD, 0, 5)
+REG32(CMD1, 0x64)
+REG32(CMD2, 0x68)
+REG32(CMD3, 0x6c)
+REG32(CR_MASK0, 0x70)
+REG32(CR_MASK1, 0x74)
+REG32(CR_MASK2, 0x78)
+REG32(CR_MASK3, 0x7c)
+REG32(CTL0, 0x80)
+ FIELD(CTL, PER_FRAME_CRC, 0, 1)
+REG32(CTL1, 0x84)
+REG32(CTL2, 0x88)
+REG32(CTL3, 0x8c)
+REG32(CFRM_ISR0, 0x150)
+ FIELD(CFRM_ISR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_ISR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_ISR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_ISR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_ISR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_ISR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_ISR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_ISR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_ISR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_ISR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_ISR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_ISR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_ISR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_ISR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_ISR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_ISR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_ISR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_ISR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_ISR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_ISR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_ISR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_ISR1, 0x154)
+REG32(CFRM_ISR2, 0x158)
+REG32(CFRM_ISR3, 0x15c)
+REG32(CFRM_IMR0, 0x160)
+ FIELD(CFRM_IMR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_IMR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_IMR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_IMR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_IMR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_IMR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_IMR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_IMR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_IMR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_IMR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_IMR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_IMR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_IMR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_IMR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_IMR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_IMR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_IMR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_IMR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_IMR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_IMR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_IMR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_IMR1, 0x164)
+REG32(CFRM_IMR2, 0x168)
+REG32(CFRM_IMR3, 0x16c)
+REG32(CFRM_IER0, 0x170)
+ FIELD(CFRM_IER0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_IER0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_IER0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_IER0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_IER0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_IER0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_IER0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_IER0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_IER0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_IER0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_IER0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_IER0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_IER0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_IER0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_IER0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_IER0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_IER0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_IER0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_IER0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_IER0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_IER0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_IER1, 0x174)
+REG32(CFRM_IER2, 0x178)
+REG32(CFRM_IER3, 0x17c)
+REG32(CFRM_IDR0, 0x180)
+ FIELD(CFRM_IDR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_IDR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_IDR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_IDR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_IDR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_IDR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_IDR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_IDR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_IDR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_IDR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_IDR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_IDR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_IDR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_IDR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_IDR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_IDR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_IDR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_IDR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_IDR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_IDR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_IDR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_IDR1, 0x184)
+REG32(CFRM_IDR2, 0x188)
+REG32(CFRM_IDR3, 0x18c)
+REG32(CFRM_ITR0, 0x190)
+ FIELD(CFRM_ITR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_ITR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_ITR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_ITR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_ITR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_ITR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_ITR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_ITR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_ITR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_ITR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_ITR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_ITR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_ITR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_ITR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_ITR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_ITR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_ITR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_ITR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_ITR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_ITR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_ITR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_ITR1, 0x194)
+REG32(CFRM_ITR2, 0x198)
+REG32(CFRM_ITR3, 0x19c)
+REG32(SEU_SYNDRM00, 0x1a0)
+REG32(SEU_SYNDRM01, 0x1a4)
+REG32(SEU_SYNDRM02, 0x1a8)
+REG32(SEU_SYNDRM03, 0x1ac)
+REG32(SEU_SYNDRM10, 0x1b0)
+REG32(SEU_SYNDRM11, 0x1b4)
+REG32(SEU_SYNDRM12, 0x1b8)
+REG32(SEU_SYNDRM13, 0x1bc)
+REG32(SEU_SYNDRM20, 0x1c0)
+REG32(SEU_SYNDRM21, 0x1c4)
+REG32(SEU_SYNDRM22, 0x1c8)
+REG32(SEU_SYNDRM23, 0x1cc)
+REG32(SEU_SYNDRM30, 0x1d0)
+REG32(SEU_SYNDRM31, 0x1d4)
+REG32(SEU_SYNDRM32, 0x1d8)
+REG32(SEU_SYNDRM33, 0x1dc)
+REG32(SEU_VIRTUAL_SYNDRM0, 0x1e0)
+REG32(SEU_VIRTUAL_SYNDRM1, 0x1e4)
+REG32(SEU_VIRTUAL_SYNDRM2, 0x1e8)
+REG32(SEU_VIRTUAL_SYNDRM3, 0x1ec)
+REG32(SEU_CRC0, 0x1f0)
+REG32(SEU_CRC1, 0x1f4)
+REG32(SEU_CRC2, 0x1f8)
+REG32(SEU_CRC3, 0x1fc)
+REG32(CFRAME_FAR_BOT0, 0x200)
+REG32(CFRAME_FAR_BOT1, 0x204)
+REG32(CFRAME_FAR_BOT2, 0x208)
+REG32(CFRAME_FAR_BOT3, 0x20c)
+REG32(CFRAME_FAR_TOP0, 0x210)
+REG32(CFRAME_FAR_TOP1, 0x214)
+REG32(CFRAME_FAR_TOP2, 0x218)
+REG32(CFRAME_FAR_TOP3, 0x21c)
+REG32(LAST_FRAME_BOT0, 0x220)
+ FIELD(LAST_FRAME_BOT0, BLOCKTYPE1_LAST_FRAME_LSB, 20, 12)
+ FIELD(LAST_FRAME_BOT0, BLOCKTYPE0_LAST_FRAME, 0, 20)
+REG32(LAST_FRAME_BOT1, 0x224)
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE3_LAST_FRAME_LSB, 28, 4)
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE2_LAST_FRAME, 8, 20)
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE1_LAST_FRAME_MSB, 0, 8)
+REG32(LAST_FRAME_BOT2, 0x228)
+ FIELD(LAST_FRAME_BOT2, BLOCKTYPE3_LAST_FRAME_MSB, 0, 16)
+REG32(LAST_FRAME_BOT3, 0x22c)
+REG32(LAST_FRAME_TOP0, 0x230)
+ FIELD(LAST_FRAME_TOP0, BLOCKTYPE5_LAST_FRAME_LSB, 20, 12)
+ FIELD(LAST_FRAME_TOP0, BLOCKTYPE4_LAST_FRAME, 0, 20)
+REG32(LAST_FRAME_TOP1, 0x234)
+ FIELD(LAST_FRAME_TOP1, BLOCKTYPE6_LAST_FRAME, 8, 20)
+ FIELD(LAST_FRAME_TOP1, BLOCKTYPE5_LAST_FRAME_MSB, 0, 8)
+REG32(LAST_FRAME_TOP2, 0x238)
+REG32(LAST_FRAME_TOP3, 0x23c)
+
+#define CFRAME_REG_R_MAX (R_LAST_FRAME_TOP3 + 1)
+
+#define FRAME_NUM_QWORDS 25
+#define FRAME_NUM_WORDS (FRAME_NUM_QWORDS * 4) /* 25 * 128 bits */
+
+typedef struct XlnxCFrame {
+ uint32_t data[FRAME_NUM_WORDS];
+} XlnxCFrame;
+
+struct XlnxVersalCFrameReg {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ MemoryRegion iomem_fdri;
+ qemu_irq irq_cfrm_imr;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ uint32_t regs[CFRAME_REG_R_MAX];
+ RegisterInfo regs_info[CFRAME_REG_R_MAX];
+
+ bool rowon;
+ bool wcfg;
+ bool rcfg;
+
+ GTree *cframes;
+ Fifo32 new_f_data;
+
+ struct {
+ XlnxCfiIf *cfu_fdro;
+ uint32_t blktype_num_frames[7];
+ } cfg;
+ bool row_configured;
+};
+
+struct XlnxVersalCFrameBcastReg {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem_reg;
+ MemoryRegion iomem_fdri;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ struct {
+ XlnxCfiIf *cframe[15];
+ } cfg;
+};
+
+#endif
diff --git a/include/hw/misc/xlnx-versal-cfu.h b/include/hw/misc/xlnx-versal-cfu.h
new file mode 100644
index 0000000000..3de3ee4923
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-cfu.h
@@ -0,0 +1,258 @@
+/*
+ * QEMU model of the CFU Configuration Unit.
+ *
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * References:
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/CFU_CSR-Module
+ */
+#ifndef HW_MISC_XLNX_VERSAL_CFU_APB_H
+#define HW_MISC_XLNX_VERSAL_CFU_APB_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/misc/xlnx-cfi-if.h"
+#include "qemu/fifo32.h"
+
+#define TYPE_XLNX_VERSAL_CFU_APB "xlnx-versal-cfu-apb"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB)
+
+#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx-versal-cfu-fdro"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUFDRO, XLNX_VERSAL_CFU_FDRO)
+
+#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx-versal-cfu-sfr"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUSFR, XLNX_VERSAL_CFU_SFR)
+
+REG32(CFU_ISR, 0x0)
+ FIELD(CFU_ISR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_ISR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_ISR, SLVERR, 7, 1)
+ FIELD(CFU_ISR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_ISR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_ISR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_ISR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_ISR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_ISR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_ISR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_IMR, 0x4)
+ FIELD(CFU_IMR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_IMR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_IMR, SLVERR, 7, 1)
+ FIELD(CFU_IMR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_IMR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_IMR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_IMR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_IMR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_IMR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_IMR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_IER, 0x8)
+ FIELD(CFU_IER, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_IER, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_IER, SLVERR, 7, 1)
+ FIELD(CFU_IER, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_IER, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_IER, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_IER, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_IER, CRC32_ERROR, 2, 1)
+ FIELD(CFU_IER, CRC8_ERROR, 1, 1)
+ FIELD(CFU_IER, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_IDR, 0xc)
+ FIELD(CFU_IDR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_IDR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_IDR, SLVERR, 7, 1)
+ FIELD(CFU_IDR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_IDR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_IDR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_IDR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_IDR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_IDR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_IDR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_ITR, 0x10)
+ FIELD(CFU_ITR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_ITR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_ITR, SLVERR, 7, 1)
+ FIELD(CFU_ITR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_ITR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_ITR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_ITR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_ITR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_ITR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_ITR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_PROTECT, 0x14)
+ FIELD(CFU_PROTECT, ACTIVE, 0, 1)
+REG32(CFU_FGCR, 0x18)
+ FIELD(CFU_FGCR, GCLK_CAL, 14, 1)
+ FIELD(CFU_FGCR, SC_HBC_TRIGGER, 13, 1)
+ FIELD(CFU_FGCR, GLOW, 12, 1)
+ FIELD(CFU_FGCR, GPWRDWN, 11, 1)
+ FIELD(CFU_FGCR, GCAP, 10, 1)
+ FIELD(CFU_FGCR, GSCWE, 9, 1)
+ FIELD(CFU_FGCR, GHIGH_B, 8, 1)
+ FIELD(CFU_FGCR, GMC_B, 7, 1)
+ FIELD(CFU_FGCR, GWE, 6, 1)
+ FIELD(CFU_FGCR, GRESTORE, 5, 1)
+ FIELD(CFU_FGCR, GTS_CFG_B, 4, 1)
+ FIELD(CFU_FGCR, GLUTMASK, 3, 1)
+ FIELD(CFU_FGCR, EN_GLOBS_B, 2, 1)
+ FIELD(CFU_FGCR, EOS, 1, 1)
+ FIELD(CFU_FGCR, INIT_COMPLETE, 0, 1)
+REG32(CFU_CTL, 0x1c)
+ FIELD(CFU_CTL, GSR_GSC, 15, 1)
+ FIELD(CFU_CTL, SLVERR_EN, 14, 1)
+ FIELD(CFU_CTL, CRC32_RESET, 13, 1)
+ FIELD(CFU_CTL, AXI_ERROR_EN, 12, 1)
+ FIELD(CFU_CTL, FLUSH_AXI, 11, 1)
+ FIELD(CFU_CTL, SSI_PER_SLR_PR, 10, 1)
+ FIELD(CFU_CTL, GCAP_CLK_EN, 9, 1)
+ FIELD(CFU_CTL, STATUS_SYNC_DISABLE, 8, 1)
+ FIELD(CFU_CTL, IGNORE_CFI_ERROR, 7, 1)
+ FIELD(CFU_CTL, CFRAME_DISABLE, 6, 1)
+ FIELD(CFU_CTL, QWORD_CNT_RESET, 5, 1)
+ FIELD(CFU_CTL, CRC8_DISABLE, 4, 1)
+ FIELD(CFU_CTL, CRC32_CHECK, 3, 1)
+ FIELD(CFU_CTL, DECOMPRESS, 2, 1)
+ FIELD(CFU_CTL, SEU_GO, 1, 1)
+ FIELD(CFU_CTL, CFI_LOCAL_RESET, 0, 1)
+REG32(CFU_CRAM_RW, 0x20)
+ FIELD(CFU_CRAM_RW, RFIFO_AFULL_DEPTH, 18, 9)
+ FIELD(CFU_CRAM_RW, RD_WAVE_CNT_LEFT, 12, 6)
+ FIELD(CFU_CRAM_RW, RD_WAVE_CNT, 6, 6)
+ FIELD(CFU_CRAM_RW, WR_WAVE_CNT, 0, 6)
+REG32(CFU_MASK, 0x28)
+REG32(CFU_CRC_EXPECT, 0x2c)
+REG32(CFU_CFRAME_LEFT_T0, 0x60)
+ FIELD(CFU_CFRAME_LEFT_T0, NUM, 0, 20)
+REG32(CFU_CFRAME_LEFT_T1, 0x64)
+ FIELD(CFU_CFRAME_LEFT_T1, NUM, 0, 20)
+REG32(CFU_CFRAME_LEFT_T2, 0x68)
+ FIELD(CFU_CFRAME_LEFT_T2, NUM, 0, 20)
+REG32(CFU_ROW_RANGE, 0x6c)
+ FIELD(CFU_ROW_RANGE, HALF_FSR, 5, 1)
+ FIELD(CFU_ROW_RANGE, NUM, 0, 5)
+REG32(CFU_STATUS, 0x100)
+ FIELD(CFU_STATUS, SEU_WRITE_ERROR, 30, 1)
+ FIELD(CFU_STATUS, FRCNT_ERROR, 29, 1)
+ FIELD(CFU_STATUS, RSVD_ERROR, 28, 1)
+ FIELD(CFU_STATUS, FDRO_ERROR, 27, 1)
+ FIELD(CFU_STATUS, FDRI_ERROR, 26, 1)
+ FIELD(CFU_STATUS, FDRI_READ_ERROR, 25, 1)
+ FIELD(CFU_STATUS, READ_FDRI_ERROR, 24, 1)
+ FIELD(CFU_STATUS, READ_SFR_ERROR, 23, 1)
+ FIELD(CFU_STATUS, READ_STREAM_ERROR, 22, 1)
+ FIELD(CFU_STATUS, UNKNOWN_STREAM_PKT, 21, 1)
+ FIELD(CFU_STATUS, USR_GTS, 20, 1)
+ FIELD(CFU_STATUS, USR_GSR, 19, 1)
+ FIELD(CFU_STATUS, AXI_BAD_WSTRB, 18, 1)
+ FIELD(CFU_STATUS, AXI_BAD_AR_SIZE, 17, 1)
+ FIELD(CFU_STATUS, AXI_BAD_AW_SIZE, 16, 1)
+ FIELD(CFU_STATUS, AXI_BAD_ARADDR, 15, 1)
+ FIELD(CFU_STATUS, AXI_BAD_AWADDR, 14, 1)
+ FIELD(CFU_STATUS, SCAN_CLEAR_PASS, 13, 1)
+ FIELD(CFU_STATUS, HC_SEC_ERROR, 12, 1)
+ FIELD(CFU_STATUS, GHIGH_B_ISHIGH, 11, 1)
+ FIELD(CFU_STATUS, GHIGH_B_ISLOW, 10, 1)
+ FIELD(CFU_STATUS, GMC_B_ISHIGH, 9, 1)
+ FIELD(CFU_STATUS, GMC_B_ISLOW, 8, 1)
+ FIELD(CFU_STATUS, GPWRDWN_B_ISHIGH, 7, 1)
+ FIELD(CFU_STATUS, CFI_SEU_CRC_ERROR, 6, 1)
+ FIELD(CFU_STATUS, CFI_SEU_ECC_ERROR, 5, 1)
+ FIELD(CFU_STATUS, CFI_SEU_HEARTBEAT, 4, 1)
+ FIELD(CFU_STATUS, SCAN_CLEAR_DONE, 3, 1)
+ FIELD(CFU_STATUS, HC_COMPLETE, 2, 1)
+ FIELD(CFU_STATUS, CFI_CFRAME_BUSY, 1, 1)
+ FIELD(CFU_STATUS, CFU_STREAM_BUSY, 0, 1)
+REG32(CFU_INTERNAL_STATUS, 0x104)
+ FIELD(CFU_INTERNAL_STATUS, SSI_EOS, 22, 1)
+ FIELD(CFU_INTERNAL_STATUS, SSI_GWE, 21, 1)
+ FIELD(CFU_INTERNAL_STATUS, RFIFO_EMPTY, 20, 1)
+ FIELD(CFU_INTERNAL_STATUS, RFIFO_FULL, 19, 1)
+ FIELD(CFU_INTERNAL_STATUS, SEL_SFR, 18, 1)
+ FIELD(CFU_INTERNAL_STATUS, STREAM_CFRAME, 17, 1)
+ FIELD(CFU_INTERNAL_STATUS, FDRI_PHASE, 16, 1)
+ FIELD(CFU_INTERNAL_STATUS, CFI_PIPE_EN, 15, 1)
+ FIELD(CFU_INTERNAL_STATUS, AWFIFO_DCNT, 10, 5)
+ FIELD(CFU_INTERNAL_STATUS, WFIFO_DCNT, 5, 5)
+ FIELD(CFU_INTERNAL_STATUS, REPAIR_BUSY, 4, 1)
+ FIELD(CFU_INTERNAL_STATUS, TRIMU_BUSY, 3, 1)
+ FIELD(CFU_INTERNAL_STATUS, TRIMB_BUSY, 2, 1)
+ FIELD(CFU_INTERNAL_STATUS, HCLEANR_BUSY, 1, 1)
+ FIELD(CFU_INTERNAL_STATUS, HCLEAN_BUSY, 0, 1)
+REG32(CFU_QWORD_CNT, 0x108)
+REG32(CFU_CRC_LIVE, 0x10c)
+REG32(CFU_PENDING_READ_CNT, 0x110)
+ FIELD(CFU_PENDING_READ_CNT, NUM, 0, 25)
+REG32(CFU_FDRI_CNT, 0x114)
+REG32(CFU_ECO1, 0x118)
+REG32(CFU_ECO2, 0x11c)
+
+#define R_MAX (R_CFU_ECO2 + 1)
+
+#define NUM_STREAM 2
+#define WFIFO_SZ 4
+
+struct XlnxVersalCFUAPB {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ MemoryRegion iomem_stream[NUM_STREAM];
+ qemu_irq irq_cfu_imr;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ uint32_t regs[R_MAX];
+ RegisterInfo regs_info[R_MAX];
+
+ uint8_t fdri_row_addr;
+
+ struct {
+ XlnxCfiIf *cframe[15];
+ } cfg;
+};
+
+
+struct XlnxVersalCFUFDRO {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem_fdro;
+
+ Fifo32 fdro_data;
+};
+
+struct XlnxVersalCFUSFR {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem_sfr;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ struct {
+ XlnxVersalCFUAPB *cfu;
+ } cfg;
+};
+
+/**
+ * Helper function for updating a CFI data write fifo: an array of 4 uint32_t
+ * holding 128 bits of data that are written through 4 sequential 32-bit
+ * accesses. After the last index has been written into the write fifo
+ * (wfifo), the data is copied to and returned in a secondary fifo provided to
+ * the function (wfifo_ret), and the write fifo is cleared (zeroized).
+ *
+ * @addr: the address used when calculating the wfifo array index to update
+ * @value: the value to write into the wfifo array
+ * @wfifo: the wfifo to update
+ * @wfifo_ret: returns the wfifo data once all 128 bits have been written
+ *
+ * @return: true if all 128 bits have been updated.
+ */
+bool update_wfifo(hwaddr addr, uint64_t value,
+ uint32_t *wfifo, uint32_t *wfifo_ret);
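+
+/*
+ * A minimal sketch of how an MMIO write handler might use update_wfifo()
+ * (the handler below is illustrative, not the actual CFU code):
+ *
+ *     static void stream_write(void *opaque, hwaddr addr, uint64_t value,
+ *                              unsigned size)
+ *     {
+ *         XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(opaque);
+ *         uint32_t packet[WFIFO_SZ];
+ *
+ *         if (update_wfifo(addr, value, s->wfifo, packet)) {
+ *             ... process packet[0..3] as one 128-bit CFU packet ...
+ *         }
+ *     }
+ */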
+
+#endif
diff --git a/include/hw/misc/xlnx-versal-crl.h b/include/hw/misc/xlnx-versal-crl.h
new file mode 100644
index 0000000000..dba6d3585d
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-crl.h
@@ -0,0 +1,235 @@
+/*
+ * QEMU model of the Clock-Reset-LPD (CRL).
+ *
+ * Copyright (c) 2022 Xilinx Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ */
+#ifndef HW_MISC_XLNX_VERSAL_CRL_H
+#define HW_MISC_XLNX_VERSAL_CRL_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "target/arm/cpu-qom.h"
+
+#define TYPE_XLNX_VERSAL_CRL "xlnx-versal-crl"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCRL, XLNX_VERSAL_CRL)
+
+REG32(ERR_CTRL, 0x0)
+ FIELD(ERR_CTRL, SLVERR_ENABLE, 0, 1)
+REG32(IR_STATUS, 0x4)
+ FIELD(IR_STATUS, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_MASK, 0x8)
+ FIELD(IR_MASK, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_ENABLE, 0xc)
+ FIELD(IR_ENABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_DISABLE, 0x10)
+ FIELD(IR_DISABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(WPROT, 0x1c)
+ FIELD(WPROT, ACTIVE, 0, 1)
+REG32(PLL_CLK_OTHER_DMN, 0x20)
+ FIELD(PLL_CLK_OTHER_DMN, APLL_BYPASS, 0, 1)
+REG32(RPLL_CTRL, 0x40)
+ FIELD(RPLL_CTRL, POST_SRC, 24, 3)
+ FIELD(RPLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(RPLL_CTRL, CLKOUTDIV, 16, 2)
+ FIELD(RPLL_CTRL, FBDIV, 8, 8)
+ FIELD(RPLL_CTRL, BYPASS, 3, 1)
+ FIELD(RPLL_CTRL, RESET, 0, 1)
+REG32(RPLL_CFG, 0x44)
+ FIELD(RPLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(RPLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(RPLL_CFG, LFHF, 10, 2)
+ FIELD(RPLL_CFG, CP, 5, 4)
+ FIELD(RPLL_CFG, RES, 0, 4)
+REG32(RPLL_FRAC_CFG, 0x48)
+ FIELD(RPLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(RPLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(RPLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(RPLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(RPLL_FRAC_CFG, DATA, 0, 16)
+REG32(PLL_STATUS, 0x50)
+ FIELD(PLL_STATUS, RPLL_STABLE, 2, 1)
+ FIELD(PLL_STATUS, RPLL_LOCK, 0, 1)
+REG32(RPLL_TO_XPD_CTRL, 0x100)
+ FIELD(RPLL_TO_XPD_CTRL, CLKACT, 25, 1)
+ FIELD(RPLL_TO_XPD_CTRL, DIVISOR0, 8, 10)
+REG32(LPD_TOP_SWITCH_CTRL, 0x104)
+ FIELD(LPD_TOP_SWITCH_CTRL, CLKACT_ADMA, 26, 1)
+ FIELD(LPD_TOP_SWITCH_CTRL, CLKACT, 25, 1)
+ FIELD(LPD_TOP_SWITCH_CTRL, DIVISOR0, 8, 10)
+ FIELD(LPD_TOP_SWITCH_CTRL, SRCSEL, 0, 3)
+REG32(LPD_LSBUS_CTRL, 0x108)
+ FIELD(LPD_LSBUS_CTRL, CLKACT, 25, 1)
+ FIELD(LPD_LSBUS_CTRL, DIVISOR0, 8, 10)
+ FIELD(LPD_LSBUS_CTRL, SRCSEL, 0, 3)
+REG32(CPU_R5_CTRL, 0x10c)
+ FIELD(CPU_R5_CTRL, CLKACT_OCM2, 28, 1)
+ FIELD(CPU_R5_CTRL, CLKACT_OCM, 27, 1)
+ FIELD(CPU_R5_CTRL, CLKACT_CORE, 26, 1)
+ FIELD(CPU_R5_CTRL, CLKACT, 25, 1)
+ FIELD(CPU_R5_CTRL, DIVISOR0, 8, 10)
+ FIELD(CPU_R5_CTRL, SRCSEL, 0, 3)
+REG32(IOU_SWITCH_CTRL, 0x114)
+ FIELD(IOU_SWITCH_CTRL, CLKACT, 25, 1)
+ FIELD(IOU_SWITCH_CTRL, DIVISOR0, 8, 10)
+ FIELD(IOU_SWITCH_CTRL, SRCSEL, 0, 3)
+REG32(GEM0_REF_CTRL, 0x118)
+ FIELD(GEM0_REF_CTRL, CLKACT_RX, 27, 1)
+ FIELD(GEM0_REF_CTRL, CLKACT_TX, 26, 1)
+ FIELD(GEM0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(GEM0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(GEM0_REF_CTRL, SRCSEL, 0, 3)
+REG32(GEM1_REF_CTRL, 0x11c)
+ FIELD(GEM1_REF_CTRL, CLKACT_RX, 27, 1)
+ FIELD(GEM1_REF_CTRL, CLKACT_TX, 26, 1)
+ FIELD(GEM1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(GEM1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(GEM1_REF_CTRL, SRCSEL, 0, 3)
+REG32(GEM_TSU_REF_CTRL, 0x120)
+ FIELD(GEM_TSU_REF_CTRL, CLKACT, 25, 1)
+ FIELD(GEM_TSU_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(GEM_TSU_REF_CTRL, SRCSEL, 0, 3)
+REG32(USB0_BUS_REF_CTRL, 0x124)
+ FIELD(USB0_BUS_REF_CTRL, CLKACT, 25, 1)
+ FIELD(USB0_BUS_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(USB0_BUS_REF_CTRL, SRCSEL, 0, 3)
+REG32(UART0_REF_CTRL, 0x128)
+ FIELD(UART0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(UART0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(UART0_REF_CTRL, SRCSEL, 0, 3)
+REG32(UART1_REF_CTRL, 0x12c)
+ FIELD(UART1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(UART1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(UART1_REF_CTRL, SRCSEL, 0, 3)
+REG32(SPI0_REF_CTRL, 0x130)
+ FIELD(SPI0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(SPI0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(SPI0_REF_CTRL, SRCSEL, 0, 3)
+REG32(SPI1_REF_CTRL, 0x134)
+ FIELD(SPI1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(SPI1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(SPI1_REF_CTRL, SRCSEL, 0, 3)
+REG32(CAN0_REF_CTRL, 0x138)
+ FIELD(CAN0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(CAN0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(CAN0_REF_CTRL, SRCSEL, 0, 3)
+REG32(CAN1_REF_CTRL, 0x13c)
+ FIELD(CAN1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(CAN1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(CAN1_REF_CTRL, SRCSEL, 0, 3)
+REG32(I2C0_REF_CTRL, 0x140)
+ FIELD(I2C0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(I2C0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(I2C0_REF_CTRL, SRCSEL, 0, 3)
+REG32(I2C1_REF_CTRL, 0x144)
+ FIELD(I2C1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(I2C1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(I2C1_REF_CTRL, SRCSEL, 0, 3)
+REG32(DBG_LPD_CTRL, 0x148)
+ FIELD(DBG_LPD_CTRL, CLKACT, 25, 1)
+ FIELD(DBG_LPD_CTRL, DIVISOR0, 8, 10)
+ FIELD(DBG_LPD_CTRL, SRCSEL, 0, 3)
+REG32(TIMESTAMP_REF_CTRL, 0x14c)
+ FIELD(TIMESTAMP_REF_CTRL, CLKACT, 25, 1)
+ FIELD(TIMESTAMP_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(TIMESTAMP_REF_CTRL, SRCSEL, 0, 3)
+REG32(CRL_SAFETY_CHK, 0x150)
+REG32(PSM_REF_CTRL, 0x154)
+ FIELD(PSM_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(PSM_REF_CTRL, SRCSEL, 0, 3)
+REG32(DBG_TSTMP_CTRL, 0x158)
+ FIELD(DBG_TSTMP_CTRL, CLKACT, 25, 1)
+ FIELD(DBG_TSTMP_CTRL, DIVISOR0, 8, 10)
+ FIELD(DBG_TSTMP_CTRL, SRCSEL, 0, 3)
+REG32(CPM_TOPSW_REF_CTRL, 0x15c)
+ FIELD(CPM_TOPSW_REF_CTRL, CLKACT, 25, 1)
+ FIELD(CPM_TOPSW_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(CPM_TOPSW_REF_CTRL, SRCSEL, 0, 3)
+REG32(USB3_DUAL_REF_CTRL, 0x160)
+ FIELD(USB3_DUAL_REF_CTRL, CLKACT, 25, 1)
+ FIELD(USB3_DUAL_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(USB3_DUAL_REF_CTRL, SRCSEL, 0, 3)
+REG32(RST_CPU_R5, 0x300)
+ FIELD(RST_CPU_R5, RESET_PGE, 4, 1)
+ FIELD(RST_CPU_R5, RESET_AMBA, 2, 1)
+ FIELD(RST_CPU_R5, RESET_CPU1, 1, 1)
+ FIELD(RST_CPU_R5, RESET_CPU0, 0, 1)
+REG32(RST_ADMA, 0x304)
+ FIELD(RST_ADMA, RESET, 0, 1)
+REG32(RST_GEM0, 0x308)
+ FIELD(RST_GEM0, RESET, 0, 1)
+REG32(RST_GEM1, 0x30c)
+ FIELD(RST_GEM1, RESET, 0, 1)
+REG32(RST_SPARE, 0x310)
+ FIELD(RST_SPARE, RESET, 0, 1)
+REG32(RST_USB0, 0x314)
+ FIELD(RST_USB0, RESET, 0, 1)
+REG32(RST_UART0, 0x318)
+ FIELD(RST_UART0, RESET, 0, 1)
+REG32(RST_UART1, 0x31c)
+ FIELD(RST_UART1, RESET, 0, 1)
+REG32(RST_SPI0, 0x320)
+ FIELD(RST_SPI0, RESET, 0, 1)
+REG32(RST_SPI1, 0x324)
+ FIELD(RST_SPI1, RESET, 0, 1)
+REG32(RST_CAN0, 0x328)
+ FIELD(RST_CAN0, RESET, 0, 1)
+REG32(RST_CAN1, 0x32c)
+ FIELD(RST_CAN1, RESET, 0, 1)
+REG32(RST_I2C0, 0x330)
+ FIELD(RST_I2C0, RESET, 0, 1)
+REG32(RST_I2C1, 0x334)
+ FIELD(RST_I2C1, RESET, 0, 1)
+REG32(RST_DBG_LPD, 0x338)
+ FIELD(RST_DBG_LPD, RPU_DBG1_RESET, 5, 1)
+ FIELD(RST_DBG_LPD, RPU_DBG0_RESET, 4, 1)
+ FIELD(RST_DBG_LPD, RESET_HSDP, 1, 1)
+ FIELD(RST_DBG_LPD, RESET, 0, 1)
+REG32(RST_GPIO, 0x33c)
+ FIELD(RST_GPIO, RESET, 0, 1)
+REG32(RST_TTC, 0x344)
+ FIELD(RST_TTC, TTC3_RESET, 3, 1)
+ FIELD(RST_TTC, TTC2_RESET, 2, 1)
+ FIELD(RST_TTC, TTC1_RESET, 1, 1)
+ FIELD(RST_TTC, TTC0_RESET, 0, 1)
+REG32(RST_TIMESTAMP, 0x348)
+ FIELD(RST_TIMESTAMP, RESET, 0, 1)
+REG32(RST_SWDT, 0x34c)
+ FIELD(RST_SWDT, RESET, 0, 1)
+REG32(RST_OCM, 0x350)
+ FIELD(RST_OCM, RESET, 0, 1)
+REG32(RST_IPI, 0x354)
+ FIELD(RST_IPI, RESET, 0, 1)
+REG32(RST_SYSMON, 0x358)
+ FIELD(RST_SYSMON, SEQ_RST, 1, 1)
+ FIELD(RST_SYSMON, CFG_RST, 0, 1)
+REG32(RST_FPD, 0x360)
+ FIELD(RST_FPD, SRST, 1, 1)
+ FIELD(RST_FPD, POR, 0, 1)
+REG32(PSM_RST_MODE, 0x370)
+ FIELD(PSM_RST_MODE, WAKEUP, 2, 1)
+ FIELD(PSM_RST_MODE, RST_MODE, 0, 2)
+
+#define CRL_R_MAX (R_PSM_RST_MODE + 1)
+
+#define RPU_MAX_CPU 2
+
+struct XlnxVersalCRL {
+ SysBusDevice parent_obj;
+ qemu_irq irq;
+
+ struct {
+ ARMCPU *cpu_r5[RPU_MAX_CPU];
+ DeviceState *adma[8];
+ DeviceState *uart[2];
+ DeviceState *gem[2];
+ DeviceState *usb;
+ } cfg;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[CRL_R_MAX];
+ RegisterInfo regs_info[CRL_R_MAX];
+};
+#endif
diff --git a/include/hw/misc/xlnx-versal-pmc-iou-slcr.h b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h
new file mode 100644
index 0000000000..0c4a4fd66d
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h
@@ -0,0 +1,79 @@
+/*
+ * Header file for the Xilinx Versal's PMC IOU SLCR
+ *
+ * Copyright (C) 2021 Xilinx Inc
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This is a model of Xilinx Versal's PMC I/O Peripheral Control and Status
+ * module documented in Versal's Technical Reference manual [1] and the Versal
+ * ACAP Register reference [2].
+ *
+ * References:
+ *
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PMC_IOP_SLCR-Module
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion for the device's registers
+ * + sysbus IRQ 0: PMC (AXI and APB) parity error interrupt detected by the PMC
+ * I/O peripherals.
+ * + sysbus IRQ 1: Device interrupt.
+ * + Named GPIO output "sd-emmc-sel[0]": selects SD mode (0) or eMMC mode (1)
+ *   on SD/eMMC controller 0.
+ * + Named GPIO output "sd-emmc-sel[1]": selects SD mode (0) or eMMC mode (1)
+ *   on SD/eMMC controller 1.
+ * + Named GPIO output "qspi-ospi-mux-sel": selects the QSPI linear region (0)
+ *   or the OSPI linear region (1).
+ * + Named GPIO output "ospi-mux-sel": selects OSPI indirect access mode (0)
+ *   or OSPI direct access mode (1).
+ */
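+
+/*
+ * A board or SoC model is expected to wire the named GPIO outputs to the
+ * devices they control; a minimal sketch (the receiving device and its input
+ * name are illustrative):
+ *
+ *     qdev_connect_gpio_out_named(DEVICE(&s->pmc_iou_slcr), "ospi-mux-sel", 0,
+ *                                 qdev_get_gpio_in_named(DEVICE(&s->ospi),
+ *                                                        "ospi-mux-sel", 0));
+ */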
+
+#ifndef XLNX_VERSAL_PMC_IOU_SLCR_H
+#define XLNX_VERSAL_PMC_IOU_SLCR_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XILINX_VERSAL_PMC_IOU_SLCR "xlnx.versal-pmc-iou-slcr"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalPmcIouSlcr, XILINX_VERSAL_PMC_IOU_SLCR)
+
+#define XILINX_VERSAL_PMC_IOU_SLCR_R_MAX (0x828 / 4 + 1)
+
+struct XlnxVersalPmcIouSlcr {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq_parity_imr;
+ qemu_irq irq_imr;
+ qemu_irq sd_emmc_sel[2];
+ qemu_irq qspi_ospi_mux_sel;
+ qemu_irq ospi_mux_sel;
+
+ uint32_t regs[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX];
+ RegisterInfo regs_info[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX];
+};
+
+#endif /* XLNX_VERSAL_PMC_IOU_SLCR_H */
diff --git a/include/hw/misc/xlnx-versal-trng.h b/include/hw/misc/xlnx-versal-trng.h
new file mode 100644
index 0000000000..0bcef8a613
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-trng.h
@@ -0,0 +1,58 @@
+/*
+ * Non-crypto strength model of the True Random Number Generator
+ * in the AMD/Xilinx Versal device family.
+ *
+ * Copyright (c) 2017-2020 Xilinx Inc.
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_VERSAL_TRNG_H
+#define XLNX_VERSAL_TRNG_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XLNX_VERSAL_TRNG "xlnx.versal-trng"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalTRng, XLNX_VERSAL_TRNG);
+
+#define RMAX_XLNX_VERSAL_TRNG ((0xf0 / 4) + 1)
+
+typedef struct XlnxVersalTRng {
+ SysBusDevice parent_obj;
+ qemu_irq irq;
+ GRand *prng;
+
+ uint32_t hw_version;
+ uint32_t forced_faults;
+
+ uint32_t rand_count;
+ uint64_t rand_reseed;
+
+ uint64_t forced_prng_seed;
+ uint64_t forced_prng_count;
+ uint64_t tst_seed[2];
+
+ uint32_t regs[RMAX_XLNX_VERSAL_TRNG];
+ RegisterInfo regs_info[RMAX_XLNX_VERSAL_TRNG];
+} XlnxVersalTRng;
+
+#undef RMAX_XLNX_VERSAL_TRNG
+#endif
diff --git a/include/hw/misc/xlnx-versal-xramc.h b/include/hw/misc/xlnx-versal-xramc.h
new file mode 100644
index 0000000000..d3d1862676
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-xramc.h
@@ -0,0 +1,97 @@
+/*
+ * QEMU model of the Xilinx XRAM Controller.
+ *
+ * Copyright (c) 2021 Xilinx Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ */
+
+#ifndef XLNX_VERSAL_XRAMC_H
+#define XLNX_VERSAL_XRAMC_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XLNX_XRAM_CTRL "xlnx.versal-xramc"
+
+#define XLNX_XRAM_CTRL(obj) \
+ OBJECT_CHECK(XlnxXramCtrl, (obj), TYPE_XLNX_XRAM_CTRL)
+
+REG32(XRAM_ERR_CTRL, 0x0)
+ FIELD(XRAM_ERR_CTRL, UE_RES, 3, 1)
+ FIELD(XRAM_ERR_CTRL, PWR_ERR_RES, 2, 1)
+ FIELD(XRAM_ERR_CTRL, PZ_ERR_RES, 1, 1)
+ FIELD(XRAM_ERR_CTRL, APB_ERR_RES, 0, 1)
+REG32(XRAM_ISR, 0x4)
+ FIELD(XRAM_ISR, INV_APB, 0, 1)
+REG32(XRAM_IMR, 0x8)
+ FIELD(XRAM_IMR, INV_APB, 0, 1)
+REG32(XRAM_IEN, 0xc)
+ FIELD(XRAM_IEN, INV_APB, 0, 1)
+REG32(XRAM_IDS, 0x10)
+ FIELD(XRAM_IDS, INV_APB, 0, 1)
+REG32(XRAM_ECC_CNTL, 0x14)
+ FIELD(XRAM_ECC_CNTL, FI_MODE, 2, 1)
+ FIELD(XRAM_ECC_CNTL, DET_ONLY, 1, 1)
+ FIELD(XRAM_ECC_CNTL, ECC_ON_OFF, 0, 1)
+REG32(XRAM_CLR_EXE, 0x18)
+ FIELD(XRAM_CLR_EXE, MON_7, 7, 1)
+ FIELD(XRAM_CLR_EXE, MON_6, 6, 1)
+ FIELD(XRAM_CLR_EXE, MON_5, 5, 1)
+ FIELD(XRAM_CLR_EXE, MON_4, 4, 1)
+ FIELD(XRAM_CLR_EXE, MON_3, 3, 1)
+ FIELD(XRAM_CLR_EXE, MON_2, 2, 1)
+ FIELD(XRAM_CLR_EXE, MON_1, 1, 1)
+ FIELD(XRAM_CLR_EXE, MON_0, 0, 1)
+REG32(XRAM_CE_FFA, 0x1c)
+ FIELD(XRAM_CE_FFA, ADDR, 0, 20)
+REG32(XRAM_CE_FFD0, 0x20)
+REG32(XRAM_CE_FFD1, 0x24)
+REG32(XRAM_CE_FFD2, 0x28)
+REG32(XRAM_CE_FFD3, 0x2c)
+REG32(XRAM_CE_FFE, 0x30)
+ FIELD(XRAM_CE_FFE, SYNDROME, 0, 16)
+REG32(XRAM_UE_FFA, 0x34)
+ FIELD(XRAM_UE_FFA, ADDR, 0, 20)
+REG32(XRAM_UE_FFD0, 0x38)
+REG32(XRAM_UE_FFD1, 0x3c)
+REG32(XRAM_UE_FFD2, 0x40)
+REG32(XRAM_UE_FFD3, 0x44)
+REG32(XRAM_UE_FFE, 0x48)
+ FIELD(XRAM_UE_FFE, SYNDROME, 0, 16)
+REG32(XRAM_FI_D0, 0x4c)
+REG32(XRAM_FI_D1, 0x50)
+REG32(XRAM_FI_D2, 0x54)
+REG32(XRAM_FI_D3, 0x58)
+REG32(XRAM_FI_SY, 0x5c)
+ FIELD(XRAM_FI_SY, DATA, 0, 16)
+REG32(XRAM_RMW_UE_FFA, 0x70)
+ FIELD(XRAM_RMW_UE_FFA, ADDR, 0, 20)
+REG32(XRAM_FI_CNTR, 0x74)
+ FIELD(XRAM_FI_CNTR, COUNT, 0, 24)
+REG32(XRAM_IMP, 0x80)
+ FIELD(XRAM_IMP, SIZE, 0, 4)
+REG32(XRAM_PRDY_DBG, 0x84)
+ FIELD(XRAM_PRDY_DBG, ISLAND3, 12, 4)
+ FIELD(XRAM_PRDY_DBG, ISLAND2, 8, 4)
+ FIELD(XRAM_PRDY_DBG, ISLAND1, 4, 4)
+ FIELD(XRAM_PRDY_DBG, ISLAND0, 0, 4)
+REG32(XRAM_SAFETY_CHK, 0xff8)
+
+#define XRAM_CTRL_R_MAX (R_XRAM_SAFETY_CHK + 1)
+
+typedef struct XlnxXramCtrl {
+ SysBusDevice parent_obj;
+ MemoryRegion ram;
+ qemu_irq irq;
+
+ struct {
+ uint64_t size;
+ unsigned int encoded_size;
+ } cfg;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[XRAM_CTRL_R_MAX];
+ RegisterInfo regs_info[XRAM_CTRL_R_MAX];
+} XlnxXramCtrl;
+#endif
diff --git a/include/hw/misc/xlnx-zynqmp-apu-ctrl.h b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h
new file mode 100644
index 0000000000..c3bf3c1583
--- /dev/null
+++ b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h
@@ -0,0 +1,93 @@
+/*
+ * QEMU model of ZynqMP APU Control.
+ *
+ * Copyright (c) 2013-2022 Xilinx Inc
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Written by Peter Crosthwaite <peter.crosthwaite@xilinx.com> and
+ * Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ *
+ */
+#ifndef HW_MISC_XLNX_ZYNQMP_APU_CTRL_H
+#define HW_MISC_XLNX_ZYNQMP_APU_CTRL_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "target/arm/cpu-qom.h"
+
+#define TYPE_XLNX_ZYNQMP_APU_CTRL "xlnx.apu-ctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPAPUCtrl, XLNX_ZYNQMP_APU_CTRL)
+
+REG32(APU_ERR_CTRL, 0x0)
+ FIELD(APU_ERR_CTRL, PSLVERR, 0, 1)
+REG32(ISR, 0x10)
+ FIELD(ISR, INV_APB, 0, 1)
+REG32(IMR, 0x14)
+ FIELD(IMR, INV_APB, 0, 1)
+REG32(IEN, 0x18)
+ FIELD(IEN, INV_APB, 0, 1)
+REG32(IDS, 0x1c)
+ FIELD(IDS, INV_APB, 0, 1)
+REG32(CONFIG_0, 0x20)
+ FIELD(CONFIG_0, CFGTE, 24, 4)
+ FIELD(CONFIG_0, CFGEND, 16, 4)
+ FIELD(CONFIG_0, VINITHI, 8, 4)
+ FIELD(CONFIG_0, AA64NAA32, 0, 4)
+REG32(CONFIG_1, 0x24)
+ FIELD(CONFIG_1, L2RSTDISABLE, 29, 1)
+ FIELD(CONFIG_1, L1RSTDISABLE, 28, 1)
+ FIELD(CONFIG_1, CP15DISABLE, 0, 4)
+REG32(RVBARADDR0L, 0x40)
+ FIELD(RVBARADDR0L, ADDR, 2, 30)
+REG32(RVBARADDR0H, 0x44)
+ FIELD(RVBARADDR0H, ADDR, 0, 8)
+REG32(RVBARADDR1L, 0x48)
+ FIELD(RVBARADDR1L, ADDR, 2, 30)
+REG32(RVBARADDR1H, 0x4c)
+ FIELD(RVBARADDR1H, ADDR, 0, 8)
+REG32(RVBARADDR2L, 0x50)
+ FIELD(RVBARADDR2L, ADDR, 2, 30)
+REG32(RVBARADDR2H, 0x54)
+ FIELD(RVBARADDR2H, ADDR, 0, 8)
+REG32(RVBARADDR3L, 0x58)
+ FIELD(RVBARADDR3L, ADDR, 2, 30)
+REG32(RVBARADDR3H, 0x5c)
+ FIELD(RVBARADDR3H, ADDR, 0, 8)
+REG32(ACE_CTRL, 0x60)
+ FIELD(ACE_CTRL, AWQOS, 16, 4)
+ FIELD(ACE_CTRL, ARQOS, 0, 4)
+REG32(SNOOP_CTRL, 0x80)
+ FIELD(SNOOP_CTRL, ACE_INACT, 4, 1)
+ FIELD(SNOOP_CTRL, ACP_INACT, 0, 1)
+REG32(PWRCTL, 0x90)
+ FIELD(PWRCTL, CLREXMONREQ, 17, 1)
+ FIELD(PWRCTL, L2FLUSHREQ, 16, 1)
+ FIELD(PWRCTL, CPUPWRDWNREQ, 0, 4)
+REG32(PWRSTAT, 0x94)
+ FIELD(PWRSTAT, CLREXMONACK, 17, 1)
+ FIELD(PWRSTAT, L2FLUSHDONE, 16, 1)
+ FIELD(PWRSTAT, DBGNOPWRDWN, 0, 4)
+
+#define APU_R_MAX ((R_PWRSTAT) + 1)
+
+#define APU_MAX_CPU 4
+
+struct XlnxZynqMPAPUCtrl {
+ SysBusDevice busdev;
+
+ ARMCPU *cpus[APU_MAX_CPU];
+ /* WFIs towards PMU. */
+ qemu_irq wfi_out[4];
+ /* CPU Power status towards INTC Redirect. */
+ qemu_irq cpu_power_status[4];
+ qemu_irq irq_imr;
+
+ uint8_t cpu_pwrdwn_req;
+ uint8_t cpu_in_wfi;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[APU_R_MAX];
+ RegisterInfo regs_info[APU_R_MAX];
+};
+
+#endif
diff --git a/include/hw/misc/xlnx-zynqmp-crf.h b/include/hw/misc/xlnx-zynqmp-crf.h
new file mode 100644
index 0000000000..02ef0bdeee
--- /dev/null
+++ b/include/hw/misc/xlnx-zynqmp-crf.h
@@ -0,0 +1,211 @@
+/*
+ * QEMU model of the CRF - Clock Reset FPD.
+ *
+ * Copyright (c) 2022 Xilinx Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ */
+#ifndef HW_MISC_XLNX_ZYNQMP_CRF_H
+#define HW_MISC_XLNX_ZYNQMP_CRF_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XLNX_ZYNQMP_CRF "xlnx.zynqmp_crf"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPCRF, XLNX_ZYNQMP_CRF)
+
+REG32(ERR_CTRL, 0x0)
+ FIELD(ERR_CTRL, SLVERR_ENABLE, 0, 1)
+REG32(IR_STATUS, 0x4)
+ FIELD(IR_STATUS, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_MASK, 0x8)
+ FIELD(IR_MASK, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_ENABLE, 0xc)
+ FIELD(IR_ENABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_DISABLE, 0x10)
+ FIELD(IR_DISABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(CRF_WPROT, 0x1c)
+ FIELD(CRF_WPROT, ACTIVE, 0, 1)
+REG32(APLL_CTRL, 0x20)
+ FIELD(APLL_CTRL, POST_SRC, 24, 3)
+ FIELD(APLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(APLL_CTRL, CLKOUTDIV, 17, 1)
+ FIELD(APLL_CTRL, DIV2, 16, 1)
+ FIELD(APLL_CTRL, FBDIV, 8, 7)
+ FIELD(APLL_CTRL, BYPASS, 3, 1)
+ FIELD(APLL_CTRL, RESET, 0, 1)
+REG32(APLL_CFG, 0x24)
+ FIELD(APLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(APLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(APLL_CFG, LFHF, 10, 2)
+ FIELD(APLL_CFG, CP, 5, 4)
+ FIELD(APLL_CFG, RES, 0, 4)
+REG32(APLL_FRAC_CFG, 0x28)
+ FIELD(APLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(APLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(APLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(APLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(APLL_FRAC_CFG, DATA, 0, 16)
+REG32(DPLL_CTRL, 0x2c)
+ FIELD(DPLL_CTRL, POST_SRC, 24, 3)
+ FIELD(DPLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(DPLL_CTRL, CLKOUTDIV, 17, 1)
+ FIELD(DPLL_CTRL, DIV2, 16, 1)
+ FIELD(DPLL_CTRL, FBDIV, 8, 7)
+ FIELD(DPLL_CTRL, BYPASS, 3, 1)
+ FIELD(DPLL_CTRL, RESET, 0, 1)
+REG32(DPLL_CFG, 0x30)
+ FIELD(DPLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(DPLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(DPLL_CFG, LFHF, 10, 2)
+ FIELD(DPLL_CFG, CP, 5, 4)
+ FIELD(DPLL_CFG, RES, 0, 4)
+REG32(DPLL_FRAC_CFG, 0x34)
+ FIELD(DPLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(DPLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(DPLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(DPLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(DPLL_FRAC_CFG, DATA, 0, 16)
+REG32(VPLL_CTRL, 0x38)
+ FIELD(VPLL_CTRL, POST_SRC, 24, 3)
+ FIELD(VPLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(VPLL_CTRL, CLKOUTDIV, 17, 1)
+ FIELD(VPLL_CTRL, DIV2, 16, 1)
+ FIELD(VPLL_CTRL, FBDIV, 8, 7)
+ FIELD(VPLL_CTRL, BYPASS, 3, 1)
+ FIELD(VPLL_CTRL, RESET, 0, 1)
+REG32(VPLL_CFG, 0x3c)
+ FIELD(VPLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(VPLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(VPLL_CFG, LFHF, 10, 2)
+ FIELD(VPLL_CFG, CP, 5, 4)
+ FIELD(VPLL_CFG, RES, 0, 4)
+REG32(VPLL_FRAC_CFG, 0x40)
+ FIELD(VPLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(VPLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(VPLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(VPLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(VPLL_FRAC_CFG, DATA, 0, 16)
+REG32(PLL_STATUS, 0x44)
+ FIELD(PLL_STATUS, VPLL_STABLE, 5, 1)
+ FIELD(PLL_STATUS, DPLL_STABLE, 4, 1)
+ FIELD(PLL_STATUS, APLL_STABLE, 3, 1)
+ FIELD(PLL_STATUS, VPLL_LOCK, 2, 1)
+ FIELD(PLL_STATUS, DPLL_LOCK, 1, 1)
+ FIELD(PLL_STATUS, APLL_LOCK, 0, 1)
+REG32(APLL_TO_LPD_CTRL, 0x48)
+ FIELD(APLL_TO_LPD_CTRL, DIVISOR0, 8, 6)
+REG32(DPLL_TO_LPD_CTRL, 0x4c)
+ FIELD(DPLL_TO_LPD_CTRL, DIVISOR0, 8, 6)
+REG32(VPLL_TO_LPD_CTRL, 0x50)
+ FIELD(VPLL_TO_LPD_CTRL, DIVISOR0, 8, 6)
+REG32(ACPU_CTRL, 0x60)
+ FIELD(ACPU_CTRL, CLKACT_HALF, 25, 1)
+ FIELD(ACPU_CTRL, CLKACT_FULL, 24, 1)
+ FIELD(ACPU_CTRL, DIVISOR0, 8, 6)
+ FIELD(ACPU_CTRL, SRCSEL, 0, 3)
+REG32(DBG_TRACE_CTRL, 0x64)
+ FIELD(DBG_TRACE_CTRL, CLKACT, 24, 1)
+ FIELD(DBG_TRACE_CTRL, DIVISOR0, 8, 6)
+ FIELD(DBG_TRACE_CTRL, SRCSEL, 0, 3)
+REG32(DBG_FPD_CTRL, 0x68)
+ FIELD(DBG_FPD_CTRL, CLKACT, 24, 1)
+ FIELD(DBG_FPD_CTRL, DIVISOR0, 8, 6)
+ FIELD(DBG_FPD_CTRL, SRCSEL, 0, 3)
+REG32(DP_VIDEO_REF_CTRL, 0x70)
+ FIELD(DP_VIDEO_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DP_VIDEO_REF_CTRL, DIVISOR1, 16, 6)
+ FIELD(DP_VIDEO_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DP_VIDEO_REF_CTRL, SRCSEL, 0, 3)
+REG32(DP_AUDIO_REF_CTRL, 0x74)
+ FIELD(DP_AUDIO_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DP_AUDIO_REF_CTRL, DIVISOR1, 16, 6)
+ FIELD(DP_AUDIO_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DP_AUDIO_REF_CTRL, SRCSEL, 0, 3)
+REG32(DP_STC_REF_CTRL, 0x7c)
+ FIELD(DP_STC_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DP_STC_REF_CTRL, DIVISOR1, 16, 6)
+ FIELD(DP_STC_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DP_STC_REF_CTRL, SRCSEL, 0, 3)
+REG32(DDR_CTRL, 0x80)
+ FIELD(DDR_CTRL, CLKACT, 24, 1)
+ FIELD(DDR_CTRL, DIVISOR0, 8, 6)
+ FIELD(DDR_CTRL, SRCSEL, 0, 3)
+REG32(GPU_REF_CTRL, 0x84)
+ FIELD(GPU_REF_CTRL, PP1_CLKACT, 26, 1)
+ FIELD(GPU_REF_CTRL, PP0_CLKACT, 25, 1)
+ FIELD(GPU_REF_CTRL, CLKACT, 24, 1)
+ FIELD(GPU_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(GPU_REF_CTRL, SRCSEL, 0, 3)
+REG32(SATA_REF_CTRL, 0xa0)
+ FIELD(SATA_REF_CTRL, CLKACT, 24, 1)
+ FIELD(SATA_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(SATA_REF_CTRL, SRCSEL, 0, 3)
+REG32(PCIE_REF_CTRL, 0xb4)
+ FIELD(PCIE_REF_CTRL, CLKACT, 24, 1)
+ FIELD(PCIE_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(PCIE_REF_CTRL, SRCSEL, 0, 3)
+REG32(GDMA_REF_CTRL, 0xb8)
+ FIELD(GDMA_REF_CTRL, CLKACT, 24, 1)
+ FIELD(GDMA_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(GDMA_REF_CTRL, SRCSEL, 0, 3)
+REG32(DPDMA_REF_CTRL, 0xbc)
+ FIELD(DPDMA_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DPDMA_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DPDMA_REF_CTRL, SRCSEL, 0, 3)
+REG32(TOPSW_MAIN_CTRL, 0xc0)
+ FIELD(TOPSW_MAIN_CTRL, CLKACT, 24, 1)
+ FIELD(TOPSW_MAIN_CTRL, DIVISOR0, 8, 6)
+ FIELD(TOPSW_MAIN_CTRL, SRCSEL, 0, 3)
+REG32(TOPSW_LSBUS_CTRL, 0xc4)
+ FIELD(TOPSW_LSBUS_CTRL, CLKACT, 24, 1)
+ FIELD(TOPSW_LSBUS_CTRL, DIVISOR0, 8, 6)
+ FIELD(TOPSW_LSBUS_CTRL, SRCSEL, 0, 3)
+REG32(DBG_TSTMP_CTRL, 0xf8)
+ FIELD(DBG_TSTMP_CTRL, DIVISOR0, 8, 6)
+ FIELD(DBG_TSTMP_CTRL, SRCSEL, 0, 3)
+REG32(RST_FPD_TOP, 0x100)
+ FIELD(RST_FPD_TOP, PCIE_CFG_RESET, 19, 1)
+ FIELD(RST_FPD_TOP, PCIE_BRIDGE_RESET, 18, 1)
+ FIELD(RST_FPD_TOP, PCIE_CTRL_RESET, 17, 1)
+ FIELD(RST_FPD_TOP, DP_RESET, 16, 1)
+ FIELD(RST_FPD_TOP, SWDT_RESET, 15, 1)
+ FIELD(RST_FPD_TOP, AFI_FM5_RESET, 12, 1)
+ FIELD(RST_FPD_TOP, AFI_FM4_RESET, 11, 1)
+ FIELD(RST_FPD_TOP, AFI_FM3_RESET, 10, 1)
+ FIELD(RST_FPD_TOP, AFI_FM2_RESET, 9, 1)
+ FIELD(RST_FPD_TOP, AFI_FM1_RESET, 8, 1)
+ FIELD(RST_FPD_TOP, AFI_FM0_RESET, 7, 1)
+ FIELD(RST_FPD_TOP, GDMA_RESET, 6, 1)
+ FIELD(RST_FPD_TOP, GPU_PP1_RESET, 5, 1)
+ FIELD(RST_FPD_TOP, GPU_PP0_RESET, 4, 1)
+ FIELD(RST_FPD_TOP, GPU_RESET, 3, 1)
+ FIELD(RST_FPD_TOP, GT_RESET, 2, 1)
+ FIELD(RST_FPD_TOP, SATA_RESET, 1, 1)
+REG32(RST_FPD_APU, 0x104)
+ FIELD(RST_FPD_APU, ACPU3_PWRON_RESET, 13, 1)
+ FIELD(RST_FPD_APU, ACPU2_PWRON_RESET, 12, 1)
+ FIELD(RST_FPD_APU, ACPU1_PWRON_RESET, 11, 1)
+ FIELD(RST_FPD_APU, ACPU0_PWRON_RESET, 10, 1)
+ FIELD(RST_FPD_APU, APU_L2_RESET, 8, 1)
+ FIELD(RST_FPD_APU, ACPU3_RESET, 3, 1)
+ FIELD(RST_FPD_APU, ACPU2_RESET, 2, 1)
+ FIELD(RST_FPD_APU, ACPU1_RESET, 1, 1)
+ FIELD(RST_FPD_APU, ACPU0_RESET, 0, 1)
+REG32(RST_DDR_SS, 0x108)
+ FIELD(RST_DDR_SS, DDR_RESET, 3, 1)
+ FIELD(RST_DDR_SS, APM_RESET, 2, 1)
+
+#define CRF_R_MAX (R_RST_DDR_SS + 1)
+
+struct XlnxZynqMPCRF {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq_ir;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[CRF_R_MAX];
+ RegisterInfo regs_info[CRF_R_MAX];
+};
+
+#endif
diff --git a/include/hw/net/allwinner-sun8i-emac.h b/include/hw/net/allwinner-sun8i-emac.h
new file mode 100644
index 0000000000..185895f4e1
--- /dev/null
+++ b/include/hw/net/allwinner-sun8i-emac.h
@@ -0,0 +1,104 @@
+/*
+ * Allwinner Sun8i Ethernet MAC emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_NET_ALLWINNER_SUN8I_EMAC_H
+#define HW_NET_ALLWINNER_SUN8I_EMAC_H
+
+#include "qom/object.h"
+#include "net/net.h"
+#include "hw/sysbus.h"
+
+/**
+ * Object model
+ * @{
+ */
+
+#define TYPE_AW_SUN8I_EMAC "allwinner-sun8i-emac"
+OBJECT_DECLARE_SIMPLE_TYPE(AwSun8iEmacState, AW_SUN8I_EMAC)
+
+/** @} */
+
+/**
+ * Allwinner Sun8i EMAC object instance state
+ */
+struct AwSun8iEmacState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Interrupt output signal to notify CPU */
+ qemu_irq irq;
+
+ /** Memory region where DMA transfers are done */
+ MemoryRegion *dma_mr;
+
+ /** Address space used internally for DMA transfers */
+ AddressSpace dma_as;
+
+ /** Generic Network Interface Controller (NIC) for networking API */
+ NICState *nic;
+
+ /** Generic Network Interface Controller (NIC) configuration */
+ NICConf conf;
+
+ /**
+ * @name Media Independent Interface (MII)
+ * @{
+ */
+
+ uint8_t mii_phy_addr; /**< PHY address */
+ uint32_t mii_cr; /**< Control */
+ uint32_t mii_st; /**< Status */
+ uint32_t mii_adv; /**< Advertised Abilities */
+
+ /** @} */
+
+ /**
+ * @name Hardware Registers
+ * @{
+ */
+
+ uint32_t basic_ctl0; /**< Basic Control 0 */
+ uint32_t basic_ctl1; /**< Basic Control 1 */
+ uint32_t int_en; /**< Interrupt Enable */
+ uint32_t int_sta; /**< Interrupt Status */
+ uint32_t frm_flt; /**< Receive Frame Filter */
+
+ uint32_t rx_ctl0; /**< Receive Control 0 */
+ uint32_t rx_ctl1; /**< Receive Control 1 */
+ uint32_t rx_desc_head; /**< Receive Descriptor List Address */
+ uint32_t rx_desc_curr; /**< Current Receive Descriptor Address */
+
+ uint32_t tx_ctl0; /**< Transmit Control 0 */
+ uint32_t tx_ctl1; /**< Transmit Control 1 */
+ uint32_t tx_desc_head; /**< Transmit Descriptor List Address */
+ uint32_t tx_desc_curr; /**< Current Transmit Descriptor Address */
+ uint32_t tx_flowctl; /**< Transmit Flow Control */
+
+ uint32_t mii_cmd; /**< Management Interface Command */
+ uint32_t mii_data; /**< Management Interface Data */
+
+ /** @} */
+
+};
+
+#endif /* HW_NET_ALLWINNER_SUN8I_EMAC_H */
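
A minimal sketch of how a board model might wire up a device with this state layout, assuming the DMA region is exposed as a "dma-memory" link property and using placeholder base address and IRQ values:

    /*
     * Illustrative wiring only; the "dma-memory" property name, the MMIO
     * base and the IRQ routing are assumptions for this sketch.
     */
    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "exec/address-spaces.h"
    #include "hw/net/allwinner-sun8i-emac.h"

    static void example_wire_emac(Object *parent, AwSun8iEmacState *emac,
                                  hwaddr base, qemu_irq irq)
    {
        object_initialize_child(parent, "emac", emac, TYPE_AW_SUN8I_EMAC);
        /* Let the MAC run its descriptor DMA in system memory. */
        object_property_set_link(OBJECT(emac), "dma-memory",
                                 OBJECT(get_system_memory()), &error_fatal);
        sysbus_realize(SYS_BUS_DEVICE(emac), &error_fatal);
        sysbus_mmio_map(SYS_BUS_DEVICE(emac), 0, base);
        sysbus_connect_irq(SYS_BUS_DEVICE(emac), 0, irq);
    }
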
diff --git a/include/hw/net/allwinner_emac.h b/include/hw/net/allwinner_emac.h
index 905a43deb4..534e748982 100644
--- a/include/hw/net/allwinner_emac.h
+++ b/include/hw/net/allwinner_emac.h
@@ -27,9 +27,11 @@
#include "net/net.h"
#include "qemu/fifo8.h"
#include "hw/net/mii.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_AW_EMAC "allwinner-emac"
-#define AW_EMAC(obj) OBJECT_CHECK(AwEmacState, (obj), TYPE_AW_EMAC)
+OBJECT_DECLARE_SIMPLE_TYPE(AwEmacState, AW_EMAC)
/*
* Allwinner EMAC register list
@@ -143,7 +145,7 @@ typedef struct RTL8201CPState {
uint16_t anlpar;
} RTL8201CPState;
-typedef struct AwEmacState {
+struct AwEmacState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -170,6 +172,6 @@ typedef struct AwEmacState {
Fifo8 tx_fifo[NUM_TX_FIFOS];
uint32_t tx_length[NUM_TX_FIFOS];
uint32_t tx_channel;
-} AwEmacState;
+};
#endif
diff --git a/include/hw/net/cadence_gem.h b/include/hw/net/cadence_gem.h
index 5426961d91..91ebb5c8ae 100644
--- a/include/hw/net/cadence_gem.h
+++ b/include/hw/net/cadence_gem.h
@@ -23,9 +23,11 @@
*/
#ifndef CADENCE_GEM_H
+#define CADENCE_GEM_H
+#include "qom/object.h"
#define TYPE_CADENCE_GEM "cadence_gem"
-#define CADENCE_GEM(obj) OBJECT_CHECK(CadenceGEMState, (obj), TYPE_CADENCE_GEM)
+OBJECT_DECLARE_SIMPLE_TYPE(CadenceGEMState, CADENCE_GEM)
#include "net/net.h"
#include "hw/sysbus.h"
@@ -39,7 +41,10 @@
#define MAX_TYPE1_SCREENERS 16
#define MAX_TYPE2_SCREENERS 16
-typedef struct CadenceGEMState {
+#define MAX_JUMBO_FRAME_SIZE_MASK 0x3FFF
+#define MAX_FRAME_SIZE MAX_JUMBO_FRAME_SIZE_MASK
+
+struct CadenceGEMState {
/*< private >*/
SysBusDevice parent_obj;
@@ -56,6 +61,7 @@ typedef struct CadenceGEMState {
uint8_t num_type1_screeners;
uint8_t num_type2_screeners;
uint32_t revision;
+ uint16_t jumbo_max_len;
/* GEM registers backing store */
uint32_t regs[CADENCE_GEM_MAXREG];
@@ -68,6 +74,8 @@ typedef struct CadenceGEMState {
/* Mask of register bits which are write 1 to clear */
uint32_t regs_w1c[CADENCE_GEM_MAXREG];
+ /* PHY address */
+ uint8_t phy_addr;
/* PHY registers backing store */
uint16_t phy_regs[32];
@@ -79,10 +87,11 @@ typedef struct CadenceGEMState {
uint8_t can_rx_state; /* Debug only */
+ uint8_t tx_packet[MAX_FRAME_SIZE];
+ uint8_t rx_packet[MAX_FRAME_SIZE];
uint32_t rx_desc[MAX_PRIORITY_QUEUES][DESC_MAX_NUM_WORDS];
bool sar_active[4];
-} CadenceGEMState;
+};
-#define CADENCE_GEM_H
#endif
diff --git a/include/hw/net/dp8393x.h b/include/hw/net/dp8393x.h
new file mode 100644
index 0000000000..4a3f7478be
--- /dev/null
+++ b/include/hw/net/dp8393x.h
@@ -0,0 +1,60 @@
+/*
+ * QEMU NS SONIC DP8393x netcard
+ *
+ * Copyright (c) 2008-2009 Herve Poussineau
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_NET_DP8393X_H
+#define HW_NET_DP8393X_H
+
+#include "hw/sysbus.h"
+#include "net/net.h"
+#include "exec/memory.h"
+
+#define SONIC_REG_COUNT 0x40
+
+#define TYPE_DP8393X "dp8393x"
+OBJECT_DECLARE_SIMPLE_TYPE(dp8393xState, DP8393X)
+
+struct dp8393xState {
+ SysBusDevice parent_obj;
+
+ /* Hardware */
+ uint8_t it_shift;
+ bool big_endian;
+ bool last_rba_is_full;
+ qemu_irq irq;
+ int irq_level;
+ QEMUTimer *watchdog;
+ int64_t wt_last_update;
+ NICConf conf;
+ NICState *nic;
+ MemoryRegion mmio;
+
+ /* Registers */
+ uint16_t cam[16][3];
+ uint16_t regs[SONIC_REG_COUNT];
+
+ /* Temporaries */
+ uint8_t tx_buffer[0x10000];
+ int loopback_packet;
+
+ /* Memory access */
+ MemoryRegion *dma_mr;
+ AddressSpace as;
+};
+
+#endif
diff --git a/include/hw/net/ftgmac100.h b/include/hw/net/ftgmac100.h
index 94cfe05332..765d1538a4 100644
--- a/include/hw/net/ftgmac100.h
+++ b/include/hw/net/ftgmac100.h
@@ -9,9 +9,10 @@
#ifndef FTGMAC100_H
#define FTGMAC100_H
+#include "qom/object.h"
#define TYPE_FTGMAC100 "ftgmac100"
-#define FTGMAC100(obj) OBJECT_CHECK(FTGMAC100State, (obj), TYPE_FTGMAC100)
+OBJECT_DECLARE_SIMPLE_TYPE(FTGMAC100State, FTGMAC100)
#include "hw/sysbus.h"
#include "net/net.h"
@@ -21,7 +22,7 @@
*/
#define FTGMAC100_MAX_FRAME_SIZE 9220
-typedef struct FTGMAC100State {
+struct FTGMAC100State {
/*< private >*/
SysBusDevice parent_obj;
@@ -64,6 +65,23 @@ typedef struct FTGMAC100State {
bool aspeed;
uint32_t txdes0_edotr;
uint32_t rxdes0_edorr;
-} FTGMAC100State;
+};
+
+#define TYPE_ASPEED_MII "aspeed-mmi"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedMiiState, ASPEED_MII)
+
+/*
+ * AST2600 MII controller
+ */
+struct AspeedMiiState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ FTGMAC100State *nic;
+
+ MemoryRegion iomem;
+ uint32_t phycr;
+ uint32_t phydata;
+};
#endif
diff --git a/include/hw/net/imx_fec.h b/include/hw/net/imx_fec.h
index 7b3faa4019..2d13290c78 100644
--- a/include/hw/net/imx_fec.h
+++ b/include/hw/net/imx_fec.h
@@ -23,9 +23,10 @@
#ifndef IMX_FEC_H
#define IMX_FEC_H
+#include "qom/object.h"
#define TYPE_IMX_FEC "imx.fec"
-#define IMX_FEC(obj) OBJECT_CHECK(IMXFECState, (obj), TYPE_IMX_FEC)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXFECState, IMX_FEC)
#define TYPE_IMX_ENET "imx.enet"
@@ -247,7 +248,7 @@ typedef struct {
#define FSL_IMX25_FEC_SIZE 0x4000
-typedef struct IMXFECState {
+struct IMXFECState {
/*< private >*/
SysBusDevice parent_obj;
@@ -268,11 +269,14 @@ typedef struct IMXFECState {
uint32_t phy_advertise;
uint32_t phy_int;
uint32_t phy_int_mask;
+ uint32_t phy_num;
+ bool phy_connected;
+ struct IMXFECState *phy_consumer;
bool is_fec;
/* Buffer used to assemble a Tx frame */
uint8_t frame[ENET_MAX_FRAME_SIZE];
-} IMXFECState;
+};
#endif
diff --git a/include/hw/net/lan9118.h b/include/hw/net/lan9118.h
new file mode 100644
index 0000000000..4bf9da7a63
--- /dev/null
+++ b/include/hw/net/lan9118.h
@@ -0,0 +1,20 @@
+/*
+ * SMSC LAN9118 Ethernet interface emulation
+ *
+ * Copyright (c) 2009 CodeSourcery, LLC.
+ * Written by Paul Brook
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_NET_LAN9118_H
+#define HW_NET_LAN9118_H
+
+#include "net/net.h"
+
+#define TYPE_LAN9118 "lan9118"
+
+void lan9118_init(uint32_t, qemu_irq);
+
+#endif
diff --git a/include/hw/net/lance.h b/include/hw/net/lance.h
index ffdd35c4d7..f645d6af67 100644
--- a/include/hw/net/lance.h
+++ b/include/hw/net/lance.h
@@ -31,15 +31,18 @@
#include "net/net.h"
#include "hw/net/pcnet.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_LANCE "lance"
-#define SYSBUS_PCNET(obj) \
- OBJECT_CHECK(SysBusPCNetState, (obj), TYPE_LANCE)
+typedef struct SysBusPCNetState SysBusPCNetState;
+DECLARE_INSTANCE_CHECKER(SysBusPCNetState, SYSBUS_PCNET,
+ TYPE_LANCE)
-typedef struct {
+struct SysBusPCNetState {
SysBusDevice parent_obj;
PCNetState state;
-} SysBusPCNetState;
+};
#endif
diff --git a/include/hw/net/lasi_82596.h b/include/hw/net/lasi_82596.h
new file mode 100644
index 0000000000..439356ec19
--- /dev/null
+++ b/include/hw/net/lasi_82596.h
@@ -0,0 +1,31 @@
+/*
+ * QEMU LASI i82596 device emulation
+ *
+ * Copyright (c) 2019 Helge Deller <deller@gmx.de>
+ *
+ */
+
+#ifndef LASI_82596_H
+#define LASI_82596_H
+
+#include "net/net.h"
+#include "hw/net/i82596.h"
+#include "hw/sysbus.h"
+
+#define TYPE_LASI_82596 "lasi_82596"
+typedef struct SysBusI82596State SysBusI82596State;
+DECLARE_INSTANCE_CHECKER(SysBusI82596State, SYSBUS_I82596,
+ TYPE_LASI_82596)
+
+struct SysBusI82596State {
+ SysBusDevice parent_obj;
+
+ I82596State state;
+ uint16_t last_val;
+ int val_index:1;
+};
+
+SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space, hwaddr hpa,
+ qemu_irq irq, gboolean match_default);
+
+#endif
diff --git a/include/hw/net/mii.h b/include/hw/net/mii.h
index 4ae4dcce7e..f7feddac9b 100644
--- a/include/hw/net/mii.h
+++ b/include/hw/net/mii.h
@@ -55,6 +55,7 @@
#define MII_BMCR_CTST (1 << 7) /* Collision test */
#define MII_BMCR_SPEED1000 (1 << 6) /* MSB of Speed (1000) */
+#define MII_BMSR_100T4 (1 << 15) /* Can do 100mbps T4 */
#define MII_BMSR_100TX_FD (1 << 14) /* Can do 100mbps, full-duplex */
#define MII_BMSR_100TX_HD (1 << 13) /* Can do 100mbps, half-duplex */
#define MII_BMSR_10T_FD (1 << 12) /* Can do 10mbps, full-duplex */
@@ -70,7 +71,7 @@
#define MII_BMSR_JABBER (1 << 1) /* Jabber detected */
#define MII_BMSR_EXTCAP (1 << 0) /* Ext-reg capability */
-#define MII_ANAR_PAUSE_ASYM (1 << 11) /* Try for asymetric pause */
+#define MII_ANAR_PAUSE_ASYM (1 << 11) /* Try for asymmetric pause */
#define MII_ANAR_PAUSE (1 << 10) /* Try for pause */
#define MII_ANAR_TXFD (1 << 8)
#define MII_ANAR_TX (1 << 7)
@@ -81,20 +82,31 @@
#define MII_ANLPAR_ACK (1 << 14)
#define MII_ANLPAR_PAUSEASY (1 << 11) /* can pause asymmetrically */
#define MII_ANLPAR_PAUSE (1 << 10) /* can pause */
+#define MII_ANLPAR_T4 (1 << 9)
#define MII_ANLPAR_TXFD (1 << 8)
#define MII_ANLPAR_TX (1 << 7)
#define MII_ANLPAR_10FD (1 << 6)
#define MII_ANLPAR_10 (1 << 5)
#define MII_ANLPAR_CSMACD (1 << 0)
-#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */
+#define MII_ANER_NP (1 << 2) /* Next Page Able */
+#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */
+#define MII_ANNP_MP (1 << 13) /* Message Page */
+
+#define MII_CTRL1000_MASTER (1 << 11) /* MASTER-SLAVE Manual Configuration Value */
+#define MII_CTRL1000_PORT (1 << 10) /* T2_Repeater/DTE bit */
#define MII_CTRL1000_FULL (1 << 9) /* 1000BASE-T full duplex */
#define MII_CTRL1000_HALF (1 << 8) /* 1000BASE-T half duplex */
+#define MII_STAT1000_LOK (1 << 13) /* Local Receiver Status */
+#define MII_STAT1000_ROK (1 << 12) /* Remote Receiver Status */
#define MII_STAT1000_FULL (1 << 11) /* 1000BASE-T full duplex */
#define MII_STAT1000_HALF (1 << 10) /* 1000BASE-T half duplex */
+#define MII_EXTSTAT_1000T_FD (1 << 13) /* 1000BASE-T Full Duplex */
+#define MII_EXTSTAT_1000T_HD (1 << 12) /* 1000BASE-T Half Duplex */
+
/* List of vendor identifiers */
/* RealTek 8201 */
#define RTL8201CP_PHYID1 0x0000
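
A small sketch of how a PHY model might combine the newly added ability bits; the helper name is hypothetical, and the register index macros mentioned in the comment (MII_ANLPAR, MII_STAT1000, MII_EXTSTAT) live earlier in this header:

    #include "qemu/osdep.h"
    #include "hw/net/mii.h"

    /*
     * Hypothetical helper: values a PHY model could report after
     * autonegotiating with a full-featured gigabit link partner
     * (to be stored in the ANLPAR, STAT1000 and EXTSTAT registers).
     */
    static void example_gbit_link_partner(uint16_t *anlpar,
                                          uint16_t *stat1000,
                                          uint16_t *extstat)
    {
        *anlpar   = MII_ANLPAR_ACK | MII_ANLPAR_T4 | MII_ANLPAR_TXFD |
                    MII_ANLPAR_TX | MII_ANLPAR_10FD | MII_ANLPAR_10 |
                    MII_ANLPAR_CSMACD;
        *stat1000 = MII_STAT1000_FULL | MII_STAT1000_HALF |
                    MII_STAT1000_LOK | MII_STAT1000_ROK;
        *extstat  = MII_EXTSTAT_1000T_FD | MII_EXTSTAT_1000T_HD;
    }
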
diff --git a/include/hw/net/msf2-emac.h b/include/hw/net/msf2-emac.h
new file mode 100644
index 0000000000..846ba6e6dc
--- /dev/null
+++ b/include/hw/net/msf2-emac.h
@@ -0,0 +1,58 @@
+/*
+ * QEMU model of the Smartfusion2 Ethernet MAC.
+ *
+ * Copyright (c) 2020 Subbaraya Sundeep <sundeep.lkml@gmail.com>.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "exec/memory.h"
+#include "net/net.h"
+#include "net/eth.h"
+#include "qom/object.h"
+
+#define TYPE_MSS_EMAC "msf2-emac"
+OBJECT_DECLARE_SIMPLE_TYPE(MSF2EmacState, MSS_EMAC)
+
+#define R_MAX (0x1a0 / 4)
+#define PHY_MAX_REGS 32
+
+struct MSF2EmacState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ MemoryRegion *dma_mr;
+ AddressSpace dma_as;
+
+ qemu_irq irq;
+ NICState *nic;
+ NICConf conf;
+
+ uint8_t mac_addr[ETH_ALEN];
+ uint32_t rx_desc;
+ uint16_t phy_regs[PHY_MAX_REGS];
+
+ uint32_t regs[R_MAX];
+};
+
+#endif /* HW_NET_MSF2_EMAC_H */
diff --git a/include/hw/net/mv88w8618_eth.h b/include/hw/net/mv88w8618_eth.h
new file mode 100644
index 0000000000..41074940ec
--- /dev/null
+++ b/include/hw/net/mv88w8618_eth.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell MV88W8618 / Freecom MusicPal emulation.
+ *
+ * Copyright (c) 2008-2021 QEMU contributors
+ */
+
+#ifndef HW_NET_MV88W8618_ETH_H
+#define HW_NET_MV88W8618_ETH_H
+
+#define TYPE_MV88W8618_ETH "mv88w8618_eth"
+
+#endif
diff --git a/include/hw/net/ne2000-isa.h b/include/hw/net/ne2000-isa.h
index ff2bed9c95..73bae10ad1 100644
--- a/include/hw/net/ne2000-isa.h
+++ b/include/hw/net/ne2000-isa.h
@@ -6,10 +6,14 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
-#include "hw/hw.h"
-#include "hw/qdev.h"
+
+#ifndef HW_NET_NE2000_ISA_H
+#define HW_NET_NE2000_ISA_H
+
#include "hw/isa/isa.h"
+#include "hw/qdev-properties.h"
#include "net/net.h"
+#include "qapi/error.h"
#define TYPE_ISA_NE2000 "ne2k_isa"
@@ -18,16 +22,16 @@ static inline ISADevice *isa_ne2000_init(ISABus *bus, int base, int irq,
{
ISADevice *d;
- qemu_check_nic_model(nd, "ne2k_isa");
-
- d = isa_try_create(bus, TYPE_ISA_NE2000);
+ d = isa_try_new(TYPE_ISA_NE2000);
if (d) {
DeviceState *dev = DEVICE(d);
qdev_prop_set_uint32(dev, "iobase", base);
qdev_prop_set_uint32(dev, "irq", irq);
qdev_set_nic_properties(dev, nd);
- qdev_init_nofail(dev);
+ isa_realize_and_unref(d, bus, &error_fatal);
}
return d;
}
+
+#endif
diff --git a/include/hw/net/npcm7xx_emc.h b/include/hw/net/npcm7xx_emc.h
new file mode 100644
index 0000000000..b789007160
--- /dev/null
+++ b/include/hw/net/npcm7xx_emc.h
@@ -0,0 +1,283 @@
+/*
+ * Nuvoton NPCM7xx EMC Module
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM7XX_EMC_H
+#define NPCM7XX_EMC_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "net/net.h"
+
+/* 32-bit register indices. */
+enum NPCM7xxPWMRegister {
+ /* Control registers. */
+ REG_CAMCMR,
+ REG_CAMEN,
+
+ /* There are 16 CAMn[ML] registers. */
+ REG_CAMM_BASE,
+ REG_CAML_BASE,
+ REG_CAMML_LAST = 0x21,
+
+ REG_TXDLSA = 0x22,
+ REG_RXDLSA,
+ REG_MCMDR,
+ REG_MIID,
+ REG_MIIDA,
+ REG_FFTCR,
+ REG_TSDR,
+ REG_RSDR,
+ REG_DMARFC,
+ REG_MIEN,
+
+ /* Status registers. */
+ REG_MISTA,
+ REG_MGSTA,
+ REG_MPCNT,
+ REG_MRPC,
+ REG_MRPCC,
+ REG_MREPC,
+ REG_DMARFS,
+ REG_CTXDSA,
+ REG_CTXBSA,
+ REG_CRXDSA,
+ REG_CRXBSA,
+
+ NPCM7XX_NUM_EMC_REGS,
+};
+
+/* REG_CAMCMR fields */
+/* Enable CAM Compare */
+#define REG_CAMCMR_ECMP (1 << 4)
+/* Complement CAM Compare */
+#define REG_CAMCMR_CCAM (1 << 3)
+/* Accept Broadcast Packet */
+#define REG_CAMCMR_ABP (1 << 2)
+/* Accept Multicast Packet */
+#define REG_CAMCMR_AMP (1 << 1)
+/* Accept Unicast Packet */
+#define REG_CAMCMR_AUP (1 << 0)
+
+/* REG_MCMDR fields */
+/* Software Reset */
+#define REG_MCMDR_SWR (1 << 24)
+/* Internal Loopback Select */
+#define REG_MCMDR_LBK (1 << 21)
+/* Operation Mode Select */
+#define REG_MCMDR_OPMOD (1 << 20)
+/* Enable MDC Clock Generation */
+#define REG_MCMDR_ENMDC (1 << 19)
+/* Full-Duplex Mode Select */
+#define REG_MCMDR_FDUP (1 << 18)
+/* Enable SQE Checking */
+#define REG_MCMDR_ENSEQ (1 << 17)
+/* Send PAUSE Frame */
+#define REG_MCMDR_SDPZ (1 << 16)
+/* No Defer */
+#define REG_MCMDR_NDEF (1 << 9)
+/* Frame Transmission On */
+#define REG_MCMDR_TXON (1 << 8)
+/* Strip CRC Checksum */
+#define REG_MCMDR_SPCRC (1 << 5)
+/* Accept CRC Error Packet */
+#define REG_MCMDR_AEP (1 << 4)
+/* Accept Control Packet */
+#define REG_MCMDR_ACP (1 << 3)
+/* Accept Runt Packet */
+#define REG_MCMDR_ARP (1 << 2)
+/* Accept Long Packet */
+#define REG_MCMDR_ALP (1 << 1)
+/* Frame Reception On */
+#define REG_MCMDR_RXON (1 << 0)
+
+/* REG_MIEN fields */
+/* Enable Transmit Descriptor Unavailable Interrupt */
+#define REG_MIEN_ENTDU (1 << 23)
+/* Enable Transmit Completion Interrupt */
+#define REG_MIEN_ENTXCP (1 << 18)
+/* Enable Transmit Interrupt */
+#define REG_MIEN_ENTXINTR (1 << 16)
+/* Enable Receive Descriptor Unavailable Interrupt */
+#define REG_MIEN_ENRDU (1 << 10)
+/* Enable Receive Good Interrupt */
+#define REG_MIEN_ENRXGD (1 << 4)
+/* Enable Receive Interrupt */
+#define REG_MIEN_ENRXINTR (1 << 0)
+
+/* REG_MISTA fields */
+/* TODO: Add error fields and support simulated errors? */
+/* Transmit Bus Error Interrupt */
+#define REG_MISTA_TXBERR (1 << 24)
+/* Transmit Descriptor Unavailable Interrupt */
+#define REG_MISTA_TDU (1 << 23)
+/* Transmit Completion Interrupt */
+#define REG_MISTA_TXCP (1 << 18)
+/* Transmit Interrupt */
+#define REG_MISTA_TXINTR (1 << 16)
+/* Receive Bus Error Interrupt */
+#define REG_MISTA_RXBERR (1 << 11)
+/* Receive Descriptor Unavailable Interrupt */
+#define REG_MISTA_RDU (1 << 10)
+/* DMA Early Notification Interrupt */
+#define REG_MISTA_DENI (1 << 9)
+/* Maximum Frame Length Interrupt */
+#define REG_MISTA_DFOI (1 << 8)
+/* Receive Good Interrupt */
+#define REG_MISTA_RXGD (1 << 4)
+/* Packet Too Long Interrupt */
+#define REG_MISTA_PTLE (1 << 3)
+/* Receive Interrupt */
+#define REG_MISTA_RXINTR (1 << 0)
+
+/* REG_MGSTA fields */
+/* Transmission Halted */
+#define REG_MGSTA_TXHA (1 << 11)
+/* Receive Halted */
+#define REG_MGSTA_RXHA (1 << 11)
+
+/* REG_DMARFC fields */
+/* Maximum Receive Frame Length */
+#define REG_DMARFC_RXMS(word) extract32((word), 0, 16)
+
+/* REG MIIDA fields */
+/* Busy Bit */
+#define REG_MIIDA_BUSY (1 << 17)
+
+/* Transmit and receive descriptors */
+typedef struct NPCM7xxEMCTxDesc NPCM7xxEMCTxDesc;
+typedef struct NPCM7xxEMCRxDesc NPCM7xxEMCRxDesc;
+
+struct NPCM7xxEMCTxDesc {
+ uint32_t flags;
+ uint32_t txbsa;
+ uint32_t status_and_length;
+ uint32_t ntxdsa;
+};
+
+struct NPCM7xxEMCRxDesc {
+ uint32_t status_and_length;
+ uint32_t rxbsa;
+ uint32_t reserved;
+ uint32_t nrxdsa;
+};
+
+/* NPCM7xxEMCTxDesc.flags values */
+/* Owner: 0 = cpu, 1 = emc */
+#define TX_DESC_FLAG_OWNER_MASK (1 << 31)
+/* Transmit interrupt enable */
+#define TX_DESC_FLAG_INTEN (1 << 2)
+/* CRC append */
+#define TX_DESC_FLAG_CRCAPP (1 << 1)
+/* Padding enable */
+#define TX_DESC_FLAG_PADEN (1 << 0)
+
+/* NPCM7xxEMCTxDesc.status_and_length values */
+/* Collision count */
+#define TX_DESC_STATUS_CCNT_SHIFT 28
+#define TX_DESC_STATUS_CCNT_BITSIZE 4
+/* SQE error */
+#define TX_DESC_STATUS_SQE (1 << 26)
+/* Transmission paused */
+#define TX_DESC_STATUS_PAU (1 << 25)
+/* P transmission halted */
+#define TX_DESC_STATUS_TXHA (1 << 24)
+/* Late collision */
+#define TX_DESC_STATUS_LC (1 << 23)
+/* Transmission abort */
+#define TX_DESC_STATUS_TXABT (1 << 22)
+/* No carrier sense */
+#define TX_DESC_STATUS_NCS (1 << 21)
+/* Defer exceed */
+#define TX_DESC_STATUS_EXDEF (1 << 20)
+/* Transmission complete */
+#define TX_DESC_STATUS_TXCP (1 << 19)
+/* Transmission deferred */
+#define TX_DESC_STATUS_DEF (1 << 17)
+/* Transmit interrupt */
+#define TX_DESC_STATUS_TXINTR (1 << 16)
+
+#define TX_DESC_PKT_LEN(word) extract32((word), 0, 16)
+
+/* Transmit buffer start address */
+#define TX_DESC_TXBSA(word) ((uint32_t) (word) & ~3u)
+
+/* Next transmit descriptor start address */
+#define TX_DESC_NTXDSA(word) ((uint32_t) (word) & ~3u)
+
+/* NPCM7xxEMCRxDesc.status_and_length values */
+/* Owner: 0b00 = cpu, 0b01 = undefined, 0b10 = emc, 0b11 = undefined */
+#define RX_DESC_STATUS_OWNER_SHIFT 30
+#define RX_DESC_STATUS_OWNER_BITSIZE 2
+#define RX_DESC_STATUS_OWNER_MASK (3 << RX_DESC_STATUS_OWNER_SHIFT)
+/* Runt packet */
+#define RX_DESC_STATUS_RP (1 << 22)
+/* Alignment error */
+#define RX_DESC_STATUS_ALIE (1 << 21)
+/* Frame reception complete */
+#define RX_DESC_STATUS_RXGD (1 << 20)
+/* Packet too long */
+#define RX_DESC_STATUS_PTLE (1 << 19)
+/* CRC error */
+#define RX_DESC_STATUS_CRCE (1 << 17)
+/* Receive interrupt */
+#define RX_DESC_STATUS_RXINTR (1 << 16)
+
+#define RX_DESC_PKT_LEN(word) extract32((word), 0, 16)
+
+/* Receive buffer start address */
+#define RX_DESC_RXBSA(word) ((uint32_t) (word) & ~3u)
+
+/* Next receive descriptor start address */
+#define RX_DESC_NRXDSA(word) ((uint32_t) (word) & ~3u)
+
+/* Minimum packet length, when TX_DESC_FLAG_PADEN is set. */
+#define MIN_PACKET_LENGTH 64
+
+struct NPCM7xxEMCState {
+ /*< private >*/
+ SysBusDevice parent;
+ /*< public >*/
+
+ MemoryRegion iomem;
+
+ qemu_irq tx_irq;
+ qemu_irq rx_irq;
+
+ NICState *nic;
+ NICConf conf;
+
+ /* 0 or 1, for log messages */
+ uint8_t emc_num;
+
+ uint32_t regs[NPCM7XX_NUM_EMC_REGS];
+
+ /*
+ * tx is active. Set to true by TSDR and then switches off when out of
+ * descriptors. If the TXON bit in REG_MCMDR is off then this is off.
+ */
+ bool tx_active;
+
+ /*
+ * rx is active. Set to true by RSDR and then switches off when out of
+ * descriptors. If the RXON bit in REG_MCMDR is off then this is off.
+ */
+ bool rx_active;
+};
+
+#define TYPE_NPCM7XX_EMC "npcm7xx-emc"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxEMCState, NPCM7XX_EMC)
+
+#endif /* NPCM7XX_EMC_H */
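
As a sketch of how the descriptor layout and bit macros above fit together, a helper like the following could decide whether a TX descriptor is ready for the EMC and pull out the buffer address and frame length (illustrative only; the actual device model also performs the DMA accesses, which are omitted here):

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"
    #include "hw/net/npcm7xx_emc.h"

    /*
     * Illustrative helper: report whether the EMC owns this TX descriptor
     * and, if so, which buffer and frame length it describes.
     */
    static bool example_emc_tx_desc_ready(const NPCM7xxEMCTxDesc *desc,
                                          uint32_t *buf_addr, uint32_t *len)
    {
        if (!(desc->flags & TX_DESC_FLAG_OWNER_MASK)) {
            return false;          /* still owned by the CPU */
        }
        *buf_addr = TX_DESC_TXBSA(desc->txbsa);
        *len = TX_DESC_PKT_LEN(desc->status_and_length);
        return true;
    }
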
diff --git a/include/hw/net/npcm_gmac.h b/include/hw/net/npcm_gmac.h
new file mode 100644
index 0000000000..6340ffe92c
--- /dev/null
+++ b/include/hw/net/npcm_gmac.h
@@ -0,0 +1,343 @@
+/*
+ * Nuvoton NPCM7xx/8xx GMAC Module
+ *
+ * Copyright 2024 Google LLC
+ * Authors:
+ * Hao Wu <wuhaotsh@google.com>
+ * Nabih Estefan <nabihestefan@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM_GMAC_H
+#define NPCM_GMAC_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "net/net.h"
+
+#define NPCM_GMAC_NR_REGS (0x1060 / sizeof(uint32_t))
+
+#define NPCM_GMAC_MAX_PHYS 32
+#define NPCM_GMAC_MAX_PHY_REGS 32
+
+struct NPCMGMACRxDesc {
+ uint32_t rdes0;
+ uint32_t rdes1;
+ uint32_t rdes2;
+ uint32_t rdes3;
+};
+
+/* NPCMGMACRxDesc.flags values */
+/* RDES2 and RDES3 are buffer addresses */
+/* Owner: 0 = software, 1 = dma */
+#define RX_DESC_RDES0_OWN BIT(31)
+/* Destination Address Filter Fail */
+#define RX_DESC_RDES0_DEST_ADDR_FILT_FAIL BIT(30)
+/* Frame length */
+#define RX_DESC_RDES0_FRAME_LEN_MASK(word) extract32(word, 16, 14)
+/* Frame length Shift*/
+#define RX_DESC_RDES0_FRAME_LEN_SHIFT 16
+/* Error Summary */
+#define RX_DESC_RDES0_ERR_SUMM_MASK BIT(15)
+/* Descriptor Error */
+#define RX_DESC_RDES0_DESC_ERR_MASK BIT(14)
+/* Source Address Filter Fail */
+#define RX_DESC_RDES0_SRC_ADDR_FILT_FAIL_MASK BIT(13)
+/* Length Error */
+#define RX_DESC_RDES0_LEN_ERR_MASK BIT(12)
+/* Overflow Error */
+#define RX_DESC_RDES0_OVRFLW_ERR_MASK BIT(11)
+/* VLAN Tag */
+#define RX_DESC_RDES0_VLAN_TAG_MASK BIT(10)
+/* First Descriptor */
+#define RX_DESC_RDES0_FIRST_DESC_MASK BIT(9)
+/* Last Descriptor */
+#define RX_DESC_RDES0_LAST_DESC_MASK BIT(8)
+/* IPC Checksum Error/Giant Frame */
+#define RX_DESC_RDES0_IPC_CHKSM_ERR_GNT_FRM_MASK BIT(7)
+/* Late Collision */
+#define RX_DESC_RDES0_LT_COLL_MASK BIT(6)
+/* Frame Type */
+#define RX_DESC_RDES0_FRM_TYPE_MASK BIT(5)
+/* Receive Watchdog Timeout */
+#define RX_DESC_RDES0_REC_WTCHDG_TMT_MASK BIT(4)
+/* Receive Error */
+#define RX_DESC_RDES0_RCV_ERR_MASK BIT(3)
+/* Dribble Bit Error */
+#define RX_DESC_RDES0_DRBL_BIT_ERR_MASK BIT(2)
+/* Cyclic Redundancy Check Error */
+#define RX_DESC_RDES0_CRC_ERR_MASK BIT(1)
+/* Rx MAC Address/Payload Checksum Error */
+#define RC_DESC_RDES0_RCE_MASK BIT(0)
+
+/* Disable Interrupt on Completion */
+#define RX_DESC_RDES1_DIS_INTR_COMP_MASK BIT(31)
+/* Receive end of ring */
+#define RX_DESC_RDES1_RC_END_RING_MASK BIT(25)
+/* Second Address Chained */
+#define RX_DESC_RDES1_SEC_ADDR_CHND_MASK BIT(24)
+/* Receive Buffer 2 Size */
+#define RX_DESC_RDES1_BFFR2_SZ_SHIFT 11
+#define RX_DESC_RDES1_BFFR2_SZ_MASK(word) extract32(word, \
+ RX_DESC_RDES1_BFFR2_SZ_SHIFT, 11)
+/* Receive Buffer 1 Size */
+#define RX_DESC_RDES1_BFFR1_SZ_MASK(word) extract32(word, 0, 11)
+
+
+struct NPCMGMACTxDesc {
+ uint32_t tdes0;
+ uint32_t tdes1;
+ uint32_t tdes2;
+ uint32_t tdes3;
+};
+
+/* NPCMGMACTxDesc.flags values */
+/* TDES2 and TDES3 are buffer addresses */
+/* Owner: 0 = software, 1 = gmac */
+#define TX_DESC_TDES0_OWN BIT(31)
+/* Tx Time Stamp Status */
+#define TX_DESC_TDES0_TTSS_MASK BIT(17)
+/* IP Header Error */
+#define TX_DESC_TDES0_IP_HEAD_ERR_MASK BIT(16)
+/* Error Summary */
+#define TX_DESC_TDES0_ERR_SUMM_MASK BIT(15)
+/* Jabber Timeout */
+#define TX_DESC_TDES0_JBBR_TMT_MASK BIT(14)
+/* Frame Flushed */
+#define TX_DESC_TDES0_FRM_FLSHD_MASK BIT(13)
+/* Payload Checksum Error */
+#define TX_DESC_TDES0_PYLD_CHKSM_ERR_MASK BIT(12)
+/* Loss of Carrier */
+#define TX_DESC_TDES0_LSS_CARR_MASK BIT(11)
+/* No Carrier */
+#define TX_DESC_TDES0_NO_CARR_MASK BIT(10)
+/* Late Collision */
+#define TX_DESC_TDES0_LATE_COLL_MASK BIT(9)
+/* Excessive Collision */
+#define TX_DESC_TDES0_EXCS_COLL_MASK BIT(8)
+/* VLAN Frame */
+#define TX_DESC_TDES0_VLAN_FRM_MASK BIT(7)
+/* Collision Count */
+#define TX_DESC_TDES0_COLL_CNT_MASK(word) extract32(word, 3, 4)
+/* Excessive Deferral */
+#define TX_DESC_TDES0_EXCS_DEF_MASK BIT(2)
+/* Underflow Error */
+#define TX_DESC_TDES0_UNDRFLW_ERR_MASK BIT(1)
+/* Deferred Bit */
+#define TX_DESC_TDES0_DFRD_BIT_MASK BIT(0)
+
+/* Interrupt on Completion */
+#define TX_DESC_TDES1_INTERR_COMP_MASK BIT(31)
+/* Last Segment */
+#define TX_DESC_TDES1_LAST_SEG_MASK BIT(30)
+/* First Segment */
+#define TX_DESC_TDES1_FIRST_SEG_MASK BIT(29)
+/* Checksum Insertion Control */
+#define TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(word) extract32(word, 27, 2)
+/* Disable Cyclic Redundancy Check */
+#define TX_DESC_TDES1_DIS_CDC_MASK BIT(26)
+/* Transmit End of Ring */
+#define TX_DESC_TDES1_TX_END_RING_MASK BIT(25)
+/* Secondary Address Chained */
+#define TX_DESC_TDES1_SEC_ADDR_CHND_MASK BIT(24)
+/* Transmit Buffer 2 Size */
+#define TX_DESC_TDES1_BFFR2_SZ_MASK(word) extract32(word, 11, 11)
+/* Transmit Buffer 1 Size */
+#define TX_DESC_TDES1_BFFR1_SZ_MASK(word) extract32(word, 0, 11)
+
+typedef struct NPCMGMACState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ NICState *nic;
+ NICConf conf;
+
+ uint32_t regs[NPCM_GMAC_NR_REGS];
+ uint16_t phy_regs[NPCM_GMAC_MAX_PHYS][NPCM_GMAC_MAX_PHY_REGS];
+} NPCMGMACState;
+
+#define TYPE_NPCM_GMAC "npcm-gmac"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCMGMACState, NPCM_GMAC)
+
+/* Mask for RO bits in Status */
+#define NPCM_DMA_STATUS_RO_MASK(word) (word & 0xfffe0000)
+/* Mask for RO bits in Status */
+#define NPCM_DMA_STATUS_W1C_MASK(word) (word & 0x1e7ff)
+
+/* Transmit Process State */
+#define NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT 20
+/* Transmit States */
+#define NPCM_DMA_STATUS_TX_STOPPED_STATE \
+ (0b000)
+#define NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE \
+ (0b001)
+#define NPCM_DMA_STATUS_TX_RUNNING_WAITING_STATE \
+ (0b010)
+#define NPCM_DMA_STATUS_TX_RUNNING_READ_STATE \
+ (0b011)
+#define NPCM_DMA_STATUS_TX_SUSPENDED_STATE \
+ (0b110)
+#define NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE \
+ (0b111)
+/* Receive Process State */
+#define NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT 17
+/* Receive States */
+#define NPCM_DMA_STATUS_RX_STOPPED_STATE \
+ (0b000)
+#define NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE \
+ (0b001)
+#define NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE \
+ (0b011)
+#define NPCM_DMA_STATUS_RX_SUSPENDED_STATE \
+ (0b100)
+#define NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE \
+ (0b101)
+#define NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE \
+ (0b111)
+
+
+/* Early Receive Interrupt */
+#define NPCM_DMA_STATUS_ERI BIT(14)
+/* Fatal Bus Error Interrupt */
+#define NPCM_DMA_STATUS_FBI BIT(13)
+/* Early transmit Interrupt */
+#define NPCM_DMA_STATUS_ETI BIT(10)
+/* Receive Watchdog Timeout */
+#define NPCM_DMA_STATUS_RWT BIT(9)
+/* Receive Process Stopped */
+#define NPCM_DMA_STATUS_RPS BIT(8)
+/* Receive Buffer Unavailable */
+#define NPCM_DMA_STATUS_RU BIT(7)
+/* Receive Interrupt */
+#define NPCM_DMA_STATUS_RI BIT(6)
+/* Transmit Underflow */
+#define NPCM_DMA_STATUS_UNF BIT(5)
+/* Receive Overflow */
+#define NPCM_DMA_STATUS_OVF BIT(4)
+/* Transmit Jabber Timeout */
+#define NPCM_DMA_STATUS_TJT BIT(3)
+/* Transmit Buffer Unavailable */
+#define NPCM_DMA_STATUS_TU BIT(2)
+/* Transmit Process Stopped */
+#define NPCM_DMA_STATUS_TPS BIT(1)
+/* Transmit Interrupt */
+#define NPCM_DMA_STATUS_TI BIT(0)
+
+/* Normal Interrupt Summary */
+#define NPCM_DMA_STATUS_NIS BIT(16)
+/* Interrupts enabled by NIE */
+#define NPCM_DMA_STATUS_NIS_BITS (NPCM_DMA_STATUS_TI | \
+ NPCM_DMA_STATUS_TU | \
+ NPCM_DMA_STATUS_RI | \
+ NPCM_DMA_STATUS_ERI)
+/* Abnormal Interrupt Summary */
+#define NPCM_DMA_STATUS_AIS BIT(15)
+/* Interrupts enabled by AIE */
+#define NPCM_DMA_STATUS_AIS_BITS (NPCM_DMA_STATUS_TPS | \
+ NPCM_DMA_STATUS_TJT | \
+ NPCM_DMA_STATUS_OVF | \
+ NPCM_DMA_STATUS_UNF | \
+ NPCM_DMA_STATUS_RU | \
+ NPCM_DMA_STATUS_RPS | \
+ NPCM_DMA_STATUS_RWT | \
+ NPCM_DMA_STATUS_ETI | \
+ NPCM_DMA_STATUS_FBI)
+
+/* Early Receive Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_ERE BIT(14)
+/* Fatal Bus Error Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_FBE BIT(13)
+/* Early transmit Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_ETE BIT(10)
+/* Receive Watchdog Timeout Enable */
+#define NPCM_DMA_INTR_ENAB_RWE BIT(9)
+/* Receive Process Stopped Enable */
+#define NPCM_DMA_INTR_ENAB_RSE BIT(8)
+/* Receive Buffer Unavailable Enable */
+#define NPCM_DMA_INTR_ENAB_RUE BIT(7)
+/* Receive Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_RIE BIT(6)
+/* Transmit Underflow Enable */
+#define NPCM_DMA_INTR_ENAB_UNE BIT(5)
+/* Receive Overflow Enable */
+#define NPCM_DMA_INTR_ENAB_OVE BIT(4)
+/* Transmit Jabber Timeout Enable */
+#define NPCM_DMA_INTR_ENAB_TJE BIT(3)
+/* Transmit Buffer Unavailable Enable */
+#define NPCM_DMA_INTR_ENAB_TUE BIT(2)
+/* Transmit Process Stopped Enable */
+#define NPCM_DMA_INTR_ENAB_TSE BIT(1)
+/* Transmit Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_TIE BIT(0)
+
+/* Normal Interrupt Summary Enable */
+#define NPCM_DMA_INTR_ENAB_NIE BIT(16)
+/* Interrupts enabled by NIE Enable */
+#define NPCM_DMA_INTR_ENAB_NIE_BITS (NPCM_DMA_INTR_ENAB_TIE | \
+ NPCM_DMA_INTR_ENAB_TUE | \
+ NPCM_DMA_INTR_ENAB_RIE | \
+ NPCM_DMA_INTR_ENAB_ERE)
+/* Abnormal Interrupt Summary Enable */
+#define NPCM_DMA_INTR_ENAB_AIE BIT(15)
+/* Interrupts enabled by AIE Enable */
+#define NPCM_DMA_INTR_ENAB_AIE_BITS (NPCM_DMA_INTR_ENAB_TSE | \
+ NPCM_DMA_INTR_ENAB_TJE | \
+ NPCM_DMA_INTR_ENAB_OVE | \
+ NPCM_DMA_INTR_ENAB_UNE | \
+ NPCM_DMA_INTR_ENAB_RUE | \
+ NPCM_DMA_INTR_ENAB_RSE | \
+ NPCM_DMA_INTR_ENAB_RWE | \
+ NPCM_DMA_INTR_ENAB_ETE | \
+ NPCM_DMA_INTR_ENAB_FBE)
+
+/* Flushing Disabled */
+#define NPCM_DMA_CONTROL_FLUSH_MASK BIT(24)
+/* Start/stop Transmit */
+#define NPCM_DMA_CONTROL_START_STOP_TX BIT(13)
+/* Start/stop Receive */
+#define NPCM_DMA_CONTROL_START_STOP_RX BIT(1)
+/* Next receive descriptor start address */
+#define NPCM_DMA_HOST_RX_DESC_MASK(word) ((uint32_t) (word) & ~3u)
+/* Next transmit descriptor start address */
+#define NPCM_DMA_HOST_TX_DESC_MASK(word) ((uint32_t) (word) & ~3u)
+
+/* Receive enable */
+#define NPCM_GMAC_MAC_CONFIG_RX_EN BIT(2)
+/* Transmit enable */
+#define NPCM_GMAC_MAC_CONFIG_TX_EN BIT(3)
+
+/* Frame Receive All */
+#define NPCM_GMAC_FRAME_FILTER_REC_ALL_MASK BIT(31)
+/* Frame HPF Filter*/
+#define NPCM_GMAC_FRAME_FILTER_HPF_MASK BIT(10)
+/* Frame SAF Filter*/
+#define NPCM_GMAC_FRAME_FILTER_SAF_MASK BIT(9)
+/* Frame SAIF Filter*/
+#define NPCM_GMAC_FRAME_FILTER_SAIF_MASK BIT(8)
+/* Frame PCF Filter*/
+#define NPCM_GMAC_FRAME_FILTER_PCF_MASK(word) extract32((word), 6, 2)
+/* Frame DBF Filter*/
+#define NPCM_GMAC_FRAME_FILTER_DBF_MASK BIT(5)
+/* Frame PM Filter*/
+#define NPCM_GMAC_FRAME_FILTER_PM_MASK BIT(4)
+/* Frame DAIF Filter*/
+#define NPCM_GMAC_FRAME_FILTER_DAIF_MASK BIT(3)
+/* Frame HMC Filter*/
+#define NPCM_GMAC_FRAME_FILTER_HMC_MASK BIT(2)
+/* Frame HUC Filter*/
+#define NPCM_GMAC_FRAME_FILTER_HUC_MASK BIT(1)
+/* Frame PR Filter*/
+#define NPCM_GMAC_FRAME_FILTER_PR_MASK BIT(0)
+
+#endif /* NPCM_GMAC_H */
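
The NIS/AIS definitions above encode which per-event bits feed each summary; a sketch of that folding step, written as a pure function over raw Status and Interrupt Enable values rather than the device state:

    #include "qemu/osdep.h"
    #include "hw/net/npcm_gmac.h"

    /*
     * Illustrative only: fold the enabled per-event bits into the NIS/AIS
     * summary bits of a DMA Status value.
     */
    static uint32_t example_gmac_fold_summaries(uint32_t status,
                                                uint32_t intr_en)
    {
        if (status & intr_en & NPCM_DMA_STATUS_NIS_BITS) {
            status |= NPCM_DMA_STATUS_NIS;
        }
        if (status & intr_en & NPCM_DMA_STATUS_AIS_BITS) {
            status |= NPCM_DMA_STATUS_AIS;
        }
        return status;
    }
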
diff --git a/include/hw/net/smc91c111.h b/include/hw/net/smc91c111.h
new file mode 100644
index 0000000000..dba32a233f
--- /dev/null
+++ b/include/hw/net/smc91c111.h
@@ -0,0 +1,18 @@
+/*
+ * SMSC 91C111 Ethernet interface emulation
+ *
+ * Copyright (c) 2005 CodeSourcery, LLC.
+ * Written by Paul Brook
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_NET_SMC91C111_H
+#define HW_NET_SMC91C111_H
+
+#include "net/net.h"
+
+void smc91c111_init(uint32_t, qemu_irq);
+
+#endif
diff --git a/include/hw/net/xlnx-versal-canfd.h b/include/hw/net/xlnx-versal-canfd.h
new file mode 100644
index 0000000000..ad3104dd13
--- /dev/null
+++ b/include/hw/net/xlnx-versal-canfd.h
@@ -0,0 +1,87 @@
+/*
+ * QEMU model of the Xilinx Versal CANFD Controller.
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ *
+ * Written-by: Vikram Garhwal<vikram.garhwal@amd.com>
+ * Based on QEMU CANFD Device emulation implemented by Jin Yang, Deniz Eren and
+ * Pavel Pisa.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_CANFD_XILINX_H
+#define HW_CANFD_XILINX_H
+
+#include "hw/register.h"
+#include "hw/ptimer.h"
+#include "net/can_emu.h"
+#include "hw/qdev-clock.h"
+
+#define TYPE_XILINX_CANFD "xlnx.versal-canfd"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCANFDState, XILINX_CANFD)
+
+#define NUM_REGS_PER_MSG_SPACE 18 /* 1 ID + 1 DLC + 16 Data(DW0 - DW15) regs. */
+#define MAX_NUM_RX 64
+#define OFFSET_RX1_DW15 (0x4144 / 4)
+#define CANFD_TIMER_MAX 0xFFFFUL
+#define CANFD_DEFAULT_CLOCK (25 * 1000 * 1000)
+
+#define XLNX_VERSAL_CANFD_R_MAX (OFFSET_RX1_DW15 + \
+ ((MAX_NUM_RX - 1) * NUM_REGS_PER_MSG_SPACE) + 1)
+
+typedef struct XlnxVersalCANFDState {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ qemu_irq irq_canfd_int;
+ qemu_irq irq_addr_err;
+
+ RegisterInfo reg_info[XLNX_VERSAL_CANFD_R_MAX];
+ RegisterAccessInfo *tx_regs;
+ RegisterAccessInfo *rx0_regs;
+ RegisterAccessInfo *rx1_regs;
+ RegisterAccessInfo *af_regs;
+ RegisterAccessInfo *txe_regs;
+ RegisterAccessInfo *rx_mailbox_regs;
+ RegisterAccessInfo *af_mask_regs_mailbox;
+
+ uint32_t regs[XLNX_VERSAL_CANFD_R_MAX];
+
+ ptimer_state *canfd_timer;
+
+ CanBusClientState bus_client;
+ CanBusState *canfdbus;
+
+ struct {
+ uint8_t rx0_fifo;
+ uint8_t rx1_fifo;
+ uint8_t tx_fifo;
+ bool enable_rx_fifo1;
+ uint32_t ext_clk_freq;
+ } cfg;
+
+} XlnxVersalCANFDState;
+
+typedef struct tx_ready_reg_info {
+ uint32_t can_id;
+ uint32_t reg_num;
+} tx_ready_reg_info;
+
+#endif
diff --git a/include/hw/net/xlnx-zynqmp-can.h b/include/hw/net/xlnx-zynqmp-can.h
new file mode 100644
index 0000000000..fd2aa77760
--- /dev/null
+++ b/include/hw/net/xlnx-zynqmp-can.h
@@ -0,0 +1,79 @@
+/*
+ * QEMU model of the Xilinx ZynqMP CAN controller.
+ *
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Written-by: Vikram Garhwal<fnu.vikram@xilinx.com>
+ *
+ * Based on QEMU CAN Device emulation implemented by Jin Yang, Deniz Eren and
+ * Pavel Pisa.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_ZYNQMP_CAN_H
+#define XLNX_ZYNQMP_CAN_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "net/can_emu.h"
+#include "net/can_host.h"
+#include "qemu/fifo32.h"
+#include "hw/ptimer.h"
+#include "hw/qdev-clock.h"
+
+#define TYPE_XLNX_ZYNQMP_CAN "xlnx.zynqmp-can"
+
+#define XLNX_ZYNQMP_CAN(obj) \
+ OBJECT_CHECK(XlnxZynqMPCANState, (obj), TYPE_XLNX_ZYNQMP_CAN)
+
+#define MAX_CAN_CTRLS 2
+#define XLNX_ZYNQMP_CAN_R_MAX (0x84 / 4)
+#define MAILBOX_CAPACITY 64
+#define CAN_TIMER_MAX 0XFFFFUL
+#define CAN_DEFAULT_CLOCK (24 * 1000 * 1000)
+
+/* Each CAN_FRAME will have 4 * 32bit size. */
+#define CAN_FRAME_SIZE 4
+#define RXFIFO_SIZE (MAILBOX_CAPACITY * CAN_FRAME_SIZE)
+
+typedef struct XlnxZynqMPCANState {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ qemu_irq irq;
+
+ CanBusClientState bus_client;
+ CanBusState *canbus;
+
+ struct {
+ uint32_t ext_clk_freq;
+ } cfg;
+
+ RegisterInfo reg_info[XLNX_ZYNQMP_CAN_R_MAX];
+ uint32_t regs[XLNX_ZYNQMP_CAN_R_MAX];
+
+ Fifo32 rx_fifo;
+ Fifo32 tx_fifo;
+ Fifo32 txhpb_fifo;
+
+ ptimer_state *can_timer;
+} XlnxZynqMPCANState;
+
+#endif
diff --git a/include/hw/nmi.h b/include/hw/nmi.h
index ad857f3832..fff41bebc6 100644
--- a/include/hw/nmi.h
+++ b/include/hw/nmi.h
@@ -22,25 +22,23 @@
#ifndef NMI_H
#define NMI_H
-#include "qemu-common.h"
#include "qom/object.h"
#define TYPE_NMI "nmi"
-#define NMI_CLASS(klass) \
- OBJECT_CLASS_CHECK(NMIClass, (klass), TYPE_NMI)
-#define NMI_GET_CLASS(obj) \
- OBJECT_GET_CLASS(NMIClass, (obj), TYPE_NMI)
+typedef struct NMIClass NMIClass;
+DECLARE_CLASS_CHECKERS(NMIClass, NMI,
+ TYPE_NMI)
#define NMI(obj) \
- INTERFACE_CHECK(NMI, (obj), TYPE_NMI)
+ INTERFACE_CHECK(NMIState, (obj), TYPE_NMI)
typedef struct NMIState NMIState;
-typedef struct NMIClass {
+struct NMIClass {
InterfaceClass parent_class;
void (*nmi_monitor_handler)(NMIState *n, int cpu_index, Error **errp);
-} NMIClass;
+};
void nmi_monitor_handle(int cpu_index, Error **errp);
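
A device opts into this interface by listing TYPE_NMI in its InterfaceInfo array and filling in nmi_monitor_handler from class_init; a sketch with hypothetical type and handler names:

    #include "qemu/osdep.h"
    #include "qemu/module.h"
    #include "qapi/error.h"
    #include "hw/sysbus.h"
    #include "hw/nmi.h"

    #define TYPE_EXAMPLE_NMI_DEV "example-nmi-dev"   /* hypothetical */

    static void example_nmi_handler(NMIState *n, int cpu_index, Error **errp)
    {
        /* Inject or simulate the NMI here; report problems via errp. */
    }

    static void example_nmi_dev_class_init(ObjectClass *klass, void *data)
    {
        NMIClass *nc = NMI_CLASS(klass);

        nc->nmi_monitor_handler = example_nmi_handler;
    }

    static const TypeInfo example_nmi_dev_info = {
        .name          = TYPE_EXAMPLE_NMI_DEV,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(SysBusDevice),
        .class_init    = example_nmi_dev_class_init,
        .interfaces    = (InterfaceInfo[]) {
            { TYPE_NMI },
            { }
        },
    };

    static void example_nmi_dev_register_types(void)
    {
        type_register_static(&example_nmi_dev_info);
    }

    type_init(example_nmi_dev_register_types)
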
diff --git a/include/hw/nubus/mac-nubus-bridge.h b/include/hw/nubus/mac-nubus-bridge.h
new file mode 100644
index 0000000000..be4dd83530
--- /dev/null
+++ b/include/hw/nubus/mac-nubus-bridge.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013-2018 Laurent Vivier <laurent@vivier.eu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HW_NUBUS_MAC_NUBUS_BRIDGE_H
+#define HW_NUBUS_MAC_NUBUS_BRIDGE_H
+
+#include "hw/nubus/nubus.h"
+#include "qom/object.h"
+
+#define MAC_NUBUS_FIRST_SLOT 0x9
+#define MAC_NUBUS_LAST_SLOT 0xe
+#define MAC_NUBUS_SLOT_NB (MAC_NUBUS_LAST_SLOT - MAC_NUBUS_FIRST_SLOT + 1)
+
+#define TYPE_MAC_NUBUS_BRIDGE "mac-nubus-bridge"
+OBJECT_DECLARE_SIMPLE_TYPE(MacNubusBridge, MAC_NUBUS_BRIDGE)
+
+struct MacNubusBridge {
+ NubusBridge parent_obj;
+
+ MemoryRegion super_slot_alias;
+ MemoryRegion slot_alias;
+};
+
+#endif
diff --git a/include/hw/nubus/nubus-virtio-mmio.h b/include/hw/nubus/nubus-virtio-mmio.h
new file mode 100644
index 0000000000..de497b7f76
--- /dev/null
+++ b/include/hw/nubus/nubus-virtio-mmio.h
@@ -0,0 +1,36 @@
+/*
+ * QEMU Macintosh Nubus Virtio MMIO card
+ *
+ * Copyright (c) 2023 Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_NUBUS_VIRTIO_MMIO_H
+#define HW_NUBUS_VIRTIO_MMIO_H
+
+#include "hw/nubus/nubus.h"
+#include "qom/object.h"
+#include "hw/intc/goldfish_pic.h"
+#include "hw/virtio/virtio-mmio.h"
+
+#define TYPE_NUBUS_VIRTIO_MMIO "nubus-virtio-mmio"
+OBJECT_DECLARE_TYPE(NubusVirtioMMIO, NubusVirtioMMIODeviceClass,
+ NUBUS_VIRTIO_MMIO)
+
+struct NubusVirtioMMIODeviceClass {
+ DeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+#define NUBUS_VIRTIO_MMIO_NUM_DEVICES 32
+
+struct NubusVirtioMMIO {
+ NubusDevice parent_obj;
+
+ GoldfishPICState pic;
+ VirtIOMMIOProxy virtio_mmio[NUBUS_VIRTIO_MMIO_NUM_DEVICES];
+};
+
+#endif
diff --git a/include/hw/nubus/nubus.h b/include/hw/nubus/nubus.h
new file mode 100644
index 0000000000..fee79b71d1
--- /dev/null
+++ b/include/hw/nubus/nubus.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013-2018 Laurent Vivier <laurent@vivier.eu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HW_NUBUS_NUBUS_H
+#define HW_NUBUS_NUBUS_H
+
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "exec/address-spaces.h"
+#include "qom/object.h"
+#include "qemu/units.h"
+
+#define NUBUS_SUPER_SLOT_SIZE 0x10000000U
+#define NUBUS_SUPER_SLOT_NB 0xe
+
+#define NUBUS_SLOT_BASE (NUBUS_SUPER_SLOT_SIZE * \
+ (NUBUS_SUPER_SLOT_NB + 1))
+
+#define NUBUS_SLOT_SIZE 0x01000000
+#define NUBUS_FIRST_SLOT 0x0
+#define NUBUS_LAST_SLOT 0xf
+#define NUBUS_SLOT_NB (NUBUS_LAST_SLOT - NUBUS_FIRST_SLOT + 1)
+
+#define NUBUS_IRQS 16
+
+#define TYPE_NUBUS_DEVICE "nubus-device"
+OBJECT_DECLARE_SIMPLE_TYPE(NubusDevice, NUBUS_DEVICE)
+
+#define TYPE_NUBUS_BUS "nubus-bus"
+OBJECT_DECLARE_SIMPLE_TYPE(NubusBus, NUBUS_BUS)
+
+#define TYPE_NUBUS_BRIDGE "nubus-bridge"
+OBJECT_DECLARE_SIMPLE_TYPE(NubusBridge, NUBUS_BRIDGE)
+
+struct NubusBus {
+ BusState qbus;
+
+ AddressSpace nubus_as;
+ MemoryRegion nubus_mr;
+
+ MemoryRegion super_slot_io;
+ MemoryRegion slot_io;
+
+ uint16_t slot_available_mask;
+
+ qemu_irq irqs[NUBUS_IRQS];
+};
+
+#define NUBUS_DECL_ROM_MAX_SIZE (1 * MiB)
+
+struct NubusDevice {
+ DeviceState qdev;
+
+ int32_t slot;
+ MemoryRegion super_slot_mem;
+ MemoryRegion slot_mem;
+
+ char *romfile;
+ MemoryRegion decl_rom;
+};
+
+void nubus_set_irq(NubusDevice *nd, int level);
+
+struct NubusBridge {
+ SysBusDevice parent_obj;
+
+ NubusBus bus;
+};
+
+#endif
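
The address map these constants imply is the classic NuBus layout: the standard-slot window starts at NUBUS_SLOT_BASE = NUBUS_SUPER_SLOT_SIZE * (NUBUS_SUPER_SLOT_NB + 1) = 0x10000000 * 0xf = 0xF0000000, and each slot s then gets a 16 MiB standard space at 0xF0000000 + s * NUBUS_SLOT_SIZE. The first Macintosh slot (0x9, per mac-nubus-bridge.h above) therefore lands at 0xF9000000, matching the traditional $Fs00 0000 slot addressing.
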
diff --git a/include/hw/nvram/chrp_nvram.h b/include/hw/nvram/chrp_nvram.h
index b4f5b2b104..4a0f5c21b8 100644
--- a/include/hw/nvram/chrp_nvram.h
+++ b/include/hw/nvram/chrp_nvram.h
@@ -18,6 +18,8 @@
#ifndef CHRP_NVRAM_H
#define CHRP_NVRAM_H
+#include "qemu/bswap.h"
+
/* OpenBIOS NVRAM partition */
typedef struct {
uint8_t signature;
@@ -48,7 +50,8 @@ chrp_nvram_finish_partition(ChrpNvramPartHdr *header, uint32_t size)
header->checksum = sum & 0xff;
}
-int chrp_nvram_create_system_partition(uint8_t *data, int min_len);
+/* chrp_nvram_create_system_partition() failure is fatal */
+int chrp_nvram_create_system_partition(uint8_t *data, int min_len, int max_len);
int chrp_nvram_create_free_partition(uint8_t *data, int len);
#endif
diff --git a/include/hw/nvram/eeprom_at24c.h b/include/hw/nvram/eeprom_at24c.h
new file mode 100644
index 0000000000..acb9857b2a
--- /dev/null
+++ b/include/hw/nvram/eeprom_at24c.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ */
+
+#ifndef EEPROM_AT24C_H
+#define EEPROM_AT24C_H
+
+#include "hw/i2c/i2c.h"
+
+/*
+ * Create and realize an AT24C EEPROM device on the heap.
+ * @bus: I2C bus to put it on
+ * @address: I2C address of the EEPROM slave when put on a bus
+ * @rom_size: size of the EEPROM
+ *
+ * Create the device state structure, initialize it, put it on the specified
+ * @bus, and drop the reference to it (the device is realized).
+ */
+I2CSlave *at24c_eeprom_init(I2CBus *bus, uint8_t address, uint32_t rom_size);
+
+
+/*
+ * Create and realize an AT24C EEPROM device on the heap with initial data.
+ * @bus: I2C bus to put it on
+ * @address: I2C address of the EEPROM slave when put on a bus
+ * @rom_size: size of the EEPROM
+ * @init_rom: Array of bytes to initialize EEPROM memory with
+ * @init_rom_size: Size of @init_rom, must be less than or equal to @rom_size
+ *
+ * Create the device state structure, initialize it, put it on the specified
+ * @bus, and drop the reference to it (the device is realized). Copies the data
+ * from @init_rom to the beginning of the EEPROM memory buffer.
+ */
+I2CSlave *at24c_eeprom_init_rom(I2CBus *bus, uint8_t address, uint32_t rom_size,
+ const uint8_t *init_rom, uint32_t init_rom_size);
+
+#endif
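
Typical use from board or machine code, following the contracts described above (the bus, addresses and sizes are illustrative):

    #include "qemu/osdep.h"
    #include "hw/nvram/eeprom_at24c.h"

    /*
     * Illustrative only: a blank 32 KiB EEPROM at 0x50 and a second one
     * whose first bytes are preloaded from a small local buffer.
     */
    static void example_add_eeproms(I2CBus *bus)
    {
        static const uint8_t serial[] = { 0xde, 0xad, 0xbe, 0xef };

        at24c_eeprom_init(bus, 0x50, 32 * 1024);
        at24c_eeprom_init_rom(bus, 0x51, 32 * 1024, serial, sizeof(serial));
    }
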
diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h
index f5a6895a74..c1f81a5f13 100644
--- a/include/hw/nvram/fw_cfg.h
+++ b/include/hw/nvram/fw_cfg.h
@@ -5,14 +5,39 @@
#include "standard-headers/linux/qemu_fw_cfg.h"
#include "hw/sysbus.h"
#include "sysemu/dma.h"
+#include "qom/object.h"
#define TYPE_FW_CFG "fw_cfg"
#define TYPE_FW_CFG_IO "fw_cfg_io"
#define TYPE_FW_CFG_MEM "fw_cfg_mem"
+#define TYPE_FW_CFG_DATA_GENERATOR_INTERFACE "fw_cfg-data-generator"
-#define FW_CFG(obj) OBJECT_CHECK(FWCfgState, (obj), TYPE_FW_CFG)
-#define FW_CFG_IO(obj) OBJECT_CHECK(FWCfgIoState, (obj), TYPE_FW_CFG_IO)
-#define FW_CFG_MEM(obj) OBJECT_CHECK(FWCfgMemState, (obj), TYPE_FW_CFG_MEM)
+OBJECT_DECLARE_SIMPLE_TYPE(FWCfgState, FW_CFG)
+OBJECT_DECLARE_SIMPLE_TYPE(FWCfgIoState, FW_CFG_IO)
+OBJECT_DECLARE_SIMPLE_TYPE(FWCfgMemState, FW_CFG_MEM)
+
+typedef struct FWCfgDataGeneratorClass FWCfgDataGeneratorClass;
+DECLARE_CLASS_CHECKERS(FWCfgDataGeneratorClass, FW_CFG_DATA_GENERATOR,
+ TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)
+
+struct FWCfgDataGeneratorClass {
+ /*< private >*/
+ InterfaceClass parent_class;
+ /*< public >*/
+
+ /**
+ * get_data:
+ * @obj: the object implementing this interface
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Returns: reference to a byte array containing the data on success,
+ * or NULL on error.
+ *
+ * The caller should release the reference when no longer
+ * required.
+ */
+ GByteArray *(*get_data)(Object *obj, Error **errp);
+};
typedef struct fw_cfg_file FWCfgFile;
@@ -53,6 +78,12 @@ struct FWCfgState {
dma_addr_t dma_addr;
AddressSpace *dma_as;
MemoryRegion dma_iomem;
+
+ /* restore during migration */
+ bool acpi_mr_restore;
+ uint64_t table_mr_size;
+ uint64_t linker_mr_size;
+ uint64_t rsdp_mr_size;
};
struct FWCfgIoState {
@@ -99,6 +130,20 @@ void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len);
void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value);
/**
+ * fw_cfg_modify_string:
+ * @s: fw_cfg device being modified
+ * @key: selector key value for new fw_cfg item
+ * @value: NUL-terminated ascii string
+ *
+ * Replace the fw_cfg item available by selecting the given key. The new
+ * data will consist of a dynamically allocated copy of the provided string,
+ * including its NUL terminator. The data being replaced, assumed to have
+ * been dynamically allocated during an earlier call to either
+ * fw_cfg_add_string() or fw_cfg_modify_string(), is freed before returning.
+ */
+void fw_cfg_modify_string(FWCfgState *s, uint16_t key, const char *value);
+
+/**
* fw_cfg_add_i16:
* @s: fw_cfg device being modified
* @key: selector key value for new fw_cfg item
@@ -137,6 +182,20 @@ void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value);
void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value);
/**
+ * fw_cfg_modify_i32:
+ * @s: fw_cfg device being modified
+ * @key: selector key value for new fw_cfg item
+ * @value: 32-bit integer
+ *
+ * Replace the fw_cfg item available by selecting the given key. The new
+ * data will consist of a dynamically allocated copy of the given 32-bit
+ * value, converted to little-endian representation. The data being replaced,
+ * assumed to have been dynamically allocated during an earlier call to
+ * either fw_cfg_add_i32() or fw_cfg_modify_i32(), is freed before returning.
+ */
+void fw_cfg_modify_i32(FWCfgState *s, uint16_t key, uint32_t value);
+
+/**
* fw_cfg_add_i64:
* @s: fw_cfg device being modified
* @key: selector key value for new fw_cfg item
@@ -149,6 +208,20 @@ void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value);
void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value);
/**
+ * fw_cfg_modify_i64:
+ * @s: fw_cfg device being modified
+ * @key: selector key value for new fw_cfg item
+ * @value: 64-bit integer
+ *
+ * Replace the fw_cfg item available by selecting the given key. The new
+ * data will consist of a dynamically allocated copy of the given 64-bit
+ * value, converted to little-endian representation. The data being replaced,
+ * assumed to have been dynamically allocated during an earlier call to
+ * either fw_cfg_add_i64() or fw_cfg_modify_i64(), is freed before returning.
+ */
+void fw_cfg_modify_i64(FWCfgState *s, uint16_t key, uint64_t value);
+
+/**
* fw_cfg_add_file:
* @s: fw_cfg device being modified
* @filename: name of new fw_cfg file item
@@ -215,6 +288,35 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
void *fw_cfg_modify_file(FWCfgState *s, const char *filename, void *data,
size_t len);
+/**
+ * fw_cfg_add_from_generator:
+ * @s: fw_cfg device being modified
+ * @filename: name of new fw_cfg file item
+ * @gen_id: name of object implementing FW_CFG_DATA_GENERATOR interface
+ * @errp: pointer to a NULL initialized error object
+ *
+ * Add a new NAMED fw_cfg item with the content generated from the
+ * @gen_id object. The data generated by the @gen_id object is copied
+ * into the data structure of the fw_cfg device.
+ * The next available (unused) selector key starting at FW_CFG_FILE_FIRST
+ * will be used; also, a new entry will be added to the file directory
+ * structure residing at key value FW_CFG_FILE_DIR, containing the item name,
+ * data size, and assigned selector key value.
+ *
+ * Returns: %true on success, %false on error.
+ */
+bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename,
+ const char *gen_id, Error **errp);
+
+/**
+ * fw_cfg_add_extra_pci_roots:
+ * @bus: main pci root bus to be scanned from
+ * @s: fw_cfg device being modified
+ *
+ * Add a new fw_cfg item...
+ */
+void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s);
+
FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
AddressSpace *dma_as);
FWCfgState *fw_cfg_init_io(uint32_t iobase);
@@ -226,4 +328,39 @@ FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
FWCfgState *fw_cfg_find(void);
bool fw_cfg_dma_enabled(void *opaque);
+/**
+ * fw_cfg_arch_key_name:
+ *
+ * @key: The uint16 selector key.
+ *
+ * The key is architecture-specific (the FW_CFG_ARCH_LOCAL mask is expected
+ * to be set in the key).
+ *
+ * Returns: The stringified architecture-specific name if the selector
+ * refers to a well-known numerically defined item, or NULL on
+ * key lookup failure.
+ */
+const char *fw_cfg_arch_key_name(uint16_t key);
+
+/**
+ * load_image_to_fw_cfg() - Load an image file into an fw_cfg entry identified
+ * by key.
+ * @fw_cfg: The firmware config instance to store the data in.
+ * @size_key: The firmware config key to store the size of the loaded
+ * data under, with fw_cfg_add_i32().
+ * @data_key: The firmware config key to store the loaded data under,
+ * with fw_cfg_add_bytes().
+ * @image_name: The name of the image file to load. If it is NULL, the
+ * function returns without doing anything.
+ * @try_decompress: Whether the image should be decompressed (gunzipped) before
+ * adding it to fw_cfg. If decompression fails, the image is
+ * loaded as-is.
+ *
+ * In case of failure, the function prints an error message to stderr and the
+ * process exits with status 1.
+ */
+void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key,
+ uint16_t data_key, const char *image_name,
+ bool try_decompress);
+
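+/*
+ * A minimal usage sketch (hypothetical): load a possibly gzip-compressed
+ * kernel image into a size/data key pair. The FW_CFG_KERNEL_SIZE and
+ * FW_CFG_KERNEL_DATA selectors are assumed to be the usual well-known keys;
+ * boards may use different ones.
+ */
+static inline void fw_cfg_example_load_kernel(FWCfgState *fw_cfg,
+                                              const char *kernel_filename)
+{
+    /* Prints an error and exits if the image cannot be loaded. */
+    load_image_to_fw_cfg(fw_cfg, FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
+                         kernel_filename, true);
+}
+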
#endif
diff --git a/include/hw/nvram/fw_cfg_acpi.h b/include/hw/nvram/fw_cfg_acpi.h
new file mode 100644
index 0000000000..b39eb0490f
--- /dev/null
+++ b/include/hw/nvram/fw_cfg_acpi.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ACPI support for fw_cfg
+ *
+ */
+
+#ifndef FW_CFG_ACPI_H
+#define FW_CFG_ACPI_H
+
+#include "exec/hwaddr.h"
+
+void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap);
+
+#endif
diff --git a/include/hw/nvram/mac_nvram.h b/include/hw/nvram/mac_nvram.h
new file mode 100644
index 0000000000..0c4dfaeff6
--- /dev/null
+++ b/include/hw/nvram/mac_nvram.h
@@ -0,0 +1,52 @@
+/*
+ * PowerMac NVRAM emulation
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef MAC_NVRAM_H
+#define MAC_NVRAM_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+#define MACIO_NVRAM_SIZE 0x2000
+
+#define TYPE_MACIO_NVRAM "macio-nvram"
+OBJECT_DECLARE_SIMPLE_TYPE(MacIONVRAMState, MACIO_NVRAM)
+
+struct MacIONVRAMState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ uint32_t size;
+ uint32_t it_shift;
+
+ MemoryRegion mem;
+ uint8_t *data;
+ BlockBackend *blk;
+};
+
+void pmac_format_nvram_partition(MacIONVRAMState *nvr, int len);
+
+#endif /* MAC_NVRAM_H */
diff --git a/include/hw/nvram/npcm7xx_otp.h b/include/hw/nvram/npcm7xx_otp.h
new file mode 100644
index 0000000000..ea4b5d0731
--- /dev/null
+++ b/include/hw/nvram/npcm7xx_otp.h
@@ -0,0 +1,79 @@
+/*
+ * Nuvoton NPCM7xx OTP (Fuse Array) Interface
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_OTP_H
+#define NPCM7XX_OTP_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/* Each OTP module holds 8192 bits of one-time programmable storage */
+#define NPCM7XX_OTP_ARRAY_BITS (8192)
+#define NPCM7XX_OTP_ARRAY_BYTES (NPCM7XX_OTP_ARRAY_BITS / BITS_PER_BYTE)
+
+/* Fuse array offsets */
+#define NPCM7XX_FUSE_FUSTRAP (0)
+#define NPCM7XX_FUSE_CP_FUSTRAP (12)
+#define NPCM7XX_FUSE_DAC_CALIB (16)
+#define NPCM7XX_FUSE_ADC_CALIB (24)
+#define NPCM7XX_FUSE_DERIVATIVE (64)
+#define NPCM7XX_FUSE_TEST_SIG (72)
+#define NPCM7XX_FUSE_DIE_LOCATION (74)
+#define NPCM7XX_FUSE_GP1 (80)
+#define NPCM7XX_FUSE_GP2 (128)
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_OTP_NR_REGS (0x18 / sizeof(uint32_t))
+
+/**
+ * struct NPCM7xxOTPState - Device state for one OTP module.
+ * @parent: System bus device.
+ * @mmio: Memory region through which registers are accessed.
+ * @regs: Register contents.
+ * @array: OTP storage array.
+ */
+typedef struct NPCM7xxOTPState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ uint32_t regs[NPCM7XX_OTP_NR_REGS];
+ uint8_t array[NPCM7XX_OTP_ARRAY_BYTES];
+} NPCM7xxOTPState;
+
+#define TYPE_NPCM7XX_OTP "npcm7xx-otp"
+#define NPCM7XX_OTP(obj) OBJECT_CHECK(NPCM7xxOTPState, (obj), TYPE_NPCM7XX_OTP)
+
+#define TYPE_NPCM7XX_KEY_STORAGE "npcm7xx-key-storage"
+#define TYPE_NPCM7XX_FUSE_ARRAY "npcm7xx-fuse-array"
+
+typedef struct NPCM7xxOTPClass NPCM7xxOTPClass;
+
+/**
+ * npcm7xx_otp_array_write - ECC encode and write data to OTP array.
+ * @s: OTP module.
+ * @data: Data to be encoded and written.
+ * @offset: Offset of first byte to be written in the OTP array.
+ * @len: Number of bytes before ECC encoding.
+ *
+ * Each nibble of data is encoded into a byte, so the number of bytes written
+ * to the array will be @len * 2.
+ */
+void npcm7xx_otp_array_write(NPCM7xxOTPState *s, const void *data,
+ unsigned int offset, unsigned int len);
+
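+/*
+ * A minimal usage sketch (hypothetical): program a made-up 4-byte value into
+ * the fuse array. Each nibble is ECC-encoded into one byte, so the 4 input
+ * bytes occupy 8 bytes of the array starting at NPCM7XX_FUSE_DERIVATIVE.
+ */
+static inline void npcm7xx_otp_example_program(NPCM7xxOTPState *s)
+{
+    static const uint8_t value[4] = { 0xde, 0xad, 0xbe, 0xef };
+
+    /* Writes sizeof(value) * 2 == 8 ECC-encoded bytes into the array. */
+    npcm7xx_otp_array_write(s, value, NPCM7XX_FUSE_DERIVATIVE, sizeof(value));
+}
+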
+#endif /* NPCM7XX_OTP_H */
diff --git a/include/hw/nvram/nrf51_nvm.h b/include/hw/nvram/nrf51_nvm.h
new file mode 100644
index 0000000000..d85e788df5
--- /dev/null
+++ b/include/hw/nvram/nrf51_nvm.h
@@ -0,0 +1,65 @@
+/*
+ * Nordic Semiconductor nRF51 non-volatile memory
+ *
+ * It provides an interface to erase regions in flash memory.
+ * Furthermore it provides the user and factory information registers.
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: NVMC peripheral registers
+ * + sysbus MMIO region 1: FICR peripheral registers
+ * + sysbus MMIO region 2: UICR peripheral registers
+ * + flash-size property: flash size in bytes.
+ *
+ * Accuracy of the peripheral model:
+ * + Code regions (MPU configuration) are disregarded.
+ *
+ * Copyright 2018 Steffen Görtz <contrib@steffen-goertz.de>
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+#ifndef NRF51_NVM_H
+#define NRF51_NVM_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#define TYPE_NRF51_NVM "nrf51_soc.nvm"
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51NVMState, NRF51_NVM)
+
+#define NRF51_UICR_FIXTURE_SIZE 64
+
+#define NRF51_NVMC_SIZE 0x1000
+
+#define NRF51_NVMC_READY 0x400
+#define NRF51_NVMC_READY_READY 0x01
+#define NRF51_NVMC_CONFIG 0x504
+#define NRF51_NVMC_CONFIG_MASK 0x03
+#define NRF51_NVMC_CONFIG_WEN 0x01
+#define NRF51_NVMC_CONFIG_EEN 0x02
+#define NRF51_NVMC_ERASEPCR1 0x508
+#define NRF51_NVMC_ERASEPCR0 0x510
+#define NRF51_NVMC_ERASEALL 0x50C
+#define NRF51_NVMC_ERASEUICR 0x514
+#define NRF51_NVMC_ERASE 0x01
+
+#define NRF51_UICR_SIZE 0x100
+
+struct NRF51NVMState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+ MemoryRegion ficr;
+ MemoryRegion uicr;
+ MemoryRegion flash;
+
+ uint32_t uicr_content[NRF51_UICR_FIXTURE_SIZE];
+ uint32_t flash_size;
+ uint8_t *storage;
+
+ uint32_t config;
+
+};
+
+
+#endif
diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h
new file mode 100644
index 0000000000..6fc13f8cc1
--- /dev/null
+++ b/include/hw/nvram/xlnx-bbram.h
@@ -0,0 +1,54 @@
+/*
+ * QEMU model of the Xilinx BBRAM Battery Backed RAM
+ *
+ * Copyright (c) 2015-2021 Xilinx Inc.
+ *
+ * Written by Edgar E. Iglesias <edgari@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_BBRAM_H
+#define XLNX_BBRAM_H
+
+#include "sysemu/block-backend.h"
+#include "hw/qdev-core.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define RMAX_XLNX_BBRAM ((0x4c / 4) + 1)
+
+#define TYPE_XLNX_BBRAM "xlnx.bbram-ctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxBBRam, XLNX_BBRAM);
+
+struct XlnxBBRam {
+ SysBusDevice parent_obj;
+ qemu_irq irq_bbram;
+
+ BlockBackend *blk;
+
+ uint32_t crc_zpads;
+ bool bbram8_wo;
+ bool blk_ro;
+
+ uint32_t regs[RMAX_XLNX_BBRAM];
+ RegisterInfo regs_info[RMAX_XLNX_BBRAM];
+};
+
+#endif
diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h
new file mode 100644
index 0000000000..cff7924106
--- /dev/null
+++ b/include/hw/nvram/xlnx-efuse.h
@@ -0,0 +1,132 @@
+/*
+ * QEMU model of the Xilinx eFuse core
+ *
+ * Copyright (c) 2015 Xilinx Inc.
+ *
+ * Written by Edgar E. Iglesias <edgari@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_EFUSE_H
+#define XLNX_EFUSE_H
+
+#include "sysemu/block-backend.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_XLNX_EFUSE "xlnx-efuse"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxEFuse, XLNX_EFUSE);
+
+struct XlnxEFuse {
+ DeviceState parent_obj;
+ BlockBackend *blk;
+ bool blk_ro;
+ uint32_t *fuse32;
+
+ DeviceState *dev;
+
+ bool init_tbits;
+
+ uint8_t efuse_nr;
+ uint32_t efuse_size;
+
+ uint32_t *ro_bits;
+ uint32_t ro_bits_cnt;
+};
+
+/**
+ * xlnx_efuse_calc_crc:
+ * @data: an array of 32-bit words for which the CRC should be computed
+ * @u32_cnt: the array size in number of 32-bit words
+ * @zpads: the number of 32-bit zeros prepended to @data before computation
+ *
+ * This function is used to compute the CRC for an array of 32-bit words,
+ * using a Xilinx-specific data padding.
+ *
+ * Returns: the computed 32-bit CRC
+ */
+uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt,
+ unsigned zpads);
+
+/**
+ * xlnx_efuse_get_bit:
+ * @s: the efuse object
+ * @bit: the efuse bit-address to read the data
+ *
+ * Returns: the bit, 0 or 1, at @bit of object @s
+ */
+bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit);
+
+/**
+ * xlnx_efuse_set_bit:
+ * @s: the efuse object
+ * @bit: the efuse bit-address to be written a value of 1
+ *
+ * Returns: true on success, false on failure
+ */
+bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit);
+
+/**
+ * xlnx_efuse_k256_check:
+ * @s: the efuse object
+ * @crc: the 32-bit CRC to be compared with
+ * @start: the efuse bit-address (which must be a multiple of 32) of the
+ *         start of a 256-bit array
+ *
+ * This function computes the CRC of the 256-bit array starting at @start,
+ * then compares it to the given @crc.
+ *
+ * Returns: true if @crc equals the computed CRC, false otherwise
+ */
+bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start);
+
+/**
+ * xlnx_efuse_tbits_check:
+ * @s: the efuse object
+ *
+ * This function inspects a number of efuse bits at specific addresses
+ * to see if they match a validation pattern. Each pattern is a group
+ * of 4 bits, and there are 3 groups.
+ *
+ * Returns: a 3-bit mask, where a bit of '1' means the corresponding
+ * group has a valid pattern.
+ */
+uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s);
+
+/**
+ * xlnx_efuse_get_row:
+ * @s: the efuse object
+ * @bit: the efuse bit address for which a 32-bit value is read
+ *
+ * Returns: the entire 32 bits of the efuse, starting at a bit
+ * address that is a multiple of 32 and contains the bit at @bit
+ */
+static inline uint32_t xlnx_efuse_get_row(XlnxEFuse *s, unsigned int bit)
+{
+ if (!(s->fuse32)) {
+ return 0;
+ } else {
+ unsigned int row_idx = bit / 32;
+
+ assert(row_idx < (s->efuse_size * s->efuse_nr / 32));
+ return s->fuse32[row_idx];
+ }
+}
+
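+/*
+ * A minimal usage sketch (hypothetical): verify a 256-bit key stored as
+ * eight consecutive 32-bit rows. The start bit-address and the zero-padding
+ * count are made up here and are device specific in practice.
+ */
+static inline bool xlnx_efuse_example_check_key(XlnxEFuse *s,
+                                                const uint32_t key[8])
+{
+    uint32_t crc = xlnx_efuse_calc_crc(key, 8, 0);
+
+    /* The start address must be a multiple of 32. */
+    return xlnx_efuse_k256_check(s, crc, 8 * 32);
+}
+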
+#endif
diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h
new file mode 100644
index 0000000000..86e2261b9a
--- /dev/null
+++ b/include/hw/nvram/xlnx-versal-efuse.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_VERSAL_EFUSE_H
+#define XLNX_VERSAL_EFUSE_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/nvram/xlnx-efuse.h"
+
+#define XLNX_VERSAL_EFUSE_CTRL_R_MAX ((0x100 / 4) + 1)
+
+#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx-versal-efuse"
+#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx-pmc-efuse-cache"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCtrl, XLNX_VERSAL_EFUSE_CTRL);
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCache, XLNX_VERSAL_EFUSE_CACHE);
+
+struct XlnxVersalEFuseCtrl {
+ SysBusDevice parent_obj;
+ qemu_irq irq_efuse_imr;
+
+ XlnxEFuse *efuse;
+
+ void *extra_pg0_lock_spec; /* Opaque property */
+ uint32_t extra_pg0_lock_n16;
+
+ uint32_t regs[XLNX_VERSAL_EFUSE_CTRL_R_MAX];
+ RegisterInfo regs_info[XLNX_VERSAL_EFUSE_CTRL_R_MAX];
+};
+
+struct XlnxVersalEFuseCache {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ XlnxEFuse *efuse;
+};
+
+/**
+ * xlnx_versal_efuse_read_row:
+ * @s: the efuse object
+ * @bit: a bit-address within the 32-bit row to be read
+ * @denied: if non-NULL, set to true if the row is write-only
+ *
+ * Returns: the 32-bit word containing address @bit; 0 if @denied is true
+ */
+uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *s, uint32_t bit, bool *denied);
+
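+/*
+ * A minimal usage sketch (hypothetical): read one cached row and distinguish
+ * a genuine zero from a write-only (denied) row.
+ */
+static inline uint32_t xlnx_versal_efuse_example_read(XlnxEFuse *s,
+                                                      uint32_t bit,
+                                                      bool *is_readable)
+{
+    bool denied = false;
+    uint32_t row = xlnx_versal_efuse_read_row(s, bit, &denied);
+
+    if (is_readable) {
+        *is_readable = !denied;
+    }
+    return row;
+}
+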
+#endif
diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h
new file mode 100644
index 0000000000..f5beacc2e6
--- /dev/null
+++ b/include/hw/nvram/xlnx-zynqmp-efuse.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Xilinx Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_ZYNQMP_EFUSE_H
+#define XLNX_ZYNQMP_EFUSE_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/nvram/xlnx-efuse.h"
+
+#define XLNX_ZYNQMP_EFUSE_R_MAX ((0x10fc / 4) + 1)
+
+#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx-zynqmp-efuse"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPEFuse, XLNX_ZYNQMP_EFUSE);
+
+struct XlnxZynqMPEFuse {
+ SysBusDevice parent_obj;
+ qemu_irq irq;
+
+ XlnxEFuse *efuse;
+ uint32_t regs[XLNX_ZYNQMP_EFUSE_R_MAX];
+ RegisterInfo regs_info[XLNX_ZYNQMP_EFUSE_R_MAX];
+};
+
+#endif
diff --git a/include/hw/openrisc/boot.h b/include/hw/openrisc/boot.h
new file mode 100644
index 0000000000..25a313d63a
--- /dev/null
+++ b/include/hw/openrisc/boot.h
@@ -0,0 +1,34 @@
+/*
+ * QEMU OpenRISC boot helpers.
+ *
+ * Copyright (c) 2022 Stafford Horne <shorne@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENRISC_BOOT_H
+#define OPENRISC_BOOT_H
+
+#include "exec/cpu-defs.h"
+
+hwaddr openrisc_load_kernel(ram_addr_t ram_size,
+ const char *kernel_filename,
+ uint32_t *bootstrap_pc);
+
+hwaddr openrisc_load_initrd(void *fdt, const char *filename,
+ hwaddr load_start, uint64_t mem_size);
+
+uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start,
+ uint64_t mem_size);
+
+#endif /* OPENRISC_BOOT_H */
diff --git a/include/hw/or-irq.h b/include/hw/or-irq.h
index 5a31e5a188..c0a42f3711 100644
--- a/include/hw/or-irq.h
+++ b/include/hw/or-irq.h
@@ -25,7 +25,6 @@
#ifndef HW_OR_IRQ_H
#define HW_OR_IRQ_H
-#include "hw/irq.h"
#include "hw/sysbus.h"
#include "qom/object.h"
@@ -34,11 +33,9 @@
/* This can safely be increased if necessary without breaking
* migration compatibility (as long as it remains greater than 15).
*/
-#define MAX_OR_LINES 32
+#define MAX_OR_LINES 48
-typedef struct OrIRQState qemu_or_irq;
-
-#define OR_IRQ(obj) OBJECT_CHECK(qemu_or_irq, (obj), TYPE_OR_IRQ)
+OBJECT_DECLARE_SIMPLE_TYPE(OrIRQState, OR_IRQ)
struct OrIRQState {
DeviceState parent_obj;
diff --git a/include/hw/pci-bridge/cxl_upstream_port.h b/include/hw/pci-bridge/cxl_upstream_port.h
new file mode 100644
index 0000000000..12635139f6
--- /dev/null
+++ b/include/hw/pci-bridge/cxl_upstream_port.h
@@ -0,0 +1,19 @@
+
+#ifndef CXL_USP_H
+#define CXL_USP_H
+#include "hw/pci/pcie.h"
+#include "hw/pci/pcie_port.h"
+#include "hw/cxl/cxl.h"
+
+typedef struct CXLUpstreamPort {
+ /*< private >*/
+ PCIEPort parent_obj;
+
+ /*< public >*/
+ CXLComponentState cxl_cstate;
+ CXLCCI swcci;
+ DOECap doe_cdat;
+ uint64_t sn;
+} CXLUpstreamPort;
+
+#endif /* CXL_USP_H */
diff --git a/include/hw/pci-bridge/pci_expander_bridge.h b/include/hw/pci-bridge/pci_expander_bridge.h
new file mode 100644
index 0000000000..0b3856d615
--- /dev/null
+++ b/include/hw/pci-bridge/pci_expander_bridge.h
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PCI_EXPANDER_BRIDGE_H
+#define PCI_EXPANDER_BRIDGE_H
+
+#include "hw/cxl/cxl.h"
+
+void pxb_cxl_hook_up_registers(CXLState *state, PCIBus *bus, Error **errp);
+
+#endif /* PCI_EXPANDER_BRIDGE_H */
diff --git a/include/hw/pci-bridge/simba.h b/include/hw/pci-bridge/simba.h
index e13ba27d0b..979cb17435 100644
--- a/include/hw/pci-bridge/simba.h
+++ b/include/hw/pci-bridge/simba.h
@@ -24,14 +24,19 @@
* THE SOFTWARE.
*/
+#ifndef HW_PCI_BRIDGE_SIMBA_H
+#define HW_PCI_BRIDGE_SIMBA_H
+
#include "hw/pci/pci_bridge.h"
+#include "qom/object.h"
-typedef struct SimbaPCIBridge {
+struct SimbaPCIBridge {
/*< private >*/
PCIBridge parent_obj;
-} SimbaPCIBridge;
+};
#define TYPE_SIMBA_PCI_BRIDGE "pbm-bridge"
-#define SIMBA_PCI_BRIDGE(obj) \
- OBJECT_CHECK(SimbaPCIBridge, (obj), TYPE_SIMBA_PCI_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(SimbaPCIBridge, SIMBA_PCI_BRIDGE)
+
+#endif
diff --git a/include/hw/pci-bridge/xio3130_downstream.h b/include/hw/pci-bridge/xio3130_downstream.h
new file mode 100644
index 0000000000..1d10139aea
--- /dev/null
+++ b/include/hw/pci-bridge/xio3130_downstream.h
@@ -0,0 +1,15 @@
+/*
+ * TI X3130 pci express downstream port switch
+ *
+ * Copyright (C) 2022 Igor Mammedov <imammedo@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H
+#define HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H
+
+#define TYPE_XIO3130_DOWNSTREAM "xio3130-downstream"
+
+#endif
+
diff --git a/include/hw/pci-host/articia.h b/include/hw/pci-host/articia.h
new file mode 100644
index 0000000000..529c240274
--- /dev/null
+++ b/include/hw/pci-host/articia.h
@@ -0,0 +1,17 @@
+/*
+ * Mai Logic Articia S emulation
+ *
+ * Copyright (c) 2023 BALATON Zoltan
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef ARTICIA_H
+#define ARTICIA_H
+
+#define TYPE_ARTICIA "articia"
+#define TYPE_ARTICIA_PCI_HOST "articia-pci-host"
+#define TYPE_ARTICIA_PCI_BRIDGE "articia-pci-bridge"
+
+#endif
diff --git a/include/hw/pci-host/astro.h b/include/hw/pci-host/astro.h
new file mode 100644
index 0000000000..e2966917cd
--- /dev/null
+++ b/include/hw/pci-host/astro.h
@@ -0,0 +1,94 @@
+/*
+ * HP-PARISC Astro Bus connector with Elroy PCI host bridges
+ */
+
+#ifndef ASTRO_H
+#define ASTRO_H
+
+#include "hw/pci/pci_host.h"
+
+#define ASTRO_HPA 0xfed00000
+
+#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
+
+#define TYPE_ASTRO_CHIP "astro-chip"
+OBJECT_DECLARE_SIMPLE_TYPE(AstroState, ASTRO_CHIP)
+
+#define TYPE_ELROY_PCI_HOST_BRIDGE "elroy-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(ElroyState, ELROY_PCI_HOST_BRIDGE)
+
+#define ELROY_NUM 4 /* # of Elroys */
+#define ELROY_IRQS 8 /* IOSAPIC IRQs */
+
+/* ASTRO Memory and I/O regions */
+#define LMMIO_DIST_BASE_ADDR 0xf4000000ULL
+#define LMMIO_DIST_BASE_SIZE 0x4000000ULL
+
+#define IOS_DIST_BASE_ADDR 0xfffee00000ULL
+#define IOS_DIST_BASE_SIZE 0x10000ULL
+
+#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
+
+struct AstroState;
+
+struct ElroyState {
+ PCIHostState parent_obj;
+
+ /* parent Astro device */
+ struct AstroState *astro;
+
+ /* HPA of this Elroy */
+ hwaddr hpa;
+
+ /* PCI bus number (Elroy number) */
+ unsigned int pci_bus_num;
+
+ uint64_t config_address;
+ uint64_t config_reg_elroy;
+
+ uint64_t status_control;
+ uint64_t arb_mask;
+ uint64_t mmio_base[(0x0250 - 0x200) / 8];
+ uint64_t error_config;
+
+ uint32_t iosapic_reg_select;
+ uint64_t iosapic_reg[0x20];
+
+ uint32_t ilr;
+
+ MemoryRegion this_mem;
+
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_mmio_alias;
+ MemoryRegion pci_hole;
+ MemoryRegion pci_io;
+};
+
+struct AstroState {
+ PCIHostState parent_obj;
+
+ uint64_t ioc_ctrl;
+ uint64_t ioc_status_ctrl;
+ uint64_t ioc_ranges[(0x03d8 - 0x300) / 8];
+ uint64_t ioc_rope_config;
+ uint64_t ioc_status_control;
+ uint64_t ioc_flush_control;
+ uint64_t ioc_rope_control[8];
+ uint64_t tlb_ibase;
+ uint64_t tlb_imask;
+ uint64_t tlb_pcom;
+ uint64_t tlb_tcnfg;
+ uint64_t tlb_pdir_base;
+
+ struct ElroyState *elroy[ELROY_NUM];
+
+ MemoryRegion this_mem;
+
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_io;
+
+ IOMMUMemoryRegion iommu;
+ AddressSpace iommu_as;
+};
+
+#endif
diff --git a/include/hw/pci-host/bonito.h b/include/hw/pci-host/bonito.h
new file mode 100644
index 0000000000..b8ecf7870a
--- /dev/null
+++ b/include/hw/pci-host/bonito.h
@@ -0,0 +1,18 @@
+/*
+ * QEMU Bonito64 north bridge support
+ *
+ * Copyright (c) 2008 yajin (yajin@vm-kernel.org)
+ * Copyright (c) 2010 Huacai Chen (zltjiangshi@gmail.com)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PCI_HOST_BONITO_H
+#define HW_PCI_HOST_BONITO_H
+
+#include "qom/object.h"
+
+#define TYPE_BONITO_PCI_HOST_BRIDGE "Bonito-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(BonitoState, BONITO_PCI_HOST_BRIDGE)
+
+#endif
diff --git a/include/hw/pci-host/designware.h b/include/hw/pci-host/designware.h
index a4f2c0695b..908f3d946b 100644
--- a/include/hw/pci-host/designware.h
+++ b/include/hw/pci-host/designware.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,23 +21,17 @@
#ifndef DESIGNWARE_H
#define DESIGNWARE_H
-#include "hw/hw.h"
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
-#include "hw/pci/pci_bus.h"
-#include "hw/pci/pcie_host.h"
#include "hw/pci/pci_bridge.h"
+#include "qom/object.h"
#define TYPE_DESIGNWARE_PCIE_HOST "designware-pcie-host"
-#define DESIGNWARE_PCIE_HOST(obj) \
- OBJECT_CHECK(DesignwarePCIEHost, (obj), TYPE_DESIGNWARE_PCIE_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIEHost, DESIGNWARE_PCIE_HOST)
#define TYPE_DESIGNWARE_PCIE_ROOT "designware-pcie-root"
-#define DESIGNWARE_PCIE_ROOT(obj) \
- OBJECT_CHECK(DesignwarePCIERoot, (obj), TYPE_DESIGNWARE_PCIE_ROOT)
+OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIERoot, DESIGNWARE_PCIE_ROOT)
struct DesignwarePCIERoot;
-typedef struct DesignwarePCIERoot DesignwarePCIERoot;
typedef struct DesignwarePCIEViewport {
DesignwarePCIERoot *root;
@@ -81,7 +75,7 @@ struct DesignwarePCIERoot {
DesignwarePCIEMSI msi;
};
-typedef struct DesignwarePCIEHost {
+struct DesignwarePCIEHost {
PCIHostState parent_obj;
DesignwarePCIERoot root;
@@ -97,6 +91,6 @@ typedef struct DesignwarePCIEHost {
} pci;
MemoryRegion mmio;
-} DesignwarePCIEHost;
+};
-#endif /* DESIGNWARE_H */
+#endif /* DESIGNWARE_H */
diff --git a/include/hw/pci-host/dino.h b/include/hw/pci-host/dino.h
new file mode 100644
index 0000000000..fd7975c798
--- /dev/null
+++ b/include/hw/pci-host/dino.h
@@ -0,0 +1,146 @@
+/*
+ * HP-PARISC Dino PCI chipset emulation, as in B160L and similar machines
+ *
+ * (C) 2017-2019 by Helge Deller <deller@gmx.de>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ * Documentation available at:
+ * https://parisc.wiki.kernel.org/images-parisc/9/91/Dino_ers.pdf
+ * https://parisc.wiki.kernel.org/images-parisc/7/70/Dino_3_1_Errata.pdf
+ */
+
+#ifndef DINO_H
+#define DINO_H
+
+#include "hw/pci/pci_host.h"
+
+#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(DinoState, DINO_PCI_HOST_BRIDGE)
+
+#define DINO_IAR0 0x004
+#define DINO_IODC 0x008
+#define DINO_IRR0 0x00C /* RO */
+#define DINO_IAR1 0x010
+#define DINO_IRR1 0x014 /* RO */
+#define DINO_IMR 0x018
+#define DINO_IPR 0x01C
+#define DINO_TOC_ADDR 0x020
+#define DINO_ICR 0x024
+#define DINO_ILR 0x028 /* RO */
+#define DINO_IO_COMMAND 0x030 /* WO */
+#define DINO_IO_STATUS 0x034 /* RO */
+#define DINO_IO_CONTROL 0x038
+#define DINO_IO_GSC_ERR_RESP 0x040 /* RO */
+#define DINO_IO_ERR_INFO 0x044 /* RO */
+#define DINO_IO_PCI_ERR_RESP 0x048 /* RO */
+#define DINO_IO_FBB_EN 0x05c
+#define DINO_IO_ADDR_EN 0x060
+#define DINO_PCI_CONFIG_ADDR 0x064
+#define DINO_PCI_CONFIG_DATA 0x068
+#define DINO_PCI_IO_DATA 0x06c
+#define DINO_PCI_MEM_DATA 0x070 /* Dino 3.x only */
+#define DINO_GSC2X_CONFIG 0x7b4 /* RO */
+#define DINO_GMASK 0x800
+#define DINO_PAMR 0x804
+#define DINO_PAPR 0x808
+#define DINO_DAMODE 0x80c
+#define DINO_PCICMD 0x810
+#define DINO_PCISTS 0x814 /* R/WC */
+#define DINO_MLTIM 0x81c
+#define DINO_BRDG_FEAT 0x820
+#define DINO_PCIROR 0x824
+#define DINO_PCIWOR 0x828
+#define DINO_TLTIM 0x830
+
+#define DINO_IRQS 11 /* bits 0-10 are architected */
+#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
+#define DINO_LOCAL_IRQS (DINO_IRQS + 1)
+#define DINO_MASK_IRQ(x) (1 << (x))
+
+#define DINO_IRQ_PCIINTA 0
+#define DINO_IRQ_PCIINTB 1
+#define DINO_IRQ_PCIINTC 2
+#define DINO_IRQ_PCIINTD 3
+#define DINO_IRQ_PCIINTE 4
+#define DINO_IRQ_PCIINTF 5
+#define DINO_IRQ_GSCEXTINT 6
+#define DINO_IRQ_BUSERRINT 7
+#define DINO_IRQ_PS2INT 8
+#define DINO_IRQ_UNUSED 9
+#define DINO_IRQ_RS232INT 10
+
+#define PCIINTA 0x001
+#define PCIINTB 0x002
+#define PCIINTC 0x004
+#define PCIINTD 0x008
+#define PCIINTE 0x010
+#define PCIINTF 0x020
+#define GSCEXTINT 0x040
+/* #define xxx 0x080 - bit 7 is "default" */
+/* #define xxx 0x100 - bit 8 not used */
+/* #define xxx 0x200 - bit 9 not used */
+#define RS232INT 0x400
+
+#define DINO_MEM_CHUNK_SIZE (8 * MiB)
+
+#define DINO800_REGS (1 + (DINO_TLTIM - DINO_GMASK) / 4)
+static const uint32_t reg800_keep_bits[DINO800_REGS] = {
+ MAKE_64BIT_MASK(0, 1), /* GMASK */
+ MAKE_64BIT_MASK(0, 7), /* PAMR */
+ MAKE_64BIT_MASK(0, 7), /* PAPR */
+ MAKE_64BIT_MASK(0, 8), /* DAMODE */
+ MAKE_64BIT_MASK(0, 7), /* PCICMD */
+ MAKE_64BIT_MASK(0, 9), /* PCISTS */
+ MAKE_64BIT_MASK(0, 32), /* Undefined */
+ MAKE_64BIT_MASK(0, 8), /* MLTIM */
+ MAKE_64BIT_MASK(0, 30), /* BRDG_FEAT */
+ MAKE_64BIT_MASK(0, 24), /* PCIROR */
+ MAKE_64BIT_MASK(0, 22), /* PCIWOR */
+ MAKE_64BIT_MASK(0, 32), /* Undocumented */
+ MAKE_64BIT_MASK(0, 9), /* TLTIM */
+};
+
+/* offsets to DINO HPA: */
+#define DINO_PCI_ADDR 0x064
+#define DINO_CONFIG_DATA 0x068
+#define DINO_IO_DATA 0x06c
+
+struct DinoState {
+ PCIHostState parent_obj;
+
+ /*
+ * PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops,
+ * so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops.
+ */
+ uint32_t config_reg_dino; /* keep original copy, including 2 lowest bits */
+
+ uint32_t iar0;
+ uint32_t iar1;
+ uint32_t imr;
+ uint32_t ipr;
+ uint32_t icr;
+ uint32_t ilr;
+ uint32_t io_fbb_en;
+ uint32_t io_addr_en;
+ uint32_t io_control;
+ uint32_t toc_addr;
+
+ uint32_t reg800[DINO800_REGS];
+
+ MemoryRegion this_mem;
+ MemoryRegion pci_mem;
+ MemoryRegion pci_mem_alias[32];
+
+ MemoryRegion *memory_as;
+
+ AddressSpace bm_as;
+ MemoryRegion bm;
+ MemoryRegion bm_ram_alias;
+ MemoryRegion bm_pci_alias;
+ MemoryRegion bm_cpu_alias;
+
+ qemu_irq irqs[DINO_IRQS];
+};
+
+#endif
diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
index aef38b881b..dce883573b 100644
--- a/include/hw/pci-host/gpex.h
+++ b/include/hw/pci-host/gpex.h
@@ -13,35 +13,43 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>
*/
#ifndef HW_GPEX_H
#define HW_GPEX_H
-#include "hw/hw.h"
+#include "exec/hwaddr.h"
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pcie_host.h"
+#include "qom/object.h"
#define TYPE_GPEX_HOST "gpex-pcihost"
-#define GPEX_HOST(obj) \
- OBJECT_CHECK(GPEXHost, (obj), TYPE_GPEX_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(GPEXHost, GPEX_HOST)
#define TYPE_GPEX_ROOT_DEVICE "gpex-root"
-#define MCH_PCI_DEVICE(obj) \
- OBJECT_CHECK(GPEXRootState, (obj), TYPE_GPEX_ROOT_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(GPEXRootState, GPEX_ROOT_DEVICE)
#define GPEX_NUM_IRQS 4
-typedef struct GPEXRootState {
+struct GPEXRootState {
/*< private >*/
PCIDevice parent_obj;
/*< public >*/
-} GPEXRootState;
+};
-typedef struct GPEXHost {
+struct GPEXConfig {
+ MemMapEntry ecam;
+ MemMapEntry mmio32;
+ MemMapEntry mmio64;
+ MemMapEntry pio;
+ int irq;
+ PCIBus *bus;
+};
+
+struct GPEXHost {
/*< private >*/
PCIExpressHost parent_obj;
/*< public >*/
@@ -50,10 +58,28 @@ typedef struct GPEXHost {
MemoryRegion io_ioport;
MemoryRegion io_mmio;
+ MemoryRegion io_ioport_window;
+ MemoryRegion io_mmio_window;
qemu_irq irq[GPEX_NUM_IRQS];
int irq_num[GPEX_NUM_IRQS];
-} GPEXHost;
+
+ bool allow_unmapped_accesses;
+
+ struct GPEXConfig gpex_cfg;
+};
int gpex_set_irq_num(GPEXHost *s, int index, int gsi);
+void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg);
+void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq);
+
+#define PCI_HOST_PIO_BASE "x-pio-base"
+#define PCI_HOST_PIO_SIZE "x-pio-size"
+#define PCI_HOST_ECAM_BASE "x-ecam-base"
+#define PCI_HOST_ECAM_SIZE "x-ecam-size"
+#define PCI_HOST_BELOW_4G_MMIO_BASE "x-below-4g-mmio-base"
+#define PCI_HOST_BELOW_4G_MMIO_SIZE "x-below-4g-mmio-size"
+#define PCI_HOST_ABOVE_4G_MMIO_BASE "x-above-4g-mmio-base"
+#define PCI_HOST_ABOVE_4G_MMIO_SIZE "x-above-4g-mmio-size"
+
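+/*
+ * A minimal usage sketch (hypothetical): describe a fixed ECAM/MMIO/PIO
+ * layout to the DSDT generator. All base addresses, sizes and the IRQ
+ * number below are made up; boards fill GPEXConfig from their own memory
+ * map.
+ */
+static inline void gpex_example_dsdt_describe(Aml *scope, PCIBus *bus)
+{
+    struct GPEXConfig cfg = {
+        .ecam   = { .base = 0x10000000, .size = 0x10000000 },
+        .mmio32 = { .base = 0x20000000, .size = 0x10000000 },
+        .pio    = { .base = 0x3eff0000, .size = 0x00010000 },
+        .irq    = 32,
+        .bus    = bus,
+    };
+
+    acpi_dsdt_add_gpex(scope, &cfg);
+}
+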
#endif /* HW_GPEX_H */
diff --git a/include/hw/pci-host/grackle.h b/include/hw/pci-host/grackle.h
new file mode 100644
index 0000000000..7ad3a779f0
--- /dev/null
+++ b/include/hw/pci-host/grackle.h
@@ -0,0 +1,44 @@
+/*
+ * QEMU Grackle PCI host (heathrow OldWorld PowerMac)
+ *
+ * Copyright (c) 2006-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef GRACKLE_H
+#define GRACKLE_H
+
+#include "hw/pci/pci_host.h"
+
+#define TYPE_GRACKLE_PCI_HOST_BRIDGE "grackle-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(GrackleState, GRACKLE_PCI_HOST_BRIDGE)
+
+struct GrackleState {
+ PCIHostState parent_obj;
+
+ uint32_t ofw_addr;
+ qemu_irq irqs[4];
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_hole;
+ MemoryRegion pci_io;
+};
+
+#endif
diff --git a/include/hw/pci-host/i440fx.h b/include/hw/pci-host/i440fx.h
new file mode 100644
index 0000000000..c988f70890
--- /dev/null
+++ b/include/hw/pci-host/i440fx.h
@@ -0,0 +1,37 @@
+/*
+ * QEMU i440FX North Bridge Emulation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HW_PCI_I440FX_H
+#define HW_PCI_I440FX_H
+
+#include "hw/pci/pci_device.h"
+#include "hw/pci-host/pam.h"
+#include "qom/object.h"
+
+#define I440FX_HOST_PROP_PCI_TYPE "pci-type"
+
+#define TYPE_I440FX_PCI_HOST_BRIDGE "i440FX-pcihost"
+#define TYPE_I440FX_PCI_DEVICE "i440FX"
+
+OBJECT_DECLARE_SIMPLE_TYPE(PCII440FXState, I440FX_PCI_DEVICE)
+
+struct PCII440FXState {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+
+ PAMMemoryRegion pam_regions[PAM_REGIONS_COUNT];
+ MemoryRegion smram_region;
+ MemoryRegion smram, low_smram;
+};
+
+#define TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE "igd-passthrough-i440FX"
+
+#endif
diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h
new file mode 100644
index 0000000000..e753449593
--- /dev/null
+++ b/include/hw/pci-host/ls7a.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU LoongArch CPU
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LS7A_H
+#define HW_LS7A_H
+
+#include "hw/pci-host/pam.h"
+#include "qemu/units.h"
+#include "qemu/range.h"
+#include "qom/object.h"
+
+#define VIRT_PCI_MEM_BASE 0x40000000UL
+#define VIRT_PCI_MEM_SIZE 0x40000000UL
+#define VIRT_PCI_IO_OFFSET 0x4000
+#define VIRT_PCI_CFG_BASE 0x20000000
+#define VIRT_PCI_CFG_SIZE 0x08000000
+#define VIRT_PCI_IO_BASE 0x18004000UL
+#define VIRT_PCI_IO_SIZE 0xC000
+
+#define VIRT_PCH_REG_BASE 0x10000000UL
+#define VIRT_IOAPIC_REG_BASE (VIRT_PCH_REG_BASE)
+#define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL
+
+/*
+ * GSI_BASE is hard-coded to 64 in the Linux kernel; with any other value the
+ * kernel fails to boot.
+ * 0 - 15: GSIs for ISA devices, even if no ISA devices are present
+ * 16 - 63: GSIs for CPU devices such as timers and performance monitors
+ * 64 and up: GSIs for external devices
+ */
+#define VIRT_PCH_PIC_IRQ_NUM 32
+#define VIRT_GSI_BASE 64
+#define VIRT_DEVICE_IRQS 16
+#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2)
+#define VIRT_UART_BASE 0x1fe001e0
+#define VIRT_UART_SIZE 0X100
+#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 3)
+#define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000)
+#define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100)
+#define VIRT_RTC_LEN 0x100
+#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 4)
+
+#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000
+#define VIRT_PLATFORM_BUS_SIZE 0x2000000
+#define VIRT_PLATFORM_BUS_NUM_IRQS 2
+#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 5)
+#endif
diff --git a/include/hw/pci-host/mv64361.h b/include/hw/pci-host/mv64361.h
new file mode 100644
index 0000000000..9cdb35cb3c
--- /dev/null
+++ b/include/hw/pci-host/mv64361.h
@@ -0,0 +1,8 @@
+#ifndef MV64361_H
+#define MV64361_H
+
+#define TYPE_MV64361 "mv64361"
+
+PCIBus *mv64361_get_pci_bus(DeviceState *dev, int n);
+
+#endif
diff --git a/include/hw/pci-host/pam.h b/include/hw/pci-host/pam.h
index 6116c638f9..005916f826 100644
--- a/include/hw/pci-host/pam.h
+++ b/include/hw/pci-host/pam.h
@@ -50,7 +50,6 @@
* 0xf0000 - 0xfffff System BIOS Area Memory Segments
*/
-#include "qemu-common.h"
#include "exec/memory.h"
#define SMRAM_C_BASE 0xa0000
@@ -81,13 +80,16 @@
#define SMRAM_C_BASE_SEG_MASK ((uint8_t)0x7)
#define SMRAM_C_BASE_SEG ((uint8_t)0x2) /* hardwired to b010 */
+#define PAM_REGIONS_COUNT 13
+
typedef struct PAMMemoryRegion {
MemoryRegion alias[4]; /* index = PAM value */
unsigned current;
} PAMMemoryRegion;
-void init_pam(DeviceState *dev, MemoryRegion *ram, MemoryRegion *system,
- MemoryRegion *pci, PAMMemoryRegion *mem, uint32_t start, uint32_t size);
+void init_pam(PAMMemoryRegion *mem, Object *owner, MemoryRegion *ram,
+ MemoryRegion *system, MemoryRegion *pci,
+ uint32_t start, uint32_t size);
void pam_update(PAMMemoryRegion *mem, int idx, uint8_t val);
#endif /* QEMU_PAM_H */
diff --git a/include/hw/pci-host/pnv_phb3.h b/include/hw/pci-host/pnv_phb3.h
new file mode 100644
index 0000000000..d62b3091ac
--- /dev/null
+++ b/include/hw/pci-host/pnv_phb3.h
@@ -0,0 +1,170 @@
+/*
+ * QEMU PowerPC PowerNV (POWER8) PHB3 model
+ *
+ * Copyright (c) 2014-2020, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PCI_HOST_PNV_PHB3_H
+#define PCI_HOST_PNV_PHB3_H
+
+#include "hw/ppc/xics.h"
+#include "qom/object.h"
+#include "hw/pci-host/pnv_phb.h"
+
+typedef struct PnvPHB3 PnvPHB3;
+
+/*
+ * PHB3 XICS Source for MSIs
+ */
+#define TYPE_PHB3_MSI "phb3-msi"
+typedef struct Phb3MsiState Phb3MsiState;
+DECLARE_INSTANCE_CHECKER(Phb3MsiState, PHB3_MSI,
+ TYPE_PHB3_MSI)
+
+#define PHB3_MAX_MSI 2048
+
+struct Phb3MsiState {
+ ICSState ics;
+ qemu_irq *qirqs;
+
+ PnvPHB3 *phb;
+ uint64_t rba[PHB3_MAX_MSI / 64];
+ uint32_t rba_sum;
+};
+
+void pnv_phb3_msi_update_config(Phb3MsiState *msis, uint32_t base,
+ uint32_t count);
+void pnv_phb3_msi_send(Phb3MsiState *msis, uint64_t addr, uint16_t data,
+ int32_t dev_pe);
+void pnv_phb3_msi_ffi(Phb3MsiState *msis, uint64_t val);
+void pnv_phb3_msi_pic_print_info(Phb3MsiState *msis, Monitor *mon);
+
+
+/*
+ * We have one such address space wrapper per possible device under
+ * the PHB since they need to be assigned statically at qemu device
+ * creation time. The relationship to a PE is done later dynamically.
+ * This means we can potentially create a lot of these guys. Q35
+ * stores them as some kind of radix tree but we never really need to
+ * do fast lookups so instead we simply keep a QLIST of them for now,
+ * we can add the radix if needed later on.
+ *
+ * We do cache the PE number to speed things up a bit though.
+ */
+typedef struct PnvPhb3DMASpace {
+ PCIBus *bus;
+ uint8_t devfn;
+ int pe_num; /* Cached PE number */
+#define PHB_INVALID_PE (-1)
+ PnvPHB3 *phb;
+ AddressSpace dma_as;
+ IOMMUMemoryRegion dma_mr;
+ MemoryRegion msi32_mr;
+ MemoryRegion msi64_mr;
+ QLIST_ENTRY(PnvPhb3DMASpace) list;
+} PnvPhb3DMASpace;
+
+/*
+ * PHB3 Power Bus Common Queue
+ */
+#define TYPE_PNV_PBCQ "pnv-pbcq"
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPBCQState, PNV_PBCQ)
+
+struct PnvPBCQState {
+ DeviceState parent;
+
+ uint32_t nest_xbase;
+ uint32_t spci_xbase;
+ uint32_t pci_xbase;
+#define PBCQ_NEST_REGS_COUNT 0x46
+#define PBCQ_PCI_REGS_COUNT 0x15
+#define PBCQ_SPCI_REGS_COUNT 0x5
+
+ uint64_t nest_regs[PBCQ_NEST_REGS_COUNT];
+ uint64_t spci_regs[PBCQ_SPCI_REGS_COUNT];
+ uint64_t pci_regs[PBCQ_PCI_REGS_COUNT];
+ MemoryRegion mmbar0;
+ MemoryRegion mmbar1;
+ MemoryRegion phbbar;
+ uint64_t mmio0_base;
+ uint64_t mmio0_size;
+ uint64_t mmio1_base;
+ uint64_t mmio1_size;
+ PnvPHB3 *phb;
+
+ MemoryRegion xscom_nest_regs;
+ MemoryRegion xscom_pci_regs;
+ MemoryRegion xscom_spci_regs;
+};
+
+/*
+ * PHB3 PCIe Root Bus
+ */
+#define TYPE_PNV_PHB3_ROOT_BUS "pnv-phb3-root"
+struct PnvPHB3RootBus {
+ PCIBus parent;
+
+ uint32_t chip_id;
+ uint32_t phb_id;
+};
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB3RootBus, PNV_PHB3_ROOT_BUS)
+
+/*
+ * PHB3 PCIe Host Bridge for PowerNV machines (POWER8)
+ */
+#define TYPE_PNV_PHB3 "pnv-phb3"
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB3, PNV_PHB3)
+
+#define PNV_PHB3_NUM_M64 16
+#define PNV_PHB3_NUM_REGS (0x1000 >> 3)
+#define PNV_PHB3_NUM_LSI 8
+#define PNV_PHB3_NUM_PE 256
+
+#define PCI_MMIO_TOTAL_SIZE (0x1ull << 60)
+
+struct PnvPHB3 {
+ DeviceState parent;
+
+ PnvPHB *phb_base;
+
+ uint32_t chip_id;
+ uint32_t phb_id;
+ char bus_path[8];
+
+ uint64_t regs[PNV_PHB3_NUM_REGS];
+ MemoryRegion mr_regs;
+
+ MemoryRegion mr_m32;
+ MemoryRegion mr_m64[PNV_PHB3_NUM_M64];
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_io;
+
+ uint64_t ioda_LIST[8];
+ uint64_t ioda_LXIVT[8];
+ uint64_t ioda_TVT[512];
+ uint64_t ioda_M64BT[16];
+ uint64_t ioda_MDT[256];
+ uint64_t ioda_PEEV[4];
+
+ uint32_t total_irq;
+ ICSState lsis;
+ qemu_irq *qirqs;
+ Phb3MsiState msis;
+
+ PnvPBCQState pbcq;
+
+ QLIST_HEAD(, PnvPhb3DMASpace) dma_spaces;
+
+ PnvChip *chip;
+};
+
+uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size);
+void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size);
+void pnv_phb3_update_regions(PnvPHB3 *phb);
+void pnv_phb3_remap_irqs(PnvPHB3 *phb);
+void pnv_phb3_bus_init(DeviceState *dev, PnvPHB3 *phb);
+
+#endif /* PCI_HOST_PNV_PHB3_H */
diff --git a/include/hw/pci-host/pnv_phb3_regs.h b/include/hw/pci-host/pnv_phb3_regs.h
new file mode 100644
index 0000000000..38f8ce9d74
--- /dev/null
+++ b/include/hw/pci-host/pnv_phb3_regs.h
@@ -0,0 +1,434 @@
+/*
+ * QEMU PowerPC PowerNV (POWER8) PHB3 model
+ *
+ * Copyright (c) 2013-2020, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PCI_HOST_PNV_PHB3_REGS_H
+#define PCI_HOST_PNV_PHB3_REGS_H
+
+#include "qemu/host-utils.h"
+
+/*
+ * PBCQ XSCOM registers
+ */
+
+#define PBCQ_NEST_IRSN_COMPARE 0x1a
+#define PBCQ_NEST_IRSN_COMP PPC_BITMASK(0, 18)
+#define PBCQ_NEST_IRSN_MASK 0x1b
+#define PBCQ_NEST_LSI_SRC_ID 0x1f
+#define PBCQ_NEST_LSI_SRC PPC_BITMASK(0, 7)
+#define PBCQ_NEST_REGS_COUNT 0x46
+#define PBCQ_NEST_MMIO_BAR0 0x40
+#define PBCQ_NEST_MMIO_BAR1 0x41
+#define PBCQ_NEST_PHB_BAR 0x42
+#define PBCQ_NEST_MMIO_MASK0 0x43
+#define PBCQ_NEST_MMIO_MASK1 0x44
+#define PBCQ_NEST_BAR_EN 0x45
+#define PBCQ_NEST_BAR_EN_MMIO0 PPC_BIT(0)
+#define PBCQ_NEST_BAR_EN_MMIO1 PPC_BIT(1)
+#define PBCQ_NEST_BAR_EN_PHB PPC_BIT(2)
+#define PBCQ_NEST_BAR_EN_IRSN_RX PPC_BIT(3)
+#define PBCQ_NEST_BAR_EN_IRSN_TX PPC_BIT(4)
+
+#define PBCQ_PCI_REGS_COUNT 0x15
+#define PBCQ_PCI_BAR2 0x0b
+
+#define PBCQ_SPCI_REGS_COUNT 0x5
+#define PBCQ_SPCI_ASB_ADDR 0x0
+#define PBCQ_SPCI_ASB_STATUS 0x1
+#define PBCQ_SPCI_ASB_DATA 0x2
+#define PBCQ_SPCI_AIB_CAPP_EN 0x3
+#define PBCQ_SPCI_CAPP_SEC_TMR 0x4
+
+/*
+ * PHB MMIO registers
+ */
+
+/* PHB Fundamental register set A */
+#define PHB_LSI_SOURCE_ID 0x100
+#define PHB_LSI_SRC_ID PPC_BITMASK(5, 12)
+#define PHB_DMA_CHAN_STATUS 0x110
+#define PHB_DMA_CHAN_ANY_ERR PPC_BIT(27)
+#define PHB_DMA_CHAN_ANY_ERR1 PPC_BIT(28)
+#define PHB_DMA_CHAN_ANY_FREEZE PPC_BIT(29)
+#define PHB_CPU_LOADSTORE_STATUS 0x120
+#define PHB_CPU_LS_ANY_ERR PPC_BIT(27)
+#define PHB_CPU_LS_ANY_ERR1 PPC_BIT(28)
+#define PHB_CPU_LS_ANY_FREEZE PPC_BIT(29)
+#define PHB_DMA_MSI_NODE_ID 0x128
+#define PHB_DMAMSI_NID_FIXED PPC_BIT(0)
+#define PHB_DMAMSI_NID PPC_BITMASK(24, 31)
+#define PHB_CONFIG_DATA 0x130
+#define PHB_LOCK0 0x138
+#define PHB_CONFIG_ADDRESS 0x140
+#define PHB_CA_ENABLE PPC_BIT(0)
+#define PHB_CA_BUS PPC_BITMASK(4, 11)
+#define PHB_CA_DEV PPC_BITMASK(12, 16)
+#define PHB_CA_FUNC PPC_BITMASK(17, 19)
+#define PHB_CA_REG PPC_BITMASK(20, 31)
+#define PHB_CA_PE PPC_BITMASK(40, 47)
+#define PHB_LOCK1 0x148
+#define PHB_IVT_BAR 0x150
+#define PHB_IVT_BAR_ENABLE PPC_BIT(0)
+#define PHB_IVT_BASE_ADDRESS_MASK PPC_BITMASK(14, 48)
+#define PHB_IVT_LENGTH_MASK PPC_BITMASK(52, 63)
+#define PHB_RBA_BAR 0x158
+#define PHB_RBA_BAR_ENABLE PPC_BIT(0)
+#define PHB_RBA_BASE_ADDRESS PPC_BITMASK(14, 55)
+#define PHB_PHB3_CONFIG 0x160
+#define PHB_PHB3C_64B_TCE_EN PPC_BIT(2)
+#define PHB_PHB3C_32BIT_MSI_EN PPC_BIT(8)
+#define PHB_PHB3C_64BIT_MSI_EN PPC_BIT(14)
+#define PHB_PHB3C_M32_EN PPC_BIT(16)
+#define PHB_RTT_BAR 0x168
+#define PHB_RTT_BAR_ENABLE PPC_BIT(0)
+#define PHB_RTT_BASE_ADDRESS_MASK PPC_BITMASK(14, 46)
+#define PHB_PELTV_BAR 0x188
+#define PHB_PELTV_BAR_ENABLE PPC_BIT(0)
+#define PHB_PELTV_BASE_ADDRESS PPC_BITMASK(14, 50)
+#define PHB_M32_BASE_ADDR 0x190
+#define PHB_M32_BASE_MASK 0x198
+#define PHB_M32_START_ADDR 0x1a0
+#define PHB_PEST_BAR 0x1a8
+#define PHB_PEST_BAR_ENABLE PPC_BIT(0)
+#define PHB_PEST_BASE_ADDRESS PPC_BITMASK(14, 51)
+#define PHB_M64_UPPER_BITS 0x1f0
+#define PHB_INTREP_TIMER 0x1f8
+#define PHB_DMARD_SYNC 0x200
+#define PHB_DMARD_SYNC_START PPC_BIT(0)
+#define PHB_DMARD_SYNC_COMPLETE PPC_BIT(1)
+#define PHB_RTC_INVALIDATE 0x208
+#define PHB_RTC_INVALIDATE_ALL PPC_BIT(0)
+#define PHB_RTC_INVALIDATE_RID PPC_BITMASK(16, 31)
+#define PHB_TCE_KILL 0x210
+#define PHB_TCE_KILL_ALL PPC_BIT(0)
+#define PHB_TCE_SPEC_CTL 0x218
+#define PHB_IODA_ADDR 0x220
+#define PHB_IODA_AD_AUTOINC PPC_BIT(0)
+#define PHB_IODA_AD_TSEL PPC_BITMASK(11, 15)
+#define PHB_IODA_AD_TADR PPC_BITMASK(55, 63)
+#define PHB_IODA_DATA0 0x228
+#define PHB_FFI_REQUEST 0x238
+#define PHB_FFI_LOCK_CLEAR PPC_BIT(3)
+#define PHB_FFI_REQUEST_ISN PPC_BITMASK(49, 59)
+#define PHB_FFI_LOCK 0x240
+#define PHB_FFI_LOCK_STATE PPC_BIT(0)
+#define PHB_XIVE_UPDATE 0x248 /* Broken in DD1 */
+#define PHB_PHB3_GEN_CAP 0x250
+#define PHB_PHB3_TCE_CAP 0x258
+#define PHB_PHB3_IRQ_CAP 0x260
+#define PHB_PHB3_EEH_CAP 0x268
+#define PHB_IVC_INVALIDATE 0x2a0
+#define PHB_IVC_INVALIDATE_ALL PPC_BIT(0)
+#define PHB_IVC_INVALIDATE_SID PPC_BITMASK(16, 31)
+#define PHB_IVC_UPDATE 0x2a8
+#define PHB_IVC_UPDATE_ENABLE_P PPC_BIT(0)
+#define PHB_IVC_UPDATE_ENABLE_Q PPC_BIT(1)
+#define PHB_IVC_UPDATE_ENABLE_SERVER PPC_BIT(2)
+#define PHB_IVC_UPDATE_ENABLE_PRI PPC_BIT(3)
+#define PHB_IVC_UPDATE_ENABLE_GEN PPC_BIT(4)
+#define PHB_IVC_UPDATE_ENABLE_CON PPC_BIT(5)
+#define PHB_IVC_UPDATE_GEN_MATCH PPC_BITMASK(6, 7)
+#define PHB_IVC_UPDATE_SERVER PPC_BITMASK(8, 23)
+#define PHB_IVC_UPDATE_PRI PPC_BITMASK(24, 31)
+#define PHB_IVC_UPDATE_GEN PPC_BITMASK(32, 33)
+#define PHB_IVC_UPDATE_P PPC_BITMASK(34, 34)
+#define PHB_IVC_UPDATE_Q PPC_BITMASK(35, 35)
+#define PHB_IVC_UPDATE_SID PPC_BITMASK(48, 63)
+#define PHB_PAPR_ERR_INJ_CTL 0x2b0
+#define PHB_PAPR_ERR_INJ_CTL_INB PPC_BIT(0)
+#define PHB_PAPR_ERR_INJ_CTL_OUTB PPC_BIT(1)
+#define PHB_PAPR_ERR_INJ_CTL_STICKY PPC_BIT(2)
+#define PHB_PAPR_ERR_INJ_CTL_CFG PPC_BIT(3)
+#define PHB_PAPR_ERR_INJ_CTL_RD PPC_BIT(4)
+#define PHB_PAPR_ERR_INJ_CTL_WR PPC_BIT(5)
+#define PHB_PAPR_ERR_INJ_CTL_FREEZE PPC_BIT(6)
+#define PHB_PAPR_ERR_INJ_ADDR 0x2b8
+#define PHB_PAPR_ERR_INJ_ADDR_MMIO PPC_BITMASK(16, 63)
+#define PHB_PAPR_ERR_INJ_MASK 0x2c0
+#define PHB_PAPR_ERR_INJ_MASK_CFG PPC_BITMASK(4, 11)
+#define PHB_PAPR_ERR_INJ_MASK_MMIO PPC_BITMASK(16, 63)
+#define PHB_ETU_ERR_SUMMARY 0x2c8
+
+/* UTL registers */
+#define UTL_SYS_BUS_CONTROL 0x400
+#define UTL_STATUS 0x408
+#define UTL_SYS_BUS_AGENT_STATUS 0x410
+#define UTL_SYS_BUS_AGENT_ERR_SEVERITY 0x418
+#define UTL_SYS_BUS_AGENT_IRQ_EN 0x420
+#define UTL_SYS_BUS_BURST_SZ_CONF 0x440
+#define UTL_REVISION_ID 0x448
+#define UTL_BCLK_DOMAIN_DBG1 0x460
+#define UTL_BCLK_DOMAIN_DBG2 0x468
+#define UTL_BCLK_DOMAIN_DBG3 0x470
+#define UTL_BCLK_DOMAIN_DBG4 0x478
+#define UTL_BCLK_DOMAIN_DBG5 0x480
+#define UTL_BCLK_DOMAIN_DBG6 0x488
+#define UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
+#define UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
+#define UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
+#define UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
+#define UTL_OUT_NP_BUF_ALLOC 0x500
+#define UTL_IN_NP_BUF_ALLOC 0x510
+#define UTL_PCIE_TAGS_ALLOC 0x520
+#define UTL_GBIF_READ_TAGS_ALLOC 0x530
+#define UTL_PCIE_PORT_CONTROL 0x540
+#define UTL_PCIE_PORT_STATUS 0x548
+#define UTL_PCIE_PORT_ERROR_SEV 0x550
+#define UTL_PCIE_PORT_IRQ_EN 0x558
+#define UTL_RC_STATUS 0x560
+#define UTL_RC_ERR_SEVERITY 0x568
+#define UTL_RC_IRQ_EN 0x570
+#define UTL_EP_STATUS 0x578
+#define UTL_EP_ERR_SEVERITY 0x580
+#define UTL_EP_ERR_IRQ_EN 0x588
+#define UTL_PCI_PM_CTRL1 0x590
+#define UTL_PCI_PM_CTRL2 0x598
+#define UTL_GP_CTL1 0x5a0
+#define UTL_GP_CTL2 0x5a8
+#define UTL_PCLK_DOMAIN_DBG1 0x5b0
+#define UTL_PCLK_DOMAIN_DBG2 0x5b8
+#define UTL_PCLK_DOMAIN_DBG3 0x5c0
+#define UTL_PCLK_DOMAIN_DBG4 0x5c8
+
+/* PCI-E Stack registers */
+#define PHB_PCIE_SYSTEM_CONFIG 0x600
+#define PHB_PCIE_BUS_NUMBER 0x608
+#define PHB_PCIE_SYSTEM_TEST 0x618
+#define PHB_PCIE_LINK_MANAGEMENT 0x630
+#define PHB_PCIE_LM_LINK_ACTIVE PPC_BIT(8)
+#define PHB_PCIE_DLP_TRAIN_CTL 0x640
+#define PHB_PCIE_DLP_TCTX_DISABLE PPC_BIT(1)
+#define PHB_PCIE_DLP_TCRX_DISABLED PPC_BIT(16)
+#define PHB_PCIE_DLP_INBAND_PRESENCE PPC_BIT(19)
+#define PHB_PCIE_DLP_TC_DL_LINKUP PPC_BIT(21)
+#define PHB_PCIE_DLP_TC_DL_PGRESET PPC_BIT(22)
+#define PHB_PCIE_DLP_TC_DL_LINKACT PPC_BIT(23)
+#define PHB_PCIE_SLOP_LOOPBACK_STATUS 0x648
+#define PHB_PCIE_SYS_LINK_INIT 0x668
+#define PHB_PCIE_UTL_CONFIG 0x670
+#define PHB_PCIE_DLP_CONTROL 0x678
+#define PHB_PCIE_UTL_ERRLOG1 0x680
+#define PHB_PCIE_UTL_ERRLOG2 0x688
+#define PHB_PCIE_UTL_ERRLOG3 0x690
+#define PHB_PCIE_UTL_ERRLOG4 0x698
+#define PHB_PCIE_DLP_ERRLOG1 0x6a0
+#define PHB_PCIE_DLP_ERRLOG2 0x6a8
+#define PHB_PCIE_DLP_ERR_STATUS 0x6b0
+#define PHB_PCIE_DLP_ERR_COUNTERS 0x6b8
+#define PHB_PCIE_UTL_ERR_INJECT 0x6c0
+#define PHB_PCIE_TLDLP_ERR_INJECT 0x6c8
+#define PHB_PCIE_LANE_EQ_CNTL0 0x6d0
+#define PHB_PCIE_LANE_EQ_CNTL1 0x6d8
+#define PHB_PCIE_LANE_EQ_CNTL2 0x6e0
+#define PHB_PCIE_LANE_EQ_CNTL3 0x6e8
+#define PHB_PCIE_STRAPPING 0x700
+
+/* Fundamental register set B */
+#define PHB_VERSION 0x800
+#define PHB_RESET 0x808
+#define PHB_CONTROL 0x810
+#define PHB_CTRL_IVE_128_BYTES PPC_BIT(24)
+#define PHB_AIB_RX_CRED_INIT_TIMER 0x818
+#define PHB_AIB_RX_CMD_CRED 0x820
+#define PHB_AIB_RX_DATA_CRED 0x828
+#define PHB_AIB_TX_CMD_CRED 0x830
+#define PHB_AIB_TX_DATA_CRED 0x838
+#define PHB_AIB_TX_CHAN_MAPPING 0x840
+#define PHB_AIB_TAG_ENABLE 0x858
+#define PHB_AIB_FENCE_CTRL 0x860
+#define PHB_TCE_TAG_ENABLE 0x868
+#define PHB_TCE_WATERMARK 0x870
+#define PHB_TIMEOUT_CTRL1 0x878
+#define PHB_TIMEOUT_CTRL2 0x880
+#define PHB_Q_DMA_R 0x888
+#define PHB_Q_DMA_R_QUIESCE_DMA PPC_BIT(0)
+#define PHB_Q_DMA_R_AUTORESET PPC_BIT(1)
+#define PHB_Q_DMA_R_DMA_RESP_STATUS PPC_BIT(4)
+#define PHB_Q_DMA_R_MMIO_RESP_STATUS PPC_BIT(5)
+#define PHB_Q_DMA_R_TCE_RESP_STATUS PPC_BIT(6)
+#define PHB_AIB_TAG_STATUS 0x900
+#define PHB_TCE_TAG_STATUS 0x908
+
+/* FIR & Error registers */
+#define PHB_LEM_FIR_ACCUM 0xc00
+#define PHB_LEM_FIR_AND_MASK 0xc08
+#define PHB_LEM_FIR_OR_MASK 0xc10
+#define PHB_LEM_ERROR_MASK 0xc18
+#define PHB_LEM_ERROR_AND_MASK 0xc20
+#define PHB_LEM_ERROR_OR_MASK 0xc28
+#define PHB_LEM_ACTION0 0xc30
+#define PHB_LEM_ACTION1 0xc38
+#define PHB_LEM_WOF 0xc40
+#define PHB_ERR_STATUS 0xc80
+#define PHB_ERR1_STATUS 0xc88
+#define PHB_ERR_INJECT 0xc90
+#define PHB_ERR_LEM_ENABLE 0xc98
+#define PHB_ERR_IRQ_ENABLE 0xca0
+#define PHB_ERR_FREEZE_ENABLE 0xca8
+#define PHB_ERR_AIB_FENCE_ENABLE 0xcb0
+#define PHB_ERR_LOG_0 0xcc0
+#define PHB_ERR_LOG_1 0xcc8
+#define PHB_ERR_STATUS_MASK 0xcd0
+#define PHB_ERR1_STATUS_MASK 0xcd8
+
+#define PHB_OUT_ERR_STATUS 0xd00
+#define PHB_OUT_ERR1_STATUS 0xd08
+#define PHB_OUT_ERR_INJECT 0xd10
+#define PHB_OUT_ERR_LEM_ENABLE 0xd18
+#define PHB_OUT_ERR_IRQ_ENABLE 0xd20
+#define PHB_OUT_ERR_FREEZE_ENABLE 0xd28
+#define PHB_OUT_ERR_AIB_FENCE_ENABLE 0xd30
+#define PHB_OUT_ERR_LOG_0 0xd40
+#define PHB_OUT_ERR_LOG_1 0xd48
+#define PHB_OUT_ERR_STATUS_MASK 0xd50
+#define PHB_OUT_ERR1_STATUS_MASK 0xd58
+
+#define PHB_INA_ERR_STATUS 0xd80
+#define PHB_INA_ERR1_STATUS 0xd88
+#define PHB_INA_ERR_INJECT 0xd90
+#define PHB_INA_ERR_LEM_ENABLE 0xd98
+#define PHB_INA_ERR_IRQ_ENABLE 0xda0
+#define PHB_INA_ERR_FREEZE_ENABLE 0xda8
+#define PHB_INA_ERR_AIB_FENCE_ENABLE 0xdb0
+#define PHB_INA_ERR_LOG_0 0xdc0
+#define PHB_INA_ERR_LOG_1 0xdc8
+#define PHB_INA_ERR_STATUS_MASK 0xdd0
+#define PHB_INA_ERR1_STATUS_MASK 0xdd8
+
+#define PHB_INB_ERR_STATUS 0xe00
+#define PHB_INB_ERR1_STATUS 0xe08
+#define PHB_INB_ERR_INJECT 0xe10
+#define PHB_INB_ERR_LEM_ENABLE 0xe18
+#define PHB_INB_ERR_IRQ_ENABLE 0xe20
+#define PHB_INB_ERR_FREEZE_ENABLE 0xe28
+#define PHB_INB_ERR_AIB_FENCE_ENABLE 0xe30
+#define PHB_INB_ERR_LOG_0 0xe40
+#define PHB_INB_ERR_LOG_1 0xe48
+#define PHB_INB_ERR_STATUS_MASK 0xe50
+#define PHB_INB_ERR1_STATUS_MASK 0xe58
+
+/* Performance monitor & Debug registers */
+#define PHB_TRACE_CONTROL 0xf80
+#define PHB_PERFMON_CONFIG 0xf88
+#define PHB_PERFMON_CTR0 0xf90
+#define PHB_PERFMON_CTR1 0xf98
+#define PHB_PERFMON_CTR2 0xfa0
+#define PHB_PERFMON_CTR3 0xfa8
+#define PHB_HOTPLUG_OVERRIDE 0xfb0
+#define PHB_HPOVR_FORCE_RESAMPLE PPC_BIT(9)
+#define PHB_HPOVR_PRESENCE_A PPC_BIT(10)
+#define PHB_HPOVR_PRESENCE_B PPC_BIT(11)
+#define PHB_HPOVR_LINK_ACTIVE PPC_BIT(12)
+#define PHB_HPOVR_LINK_BIFURCATED PPC_BIT(13)
+#define PHB_HPOVR_LINK_LANE_SWAPPED PPC_BIT(14)
+
+/*
+ * IODA2 on-chip tables
+ */
+
+#define IODA2_TBL_LIST 1
+#define IODA2_TBL_LXIVT 2
+#define IODA2_TBL_IVC_CAM 3
+#define IODA2_TBL_RBA 4
+#define IODA2_TBL_RCAM 5
+#define IODA2_TBL_MRT 6
+#define IODA2_TBL_PESTA 7
+#define IODA2_TBL_PESTB 8
+#define IODA2_TBL_TVT 9
+#define IODA2_TBL_TCAM 10
+#define IODA2_TBL_TDR 11
+#define IODA2_TBL_M64BT 16
+#define IODA2_TBL_M32DT 17
+#define IODA2_TBL_PEEV 20
+
+/* LXIVT */
+#define IODA2_LXIVT_SERVER PPC_BITMASK(8, 23)
+#define IODA2_LXIVT_PRIORITY PPC_BITMASK(24, 31)
+#define IODA2_LXIVT_NODE_ID PPC_BITMASK(56, 63)
+
+/* IVT */
+#define IODA2_IVT_SERVER PPC_BITMASK(0, 23)
+#define IODA2_IVT_PRIORITY PPC_BITMASK(24, 31)
+#define IODA2_IVT_GEN PPC_BITMASK(37, 38)
+#define IODA2_IVT_P PPC_BITMASK(39, 39)
+#define IODA2_IVT_Q PPC_BITMASK(47, 47)
+#define IODA2_IVT_PE PPC_BITMASK(48, 63)
+
+/* TVT */
+#define IODA2_TVT_TABLE_ADDR PPC_BITMASK(0, 47)
+#define IODA2_TVT_NUM_LEVELS PPC_BITMASK(48, 50)
+#define IODA2_TVE_1_LEVEL 0
+#define IODA2_TVE_2_LEVELS 1
+#define IODA2_TVE_3_LEVELS 2
+#define IODA2_TVE_4_LEVELS 3
+#define IODA2_TVE_5_LEVELS 4
+#define IODA2_TVT_TCE_TABLE_SIZE PPC_BITMASK(51, 55)
+#define IODA2_TVT_IO_PSIZE PPC_BITMASK(59, 63)
+
+/* PESTA */
+#define IODA2_PESTA_MMIO_FROZEN PPC_BIT(0)
+
+/* PESTB */
+#define IODA2_PESTB_DMA_STOPPED PPC_BIT(0)
+
+/* M32DT */
+#define IODA2_M32DT_PE PPC_BITMASK(8, 15)
+
+/* M64BT */
+#define IODA2_M64BT_ENABLE PPC_BIT(0)
+#define IODA2_M64BT_SINGLE_PE PPC_BIT(1)
+#define IODA2_M64BT_BASE PPC_BITMASK(2, 31)
+#define IODA2_M64BT_MASK PPC_BITMASK(34, 63)
+#define IODA2_M64BT_SINGLE_BASE PPC_BITMASK(2, 26)
+#define IODA2_M64BT_PE_HI PPC_BITMASK(27, 31)
+#define IODA2_M64BT_SINGLE_MASK PPC_BITMASK(34, 58)
+#define IODA2_M64BT_PE_LOW PPC_BITMASK(59, 63)
+
+/*
+ * IODA2 in-memory tables
+ */
+
+/*
+ * PEST
+ *
+ * 2 x 8-byte entries, PEST0 and PEST1
+ */
+
+#define IODA2_PEST0_MMIO_CAUSE PPC_BIT(2)
+#define IODA2_PEST0_CFG_READ PPC_BIT(3)
+#define IODA2_PEST0_CFG_WRITE PPC_BIT(4)
+#define IODA2_PEST0_TTYPE PPC_BITMASK(5, 7)
+#define PEST_TTYPE_DMA_WRITE 0
+#define PEST_TTYPE_MSI 1
+#define PEST_TTYPE_DMA_READ 2
+#define PEST_TTYPE_DMA_READ_RESP 3
+#define PEST_TTYPE_MMIO_LOAD 4
+#define PEST_TTYPE_MMIO_STORE 5
+#define PEST_TTYPE_OTHER 7
+#define IODA2_PEST0_CA_RETURN PPC_BIT(8)
+#define IODA2_PEST0_UTL_RTOS_TIMEOUT PPC_BIT(8) /* Same bit as CA return */
+#define IODA2_PEST0_UR_RETURN PPC_BIT(9)
+#define IODA2_PEST0_UTL_NONFATAL PPC_BIT(10)
+#define IODA2_PEST0_UTL_FATAL PPC_BIT(11)
+#define IODA2_PEST0_PARITY_UE PPC_BIT(13)
+#define IODA2_PEST0_UTL_CORRECTABLE PPC_BIT(14)
+#define IODA2_PEST0_UTL_INTERRUPT PPC_BIT(15)
+#define IODA2_PEST0_MMIO_XLATE PPC_BIT(16)
+#define IODA2_PEST0_IODA2_ERROR PPC_BIT(16) /* Same bit as MMIO xlate */
+#define IODA2_PEST0_TCE_PAGE_FAULT PPC_BIT(18)
+#define IODA2_PEST0_TCE_ACCESS_FAULT PPC_BIT(19)
+#define IODA2_PEST0_DMA_RESP_TIMEOUT PPC_BIT(20)
+#define IODA2_PEST0_AIB_SIZE_INVALID PPC_BIT(21)
+#define IODA2_PEST0_LEM_BIT PPC_BITMASK(26, 31)
+#define IODA2_PEST0_RID PPC_BITMASK(32, 47)
+#define IODA2_PEST0_MSI_DATA PPC_BITMASK(48, 63)
+
+#define IODA2_PEST1_FAIL_ADDR PPC_BITMASK(3, 63)
+
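The PEST0/PEST1 layouts above, like the rest of this header, are expressed with PPC_BIT()/PPC_BITMASK(), which follow the IBM MSB0 numbering (bit 0 is the most significant bit of the 64-bit doubleword). A minimal sketch of pulling a field such as the requester ID out of a raw PEST0 entry under that convention; the helper below is purely illustrative and not a QEMU API:

#include <stdint.h>

/* Illustrative only: extract a contiguous MSB0-style field (mask != 0). */
static inline uint64_t pest_get_field(uint64_t mask, uint64_t reg)
{
    return (reg & mask) >> __builtin_ctzll(mask);
}

/*
 * Example: with pest0 holding a raw PEST0 entry,
 *   pest_get_field(IODA2_PEST0_RID, pest0)   yields the 16-bit requester ID,
 *   pest_get_field(IODA2_PEST0_TTYPE, pest0) yields the transaction type.
 */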
+
+#endif /* PCI_HOST_PNV_PHB3_REGS_H */
diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h
new file mode 100644
index 0000000000..3212e68160
--- /dev/null
+++ b/include/hw/pci-host/pnv_phb4.h
@@ -0,0 +1,226 @@
+/*
+ * QEMU PowerPC PowerNV (POWER9) PHB4 model
+ *
+ * Copyright (c) 2018-2020, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PCI_HOST_PNV_PHB4_H
+#define PCI_HOST_PNV_PHB4_H
+
+#include "hw/pci-host/pnv_phb.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/xive.h"
+#include "qom/object.h"
+
+typedef struct PnvPhb4PecStack PnvPhb4PecStack;
+typedef struct PnvPHB4 PnvPHB4;
+
+/*
+ * We have one such address space wrapper per possible device under
+ * the PHB since they need to be assigned statically at qemu device
+ * creation time. The relationship to a PE is done later
+ * dynamically. This means we can potentially create a lot of these
+ * guys. Q35 stores them as some kind of radix tree but we never
+ * really need to do fast lookups so instead we simply keep a QLIST of
+ * them for now, we can add the radix if needed later on.
+ *
+ * We do cache the PE number to speed things up a bit though.
+ */
+typedef struct PnvPhb4DMASpace {
+ PCIBus *bus;
+ uint8_t devfn;
+ int pe_num; /* Cached PE number */
+#define PHB_INVALID_PE (-1)
+ PnvPHB4 *phb;
+ AddressSpace dma_as;
+ IOMMUMemoryRegion dma_mr;
+ MemoryRegion msi32_mr;
+ MemoryRegion msi64_mr;
+ QLIST_ENTRY(PnvPhb4DMASpace) list;
+} PnvPhb4DMASpace;
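A rough sketch of how such a wrapper might be looked up on the dma_spaces list described above, keyed on (bus, devfn); the function name and the decision to leave allocation to the caller are assumptions made for illustration, not the PHB4 model's actual code:

/* Illustrative sketch only, not the in-tree implementation. */
static PnvPhb4DMASpace *phb4_dma_space_find(PnvPHB4 *phb, PCIBus *bus,
                                            uint8_t devfn)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        if (ds->bus == bus && ds->devfn == devfn) {
            return ds;      /* pe_num may still be PHB_INVALID_PE here */
        }
    }
    return NULL;            /* caller would allocate one and insert it */
}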
+
+/*
+ * PHB4 PCIe Root Bus
+ */
+#define TYPE_PNV_PHB4_ROOT_BUS "pnv-phb4-root"
+struct PnvPHB4RootBus {
+ PCIBus parent;
+
+ uint32_t chip_id;
+ uint32_t phb_id;
+};
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB4RootBus, PNV_PHB4_ROOT_BUS)
+
+/*
+ * PHB4 PCIe Host Bridge for PowerNV machines (POWER9)
+ */
+#define TYPE_PNV_PHB4 "pnv-phb4"
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB4, PNV_PHB4)
+
+#define PNV_PHB4_MAX_LSIs 8
+#define PNV_PHB4_MAX_INTs 4096
+#define PNV_PHB4_MAX_MIST (PNV_PHB4_MAX_INTs >> 2)
+#define PNV_PHB4_MAX_MMIO_WINDOWS 32
+#define PNV_PHB4_MIN_MMIO_WINDOWS 16
+#define PNV_PHB4_NUM_REGS (0x3000 >> 3)
+#define PNV_PHB4_MAX_PEs 512
+#define PNV_PHB4_MAX_TVEs (PNV_PHB4_MAX_PEs * 2)
+#define PNV_PHB4_MAX_PEEVs (PNV_PHB4_MAX_PEs / 64)
+#define PNV_PHB4_MAX_MBEs (PNV_PHB4_MAX_MMIO_WINDOWS * 2)
+
+#define PNV_PHB4_VERSION 0x000000a400000002ull
+#define PNV_PHB4_DEVICE_ID 0x04c1
+
+#define PCI_MMIO_TOTAL_SIZE (0x1ull << 60)
+
+struct PnvPHB4 {
+ DeviceState parent;
+
+ PnvPHB *phb_base;
+
+ uint32_t chip_id;
+ uint32_t phb_id;
+
+ /* The owner PEC */
+ PnvPhb4PecState *pec;
+
+ char bus_path[8];
+
+ /* Main register images */
+ uint64_t regs[PNV_PHB4_NUM_REGS];
+ MemoryRegion mr_regs;
+
+ /* Extra SCOM-only register */
+ uint64_t scom_hv_ind_addr_reg;
+
+ /*
+     * Geometry of the PHB. There are two types, small and big PHBs; a
+     * number of resources (number of PEs, windows, etc.) are doubled
+     * for a big PHB.
+ */
+ bool big_phb;
+
+ /* Memory regions for MMIO space */
+ MemoryRegion mr_mmio[PNV_PHB4_MAX_MMIO_WINDOWS];
+
+ /* PCI side space */
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_io;
+
+ /* PCI registers (excluding pass-through) */
+#define PHB4_PEC_PCI_STK_REGS_COUNT 0xf
+ uint64_t pci_regs[PHB4_PEC_PCI_STK_REGS_COUNT];
+ MemoryRegion pci_regs_mr;
+
+ /* Nest registers */
+#define PHB4_PEC_NEST_STK_REGS_COUNT 0x18
+ uint64_t nest_regs[PHB4_PEC_NEST_STK_REGS_COUNT];
+ MemoryRegion nest_regs_mr;
+
+ /* PHB pass-through XSCOM */
+ MemoryRegion phb_regs_mr;
+
+ /* Memory windows from PowerBus to PHB */
+ MemoryRegion phbbar;
+ MemoryRegion intbar;
+ MemoryRegion mmbar0;
+ MemoryRegion mmbar1;
+ uint64_t mmio0_base;
+ uint64_t mmio0_size;
+ uint64_t mmio1_base;
+ uint64_t mmio1_size;
+
+ /* On-chip IODA tables */
+ uint64_t ioda_LIST[PNV_PHB4_MAX_LSIs];
+ uint64_t ioda_MIST[PNV_PHB4_MAX_MIST];
+ uint64_t ioda_TVT[PNV_PHB4_MAX_TVEs];
+ uint64_t ioda_MBT[PNV_PHB4_MAX_MBEs];
+ uint64_t ioda_MDT[PNV_PHB4_MAX_PEs];
+ uint64_t ioda_PEEV[PNV_PHB4_MAX_PEEVs];
+
+ /*
+     * The internal PESTA/B is 2 bits per PE split into two tables; we
+ * store them in a single array here to avoid wasting space.
+ */
+ uint8_t ioda_PEST_AB[PNV_PHB4_MAX_PEs];
+
+ /* P9 Interrupt generation */
+ XiveSource xsrc;
+ qemu_irq *qirqs;
+
+ QLIST_HEAD(, PnvPhb4DMASpace) dma_spaces;
+};
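The comment above only states that the per-PE PESTA/PESTB bits share a single byte array; the exact encoding is left to the model. One possible packing, shown purely as an assumption for illustration:

/* Hypothetical packing: bit 0 holds PESTA (MMIO frozen), bit 1 PESTB (DMA stopped). */
static inline void phb4_pest_pack(PnvPHB4 *phb, unsigned pe,
                                  bool mmio_frozen, bool dma_stopped)
{
    phb->ioda_PEST_AB[pe] = (mmio_frozen ? 1 : 0) | (dma_stopped ? 2 : 0);
}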
+
+void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon);
+int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index);
+PnvPhb4PecState *pnv_pec_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp);
+void pnv_phb4_bus_init(DeviceState *dev, PnvPHB4 *phb);
+extern const MemoryRegionOps pnv_phb4_xscom_ops;
+
+/*
+ * PHB4 PEC (PCI Express Controller)
+ */
+#define TYPE_PNV_PHB4_PEC "pnv-phb4-pec"
+OBJECT_DECLARE_TYPE(PnvPhb4PecState, PnvPhb4PecClass, PNV_PHB4_PEC)
+
+struct PnvPhb4PecState {
+ DeviceState parent;
+
+ /* PEC number in chip */
+ uint32_t index;
+ uint32_t chip_id;
+
+    /* Nest registers, excluding per-stack */
+#define PHB4_PEC_NEST_REGS_COUNT 0xf
+ uint64_t nest_regs[PHB4_PEC_NEST_REGS_COUNT];
+ MemoryRegion nest_regs_mr;
+
+ /* PCI registers, excluding per-stack */
+#define PHB4_PEC_PCI_REGS_COUNT 0x3
+ uint64_t pci_regs[PHB4_PEC_PCI_REGS_COUNT];
+ MemoryRegion pci_regs_mr;
+
+ /* PHBs */
+ uint32_t num_phbs;
+#define MAX_PHBS_PER_PEC 3
+ PnvPHB *phbs[MAX_PHBS_PER_PEC];
+
+ PnvChip *chip;
+};
+
+
+struct PnvPhb4PecClass {
+ DeviceClass parent_class;
+
+ uint32_t (*xscom_nest_base)(PnvPhb4PecState *pec);
+ uint32_t xscom_nest_size;
+ uint32_t (*xscom_pci_base)(PnvPhb4PecState *pec);
+ uint32_t xscom_pci_size;
+ const char *compat;
+ int compat_size;
+ const char *stk_compat;
+ int stk_compat_size;
+ uint64_t version;
+ const char *phb_type;
+ const uint32_t *num_phbs;
+};
+
+/*
+ * POWER10 definitions
+ */
+
+#define TYPE_PNV_PHB5 "pnv-phb5"
+#define PNV_PHB5(obj) \
+    OBJECT_CHECK(PnvPHB4, (obj), TYPE_PNV_PHB5)
+
+#define PNV_PHB5_VERSION 0x000000a500000002ull
+
+#define TYPE_PNV_PHB5_PEC "pnv-phb5-pec"
+#define PNV_PHB5_PEC(obj) \
+ OBJECT_CHECK(PnvPhb4PecState, (obj), TYPE_PNV_PHB5_PEC)
+
+#endif /* PCI_HOST_PNV_PHB4_H */
diff --git a/include/hw/pci-host/pnv_phb4_regs.h b/include/hw/pci-host/pnv_phb4_regs.h
new file mode 100644
index 0000000000..bea96f4d91
--- /dev/null
+++ b/include/hw/pci-host/pnv_phb4_regs.h
@@ -0,0 +1,558 @@
+/*
+ * QEMU PowerPC PowerNV (POWER9) PHB4 model
+ *
+ * Copyright (c) 2013-2020, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PCI_HOST_PNV_PHB4_REGS_H
+#define PCI_HOST_PNV_PHB4_REGS_H
+
+/*
+ * PEC XSCOM registers
+ *
+ * There are 3 PECs in P9. Each PEC can have several PHBs. Each PEC has some
+ * "global" registers and some "per-stack" (per-PHB) registers. Those are
+ * organized in two XSCOM ranges, the "Nest" range and the "PCI" range; each
+ * range contains both some "PEC" registers and some "per-stack" registers.
+ *
+ * Finally the PCI range also contains an additional range per stack that
+ * passes through to some of the PHB own registers.
+ *
+ * PEC0 can contain 1 PHB (PHB0)
+ * PEC1 can contain 2 PHBs (PHB1 and PHB2)
+ * PEC2 can contain 3 PHBs (PHB3, PHB4 and PHB5)
+ */
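Given that fixed layout, the global PHB number is just the first PHB of the PEC plus the stack index within it. The pnv_phb4_pec_get_phb_id() declared earlier presumably encodes an equivalent mapping; the table-based version below is only an illustration:

/* Illustrative P9 mapping: PEC0 -> PHB0, PEC1 -> PHB1..2, PEC2 -> PHB3..5. */
static int pec_stack_to_phb_id(int pec_index, int stack_index)
{
    static const int first_phb[3] = { 0, 1, 3 };

    return first_phb[pec_index] + stack_index;
}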
+
+/*
+ * This is the "stack" offset: the offset from a given range base to the
+ * first "per-stack" registers, and also the stride between stacks. Thus,
+ * for PEC2, the global registers are at offset 0, the PHB3 registers at
+ * offset 0x40, the PHB4 registers at offset 0x80, etc.
+ *
+ * It is *also* the offset to the pass-through SCOM region, but in this case
+ * it is 0 based, i.e. PHB3 is at 0x100, PHB4 is at 0x140, etc.
+ */
+#define PEC_STACK_OFFSET 0x40
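Put differently, within a Nest or PCI range the per-stack registers of stack N start at (N + 1) * PEC_STACK_OFFSET, while the pass-through region of stack N starts at 0x100 + N * PEC_STACK_OFFSET. A small sketch, assuming that reading of the comment (the 0x100 base is taken from its example):

/* Sketch only: offsets implied by the comment above. */
static inline uint32_t pec_stack_reg_base(int stack)
{
    return (stack + 1) * PEC_STACK_OFFSET;   /* 0x40, 0x80, 0xc0, ... */
}

static inline uint32_t pec_stack_passthru_base(int stack)
{
    return 0x100 + stack * PEC_STACK_OFFSET; /* 0x100, 0x140, 0x180, ... */
}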
+
+/* XSCOM Nest global registers */
+#define PEC_NEST_PBCQ_HW_CONFIG 0x00
+#define PEC_NEST_DROP_PRIO_CTRL 0x01
+#define PEC_NEST_PBCQ_ERR_INJECT 0x02
+#define PEC_NEST_PCI_NEST_CLK_TRACE_CTL 0x03
+#define PEC_NEST_PBCQ_PMON_CTRL 0x04
+#define PEC_NEST_PBCQ_PBUS_ADDR_EXT 0x05
+#define PEC_NEST_PBCQ_PRED_VEC_TIMEOUT 0x06
+#define PEC_NEST_CAPP_CTRL 0x07
+#define PEC_NEST_PBCQ_READ_STK_OVR 0x08
+#define PEC_NEST_PBCQ_WRITE_STK_OVR 0x09
+#define PEC_NEST_PBCQ_STORE_STK_OVR 0x0a
+#define PEC_NEST_PBCQ_RETRY_BKOFF_CTRL 0x0b
+
+/* XSCOM Nest per-stack registers */
+#define PEC_NEST_STK_PCI_NEST_FIR 0x00
+#define PEC_NEST_STK_PCI_NEST_FIR_CLR 0x01
+#define PEC_NEST_STK_PCI_NEST_FIR_SET 0x02
+#define PEC_NEST_STK_PCI_NEST_FIR_MSK 0x03
+#define PEC_NEST_STK_PCI_NEST_FIR_MSKC 0x04
+#define PEC_NEST_STK_PCI_NEST_FIR_MSKS 0x05
+#define PEC_NEST_STK_PCI_NEST_FIR_ACT0 0x06
+#define PEC_NEST_STK_PCI_NEST_FIR_ACT1 0x07
+#define PEC_NEST_STK_PCI_NEST_FIR_WOF 0x08
+#define PEC_NEST_STK_ERR_REPORT_0 0x0a
+#define PEC_NEST_STK_ERR_REPORT_1 0x0b
+#define PEC_NEST_STK_PBCQ_GNRL_STATUS 0x0c
+#define PEC_NEST_STK_PBCQ_MODE 0x0d
+#define PEC_NEST_STK_MMIO_BAR0 0x0e
+#define PEC_NEST_STK_MMIO_BAR0_MASK 0x0f
+#define PEC_NEST_STK_MMIO_BAR1 0x10
+#define PEC_NEST_STK_MMIO_BAR1_MASK 0x11
+#define PEC_NEST_STK_PHB_REGS_BAR 0x12
+#define PEC_NEST_STK_INT_BAR 0x13
+#define PEC_NEST_STK_BAR_EN 0x14
+#define PEC_NEST_STK_BAR_EN_MMIO0 PPC_BIT(0)
+#define PEC_NEST_STK_BAR_EN_MMIO1 PPC_BIT(1)
+#define PEC_NEST_STK_BAR_EN_PHB PPC_BIT(2)
+#define PEC_NEST_STK_BAR_EN_INT PPC_BIT(3)
+#define PEC_NEST_STK_DATA_FRZ_TYPE 0x15
+#define PEC_NEST_STK_PBCQ_SPARSE_PAGE 0x16 /* P10 */
+#define PEC_NEST_STK_PBCQ_CACHE_INJ 0x17 /* P10 */
+
+/* XSCOM PCI global registers */
+#define PEC_PCI_PBAIB_HW_CONFIG 0x00
+#define PEC_PCI_PBAIB_HW_OVR 0x01
+#define PEC_PCI_PBAIB_READ_STK_OVR 0x02
+
+/* XSCOM PCI per-stack registers */
+#define PEC_PCI_STK_PCI_FIR 0x00
+#define PEC_PCI_STK_PCI_FIR_CLR 0x01
+#define PEC_PCI_STK_PCI_FIR_SET 0x02
+#define PEC_PCI_STK_PCI_FIR_MSK 0x03
+#define PEC_PCI_STK_PCI_FIR_MSKC 0x04
+#define PEC_PCI_STK_PCI_FIR_MSKS 0x05
+#define PEC_PCI_STK_PCI_FIR_ACT0 0x06
+#define PEC_PCI_STK_PCI_FIR_ACT1 0x07
+#define PEC_PCI_STK_PCI_FIR_WOF 0x08
+#define PEC_PCI_STK_ETU_RESET 0x0a
+#define PEC_PCI_STK_PBAIB_ERR_REPORT 0x0b
+#define PEC_PCI_STK_PBAIB_TX_CMD_CRED 0x0d
+#define PEC_PCI_STK_PBAIB_TX_DAT_CRED 0x0e
+
+/*
+ * PHB "SCOM" registers. These are accessed via the above window
+ * and provide a backdoor into the PHB when the AIB bus is not
+ * functional. Some of them directly map some of the PHB MMIO
+ * registers; others are specific and allow indirect access to a
+ * wider range of PHB registers.
+ */
+#define PHB_SCOM_HV_IND_ADDR 0x00
+#define PHB_SCOM_HV_IND_ADDR_VALID PPC_BIT(0)
+#define PHB_SCOM_HV_IND_ADDR_4B PPC_BIT(1)
+#define PHB_SCOM_HV_IND_ADDR_AUTOINC PPC_BIT(2)
+#define PHB_SCOM_HV_IND_ADDR_ADDR PPC_BITMASK(51, 63)
+#define PHB_SCOM_HV_IND_DATA 0x01
+#define PHB_SCOM_ETU_LEM_FIR 0x08
+#define PHB_SCOM_ETU_LEM_FIR_AND 0x09
+#define PHB_SCOM_ETU_LEM_FIR_OR 0x0a
+#define PHB_SCOM_ETU_LEM_FIR_MSK 0x0b
+#define PHB_SCOM_ETU_LEM_ERR_MSK_AND 0x0c
+#define PHB_SCOM_ETU_LEM_ERR_MSK_OR 0x0d
+#define PHB_SCOM_ETU_LEM_ACT0 0x0e
+#define PHB_SCOM_ETU_LEM_ACT1 0x0f
+#define PHB_SCOM_ETU_LEM_WOF 0x10
+#define PHB_SCOM_ETU_PMON_CONFIG 0x17
+#define PHB_SCOM_ETU_PMON_CTR0 0x18
+#define PHB_SCOM_ETU_PMON_CTR1 0x19
+#define PHB_SCOM_ETU_PMON_CTR2 0x1a
+#define PHB_SCOM_ETU_PMON_CTR3 0x1b
+
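The HV_IND_ADDR/HV_IND_DATA pair above is a conventional address/data backdoor: latch the target PHB register offset (with the VALID bit, and optionally the 4-byte and auto-increment modes), then access the data register. A hedged sketch of that flow; xscom_read()/xscom_write() are placeholders for whatever SCOM plumbing the caller uses, not something defined by this header:

/* Placeholder SCOM accessors, assumed to exist in the caller's environment. */
uint64_t xscom_read(uint64_t addr);
void xscom_write(uint64_t addr, uint64_t val);

static uint64_t phb_backdoor_read(uint64_t scom_base, uint32_t phb_reg)
{
    /* Latch the PHB register offset into the indirect address register... */
    xscom_write(scom_base + PHB_SCOM_HV_IND_ADDR,
                PHB_SCOM_HV_IND_ADDR_VALID | (phb_reg & 0x1fff));
    /* ...then the data register reflects that PHB register. */
    return xscom_read(scom_base + PHB_SCOM_HV_IND_DATA);
}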
+
+/*
+ * PHB MMIO registers
+ */
+
+/* PHB Fundamental register set A */
+#define PHB_LSI_SOURCE_ID 0x100
+#define PHB_LSI_SRC_ID PPC_BITMASK(4, 12)
+#define PHB_DMA_CHAN_STATUS 0x110
+#define PHB_DMA_CHAN_ANY_ERR PPC_BIT(27)
+#define PHB_DMA_CHAN_ANY_ERR1 PPC_BIT(28)
+#define PHB_DMA_CHAN_ANY_FREEZE PPC_BIT(29)
+#define PHB_CPU_LOADSTORE_STATUS 0x120
+#define PHB_CPU_LS_ANY_ERR PPC_BIT(27)
+#define PHB_CPU_LS_ANY_ERR1 PPC_BIT(28)
+#define PHB_CPU_LS_ANY_FREEZE PPC_BIT(29)
+#define PHB_CONFIG_DATA 0x130
+#define PHB_LOCK0 0x138
+#define PHB_CONFIG_ADDRESS 0x140
+#define PHB_CA_ENABLE PPC_BIT(0)
+#define PHB_CA_STATUS PPC_BITMASK(1, 3)
+#define PHB_CA_STATUS_GOOD 0
+#define PHB_CA_STATUS_UR 1
+#define PHB_CA_STATUS_CRS 2
+#define PHB_CA_STATUS_CA 4
+#define PHB_CA_BUS PPC_BITMASK(4, 11)
+#define PHB_CA_DEV PPC_BITMASK(12, 16)
+#define PHB_CA_FUNC PPC_BITMASK(17, 19)
+#define PHB_CA_BDFN PPC_BITMASK(4, 19) /* bus,dev,func */
+#define PHB_CA_REG PPC_BITMASK(20, 31)
+#define PHB_CA_PE PPC_BITMASK(39, 47)
+#define PHB_LOCK1 0x148
+#define PHB_PHB4_CONFIG 0x160
+#define PHB_PHB4C_32BIT_MSI_EN PPC_BIT(8)
+#define PHB_PHB4C_64BIT_MSI_EN PPC_BIT(14)
+#define PHB_RTT_BAR 0x168
+#define PHB_RTT_BAR_ENABLE PPC_BIT(0)
+#define PHB_RTT_BASE_ADDRESS_MASK PPC_BITMASK(8, 46)
+#define PHB_PELTV_BAR 0x188
+#define PHB_PELTV_BAR_ENABLE PPC_BIT(0)
+#define PHB_PELTV_BASE_ADDRESS PPC_BITMASK(8, 50)
+#define PHB_M32_START_ADDR 0x1a0
+#define PHB_PEST_BAR 0x1a8
+#define PHB_PEST_BAR_ENABLE PPC_BIT(0)
+#define PHB_PEST_BASE_ADDRESS PPC_BITMASK(8, 51)
+#define PHB_ASN_CMPM 0x1C0
+#define PHB_ASN_CMPM_ENABLE PPC_BIT(63)
+#define PHB_CAPI_CMPM 0x1C8
+#define PHB_CAPI_CMPM_ENABLE PPC_BIT(63)
+#define PHB_M64_AOMASK 0x1d0
+#define PHB_M64_UPPER_BITS 0x1f0
+#define PHB_NXLATE_PREFIX 0x1f8
+#define PHB_DMARD_SYNC 0x200
+#define PHB_DMARD_SYNC_START PPC_BIT(0)
+#define PHB_DMARD_SYNC_COMPLETE PPC_BIT(1)
+#define PHB_RTC_INVALIDATE 0x208
+#define PHB_RTC_INVALIDATE_ALL PPC_BIT(0)
+#define PHB_RTC_INVALIDATE_RID PPC_BITMASK(16, 31)
+#define PHB_TCE_KILL 0x210
+#define PHB_TCE_KILL_ALL PPC_BIT(0)
+#define PHB_TCE_KILL_PE PPC_BIT(1)
+#define PHB_TCE_KILL_ONE PPC_BIT(2)
+#define PHB_TCE_KILL_PSEL PPC_BIT(3)
+#define PHB_TCE_KILL_64K 0x1000 /* Address override */
+#define PHB_TCE_KILL_2M 0x2000 /* Address override */
+#define PHB_TCE_KILL_1G 0x3000 /* Address override */
+#define PHB_TCE_KILL_PENUM PPC_BITMASK(55, 63)
+#define PHB_TCE_SPEC_CTL 0x218
+#define PHB_IODA_ADDR 0x220
+#define PHB_IODA_AD_AUTOINC PPC_BIT(0)
+#define PHB_IODA_AD_TSEL PPC_BITMASK(11, 15)
+#define PHB_IODA_AD_MIST_PWV PPC_BITMASK(28, 31)
+#define PHB_IODA_AD_TADR PPC_BITMASK(54, 63)
+#define PHB_IODA_DATA0 0x228
+#define PHB_PHB4_GEN_CAP 0x250
+#define PHB_PHB4_TCE_CAP 0x258
+#define PHB_PHB4_IRQ_CAP 0x260
+#define PHB_PHB4_EEH_CAP 0x268
+#define PHB_PAPR_ERR_INJ_CTL 0x2b0
+#define PHB_PAPR_ERR_INJ_CTL_INB PPC_BIT(0)
+#define PHB_PAPR_ERR_INJ_CTL_OUTB PPC_BIT(1)
+#define PHB_PAPR_ERR_INJ_CTL_STICKY PPC_BIT(2)
+#define PHB_PAPR_ERR_INJ_CTL_CFG PPC_BIT(3)
+#define PHB_PAPR_ERR_INJ_CTL_RD PPC_BIT(4)
+#define PHB_PAPR_ERR_INJ_CTL_WR PPC_BIT(5)
+#define PHB_PAPR_ERR_INJ_CTL_FREEZE PPC_BIT(6)
+#define PHB_PAPR_ERR_INJ_ADDR 0x2b8
+#define PHB_PAPR_ERR_INJ_ADDR_MMIO PPC_BITMASK(16, 63)
+#define PHB_PAPR_ERR_INJ_MASK 0x2c0
+#define PHB_PAPR_ERR_INJ_MASK_CFG PPC_BITMASK(4, 11)
+#define PHB_PAPR_ERR_INJ_MASK_CFG_ALL PPC_BITMASK(4, 19)
+#define PHB_PAPR_ERR_INJ_MASK_MMIO PPC_BITMASK(16, 63)
+#define PHB_ETU_ERR_SUMMARY 0x2c8
+#define PHB_INT_NOTIFY_ADDR 0x300
+#define PHB_INT_NOTIFY_ADDR_64K PPC_BIT(1) /* P10 */
+#define PHB_INT_NOTIFY_INDEX 0x308
+
+/* Fundamental register set B */
+#define PHB_VERSION 0x800
+#define PHB_CTRLR 0x810
+#define PHB_CTRLR_IRQ_PQ_DISABLE PPC_BIT(9) /* P10 */
+#define PHB_CTRLR_IRQ_ABT_MODE PPC_BIT(10) /* P10 */
+#define PHB_CTRLR_IRQ_PGSZ_64K PPC_BIT(11)
+#define PHB_CTRLR_IRQ_STORE_EOI PPC_BIT(12)
+#define PHB_CTRLR_MMIO_RD_STRICT PPC_BIT(13)
+#define PHB_CTRLR_MMIO_EEH_DISABLE PPC_BIT(14)
+#define PHB_CTRLR_CFG_EEH_BLOCK PPC_BIT(15)
+#define PHB_CTRLR_FENCE_LNKILL_DIS PPC_BIT(16)
+#define PHB_CTRLR_TVT_ADDR_SEL PPC_BITMASK(17, 19)
+#define TVT_DD1_1_PER_PE 0
+#define TVT_DD1_2_PER_PE 1
+#define TVT_DD1_4_PER_PE 2
+#define TVT_DD1_8_PER_PE 3
+#define TVT_DD1_16_PER_PE 4
+#define TVT_2_PER_PE 0
+#define TVT_4_PER_PE 1
+#define TVT_8_PER_PE 2
+#define TVT_16_PER_PE 3
+#define PHB_CTRLR_DMA_RD_SPACING PPC_BITMASK(28, 31)
+#define PHB_AIB_FENCE_CTRL 0x860
+#define PHB_TCE_TAG_ENABLE 0x868
+#define PHB_TCE_WATERMARK 0x870
+#define PHB_TIMEOUT_CTRL1 0x878
+#define PHB_TIMEOUT_CTRL2 0x880
+#define PHB_Q_DMA_R 0x888
+#define PHB_Q_DMA_R_QUIESCE_DMA PPC_BIT(0)
+#define PHB_Q_DMA_R_AUTORESET PPC_BIT(1)
+#define PHB_Q_DMA_R_DMA_RESP_STATUS PPC_BIT(4)
+#define PHB_Q_DMA_R_MMIO_RESP_STATUS PPC_BIT(5)
+#define PHB_Q_DMA_R_TCE_RESP_STATUS PPC_BIT(6)
+#define PHB_Q_DMA_R_TCE_KILL_STATUS PPC_BIT(7)
+#define PHB_TCE_TAG_STATUS 0x908
+
+/* FIR & Error registers */
+#define PHB_LEM_FIR_ACCUM 0xc00
+#define PHB_LEM_FIR_AND_MASK 0xc08
+#define PHB_LEM_FIR_OR_MASK 0xc10
+#define PHB_LEM_ERROR_MASK 0xc18
+#define PHB_LEM_ERROR_AND_MASK 0xc20
+#define PHB_LEM_ERROR_OR_MASK 0xc28
+#define PHB_LEM_ACTION0 0xc30
+#define PHB_LEM_ACTION1 0xc38
+#define PHB_LEM_WOF 0xc40
+#define PHB_ERR_STATUS 0xc80
+#define PHB_ERR1_STATUS 0xc88
+#define PHB_ERR_INJECT 0xc90
+#define PHB_ERR_LEM_ENABLE 0xc98
+#define PHB_ERR_IRQ_ENABLE 0xca0
+#define PHB_ERR_FREEZE_ENABLE 0xca8
+#define PHB_ERR_AIB_FENCE_ENABLE 0xcb0
+#define PHB_ERR_LOG_0 0xcc0
+#define PHB_ERR_LOG_1 0xcc8
+#define PHB_ERR_STATUS_MASK 0xcd0
+#define PHB_ERR1_STATUS_MASK 0xcd8
+
+#define PHB_TXE_ERR_STATUS 0xd00
+#define PHB_TXE_ERR1_STATUS 0xd08
+#define PHB_TXE_ERR_INJECT 0xd10
+#define PHB_TXE_ERR_LEM_ENABLE 0xd18
+#define PHB_TXE_ERR_IRQ_ENABLE 0xd20
+#define PHB_TXE_ERR_FREEZE_ENABLE 0xd28
+#define PHB_TXE_ERR_AIB_FENCE_ENABLE 0xd30
+#define PHB_TXE_ERR_LOG_0 0xd40
+#define PHB_TXE_ERR_LOG_1 0xd48
+#define PHB_TXE_ERR_STATUS_MASK 0xd50
+#define PHB_TXE_ERR1_STATUS_MASK 0xd58
+
+#define PHB_RXE_ARB_ERR_STATUS 0xd80
+#define PHB_RXE_ARB_ERR1_STATUS 0xd88
+#define PHB_RXE_ARB_ERR_INJECT 0xd90
+#define PHB_RXE_ARB_ERR_LEM_ENABLE 0xd98
+#define PHB_RXE_ARB_ERR_IRQ_ENABLE 0xda0
+#define PHB_RXE_ARB_ERR_FREEZE_ENABLE 0xda8
+#define PHB_RXE_ARB_ERR_AIB_FENCE_ENABLE 0xdb0
+#define PHB_RXE_ARB_ERR_LOG_0 0xdc0
+#define PHB_RXE_ARB_ERR_LOG_1 0xdc8
+#define PHB_RXE_ARB_ERR_STATUS_MASK 0xdd0
+#define PHB_RXE_ARB_ERR1_STATUS_MASK 0xdd8
+
+#define PHB_RXE_MRG_ERR_STATUS 0xe00
+#define PHB_RXE_MRG_ERR1_STATUS 0xe08
+#define PHB_RXE_MRG_ERR_INJECT 0xe10
+#define PHB_RXE_MRG_ERR_LEM_ENABLE 0xe18
+#define PHB_RXE_MRG_ERR_IRQ_ENABLE 0xe20
+#define PHB_RXE_MRG_ERR_FREEZE_ENABLE 0xe28
+#define PHB_RXE_MRG_ERR_AIB_FENCE_ENABLE 0xe30
+#define PHB_RXE_MRG_ERR_LOG_0 0xe40
+#define PHB_RXE_MRG_ERR_LOG_1 0xe48
+#define PHB_RXE_MRG_ERR_STATUS_MASK 0xe50
+#define PHB_RXE_MRG_ERR1_STATUS_MASK 0xe58
+
+#define PHB_RXE_TCE_ERR_STATUS 0xe80
+#define PHB_RXE_TCE_ERR1_STATUS 0xe88
+#define PHB_RXE_TCE_ERR_INJECT 0xe90
+#define PHB_RXE_TCE_ERR_LEM_ENABLE 0xe98
+#define PHB_RXE_TCE_ERR_IRQ_ENABLE 0xea0
+#define PHB_RXE_TCE_ERR_FREEZE_ENABLE 0xea8
+#define PHB_RXE_TCE_ERR_AIB_FENCE_ENABLE 0xeb0
+#define PHB_RXE_TCE_ERR_LOG_0 0xec0
+#define PHB_RXE_TCE_ERR_LOG_1 0xec8
+#define PHB_RXE_TCE_ERR_STATUS_MASK 0xed0
+#define PHB_RXE_TCE_ERR1_STATUS_MASK 0xed8
+
+/* Performance monitor & Debug registers */
+#define PHB_TRACE_CONTROL 0xf80
+#define PHB_PERFMON_CONFIG 0xf88
+#define PHB_PERFMON_CTR0 0xf90
+#define PHB_PERFMON_CTR1 0xf98
+#define PHB_PERFMON_CTR2 0xfa0
+#define PHB_PERFMON_CTR3 0xfa8
+
+/* Root complex config space memory mapped */
+#define PHB_RC_CONFIG_BASE 0x1000
+#define PHB_RC_CONFIG_SIZE 0x800
+
+/* PHB4 REGB registers */
+
+/* PBL core */
+#define PHB_PBL_CONTROL 0x1800
+#define PHB_PBL_TIMEOUT_CTRL 0x1810
+#define PHB_PBL_NPTAG_ENABLE 0x1820
+#define PHB_PBL_NBW_CMP_MASK 0x1830
+#define PHB_PBL_NBW_MASK_ENABLE PPC_BIT(63)
+#define PHB_PBL_SYS_LINK_INIT 0x1838
+#define PHB_PBL_BUF_STATUS 0x1840
+#define PHB_PBL_ERR_STATUS 0x1900
+#define PHB_PBL_ERR1_STATUS 0x1908
+#define PHB_PBL_ERR_INJECT 0x1910
+#define PHB_PBL_ERR_INF_ENABLE 0x1920
+#define PHB_PBL_ERR_ERC_ENABLE 0x1928
+#define PHB_PBL_ERR_FAT_ENABLE 0x1930
+#define PHB_PBL_ERR_LOG_0 0x1940
+#define PHB_PBL_ERR_LOG_1 0x1948
+#define PHB_PBL_ERR_STATUS_MASK 0x1950
+#define PHB_PBL_ERR1_STATUS_MASK 0x1958
+
+/* PCI-E stack */
+#define PHB_PCIE_SCR 0x1A00
+#define PHB_PCIE_SCR_SLOT_CAP PPC_BIT(15)
+#define PHB_PCIE_SCR_MAXLINKSPEED PPC_BITMASK(32, 35)
+
+
+#define PHB_PCIE_CRESET 0x1A10
+#define PHB_PCIE_CRESET_CFG_CORE PPC_BIT(0)
+#define PHB_PCIE_CRESET_TLDLP PPC_BIT(1)
+#define PHB_PCIE_CRESET_PBL PPC_BIT(2)
+#define PHB_PCIE_CRESET_PERST_N PPC_BIT(3)
+#define PHB_PCIE_CRESET_PIPE_N PPC_BIT(4)
+
+
+#define PHB_PCIE_HOTPLUG_STATUS 0x1A20
+#define PHB_PCIE_HPSTAT_PRESENCE PPC_BIT(10)
+
+#define PHB_PCIE_DLP_TRAIN_CTL 0x1A40
+#define PHB_PCIE_DLP_LINK_WIDTH PPC_BITMASK(30, 35)
+#define PHB_PCIE_DLP_LINK_SPEED PPC_BITMASK(36, 39)
+#define PHB_PCIE_DLP_LTSSM_TRC PPC_BITMASK(24, 27)
+#define PHB_PCIE_DLP_LTSSM_RESET 0
+#define PHB_PCIE_DLP_LTSSM_DETECT 1
+#define PHB_PCIE_DLP_LTSSM_POLLING 2
+#define PHB_PCIE_DLP_LTSSM_CONFIG 3
+#define PHB_PCIE_DLP_LTSSM_L0 4
+#define PHB_PCIE_DLP_LTSSM_REC 5
+#define PHB_PCIE_DLP_LTSSM_L1 6
+#define PHB_PCIE_DLP_LTSSM_L2 7
+#define PHB_PCIE_DLP_LTSSM_HOTRESET 8
+#define PHB_PCIE_DLP_LTSSM_DISABLED 9
+#define PHB_PCIE_DLP_LTSSM_LOOPBACK 10
+#define PHB_PCIE_DLP_TL_LINKACT PPC_BIT(23)
+#define PHB_PCIE_DLP_DL_PGRESET PPC_BIT(22)
+#define PHB_PCIE_DLP_TRAINING PPC_BIT(20)
+#define PHB_PCIE_DLP_INBAND_PRESENCE PPC_BIT(19)
+
+#define PHB_PCIE_DLP_CTL 0x1A78
+#define PHB_PCIE_DLP_CTL_BYPASS_PH2 PPC_BIT(4)
+#define PHB_PCIE_DLP_CTL_BYPASS_PH3 PPC_BIT(5)
+
+#define PHB_PCIE_DLP_TRWCTL 0x1A80
+#define PHB_PCIE_DLP_TRWCTL_EN PPC_BIT(0)
+
+#define PHB_PCIE_DLP_ERRLOG1 0x1AA0
+#define PHB_PCIE_DLP_ERRLOG2 0x1AA8
+#define PHB_PCIE_DLP_ERR_STATUS 0x1AB0
+#define PHB_PCIE_DLP_ERR_COUNTERS 0x1AB8
+
+#define PHB_PCIE_LANE_EQ_CNTL0 0x1AD0
+#define PHB_PCIE_LANE_EQ_CNTL1 0x1AD8
+#define PHB_PCIE_LANE_EQ_CNTL2 0x1AE0
+#define PHB_PCIE_LANE_EQ_CNTL3 0x1AE8
+#define PHB_PCIE_LANE_EQ_CNTL20 0x1AF0
+#define PHB_PCIE_LANE_EQ_CNTL21 0x1AF8
+#define PHB_PCIE_LANE_EQ_CNTL22 0x1B00 /* DD1 only */
+#define PHB_PCIE_LANE_EQ_CNTL23 0x1B08 /* DD1 only */
+#define PHB_PCIE_TRACE_CTRL 0x1B20
+#define PHB_PCIE_MISC_STRAP 0x1B30
+
+/* Error */
+#define PHB_REGB_ERR_STATUS 0x1C00
+#define PHB_REGB_ERR1_STATUS 0x1C08
+#define PHB_REGB_ERR_INJECT 0x1C10
+#define PHB_REGB_ERR_INF_ENABLE 0x1C20
+#define PHB_REGB_ERR_ERC_ENABLE 0x1C28
+#define PHB_REGB_ERR_FAT_ENABLE 0x1C30
+#define PHB_REGB_ERR_LOG_0 0x1C40
+#define PHB_REGB_ERR_LOG_1 0x1C48
+#define PHB_REGB_ERR_STATUS_MASK 0x1C50
+#define PHB_REGB_ERR1_STATUS_MASK 0x1C58
+
+/*
+ * IODA3 on-chip tables
+ */
+
+#define IODA3_TBL_LIST 1
+#define IODA3_TBL_MIST 2
+#define IODA3_TBL_RCAM 5
+#define IODA3_TBL_MRT 6
+#define IODA3_TBL_PESTA 7
+#define IODA3_TBL_PESTB 8
+#define IODA3_TBL_TVT 9
+#define IODA3_TBL_TCR 10
+#define IODA3_TBL_TDR 11
+#define IODA3_TBL_MBT 16
+#define IODA3_TBL_MDT 17
+#define IODA3_TBL_PEEV 20
+
+/* LIST */
+#define IODA3_LIST_P PPC_BIT(6)
+#define IODA3_LIST_Q PPC_BIT(7)
+#define IODA3_LIST_STATE PPC_BIT(14)
+
+/* MIST */
+#define IODA3_MIST_P3 PPC_BIT(48 + 0)
+#define IODA3_MIST_Q3 PPC_BIT(48 + 1)
+#define IODA3_MIST_PE3 PPC_BITMASK(48 + 4, 48 + 15)
+
+/* TVT */
+#define IODA3_TVT_TABLE_ADDR PPC_BITMASK(0, 47)
+#define IODA3_TVT_NUM_LEVELS PPC_BITMASK(48, 50)
+#define IODA3_TVE_1_LEVEL 0
+#define IODA3_TVE_2_LEVELS 1
+#define IODA3_TVE_3_LEVELS 2
+#define IODA3_TVE_4_LEVELS 3
+#define IODA3_TVE_5_LEVELS 4
+#define IODA3_TVT_TCE_TABLE_SIZE PPC_BITMASK(51, 55)
+#define IODA3_TVT_NON_TRANSLATE_50 PPC_BIT(56)
+#define IODA3_TVT_IO_PSIZE PPC_BITMASK(59, 63)
+
+/* PESTA */
+#define IODA3_PESTA_MMIO_FROZEN PPC_BIT(0)
+#define IODA3_PESTA_TRANS_TYPE PPC_BITMASK(5, 7)
+#define IODA3_PESTA_TRANS_TYPE_MMIOLOAD 0x4
+#define IODA3_PESTA_CA_CMPLT_TMT PPC_BIT(8)
+#define IODA3_PESTA_UR PPC_BIT(9)
+
+/* PESTB */
+#define IODA3_PESTB_DMA_STOPPED PPC_BIT(0)
+
+/* MDT */
+/* FIXME: check this field with Eric and add a B, C and D */
+#define IODA3_MDT_PE_A PPC_BITMASK(0, 15)
+#define IODA3_MDT_PE_B PPC_BITMASK(16, 31)
+#define IODA3_MDT_PE_C PPC_BITMASK(32, 47)
+#define IODA3_MDT_PE_D PPC_BITMASK(48, 63)
+
+/* MBT */
+#define IODA3_MBT0_ENABLE PPC_BIT(0)
+#define IODA3_MBT0_TYPE PPC_BIT(1)
+#define IODA3_MBT0_TYPE_M32 IODA3_MBT0_TYPE
+#define IODA3_MBT0_TYPE_M64 0
+#define IODA3_MBT0_MODE PPC_BITMASK(2, 3)
+#define IODA3_MBT0_MODE_PE_SEG 0
+#define IODA3_MBT0_MODE_MDT 1
+#define IODA3_MBT0_MODE_SINGLE_PE 2
+#define IODA3_MBT0_SEG_DIV PPC_BITMASK(4, 5)
+#define IODA3_MBT0_SEG_DIV_MAX 0
+#define IODA3_MBT0_SEG_DIV_128 1
+#define IODA3_MBT0_SEG_DIV_64 2
+#define IODA3_MBT0_SEG_DIV_8 3
+#define IODA3_MBT0_MDT_COLUMN PPC_BITMASK(4, 5)
+#define IODA3_MBT0_BASE_ADDR PPC_BITMASK(8, 51)
+
+#define IODA3_MBT1_ENABLE PPC_BIT(0)
+#define IODA3_MBT1_MASK PPC_BITMASK(8, 51)
+#define IODA3_MBT1_SEG_BASE PPC_BITMASK(55, 63)
+#define IODA3_MBT1_SINGLE_PE_NUM PPC_BITMASK(55, 63)
+
+/*
+ * IODA3 in-memory tables
+ */
+
+/*
+ * PEST
+ *
+ * 2 x 8-byte entries, PEST0 and PEST1
+ */
+
+#define IODA3_PEST0_MMIO_CAUSE PPC_BIT(2)
+#define IODA3_PEST0_CFG_READ PPC_BIT(3)
+#define IODA3_PEST0_CFG_WRITE PPC_BIT(4)
+#define IODA3_PEST0_TTYPE PPC_BITMASK(5, 7)
+#define PEST_TTYPE_DMA_WRITE 0
+#define PEST_TTYPE_MSI 1
+#define PEST_TTYPE_DMA_READ 2
+#define PEST_TTYPE_DMA_READ_RESP 3
+#define PEST_TTYPE_MMIO_LOAD 4
+#define PEST_TTYPE_MMIO_STORE 5
+#define PEST_TTYPE_OTHER 7
+#define IODA3_PEST0_CA_RETURN PPC_BIT(8)
+#define IODA3_PEST0_UR_RETURN PPC_BIT(9)
+#define IODA3_PEST0_PCIE_NONFATAL PPC_BIT(10)
+#define IODA3_PEST0_PCIE_FATAL PPC_BIT(11)
+#define IODA3_PEST0_PARITY_UE PPC_BIT(13)
+#define IODA3_PEST0_PCIE_CORRECTABLE PPC_BIT(14)
+#define IODA3_PEST0_PCIE_INTERRUPT PPC_BIT(15)
+#define IODA3_PEST0_MMIO_XLATE PPC_BIT(16)
+#define IODA3_PEST0_IODA3_ERROR PPC_BIT(16) /* Same bit as MMIO xlate */
+#define IODA3_PEST0_TCE_PAGE_FAULT PPC_BIT(18)
+#define IODA3_PEST0_TCE_ACCESS_FAULT PPC_BIT(19)
+#define IODA3_PEST0_DMA_RESP_TIMEOUT PPC_BIT(20)
+#define IODA3_PEST0_AIB_SIZE_INVALID PPC_BIT(21)
+#define IODA3_PEST0_LEM_BIT PPC_BITMASK(26, 31)
+#define IODA3_PEST0_RID PPC_BITMASK(32, 47)
+#define IODA3_PEST0_MSI_DATA PPC_BITMASK(48, 63)
+
+#define IODA3_PEST1_FAIL_ADDR PPC_BITMASK(3, 63)
+
+
+#endif /* PCI_HOST_PNV_PHB4_REGS_H */
diff --git a/include/hw/pci-host/ppc4xx.h b/include/hw/pci-host/ppc4xx.h
new file mode 100644
index 0000000000..32396417fc
--- /dev/null
+++ b/include/hw/pci-host/ppc4xx.h
@@ -0,0 +1,17 @@
+/*
+ * QEMU PowerPC 4xx PCI-host definitions
+ *
+ * Copyright (c) 2018-2023 BALATON Zoltan
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PCIHOST_PPC4XX_H
+#define HW_PCIHOST_PPC4XX_H
+
+#define TYPE_PPC4xx_HOST_BRIDGE "ppc4xx-host-bridge"
+#define TYPE_PPC4xx_PCI_HOST "ppc4xx-pci-host"
+#define TYPE_PPC440_PCIX_HOST "ppc440-pcix-host"
+#define TYPE_PPC460EX_PCIE_HOST "ppc460ex-pcie-host"
+
+#endif
diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
index 8f4ddde393..22fadfa3ed 100644
--- a/include/hw/pci-host/q35.h
+++ b/include/hw/pci-host/q35.h
@@ -15,34 +15,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>
*/
#ifndef HW_Q35_H
#define HW_Q35_H
-#include "hw/hw.h"
-#include "hw/isa/isa.h"
-#include "hw/sysbus.h"
-#include "hw/i386/pc.h"
-#include "hw/isa/apm.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pcie_host.h"
-#include "hw/acpi/acpi.h"
-#include "hw/acpi/ich9.h"
#include "hw/pci-host/pam.h"
-#include "hw/i386/intel_iommu.h"
+#include "qemu/units.h"
+#include "qemu/range.h"
+#include "qom/object.h"
#define TYPE_Q35_HOST_DEVICE "q35-pcihost"
-#define Q35_HOST_DEVICE(obj) \
- OBJECT_CHECK(Q35PCIHost, (obj), TYPE_Q35_HOST_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(Q35PCIHost, Q35_HOST_DEVICE)
#define TYPE_MCH_PCI_DEVICE "mch"
-#define MCH_PCI_DEVICE(obj) \
- OBJECT_CHECK(MCHPCIState, (obj), TYPE_MCH_PCI_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(MCHPCIState, MCH_PCI_DEVICE)
-typedef struct MCHPCIState {
+struct MCHPCIState {
/*< private >*/
PCIDevice parent_obj;
/*< public >*/
@@ -51,26 +44,28 @@ typedef struct MCHPCIState {
MemoryRegion *pci_address_space;
MemoryRegion *system_memory;
MemoryRegion *address_space_io;
- PAMMemoryRegion pam_regions[13];
+ PAMMemoryRegion pam_regions[PAM_REGIONS_COUNT];
MemoryRegion smram_region, open_high_smram;
MemoryRegion smram, low_smram, high_smram;
MemoryRegion tseg_blackhole, tseg_window;
+ MemoryRegion smbase_blackhole, smbase_window;
+ bool has_smram_at_smbase;
+ bool has_smm_ranges;
Range pci_hole;
uint64_t below_4g_mem_size;
uint64_t above_4g_mem_size;
uint64_t pci_hole64_size;
- uint32_t short_root_bus;
uint16_t ext_tseg_mbytes;
-} MCHPCIState;
+};
-typedef struct Q35PCIHost {
+struct Q35PCIHost {
/*< private >*/
PCIExpressHost parent_obj;
/*< public >*/
bool pci_hole64_fix;
MCHPCIState mch;
-} Q35PCIHost;
+};
#define Q35_MASK(bit, ms_bit, ls_bit) \
((uint##bit##_t)(((1ULL << ((ms_bit) + 1)) - 1) & ~((1ULL << ls_bit) - 1)))
@@ -79,11 +74,6 @@ typedef struct Q35PCIHost {
* gmch part
*/
-#define MCH_HOST_PROP_RAM_MEM "ram-mem"
-#define MCH_HOST_PROP_PCI_MEM "pci-mem"
-#define MCH_HOST_PROP_SYSTEM_MEM "system-mem"
-#define MCH_HOST_PROP_IO_MEM "io-mem"
-
/* PCI configuration */
#define MCH_HOST_BRIDGE "MCH"
@@ -98,6 +88,13 @@ typedef struct Q35PCIHost {
#define MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY 0xffff
#define MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_MAX 0xfff
+#define MCH_HOST_BRIDGE_SMBASE_SIZE (128 * KiB)
+#define MCH_HOST_BRIDGE_SMBASE_ADDR 0x30000
+#define MCH_HOST_BRIDGE_F_SMBASE 0x9c
+#define MCH_HOST_BRIDGE_F_SMBASE_QUERY 0xff
+#define MCH_HOST_BRIDGE_F_SMBASE_IN_RAM 0x01
+#define MCH_HOST_BRIDGE_F_SMBASE_LCK 0x02
+
#define MCH_HOST_BRIDGE_PCIEXBAR 0x60 /* 64bit register */
#define MCH_HOST_BRIDGE_PCIEXBAR_SIZE 8 /* 64bit register */
#define MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT 0xb0000000
diff --git a/include/hw/pci-host/remote.h b/include/hw/pci-host/remote.h
new file mode 100644
index 0000000000..690a01f0fe
--- /dev/null
+++ b/include/hw/pci-host/remote.h
@@ -0,0 +1,30 @@
+/*
+ * PCI Host for remote device
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PCI_HOST_REMOTE_H
+#define PCI_HOST_REMOTE_H
+
+#include "exec/memory.h"
+#include "hw/pci/pcie_host.h"
+
+#define TYPE_REMOTE_PCIHOST "remote-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(RemotePCIHost, REMOTE_PCIHOST)
+
+struct RemotePCIHost {
+ /*< private >*/
+ PCIExpressHost parent_obj;
+ /*< public >*/
+
+ MemoryRegion *mr_pci_mem;
+ MemoryRegion *mr_sys_io;
+ MemoryRegion *mr_sys_mem;
+};
+
+#endif
diff --git a/include/hw/pci-host/sabre.h b/include/hw/pci-host/sabre.h
index 0f2ccc01c6..d12de84ea2 100644
--- a/include/hw/pci-host/sabre.h
+++ b/include/hw/pci-host/sabre.h
@@ -1,7 +1,10 @@
-#ifndef PCI_HOST_APB_H
-#define PCI_HOST_APB_H
+#ifndef HW_PCI_HOST_SABRE_H
+#define HW_PCI_HOST_SABRE_H
+#include "hw/pci/pci_device.h"
+#include "hw/pci/pci_host.h"
#include "hw/sparc/sun4u_iommu.h"
+#include "qom/object.h"
#define MAX_IVEC 0x40
@@ -14,15 +17,14 @@
#define OBIO_MSE_IRQ 0x2a
#define OBIO_SER_IRQ 0x2b
-typedef struct SabrePCIState {
+struct SabrePCIState {
PCIDevice parent_obj;
-} SabrePCIState;
+};
#define TYPE_SABRE_PCI_DEVICE "sabre-pci"
-#define SABRE_PCI_DEVICE(obj) \
- OBJECT_CHECK(SabrePCIState, (obj), TYPE_SABRE_PCI_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(SabrePCIState, SABRE_PCI_DEVICE)
-typedef struct SabreState {
+struct SabreState {
PCIHostState parent_obj;
hwaddr special_base;
@@ -43,10 +45,9 @@ typedef struct SabreState {
unsigned int irq_request;
uint32_t reset_control;
unsigned int nr_resets;
-} SabreState;
+};
#define TYPE_SABRE "sabre"
-#define SABRE_DEVICE(obj) \
- OBJECT_CHECK(SabreState, (obj), TYPE_SABRE)
+OBJECT_DECLARE_SIMPLE_TYPE(SabreState, SABRE)
#endif
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index a5a7bf4837..3778aac27b 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,27 +24,30 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "hw/ppc/xics.h"
+#include "qom/object.h"
#define TYPE_SPAPR_PCI_HOST_BRIDGE "spapr-pci-host-bridge"
-#define SPAPR_PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(sPAPRPHBState, (obj), TYPE_SPAPR_PCI_HOST_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprPhbState, SPAPR_PCI_HOST_BRIDGE)
#define SPAPR_PCI_DMA_MAX_WINDOWS 2
-typedef struct sPAPRPHBState sPAPRPHBState;
-typedef struct spapr_pci_msi {
+typedef struct SpaprPciMsi {
uint32_t first_irq;
uint32_t num;
-} spapr_pci_msi;
+} SpaprPciMsi;
-typedef struct spapr_pci_msi_mig {
+typedef struct SpaprPciMsiMig {
uint32_t key;
- spapr_pci_msi value;
-} spapr_pci_msi_mig;
+ SpaprPciMsi value;
+} SpaprPciMsiMig;
-struct sPAPRPHBState {
+typedef struct SpaprPciLsi {
+ uint32_t irq;
+} SpaprPciLsi;
+
+struct SpaprPhbState {
PCIHostState parent_obj;
uint32_t index;
@@ -63,16 +66,14 @@ struct sPAPRPHBState {
AddressSpace iommu_as;
MemoryRegion iommu_root;
- struct spapr_pci_lsi {
- uint32_t irq;
- } lsi_table[PCI_NUM_PINS];
+ SpaprPciLsi lsi_table[PCI_NUM_PINS];
GHashTable *msi;
/* Temporary cache for migration purposes */
int32_t msi_devs_num;
- spapr_pci_msi_mig *msi_devs;
+ SpaprPciMsiMig *msi_devs;
- QLIST_ENTRY(sPAPRPHBState) list;
+ QLIST_ENTRY(SpaprPhbState) list;
bool ddw_enabled;
uint64_t page_size_mask;
@@ -87,6 +88,7 @@ struct sPAPRPHBState {
uint32_t mig_liobn;
hwaddr mig_mem_win_addr, mig_mem_win_size;
hwaddr mig_io_win_addr, mig_io_win_size;
+ bool pre_5_1_assoc;
};
#define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL
@@ -105,54 +107,49 @@ struct sPAPRPHBState {
#define SPAPR_PCI_MSI_WINDOW 0x40000000000ULL
-static inline qemu_irq spapr_phb_lsi_qirq(struct sPAPRPHBState *phb, int pin)
-{
- sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
-
- return spapr_qirq(spapr, phb->lsi_table[pin].irq);
-}
-
-int spapr_populate_pci_dt(sPAPRPHBState *phb, uint32_t xics_phandle, void *fdt,
- uint32_t nr_msis);
+int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb,
+ uint32_t intc_phandle, void *fdt, int *node_offset);
void spapr_pci_rtas_init(void);
-sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid);
-PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid,
+SpaprPhbState *spapr_pci_find_phb(SpaprMachineState *spapr, uint64_t buid);
+PCIDevice *spapr_pci_find_dev(SpaprMachineState *spapr, uint64_t buid,
uint32_t config_addr);
-/* PCI release callback. */
+/* DRC callbacks */
void spapr_phb_remove_pci_device_cb(DeviceState *dev);
+int spapr_pci_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
+ void *fdt, int *fdt_start_offset, Error **errp);
/* VFIO EEH hooks */
#ifdef CONFIG_LINUX
-bool spapr_phb_eeh_available(sPAPRPHBState *sphb);
-int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
+bool spapr_phb_eeh_available(SpaprPhbState *sphb);
+int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb,
unsigned int addr, int option);
-int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb, int *state);
-int spapr_phb_vfio_eeh_reset(sPAPRPHBState *sphb, int option);
-int spapr_phb_vfio_eeh_configure(sPAPRPHBState *sphb);
+int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state);
+int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option);
+int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb);
void spapr_phb_vfio_reset(DeviceState *qdev);
#else
-static inline bool spapr_phb_eeh_available(sPAPRPHBState *sphb)
+static inline bool spapr_phb_eeh_available(SpaprPhbState *sphb)
{
return false;
}
-static inline int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
+static inline int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb,
unsigned int addr, int option)
{
return RTAS_OUT_HW_ERROR;
}
-static inline int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb,
+static inline int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb,
int *state)
{
return RTAS_OUT_HW_ERROR;
}
-static inline int spapr_phb_vfio_eeh_reset(sPAPRPHBState *sphb, int option)
+static inline int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option)
{
return RTAS_OUT_HW_ERROR;
}
-static inline int spapr_phb_vfio_eeh_configure(sPAPRPHBState *sphb)
+static inline int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb)
{
return RTAS_OUT_HW_ERROR;
}
@@ -161,6 +158,13 @@ static inline void spapr_phb_vfio_reset(DeviceState *qdev)
}
#endif
-void spapr_phb_dma_reset(sPAPRPHBState *sphb);
+void spapr_phb_dma_reset(SpaprPhbState *sphb);
+
+static inline unsigned spapr_phb_windows_supported(SpaprPhbState *sphb)
+{
+ return sphb->ddw_enabled ? SPAPR_PCI_DMA_MAX_WINDOWS : 1;
+}
+
+char *spapr_pci_fw_dev_name(PCIDevice *dev);
#endif /* PCI_HOST_SPAPR_H */
diff --git a/include/hw/pci-host/uninorth.h b/include/hw/pci-host/uninorth.h
index 060324536a..62bd81e721 100644
--- a/include/hw/pci-host/uninorth.h
+++ b/include/hw/pci-host/uninorth.h
@@ -25,9 +25,8 @@
#ifndef UNINORTH_H
#define UNINORTH_H
-#include "hw/hw.h"
-
-#include "hw/ppc/openpic.h"
+#include "hw/pci/pci_host.h"
+#include "qom/object.h"
/* UniNorth version */
#define UNINORTH_VERSION_10A 0x7
@@ -37,34 +36,33 @@
#define TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE "uni-north-internal-pci-pcihost"
#define TYPE_U3_AGP_HOST_BRIDGE "u3-agp-pcihost"
-#define UNI_NORTH_PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_UNI_NORTH_PCI_HOST_BRIDGE)
-#define UNI_NORTH_AGP_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_UNI_NORTH_AGP_HOST_BRIDGE)
-#define UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE)
-#define U3_AGP_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_U3_AGP_HOST_BRIDGE)
+typedef struct UNINHostState UNINHostState;
+DECLARE_INSTANCE_CHECKER(UNINHostState, UNI_NORTH_PCI_HOST_BRIDGE,
+ TYPE_UNI_NORTH_PCI_HOST_BRIDGE)
+DECLARE_INSTANCE_CHECKER(UNINHostState, UNI_NORTH_AGP_HOST_BRIDGE,
+ TYPE_UNI_NORTH_AGP_HOST_BRIDGE)
+DECLARE_INSTANCE_CHECKER(UNINHostState, UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE,
+ TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE)
+DECLARE_INSTANCE_CHECKER(UNINHostState, U3_AGP_HOST_BRIDGE,
+ TYPE_U3_AGP_HOST_BRIDGE)
-typedef struct UNINHostState {
+struct UNINHostState {
PCIHostState parent_obj;
uint32_t ofw_addr;
- OpenPICState *pic;
qemu_irq irqs[4];
MemoryRegion pci_mmio;
MemoryRegion pci_hole;
MemoryRegion pci_io;
-} UNINHostState;
+};
-typedef struct UNINState {
+struct UNINState {
SysBusDevice parent_obj;
MemoryRegion mem;
-} UNINState;
+};
#define TYPE_UNI_NORTH "uni-north"
-#define UNI_NORTH(obj) \
- OBJECT_CHECK(UNINState, (obj), TYPE_UNI_NORTH)
+OBJECT_DECLARE_SIMPLE_TYPE(UNINState, UNI_NORTH)
#endif /* UNINORTH_H */
diff --git a/include/hw/pci-host/xilinx-pcie.h b/include/hw/pci-host/xilinx-pcie.h
index 74c04dc9bb..e1b3c1c280 100644
--- a/include/hw/pci-host/xilinx-pcie.h
+++ b/include/hw/pci-host/xilinx-pcie.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,30 +20,27 @@
#ifndef HW_XILINX_PCIE_H
#define HW_XILINX_PCIE_H
-#include "hw/hw.h"
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pcie_host.h"
+#include "qom/object.h"
#define TYPE_XILINX_PCIE_HOST "xilinx-pcie-host"
-#define XILINX_PCIE_HOST(obj) \
- OBJECT_CHECK(XilinxPCIEHost, (obj), TYPE_XILINX_PCIE_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxPCIEHost, XILINX_PCIE_HOST)
#define TYPE_XILINX_PCIE_ROOT "xilinx-pcie-root"
-#define XILINX_PCIE_ROOT(obj) \
- OBJECT_CHECK(XilinxPCIERoot, (obj), TYPE_XILINX_PCIE_ROOT)
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxPCIERoot, XILINX_PCIE_ROOT)
-typedef struct XilinxPCIERoot {
+struct XilinxPCIERoot {
PCIBridge parent_obj;
-} XilinxPCIERoot;
+};
typedef struct XilinxPCIEInt {
uint32_t fifo_reg1;
uint32_t fifo_reg2;
} XilinxPCIEInt;
-typedef struct XilinxPCIEHost {
+struct XilinxPCIEHost {
PCIExpressHost parent_obj;
char name[16];
@@ -63,6 +60,6 @@ typedef struct XilinxPCIEHost {
XilinxPCIEInt intr_fifo[16];
unsigned int intr_fifo_r, intr_fifo_w;
uint32_t rpscr;
-} XilinxPCIEHost;
+};
#endif /* HW_XILINX_PCIE_H */
diff --git a/include/hw/pci/msi.h b/include/hw/pci/msi.h
index 4837bcf490..abcfd13925 100644
--- a/include/hw/pci/msi.h
+++ b/include/hw/pci/msi.h
@@ -21,8 +21,7 @@
#ifndef QEMU_MSI_H
#define QEMU_MSI_H
-#include "qemu-common.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
struct MSIMessage {
uint64_t address;
@@ -34,15 +33,18 @@ extern bool msi_nonbroken;
void msi_set_message(PCIDevice *dev, MSIMessage msg);
MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector);
bool msi_enabled(const PCIDevice *dev);
+void msi_set_enabled(PCIDevice *dev);
int msi_init(struct PCIDevice *dev, uint8_t offset,
unsigned int nr_vectors, bool msi64bit,
bool msi_per_vector_mask, Error **errp);
void msi_uninit(struct PCIDevice *dev);
void msi_reset(PCIDevice *dev);
+bool msi_is_masked(const PCIDevice *dev, unsigned int vector);
void msi_notify(PCIDevice *dev, unsigned int vector);
void msi_send_message(PCIDevice *dev, MSIMessage msg);
void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len);
unsigned int msi_nr_vectors_allocated(const PCIDevice *dev);
+void msi_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp);
static inline bool msi_present(const PCIDevice *dev)
{
diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h
index 1f27658d35..0e6f257e45 100644
--- a/include/hw/pci/msix.h
+++ b/include/hw/pci/msix.h
@@ -1,9 +1,10 @@
#ifndef QEMU_MSIX_H
#define QEMU_MSIX_H
-#include "qemu-common.h"
#include "hw/pci/pci.h"
+#define MSIX_CAP_LENGTH 12
+
void msix_set_message(PCIDevice *dev, int vector, MSIMessage msg);
MSIMessage msix_get_message(PCIDevice *dev, unsigned int vector);
int msix_init(PCIDevice *dev, unsigned short nentries,
@@ -32,9 +33,10 @@ bool msix_is_masked(PCIDevice *dev, unsigned vector);
void msix_set_pending(PCIDevice *dev, unsigned vector);
void msix_clr_pending(PCIDevice *dev, int vector);
-int msix_vector_use(PCIDevice *dev, unsigned vector);
+void msix_vector_use(PCIDevice *dev, unsigned vector);
void msix_vector_unuse(PCIDevice *dev, unsigned vector);
void msix_unuse_all_vectors(PCIDevice *dev);
+void msix_set_mask(PCIDevice *dev, int vector, bool mask);
void msix_notify(PCIDevice *dev, unsigned vector);
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index d87f5f93e9..eaa3fc99d8 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -1,15 +1,12 @@
#ifndef QEMU_PCI_H
#define QEMU_PCI_H
-#include "hw/qdev.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
/* PCI includes legacy ISA access. */
#include "hw/isa/isa.h"
-#include "hw/pci/pcie.h"
-
extern bool pci_available;
/* PCI bus */
@@ -19,6 +16,7 @@ extern bool pci_available;
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
#define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn))
+#define PCI_BDF_TO_DEVFN(x) ((x) & 0xff)
#define PCI_BUS_MAX 256
#define PCI_DEVFN_MAX 256
#define PCI_SLOT_MAX 32
@@ -56,6 +54,7 @@ extern bool pci_available;
/* QEMU/Bochs VGA (0x1234) */
#define PCI_VENDOR_ID_QEMU 0x1234
#define PCI_DEVICE_ID_QEMU_VGA 0x1111
+#define PCI_DEVICE_ID_QEMU_IPMI 0x1112
/* VMWare (0x15ad) */
#define PCI_VENDOR_ID_VMWARE 0x15ad
@@ -77,6 +76,7 @@ extern bool pci_available;
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBDEVICE_ID_QEMU 0x1100
+/* legacy virtio-pci devices */
#define PCI_DEVICE_ID_VIRTIO_NET 0x1000
#define PCI_DEVICE_ID_VIRTIO_BLOCK 0x1001
#define PCI_DEVICE_ID_VIRTIO_BALLOON 0x1002
@@ -86,6 +86,15 @@ extern bool pci_available;
#define PCI_DEVICE_ID_VIRTIO_9P 0x1009
#define PCI_DEVICE_ID_VIRTIO_VSOCK 0x1012
+/*
+ * modern virtio-pci devices get their id assigned automatically,
+ * there is no need to add #defines here. It gets calculated as
+ *
+ * PCI_DEVICE_ID = PCI_DEVICE_ID_VIRTIO_10_BASE +
+ * virtio_bus_get_vdev_id(bus)
+ */
+#define PCI_DEVICE_ID_VIRTIO_10_BASE 0x1040
+
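A trivial sketch of that calculation: virtio-net (virtio device type 1), for example, ends up as PCI device ID 0x1041. The helper name is made up for illustration:

/* Illustrative: modern virtio-pci device ID = 0x1040 + virtio device type. */
static inline uint16_t virtio_modern_pci_device_id(uint16_t virtio_device_type)
{
    return PCI_DEVICE_ID_VIRTIO_10_BASE + virtio_device_type;
}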
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_BRIDGE 0x0001
#define PCI_DEVICE_ID_REDHAT_SERIAL 0x0002
@@ -102,6 +111,10 @@ extern bool pci_available;
#define PCI_DEVICE_ID_REDHAT_XHCI 0x000d
#define PCI_DEVICE_ID_REDHAT_PCIE_BRIDGE 0x000e
#define PCI_DEVICE_ID_REDHAT_MDPY 0x000f
+#define PCI_DEVICE_ID_REDHAT_NVME 0x0010
+#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
+#define PCI_DEVICE_ID_REDHAT_ACPI_ERST 0x0012
+#define PCI_DEVICE_ID_REDHAT_UFS 0x0013
#define PCI_DEVICE_ID_REDHAT_QXL 0x0100
#define FMT_PCIBUS PRIx64
@@ -123,6 +136,10 @@ typedef void PCIMapIORegionFunc(PCIDevice *pci_dev, int region_num,
pcibus_t addr, pcibus_t size, int type);
typedef void PCIUnregisterFunc(PCIDevice *pci_dev);
+typedef void MSITriggerFunc(PCIDevice *dev, MSIMessage msg);
+typedef MSIMessage MSIPrepareMessageFunc(PCIDevice *dev, unsigned vector);
+typedef MSIMessage MSIxPrepareMessageFunc(PCIDevice *dev, unsigned vector);
+
typedef struct PCIIORegion {
pcibus_t addr; /* current PCI mapping address. -1 means not mapped */
#define PCI_BAR_UNMAPPED (~(pcibus_t)0)
@@ -173,7 +190,7 @@ enum {
#define QEMU_PCI_CAP_MULTIFUNCTION_BITNR 3
QEMU_PCI_CAP_MULTIFUNCTION = (1 << QEMU_PCI_CAP_MULTIFUNCTION_BITNR),
- /* command register SERR bit enabled */
+ /* command register SERR bit enabled - unused since QEMU v5.0 */
#define QEMU_PCI_CAP_SERR_BITNR 4
QEMU_PCI_CAP_SERR = (1 << QEMU_PCI_CAP_SERR_BITNR),
/* Standard hot plug controller. */
@@ -189,22 +206,14 @@ enum {
QEMU_PCIE_LNKSTA_DLLLA = (1 << QEMU_PCIE_LNKSTA_DLLLA_BITNR),
#define QEMU_PCIE_EXTCAP_INIT_BITNR 9
QEMU_PCIE_EXTCAP_INIT = (1 << QEMU_PCIE_EXTCAP_INIT_BITNR),
+#define QEMU_PCIE_CXL_BITNR 10
+ QEMU_PCIE_CAP_CXL = (1 << QEMU_PCIE_CXL_BITNR),
+#define QEMU_PCIE_ERR_UNC_MASK_BITNR 11
+ QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR),
+#define QEMU_PCIE_ARI_NEXTFN_1_BITNR 12
+ QEMU_PCIE_ARI_NEXTFN_1 = (1 << QEMU_PCIE_ARI_NEXTFN_1_BITNR),
};
-#define TYPE_PCI_DEVICE "pci-device"
-#define PCI_DEVICE(obj) \
- OBJECT_CHECK(PCIDevice, (obj), TYPE_PCI_DEVICE)
-#define PCI_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCIDeviceClass, (klass), TYPE_PCI_DEVICE)
-#define PCI_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCIDeviceClass, (obj), TYPE_PCI_DEVICE)
-
-/* Implemented by devices that can be plugged on PCI Express buses */
-#define INTERFACE_PCIE_DEVICE "pci-express-device"
-
-/* Implemented by devices that can be plugged on Conventional PCI buses */
-#define INTERFACE_CONVENTIONAL_PCI_DEVICE "conventional-pci-device"
-
typedef struct PCIINTxRoute {
enum {
PCI_INTX_ENABLED,
@@ -214,32 +223,6 @@ typedef struct PCIINTxRoute {
int irq;
} PCIINTxRoute;
-typedef struct PCIDeviceClass {
- DeviceClass parent_class;
-
- void (*realize)(PCIDevice *dev, Error **errp);
- PCIUnregisterFunc *exit;
- PCIConfigReadFunc *config_read;
- PCIConfigWriteFunc *config_write;
-
- uint16_t vendor_id;
- uint16_t device_id;
- uint8_t revision;
- uint16_t class_id;
- uint16_t subsystem_vendor_id; /* only for header type = 0 */
- uint16_t subsystem_id; /* only for header type = 0 */
-
- /*
- * pci-to-pci bridge or normal device.
- * This doesn't mean pci host switch.
- * When card bus bridge is supported, this would be enhanced.
- */
- int is_bridge;
-
- /* rom bar */
- const char *romfile;
-} PCIDeviceClass;
-
typedef void (*PCIINTxRoutingNotifier)(PCIDevice *dev);
typedef int (*MSIVectorUseNotifier)(PCIDevice *dev, unsigned int vector,
MSIMessage msg);
@@ -248,111 +231,6 @@ typedef void (*MSIVectorPollNotifier)(PCIDevice *dev,
unsigned int vector_start,
unsigned int vector_end);
-enum PCIReqIDType {
- PCI_REQ_ID_INVALID = 0,
- PCI_REQ_ID_BDF,
- PCI_REQ_ID_SECONDARY_BUS,
- PCI_REQ_ID_MAX,
-};
-typedef enum PCIReqIDType PCIReqIDType;
-
-struct PCIReqIDCache {
- PCIDevice *dev;
- PCIReqIDType type;
-};
-typedef struct PCIReqIDCache PCIReqIDCache;
-
-struct PCIDevice {
- DeviceState qdev;
-
- /* PCI config space */
- uint8_t *config;
-
- /* Used to enable config checks on load. Note that writable bits are
- * never checked even if set in cmask. */
- uint8_t *cmask;
-
- /* Used to implement R/W bytes */
- uint8_t *wmask;
-
- /* Used to implement RW1C(Write 1 to Clear) bytes */
- uint8_t *w1cmask;
-
- /* Used to allocate config space for capabilities. */
- uint8_t *used;
-
- /* the following fields are read only */
- int32_t devfn;
- /* Cached device to fetch requester ID from, to avoid the PCI
- * tree walking every time we invoke PCI request (e.g.,
- * MSI). For conventional PCI root complex, this field is
- * meaningless. */
- PCIReqIDCache requester_id_cache;
- char name[64];
- PCIIORegion io_regions[PCI_NUM_REGIONS];
- AddressSpace bus_master_as;
- MemoryRegion bus_master_container_region;
- MemoryRegion bus_master_enable_region;
-
- /* do not access the following fields */
- PCIConfigReadFunc *config_read;
- PCIConfigWriteFunc *config_write;
-
- /* Legacy PCI VGA regions */
- MemoryRegion *vga_regions[QEMU_PCI_VGA_NUM_REGIONS];
- bool has_vga;
-
- /* Current IRQ levels. Used internally by the generic PCI code. */
- uint8_t irq_state;
-
- /* Capability bits */
- uint32_t cap_present;
-
- /* Offset of MSI-X capability in config space */
- uint8_t msix_cap;
-
- /* MSI-X entries */
- int msix_entries_nr;
-
- /* Space to store MSIX table & pending bit array */
- uint8_t *msix_table;
- uint8_t *msix_pba;
- /* MemoryRegion container for msix exclusive BAR setup */
- MemoryRegion msix_exclusive_bar;
- /* Memory Regions for MSIX table and pending bit entries. */
- MemoryRegion msix_table_mmio;
- MemoryRegion msix_pba_mmio;
- /* Reference-count for entries actually in use by driver. */
- unsigned *msix_entry_used;
- /* MSIX function mask set or MSIX disabled */
- bool msix_function_masked;
- /* Version id needed for VMState */
- int32_t version_id;
-
- /* Offset of MSI capability in config space */
- uint8_t msi_cap;
-
- /* PCI Express */
- PCIExpressDevice exp;
-
- /* SHPC */
- SHPCDevice *shpc;
-
- /* Location of option rom */
- char *romfile;
- bool has_rom;
- MemoryRegion rom;
- uint32_t rom_bar;
-
- /* INTx routing notifier */
- PCIINTxRoutingNotifier intx_routing_notifier;
-
- /* MSI-X notifiers */
- MSIVectorUseNotifier msix_vector_use_notifier;
- MSIVectorReleaseNotifier msix_vector_release_notifier;
- MSIVectorPollNotifier msix_vector_poll_notifier;
-};
-
void pci_register_bar(PCIDevice *pci_dev, int region_num,
uint8_t attr, MemoryRegion *memory);
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
@@ -389,34 +267,42 @@ typedef int (*pci_map_irq_fn)(PCIDevice *pci_dev, int irq_num);
typedef PCIINTxRoute (*pci_route_irq_fn)(void *opaque, int pin);
#define TYPE_PCI_BUS "PCI"
-#define PCI_BUS(obj) OBJECT_CHECK(PCIBus, (obj), TYPE_PCI_BUS)
-#define PCI_BUS_CLASS(klass) OBJECT_CLASS_CHECK(PCIBusClass, (klass), TYPE_PCI_BUS)
-#define PCI_BUS_GET_CLASS(obj) OBJECT_GET_CLASS(PCIBusClass, (obj), TYPE_PCI_BUS)
+OBJECT_DECLARE_TYPE(PCIBus, PCIBusClass, PCI_BUS)
#define TYPE_PCIE_BUS "PCIE"
+#define TYPE_CXL_BUS "CXL"
-bool pci_bus_is_express(PCIBus *bus);
-bool pci_bus_is_root(PCIBus *bus);
-void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent,
- const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
- uint8_t devfn_min, const char *typename);
+typedef void (*pci_bus_dev_fn)(PCIBus *b, PCIDevice *d, void *opaque);
+typedef void (*pci_bus_fn)(PCIBus *b, void *opaque);
+typedef void *(*pci_bus_ret_fn)(PCIBus *b, void *opaque);
+
+bool pci_bus_is_express(const PCIBus *bus);
+
+void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
+ const char *name,
+ MemoryRegion *mem, MemoryRegion *io,
+ uint8_t devfn_min, const char *typename);
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, const char *typename);
void pci_root_bus_cleanup(PCIBus *bus);
-void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
+void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
void *irq_opaque, int nirq);
+void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq);
void pci_bus_irqs_cleanup(PCIBus *bus);
int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
+uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus);
+void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask);
+void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask);
/* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
+static inline int pci_swizzle(int slot, int pin)
+{
+ return (slot + pin) % PCI_NUM_PINS;
+}
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
void *irq_opaque,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, int nirq,
const char *typename);
void pci_unregister_root_bus(PCIBus *bus);
@@ -428,10 +314,9 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev,
PCIINTxRoutingNotifier notifier);
void pci_device_reset(PCIDevice *dev);
-PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
- const char *default_model,
- const char *default_devaddr);
-
+void pci_init_nic_devices(PCIBus *bus, const char *default_model);
+bool pci_init_nic_in_slot(PCIBus *rootbus, const char *default_model,
+ const char *alias, const char *devaddr);
PCIDevice *pci_vga_init(PCIBus *bus);
static inline PCIBus *pci_get_bus(const PCIDevice *dev)
@@ -439,6 +324,7 @@ static inline PCIBus *pci_get_bus(const PCIDevice *dev)
return PCI_BUS(qdev_get_parent_bus(DEVICE(dev)));
}
int pci_bus_num(PCIBus *s);
+void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus);
static inline int pci_dev_bus_num(const PCIDevice *dev)
{
return pci_bus_num(pci_get_bus(dev));
@@ -446,39 +332,75 @@ static inline int pci_dev_bus_num(const PCIDevice *dev)
int pci_bus_numa_node(PCIBus *bus);
void pci_for_each_device(PCIBus *bus, int bus_num,
- void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque),
+ pci_bus_dev_fn fn,
void *opaque);
void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
- void (*fn)(PCIBus *bus, PCIDevice *d,
- void *opaque),
+ pci_bus_dev_fn fn,
void *opaque);
-void pci_for_each_bus_depth_first(PCIBus *bus,
- void *(*begin)(PCIBus *bus, void *parent_state),
- void (*end)(PCIBus *bus, void *state),
- void *parent_state);
+void pci_for_each_device_under_bus(PCIBus *bus,
+ pci_bus_dev_fn fn, void *opaque);
+void pci_for_each_device_under_bus_reverse(PCIBus *bus,
+ pci_bus_dev_fn fn,
+ void *opaque);
+void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
+ pci_bus_fn end, void *parent_state);
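/*
 * Illustrative sketch, not part of this patch: a callback matching the new
 * pci_bus_dev_fn typedef, used here to count the devices on a bus.
 */
static void example_count_device(PCIBus *b, PCIDevice *d, void *opaque)
{
    unsigned *count = opaque;

    (*count)++;
}

/* usage:  unsigned n = 0; pci_for_each_device_under_bus(bus, example_count_device, &n); */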
PCIDevice *pci_get_function_0(PCIDevice *pci_dev);
/* Use this wrapper when specific scan order is not required. */
static inline
-void pci_for_each_bus(PCIBus *bus,
- void (*fn)(PCIBus *bus, void *opaque),
- void *opaque)
+void pci_for_each_bus(PCIBus *bus, pci_bus_fn fn, void *opaque)
{
pci_for_each_bus_depth_first(bus, NULL, fn, opaque);
}
PCIBus *pci_device_root_bus(const PCIDevice *d);
const char *pci_root_bus_path(PCIDevice *dev);
+bool pci_bus_bypass_iommu(PCIBus *bus);
PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn);
int pci_qdev_find_device(const char *id, PCIDevice **pdev);
void pci_bus_get_w64_range(PCIBus *bus, Range *range);
void pci_device_deassert_intx(PCIDevice *dev);
-typedef AddressSpace *(*PCIIOMMUFunc)(PCIBus *, void *, int);
+
+/**
+ * struct PCIIOMMUOps: callbacks structure for specific IOMMU handlers
+ * of a PCIBus
+ *
+ * Allows modifying the behavior of some IOMMU operations of the PCI
+ * framework for a set of devices on a PCI bus.
+ */
+typedef struct PCIIOMMUOps {
+ /**
+ * @get_address_space: get the address space for a set of devices
+ * on a PCI bus.
+ *
+ * Mandatory callback which returns a pointer to an #AddressSpace
+ *
+ * @bus: the #PCIBus being accessed.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number
+ */
+ AddressSpace * (*get_address_space)(PCIBus *bus, void *opaque, int devfn);
+} PCIIOMMUOps;
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev);
-void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque);
+
+/**
+ * pci_setup_iommu: Initialize specific IOMMU handlers for a PCIBus
+ *
+ * Let PCI host bridges define specific operations.
+ *
+ * @bus: the #PCIBus being updated.
+ * @ops: the #PCIIOMMUOps
+ * @opaque: passed to callbacks of the @ops structure.
+ */
+void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque);
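/*
 * Illustrative sketch, not part of this patch: a host bridge handing out a
 * single shared DMA address space through the new PCIIOMMUOps interface.
 * ExampleHostState and its dma_as field are hypothetical.
 */
typedef struct ExampleHostState {
    AddressSpace dma_as;
} ExampleHostState;

static AddressSpace *example_host_get_as(PCIBus *bus, void *opaque, int devfn)
{
    ExampleHostState *s = opaque;

    return &s->dma_as;          /* same address space for every devfn */
}

static const PCIIOMMUOps example_iommu_ops = {
    .get_address_space = example_host_get_as,
};

/* in the host bridge's realize():  pci_setup_iommu(bus, &example_iommu_ops, s); */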
+
+pcibus_t pci_bar_address(PCIDevice *d,
+ int reg, uint8_t type, pcibus_t size);
static inline void
pci_set_byte(uint8_t *config, uint8_t val)
@@ -646,68 +568,52 @@ static inline void
pci_set_byte_by_mask(uint8_t *config, uint8_t mask, uint8_t reg)
{
uint8_t val = pci_get_byte(config);
- uint8_t rval = reg << ctz32(mask);
- pci_set_byte(config, (~mask & val) | (mask & rval));
-}
+ uint8_t rval;
-static inline uint8_t
-pci_get_byte_by_mask(uint8_t *config, uint8_t mask)
-{
- uint8_t val = pci_get_byte(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_byte(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_word_by_mask(uint8_t *config, uint16_t mask, uint16_t reg)
{
uint16_t val = pci_get_word(config);
- uint16_t rval = reg << ctz32(mask);
- pci_set_word(config, (~mask & val) | (mask & rval));
-}
+ uint16_t rval;
-static inline uint16_t
-pci_get_word_by_mask(uint8_t *config, uint16_t mask)
-{
- uint16_t val = pci_get_word(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_word(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_long_by_mask(uint8_t *config, uint32_t mask, uint32_t reg)
{
uint32_t val = pci_get_long(config);
- uint32_t rval = reg << ctz32(mask);
- pci_set_long(config, (~mask & val) | (mask & rval));
-}
+ uint32_t rval;
-static inline uint32_t
-pci_get_long_by_mask(uint8_t *config, uint32_t mask)
-{
- uint32_t val = pci_get_long(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_long(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg)
{
uint64_t val = pci_get_quad(config);
- uint64_t rval = reg << ctz32(mask);
+ uint64_t rval;
+
+ assert(mask);
+ rval = reg << ctz32(mask);
pci_set_quad(config, (~mask & val) | (mask & rval));
}
-static inline uint64_t
-pci_get_quad_by_mask(uint8_t *config, uint64_t mask)
-{
- uint64_t val = pci_get_quad(config);
- return (val & mask) >> ctz32(mask);
-}
+PCIDevice *pci_new_multifunction(int devfn, const char *name);
+PCIDevice *pci_new(int devfn, const char *name);
+bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp);
-PCIDevice *pci_create_multifunction(PCIBus *bus, int devfn, bool multifunction,
- const char *name);
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
- bool multifunction,
const char *name);
-PCIDevice *pci_create(PCIBus *bus, int devfn, const char *name);
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name);
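/*
 * Illustrative sketch, not part of this patch: the two-step creation
 * pattern behind pci_new()/pci_realize_and_unref(), which lets board code
 * set properties before realizing the device.  The device type and the
 * "netdev" property are hypothetical; needs "hw/qdev-properties.h" and
 * "qapi/error.h".
 */
static PCIDevice *example_plug_nic(PCIBus *bus)
{
    PCIDevice *dev = pci_new(-1, "example-nic");    /* devfn -1: pick a free slot */

    qdev_prop_set_string(DEVICE(dev), "netdev", "net0");
    pci_realize_and_unref(dev, bus, &error_fatal);
    return dev;
}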
void lsi53c8xx_handle_legacy_cmdline(DeviceState *lsi_dev);
@@ -735,122 +641,7 @@ static inline void pci_irq_pulse(PCIDevice *pci_dev)
pci_irq_deassert(pci_dev);
}
-static inline int pci_is_express(const PCIDevice *d)
-{
- return d->cap_present & QEMU_PCI_CAP_EXPRESS;
-}
-
-static inline int pci_is_express_downstream_port(const PCIDevice *d)
-{
- uint8_t type;
-
- if (!pci_is_express(d) || !d->exp.exp_cap) {
- return 0;
- }
-
- type = pcie_cap_get_type(d);
-
- return type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_ROOT_PORT;
-}
-
-static inline uint32_t pci_config_size(const PCIDevice *d)
-{
- return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
-}
-
-static inline uint16_t pci_get_bdf(PCIDevice *dev)
-{
- return PCI_BUILD_BDF(pci_bus_num(pci_get_bus(dev)), dev->devfn);
-}
-
-uint16_t pci_requester_id(PCIDevice *dev);
-
-/* DMA access functions */
-static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
-{
- return &dev->bus_master_as;
-}
-
-static inline int pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
- void *buf, dma_addr_t len, DMADirection dir)
-{
- dma_memory_rw(pci_get_address_space(dev), addr, buf, len, dir);
- return 0;
-}
-
-static inline int pci_dma_read(PCIDevice *dev, dma_addr_t addr,
- void *buf, dma_addr_t len)
-{
- return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
-}
-
-static inline int pci_dma_write(PCIDevice *dev, dma_addr_t addr,
- const void *buf, dma_addr_t len)
-{
- return pci_dma_rw(dev, addr, (void *) buf, len, DMA_DIRECTION_FROM_DEVICE);
-}
-
-#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
- static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \
- dma_addr_t addr) \
- { \
- return ld##_l##_dma(pci_get_address_space(dev), addr); \
- } \
- static inline void st##_s##_pci_dma(PCIDevice *dev, \
- dma_addr_t addr, uint##_bits##_t val) \
- { \
- st##_s##_dma(pci_get_address_space(dev), addr, val); \
- }
-
-PCI_DMA_DEFINE_LDST(ub, b, 8);
-PCI_DMA_DEFINE_LDST(uw_le, w_le, 16)
-PCI_DMA_DEFINE_LDST(l_le, l_le, 32);
-PCI_DMA_DEFINE_LDST(q_le, q_le, 64);
-PCI_DMA_DEFINE_LDST(uw_be, w_be, 16)
-PCI_DMA_DEFINE_LDST(l_be, l_be, 32);
-PCI_DMA_DEFINE_LDST(q_be, q_be, 64);
-
-#undef PCI_DMA_DEFINE_LDST
-
-static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
- dma_addr_t *plen, DMADirection dir)
-{
- void *buf;
-
- buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir);
- return buf;
-}
-
-static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
- DMADirection dir, dma_addr_t access_len)
-{
- dma_memory_unmap(pci_get_address_space(dev), buffer, len, dir, access_len);
-}
-
-static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
- int alloc_hint)
-{
- qemu_sglist_init(qsg, DEVICE(dev), alloc_hint, pci_get_address_space(dev));
-}
-
-extern const VMStateDescription vmstate_pci_device;
-
-#define VMSTATE_PCI_DEVICE(_field, _state) { \
- .name = (stringify(_field)), \
- .size = sizeof(PCIDevice), \
- .vmsd = &vmstate_pci_device, \
- .flags = VMS_STRUCT, \
- .offset = vmstate_offset_value(_state, _field, PCIDevice), \
-}
-
-#define VMSTATE_PCI_DEVICE_POINTER(_field, _state) { \
- .name = (stringify(_field)), \
- .size = sizeof(PCIDevice), \
- .vmsd = &vmstate_pci_device, \
- .flags = VMS_STRUCT|VMS_POINTER, \
- .offset = vmstate_offset_pointer(_state, _field, PCIDevice), \
-}
-
MSIMessage pci_get_msi_message(PCIDevice *dev, int vector);
+void pci_set_power(PCIDevice *pci_dev, bool state);
#endif
diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h
index ba488818d2..5cd452115a 100644
--- a/include/hw/pci/pci_bridge.h
+++ b/include/hw/pci/pci_bridge.h
@@ -26,8 +26,10 @@
#ifndef QEMU_PCI_BRIDGE_H
#define QEMU_PCI_BRIDGE_H
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pci_bus.h"
+#include "hw/cxl/cxl.h"
+#include "qom/object.h"
typedef struct PCIBridgeWindows PCIBridgeWindows;
@@ -50,7 +52,8 @@ struct PCIBridgeWindows {
};
#define TYPE_PCI_BRIDGE "base-pci-bridge"
-#define PCI_BRIDGE(obj) OBJECT_CHECK(PCIBridge, (obj), TYPE_PCI_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIBridge, PCI_BRIDGE)
+#define IS_PCI_BRIDGE(dev) object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)
struct PCIBridge {
/*< private >*/
@@ -70,15 +73,49 @@ struct PCIBridge {
MemoryRegion address_space_mem;
MemoryRegion address_space_io;
- PCIBridgeWindows *windows;
+ PCIBridgeWindows windows;
pci_map_irq_fn map_irq;
const char *bus_name;
+
+ /* SLT is RO for PCIE to PCIE bridges, but old QEMU versions had it RW */
+ bool pcie_writeable_slt_bug;
};
#define PCI_BRIDGE_DEV_PROP_CHASSIS_NR "chassis_nr"
#define PCI_BRIDGE_DEV_PROP_MSI "msi"
#define PCI_BRIDGE_DEV_PROP_SHPC "shpc"
+typedef struct CXLHost CXLHost;
+
+typedef struct PXBDev {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+
+ uint8_t bus_nr;
+ uint16_t numa_node;
+ bool bypass_iommu;
+} PXBDev;
+
+typedef struct PXBPCIEDev {
+ /*< private >*/
+ PXBDev parent_obj;
+} PXBPCIEDev;
+
+#define TYPE_PXB_DEV "pxb"
+OBJECT_DECLARE_SIMPLE_TYPE(PXBDev, PXB_DEV)
+
+typedef struct PXBCXLDev {
+ /*< private >*/
+ PXBPCIEDev parent_obj;
+ /*< public >*/
+
+ bool hdm_for_passthrough;
+ CXLHost *cxl_host_bridge; /* Pointer to a CXLHost */
+} PXBCXLDev;
+
+#define TYPE_PXB_CXL_DEV "pxb-cxl"
+OBJECT_DECLARE_SIMPLE_TYPE(PXBCXLDev, PXB_CXL_DEV)
int pci_bridge_ssvid_init(PCIDevice *dev, uint8_t offset,
uint16_t svid, uint16_t ssid,
@@ -108,18 +145,18 @@ void pci_bridge_dev_unplug_request_cb(HotplugHandler *hotplug_dev,
/*
 * before qdev initialization (qdev_init()), this function sets bus_name and
- * map_irq callback which are necessry for pci_bridge_initfn() to
+ * map_irq callback which are necessary for pci_bridge_initfn() to
 * initialize the bus.
*/
void pci_bridge_map_irq(PCIBridge *br, const char* bus_name,
pci_map_irq_fn map_irq);
/* TODO: add this define to pci_regs.h in linux and then in qemu. */
-#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */
-#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */
-#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */
-#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */
-#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */
+#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */
+#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */
+#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */
+#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */
+#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */
typedef struct PCIBridgeQemuCap {
uint8_t id; /* Standard PCI capability header field */
@@ -137,6 +174,7 @@ typedef struct PCIBridgeQemuCap {
uint64_t mem_pref_64; /* Prefetchable memory to reserve (64-bit MMIO) */
} PCIBridgeQemuCap;
+#define REDHAT_PCI_CAP_TYPE_OFFSET 3
#define REDHAT_PCI_CAP_RESOURCE_RESERVE 1
/*
@@ -151,6 +189,13 @@ typedef struct PCIResReserve {
uint64_t mem_pref_64;
} PCIResReserve;
+#define REDHAT_PCI_CAP_RES_RESERVE_BUS_RES 4
+#define REDHAT_PCI_CAP_RES_RESERVE_IO 8
+#define REDHAT_PCI_CAP_RES_RESERVE_MEM 16
+#define REDHAT_PCI_CAP_RES_RESERVE_PREF_MEM_32 20
+#define REDHAT_PCI_CAP_RES_RESERVE_PREF_MEM_64 24
+#define REDHAT_PCI_CAP_RES_RESERVE_CAP_SIZE 32
+
int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset,
PCIResReserve res_reserve, Error **errp);
diff --git a/include/hw/pci/pci_bus.h b/include/hw/pci/pci_bus.h
index dfb75752cb..2261312546 100644
--- a/include/hw/pci/pci_bus.h
+++ b/include/hw/pci/pci_bus.h
@@ -10,19 +10,30 @@
* use accessor functions in pci.h
*/
-typedef struct PCIBusClass {
+struct PCIBusClass {
/*< private >*/
BusClass parent_class;
/*< public >*/
- bool (*is_root)(PCIBus *bus);
int (*bus_num)(PCIBus *bus);
uint16_t (*numa_node)(PCIBus *bus);
-} PCIBusClass;
+};
+
+enum PCIBusFlags {
+ /* This bus is the root of a PCI domain */
+ PCI_BUS_IS_ROOT = 0x0001,
+ /* PCIe extended configuration space is accessible on this bus */
+ PCI_BUS_EXTENDED_CONFIG_SPACE = 0x0002,
+ /* This is a CXL Type BUS */
+ PCI_BUS_CXL = 0x0004,
+};
+
+#define PCI_NO_PASID UINT32_MAX
struct PCIBus {
BusState qbus;
- PCIIOMMUFunc iommu_fn;
+ enum PCIBusFlags flags;
+ const PCIIOMMUOps *iommu_ops;
void *iommu_opaque;
uint8_t devfn_min;
uint32_t slot_reserved_mask;
@@ -46,4 +57,19 @@ struct PCIBus {
Notifier machine_done;
};
+static inline bool pci_bus_is_cxl(PCIBus *bus)
+{
+ return !!(bus->flags & PCI_BUS_CXL);
+}
+
+static inline bool pci_bus_is_root(PCIBus *bus)
+{
+ return !!(bus->flags & PCI_BUS_IS_ROOT);
+}
+
+static inline bool pci_bus_allows_extended_config_space(PCIBus *bus)
+{
+ return !!(bus->flags & PCI_BUS_EXTENDED_CONFIG_SPACE);
+}
+
#endif /* QEMU_PCI_BUS_H */
diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h
new file mode 100644
index 0000000000..d3dd0f64b2
--- /dev/null
+++ b/include/hw/pci/pci_device.h
@@ -0,0 +1,350 @@
+#ifndef QEMU_PCI_DEVICE_H
+#define QEMU_PCI_DEVICE_H
+
+#include "hw/pci/pci.h"
+#include "hw/pci/pcie.h"
+
+#define TYPE_PCI_DEVICE "pci-device"
+typedef struct PCIDeviceClass PCIDeviceClass;
+DECLARE_OBJ_CHECKERS(PCIDevice, PCIDeviceClass,
+ PCI_DEVICE, TYPE_PCI_DEVICE)
+
+/*
+ * Implemented by devices that can be plugged on CXL buses. In the spec, this is
+ * actually a "CXL Component", but we name it a device to match the PCI naming.
+ */
+#define INTERFACE_CXL_DEVICE "cxl-device"
+
+/* Implemented by devices that can be plugged on PCI Express buses */
+#define INTERFACE_PCIE_DEVICE "pci-express-device"
+
+/* Implemented by devices that can be plugged on Conventional PCI buses */
+#define INTERFACE_CONVENTIONAL_PCI_DEVICE "conventional-pci-device"
+
+struct PCIDeviceClass {
+ DeviceClass parent_class;
+
+ void (*realize)(PCIDevice *dev, Error **errp);
+ PCIUnregisterFunc *exit;
+ PCIConfigReadFunc *config_read;
+ PCIConfigWriteFunc *config_write;
+
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint8_t revision;
+ uint16_t class_id;
+ uint16_t subsystem_vendor_id; /* only for header type = 0 */
+ uint16_t subsystem_id; /* only for header type = 0 */
+
+ const char *romfile; /* rom bar */
+};
+
+enum PCIReqIDType {
+ PCI_REQ_ID_INVALID = 0,
+ PCI_REQ_ID_BDF,
+ PCI_REQ_ID_SECONDARY_BUS,
+ PCI_REQ_ID_MAX,
+};
+typedef enum PCIReqIDType PCIReqIDType;
+
+struct PCIReqIDCache {
+ PCIDevice *dev;
+ PCIReqIDType type;
+};
+typedef struct PCIReqIDCache PCIReqIDCache;
+
+struct PCIDevice {
+ DeviceState qdev;
+ bool partially_hotplugged;
+ bool has_power;
+
+ /* PCI config space */
+ uint8_t *config;
+
+ /*
+ * Used to enable config checks on load. Note that writable bits are
+ * never checked even if set in cmask.
+ */
+ uint8_t *cmask;
+
+ /* Used to implement R/W bytes */
+ uint8_t *wmask;
+
+    /* Used to implement RW1C (Write 1 to Clear) bytes */
+ uint8_t *w1cmask;
+
+ /* Used to allocate config space for capabilities. */
+ uint8_t *used;
+
+ /* the following fields are read only */
+ int32_t devfn;
+ /*
+     * Cached device to fetch the requester ID from, to avoid walking the
+     * PCI tree every time we issue a PCI request (e.g., MSI). For a
+     * conventional PCI root complex, this field is meaningless.
+ */
+ PCIReqIDCache requester_id_cache;
+ char name[64];
+ PCIIORegion io_regions[PCI_NUM_REGIONS];
+ AddressSpace bus_master_as;
+ MemoryRegion bus_master_container_region;
+ MemoryRegion bus_master_enable_region;
+
+ /* do not access the following fields */
+ PCIConfigReadFunc *config_read;
+ PCIConfigWriteFunc *config_write;
+
+ /* Legacy PCI VGA regions */
+ MemoryRegion *vga_regions[QEMU_PCI_VGA_NUM_REGIONS];
+ bool has_vga;
+
+ /* Current IRQ levels. Used internally by the generic PCI code. */
+ uint8_t irq_state;
+
+ /* Capability bits */
+ uint32_t cap_present;
+
+ /* Offset of MSI-X capability in config space */
+ uint8_t msix_cap;
+
+ /* MSI-X entries */
+ int msix_entries_nr;
+
+ /* Space to store MSIX table & pending bit array */
+ uint8_t *msix_table;
+ uint8_t *msix_pba;
+
+ /* May be used by INTx or MSI during interrupt notification */
+ void *irq_opaque;
+
+ MSITriggerFunc *msi_trigger;
+ MSIPrepareMessageFunc *msi_prepare_message;
+ MSIxPrepareMessageFunc *msix_prepare_message;
+
+ /* MemoryRegion container for msix exclusive BAR setup */
+ MemoryRegion msix_exclusive_bar;
+ /* Memory Regions for MSIX table and pending bit entries. */
+ MemoryRegion msix_table_mmio;
+ MemoryRegion msix_pba_mmio;
+ /* Reference-count for entries actually in use by driver. */
+ unsigned *msix_entry_used;
+ /* MSIX function mask set or MSIX disabled */
+ bool msix_function_masked;
+ /* Version id needed for VMState */
+ int32_t version_id;
+
+ /* Offset of MSI capability in config space */
+ uint8_t msi_cap;
+
+ /* PCI Express */
+ PCIExpressDevice exp;
+
+ /* SHPC */
+ SHPCDevice *shpc;
+
+ /* Location of option rom */
+ char *romfile;
+ uint32_t romsize;
+ bool has_rom;
+ MemoryRegion rom;
+ uint32_t rom_bar;
+
+ /* INTx routing notifier */
+ PCIINTxRoutingNotifier intx_routing_notifier;
+
+ /* MSI-X notifiers */
+ MSIVectorUseNotifier msix_vector_use_notifier;
+ MSIVectorReleaseNotifier msix_vector_release_notifier;
+ MSIVectorPollNotifier msix_vector_poll_notifier;
+
+ /* ID of standby device in net_failover pair */
+ char *failover_pair_id;
+ uint32_t acpi_index;
+};
+
+static inline int pci_intx(PCIDevice *pci_dev)
+{
+ return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
+}
+
+static inline int pci_is_cxl(const PCIDevice *d)
+{
+ return d->cap_present & QEMU_PCIE_CAP_CXL;
+}
+
+static inline int pci_is_express(const PCIDevice *d)
+{
+ return d->cap_present & QEMU_PCI_CAP_EXPRESS;
+}
+
+static inline int pci_is_express_downstream_port(const PCIDevice *d)
+{
+ uint8_t type;
+
+ if (!pci_is_express(d) || !d->exp.exp_cap) {
+ return 0;
+ }
+
+ type = pcie_cap_get_type(d);
+
+ return type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_ROOT_PORT;
+}
+
+static inline int pci_is_vf(const PCIDevice *d)
+{
+ return d->exp.sriov_vf.pf != NULL;
+}
+
+static inline uint32_t pci_config_size(const PCIDevice *d)
+{
+ return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
+}
+
+static inline uint16_t pci_get_bdf(PCIDevice *dev)
+{
+ return PCI_BUILD_BDF(pci_bus_num(pci_get_bus(dev)), dev->devfn);
+}
+
+uint16_t pci_requester_id(PCIDevice *dev);
+
+/* DMA access functions */
+static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
+{
+ return &dev->bus_master_as;
+}
+
+/**
+ * pci_dma_rw: Read from or write to an address space from PCI device.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (e.g. unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @dev: #PCIDevice doing the memory access
+ * @addr: address within the #PCIDevice address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @dir: indicates the transfer direction
+ */
+static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir, MemTxAttrs attrs)
+{
+ return dma_memory_rw(pci_get_address_space(dev), addr, buf, len,
+ dir, attrs);
+}
+
+/**
+ * pci_dma_read: Read from an address space from PCI device.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (e.g. unassigned memory, device rejected the transaction,
+ * IOMMU fault). Called within RCU critical section.
+ *
+ * @dev: #PCIDevice doing the memory access
+ * @addr: address within the #PCIDevice address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ return pci_dma_rw(dev, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
+}
+
+/**
+ * pci_dma_write: Write to address space from PCI device.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (e.g. unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @dev: #PCIDevice doing the memory access
+ * @addr: address within the #PCIDevice address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ */
+static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+{
+ return pci_dma_rw(dev, addr, (void *) buf, len,
+ DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
+}
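/*
 * Illustrative sketch, not part of this patch: with the DMA helpers now
 * returning a MemTxResult, device models are expected to check it.  The
 * descriptor-fetch wrapper below is hypothetical.
 */
static inline bool example_fetch_descriptor(PCIDevice *dev, dma_addr_t addr,
                                            void *desc, dma_addr_t desc_size)
{
    if (pci_dma_read(dev, addr, desc, desc_size) != MEMTX_OK) {
        /* e.g. latch a device-specific DMA error status here */
        return false;
    }
    return true;
}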
+
+#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
+ static inline MemTxResult ld##_l##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, \
+ uint##_bits##_t *val, \
+ MemTxAttrs attrs) \
+ { \
+ return ld##_l##_dma(pci_get_address_space(dev), addr, val, attrs); \
+ } \
+ static inline MemTxResult st##_s##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, \
+ uint##_bits##_t val, \
+ MemTxAttrs attrs) \
+ { \
+ return st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \
+ }
+
+PCI_DMA_DEFINE_LDST(ub, b, 8);
+PCI_DMA_DEFINE_LDST(uw_le, w_le, 16)
+PCI_DMA_DEFINE_LDST(l_le, l_le, 32);
+PCI_DMA_DEFINE_LDST(q_le, q_le, 64);
+PCI_DMA_DEFINE_LDST(uw_be, w_be, 16)
+PCI_DMA_DEFINE_LDST(l_be, l_be, 32);
+PCI_DMA_DEFINE_LDST(q_be, q_be, 64);
+
+#undef PCI_DMA_DEFINE_LDST
+
+/**
+ * pci_dma_map: Map device PCI address space range into host virtual address
+ * @dev: #PCIDevice to be accessed
+ * @addr: address within that device's address space
+ * @plen: pointer to length of buffer; updated on return to indicate
+ * if only a subset of the requested range has been mapped
+ * @dir: indicates the transfer direction
+ *
+ * Return: A host pointer, or %NULL if the resources needed to
+ * perform the mapping are exhausted (in that case *@plen
+ * is set to zero).
+ */
+static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
+ dma_addr_t *plen, DMADirection dir)
+{
+ return dma_memory_map(pci_get_address_space(dev), addr, plen, dir,
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len)
+{
+ dma_memory_unmap(pci_get_address_space(dev), buffer, len, dir, access_len);
+}
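/*
 * Illustrative sketch, not part of this patch: mapping a guest buffer for
 * direct access and falling back when the mapping fails or is truncated.
 * The fill helper itself is hypothetical.
 */
static inline bool example_fill_guest_buffer(PCIDevice *dev, dma_addr_t addr,
                                             dma_addr_t len, uint8_t pattern)
{
    dma_addr_t mapped_len = len;
    void *p = pci_dma_map(dev, addr, &mapped_len, DMA_DIRECTION_FROM_DEVICE);

    if (!p || mapped_len != len) {
        if (p) {
            /* nothing was written, so access_len is 0 */
            pci_dma_unmap(dev, p, mapped_len, DMA_DIRECTION_FROM_DEVICE, 0);
        }
        return false;           /* caller could fall back to pci_dma_write() */
    }
    memset(p, pattern, len);
    pci_dma_unmap(dev, p, len, DMA_DIRECTION_FROM_DEVICE, len);
    return true;
}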
+
+static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
+ int alloc_hint)
+{
+ qemu_sglist_init(qsg, DEVICE(dev), alloc_hint, pci_get_address_space(dev));
+}
+
+extern const VMStateDescription vmstate_pci_device;
+
+#define VMSTATE_PCI_DEVICE(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(PCIDevice), \
+ .vmsd = &vmstate_pci_device, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, PCIDevice), \
+}
+
+#define VMSTATE_PCI_DEVICE_POINTER(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(PCIDevice), \
+ .vmsd = &vmstate_pci_device, \
+ .flags = VMS_STRUCT | VMS_POINTER, \
+ .offset = vmstate_offset_pointer(_state, _field, PCIDevice), \
+}
+
+#endif
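/*
 * Illustrative sketch, not part of this patch: a device migration
 * description that embeds the PCI core state via VMSTATE_PCI_DEVICE().
 * ExampleState and its fields are hypothetical.
 */
typedef struct ExampleState {
    PCIDevice parent_obj;
    uint32_t regs[4];
} ExampleState;

static const VMStateDescription vmstate_example = {
    .name = "example-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, ExampleState),   /* PCI core first */
        VMSTATE_UINT32_ARRAY(regs, ExampleState, 4),
        VMSTATE_END_OF_LIST()
    }
};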
diff --git a/include/hw/pci/pci_host.h b/include/hw/pci/pci_host.h
index ba31595fc7..e52d8ec2cd 100644
--- a/include/hw/pci/pci_host.h
+++ b/include/hw/pci/pci_host.h
@@ -29,14 +29,12 @@
#define PCI_HOST_H
#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define PCI_HOST_BYPASS_IOMMU "bypass-iommu"
#define TYPE_PCI_HOST_BRIDGE "pci-host-bridge"
-#define PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(PCIHostState, (obj), TYPE_PCI_HOST_BRIDGE)
-#define PCI_HOST_BRIDGE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCIHostBridgeClass, (klass), TYPE_PCI_HOST_BRIDGE)
-#define PCI_HOST_BRIDGE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCIHostBridgeClass, (obj), TYPE_PCI_HOST_BRIDGE)
+OBJECT_DECLARE_TYPE(PCIHostState, PCIHostBridgeClass, PCI_HOST_BRIDGE)
struct PCIHostState {
SysBusDevice busdev;
@@ -45,16 +43,18 @@ struct PCIHostState {
MemoryRegion data_mem;
MemoryRegion mmcfg;
uint32_t config_reg;
+ bool mig_enabled;
PCIBus *bus;
+ bool bypass_iommu;
QLIST_ENTRY(PCIHostState) next;
};
-typedef struct PCIHostBridgeClass {
+struct PCIHostBridgeClass {
SysBusDeviceClass parent_class;
const char *(*root_bus_path)(PCIHostState *, PCIBus *);
-} PCIHostBridgeClass;
+};
/* common internal helpers for PCI/PCIe hosts, cut off overflows */
void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
@@ -62,8 +62,8 @@ void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr,
uint32_t limit, uint32_t len);
-void pci_data_write(PCIBus *s, uint32_t addr, uint32_t val, int len);
-uint32_t pci_data_read(PCIBus *s, uint32_t addr, int len);
+void pci_data_write(PCIBus *s, uint32_t addr, uint32_t val, unsigned len);
+uint32_t pci_data_read(PCIBus *s, uint32_t addr, unsigned len);
extern const MemoryRegionOps pci_host_conf_le_ops;
extern const MemoryRegionOps pci_host_conf_be_ops;
diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h
index eeb33018ad..f1a53fea8d 100644
--- a/include/hw/pci/pci_ids.h
+++ b/include/hw/pci/pci_ids.h
@@ -26,6 +26,7 @@
#define PCI_CLASS_STORAGE_SATA 0x0106
#define PCI_CLASS_STORAGE_SAS 0x0107
#define PCI_CLASS_STORAGE_EXPRESS 0x0108
+#define PCI_CLASS_STORAGE_UFS 0x0109
#define PCI_CLASS_STORAGE_OTHER 0x0180
#define PCI_BASE_CLASS_NETWORK 0x02
@@ -53,6 +54,7 @@
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_CXL 0x0502
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
@@ -156,6 +158,9 @@
/* Vendors and devices. Sort key: vendor first, device next. */
+/* Ref: PCIe r6.0 Table 6-32 */
+#define PCI_VENDOR_ID_PCI_SIG 0x0001
+
#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
#define PCI_DEVICE_ID_LSI_53C810 0x0001
#define PCI_DEVICE_ID_LSI_53C895A 0x0012
@@ -164,7 +169,7 @@
#define PCI_DEVICE_ID_LSI_SAS0079 0x0079
#define PCI_VENDOR_ID_DEC 0x1011
-#define PCI_DEVICE_ID_DEC_21154 0x0026
+#define PCI_DEVICE_ID_DEC_21143 0x0019
#define PCI_VENDOR_ID_CIRRUS 0x1013
@@ -174,6 +179,8 @@
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#define PCI_VENDOR_ID_HP 0x103c
+
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_VENDOR_ID_MOTOROLA 0x1057
@@ -191,6 +198,9 @@
#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+#define PCI_VENDOR_ID_ORACLE 0x108e
+#define PCI_DEVICE_ID_REMOTE_IOHUB 0xb000
+
#define PCI_VENDOR_ID_CMD 0x1095
#define PCI_DEVICE_ID_CMD_646 0x0646
@@ -200,14 +210,17 @@
#define PCI_VENDOR_ID_XILINX 0x10ee
#define PCI_VENDOR_ID_VIA 0x1106
-#define PCI_DEVICE_ID_VIA_ISA_BRIDGE 0x0686
+#define PCI_DEVICE_ID_VIA_82C686B_ISA 0x0686
#define PCI_DEVICE_ID_VIA_IDE 0x0571
#define PCI_DEVICE_ID_VIA_UHCI 0x3038
-#define PCI_DEVICE_ID_VIA_ACPI 0x3057
+#define PCI_DEVICE_ID_VIA_82C686B_PM 0x3057
#define PCI_DEVICE_ID_VIA_AC97 0x3058
#define PCI_DEVICE_ID_VIA_MC97 0x3068
+#define PCI_DEVICE_ID_VIA_8231_ISA 0x8231
+#define PCI_DEVICE_ID_VIA_8231_PM 0x8235
#define PCI_VENDOR_ID_MARVELL 0x11ab
+#define PCI_DEVICE_ID_MARVELL_MV6436X 0x6460
#define PCI_VENDOR_ID_SILICON_MOTION 0x126f
#define PCI_DEVICE_ID_SM501 0x0501
@@ -220,6 +233,9 @@
#define PCI_VENDOR_ID_FREESCALE 0x1957
#define PCI_DEVICE_ID_MPC8533E 0x0030
+#define PCI_VENDOR_ID_BAIDU 0x1d22
+#define PCI_DEVICE_ID_KUNLUN_VF 0x3685
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_82378 0x0484
#define PCI_DEVICE_ID_INTEL_82441 0x1237
@@ -227,6 +243,7 @@
#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
#define PCI_DEVICE_ID_INTEL_82801D 0x24CD
#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_NVME 0x5845
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
@@ -271,4 +288,6 @@
#define PCI_VENDOR_ID_SYNOPSYS 0x16C3
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+
#endif
diff --git a/include/hw/pci/pci_regs.h b/include/hw/pci/pci_regs.h
index 7a83142578..a590140962 100644
--- a/include/hw/pci/pci_regs.h
+++ b/include/hw/pci/pci_regs.h
@@ -1,3 +1,9 @@
+#ifndef HW_PCI_PCI_REGS_H
+#define HW_PCI_PCI_REGS_H
+
#include "standard-headers/linux/pci_regs.h"
#define PCI_PM_CAP_VER_1_1 0x0002 /* PCI PM spec ver. 1.1 */
+#define PCI_PM_CAP_VER_1_2 0x0003 /* PCI PM spec ver. 1.2 */
+
+#endif
diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h
index cd318646a2..11f5a91bbb 100644
--- a/include/hw/pci/pcie.h
+++ b/include/hw/pci/pcie.h
@@ -21,21 +21,13 @@
#ifndef QEMU_PCIE_H
#define QEMU_PCIE_H
-#include "hw/hw.h"
#include "hw/pci/pci_regs.h"
#include "hw/pci/pcie_regs.h"
#include "hw/pci/pcie_aer.h"
+#include "hw/pci/pcie_sriov.h"
#include "hw/hotplug.h"
typedef enum {
- /* for attention and power indicator */
- PCI_EXP_HP_IND_RESERVED = PCI_EXP_SLTCTL_IND_RESERVED,
- PCI_EXP_HP_IND_ON = PCI_EXP_SLTCTL_IND_ON,
- PCI_EXP_HP_IND_BLINK = PCI_EXP_SLTCTL_IND_BLINK,
- PCI_EXP_HP_IND_OFF = PCI_EXP_SLTCTL_IND_OFF,
-} PCIExpressIndicator;
-
-typedef enum {
/* these bits must match the bits in Slot Control/Status registers.
* PCI_EXP_HP_EV_xxx = PCI_EXP_SLTCTL_xxxE = PCI_EXP_SLTSTA_xxx
*
@@ -79,6 +71,14 @@ struct PCIExpressDevice {
/* Offset of ATS capability in config space */
uint16_t ats_cap;
+
+ /* ACS */
+ uint16_t acs_cap;
+
+ /* SR/IOV */
+ uint16_t sriov_cap;
+ PCIESriovPF sriov_pf;
+ PCIESriovVF sriov_vf;
};
#define COMPAT_PROP_PCP "power_controller_present"
@@ -93,6 +93,7 @@ void pcie_cap_exit(PCIDevice *dev);
int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset);
void pcie_cap_v1_exit(PCIDevice *dev);
uint8_t pcie_cap_get_type(const PCIDevice *dev);
+uint8_t pcie_cap_get_version(const PCIDevice *dev);
void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector);
uint8_t pcie_cap_flags_get_vector(PCIDevice *dev);
@@ -102,12 +103,15 @@ void pcie_cap_deverr_reset(PCIDevice *dev);
void pcie_cap_lnkctl_init(PCIDevice *dev);
void pcie_cap_lnkctl_reset(PCIDevice *dev);
-void pcie_cap_slot_init(PCIDevice *dev, uint16_t slot);
+void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s);
void pcie_cap_slot_reset(PCIDevice *dev);
+void pcie_cap_slot_get(PCIDevice *dev, uint16_t *slt_ctl, uint16_t *slt_sta);
void pcie_cap_slot_write_config(PCIDevice *dev,
+ uint16_t old_slt_ctl, uint16_t old_slt_sta,
uint32_t addr, uint32_t val, int len);
int pcie_cap_slot_post_load(void *opaque, int version_id);
void pcie_cap_slot_push_attention_button(PCIDevice *dev);
+void pcie_cap_slot_enable_power(PCIDevice *dev);
void pcie_cap_root_init(PCIDevice *dev);
void pcie_cap_root_reset(PCIDevice *dev);
@@ -128,10 +132,15 @@ void pcie_add_capability(PCIDevice *dev,
uint16_t offset, uint16_t size);
void pcie_sync_bridge_lnk(PCIDevice *dev);
-void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn);
+void pcie_acs_init(PCIDevice *dev, uint16_t offset);
+void pcie_acs_reset(PCIDevice *dev);
+
+void pcie_ari_init(PCIDevice *dev, uint16_t offset);
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num);
-void pcie_ats_init(PCIDevice *dev, uint16_t offset);
+void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned);
+void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp);
void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
void pcie_cap_slot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
diff --git a/include/hw/pci/pcie_aer.h b/include/hw/pci/pcie_aer.h
index 729a9439c8..4a9f0ea69d 100644
--- a/include/hw/pci/pcie_aer.h
+++ b/include/hw/pci/pcie_aer.h
@@ -21,7 +21,7 @@
#ifndef QEMU_PCIE_AER_H
#define QEMU_PCIE_AER_H
-#include "hw/hw.h"
+#include "hw/pci/pci_regs.h"
/* definitions which PCIExpressDevice uses */
@@ -40,7 +40,7 @@ struct PCIEAERLog {
* The specified value will be clipped down to PCIE_AER_LOG_MAX_LIMIT
* to avoid unreasonable memory usage.
* I bet that 128 log size would be big enough, otherwise too many errors
- * for system to function normaly. But could consecutive errors occur?
+ * for the system to function normally. But could consecutive errors occur?
*/
#define PCIE_AER_LOG_MAX_DEFAULT 8
#define PCIE_AER_LOG_MAX_LIMIT 128
@@ -100,4 +100,5 @@ void pcie_aer_root_write_config(PCIDevice *dev,
uint32_t addr, uint32_t val, int len,
uint32_t root_cmd_prev);
+int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err);
#endif /* QEMU_PCIE_AER_H */
diff --git a/include/hw/pci/pcie_doe.h b/include/hw/pci/pcie_doe.h
new file mode 100644
index 0000000000..87dc17dcef
--- /dev/null
+++ b/include/hw/pci/pcie_doe.h
@@ -0,0 +1,122 @@
+/*
+ * PCIe Data Object Exchange
+ *
+ * Copyright (C) 2021 Avery Design Systems, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef PCIE_DOE_H
+#define PCIE_DOE_H
+
+#include "qemu/range.h"
+#include "hw/register.h"
+
+/*
+ * Reference:
+ * PCIe r6.0 - 7.9.24 Data Object Exchange Extended Capability
+ */
+/* Capabilities Register - r6.0 7.9.24.2 */
+#define PCI_EXP_DOE_CAP 0x04
+REG32(PCI_DOE_CAP_REG, 0)
+ FIELD(PCI_DOE_CAP_REG, INTR_SUPP, 0, 1)
+ FIELD(PCI_DOE_CAP_REG, DOE_INTR_MSG_NUM, 1, 11)
+
+/* Control Register - r6.0 7.9.24.3 */
+#define PCI_EXP_DOE_CTRL 0x08
+REG32(PCI_DOE_CAP_CONTROL, 0)
+ FIELD(PCI_DOE_CAP_CONTROL, DOE_ABORT, 0, 1)
+ FIELD(PCI_DOE_CAP_CONTROL, DOE_INTR_EN, 1, 1)
+ FIELD(PCI_DOE_CAP_CONTROL, DOE_GO, 31, 1)
+
+/* Status Register - r6.0 7.9.24.4 */
+#define PCI_EXP_DOE_STATUS 0x0c
+REG32(PCI_DOE_CAP_STATUS, 0)
+ FIELD(PCI_DOE_CAP_STATUS, DOE_BUSY, 0, 1)
+ FIELD(PCI_DOE_CAP_STATUS, DOE_INTR_STATUS, 1, 1)
+ FIELD(PCI_DOE_CAP_STATUS, DOE_ERROR, 2, 1)
+ FIELD(PCI_DOE_CAP_STATUS, DATA_OBJ_RDY, 31, 1)
+
+/* Write Data Mailbox Register - r6.0 7.9.24.5 */
+#define PCI_EXP_DOE_WR_DATA_MBOX 0x10
+
+/* Read Data Mailbox Register - r6.0 7.9.24.6 */
+#define PCI_EXP_DOE_RD_DATA_MBOX 0x14
+
+/* PCI-SIG defined Data Object Types - r6.0 Table 6-32 */
+#define PCI_SIG_DOE_DISCOVERY 0x00
+
+#define PCI_DOE_DW_SIZE_MAX (1 << 18)
+#define PCI_DOE_PROTOCOL_NUM_MAX 256
+
+#define DATA_OBJ_BUILD_HEADER1(v, p) (((p) << 16) | (v))
+#define DATA_OBJ_LEN_MASK(len) ((len) & (PCI_DOE_DW_SIZE_MAX - 1))
+
+typedef struct DOEHeader DOEHeader;
+typedef struct DOEProtocol DOEProtocol;
+typedef struct DOECap DOECap;
+
+struct DOEHeader {
+ uint16_t vendor_id;
+ uint8_t data_obj_type;
+ uint8_t reserved;
+ uint32_t length;
+} QEMU_PACKED;
+
+/* Protocol info and response callback */
+struct DOEProtocol {
+ uint16_t vendor_id;
+ uint8_t data_obj_type;
+ bool (*handle_request)(DOECap *);
+};
+
+struct DOECap {
+ /* Owner */
+ PCIDevice *pdev;
+
+ uint16_t offset;
+
+ struct {
+ bool intr;
+ uint16_t vec;
+ } cap;
+
+ struct {
+ bool abort;
+ bool intr;
+ bool go;
+ } ctrl;
+
+ struct {
+ bool busy;
+ bool intr;
+ bool error;
+ bool ready;
+ } status;
+
+ uint32_t *write_mbox;
+ uint32_t *read_mbox;
+
+ /* Mailbox position indicator */
+ uint32_t read_mbox_idx;
+ uint32_t read_mbox_len;
+ uint32_t write_mbox_len;
+
+    /* Protocols and their response callbacks */
+ DOEProtocol *protocols;
+ uint16_t protocol_num;
+};
+
+void pcie_doe_init(PCIDevice *pdev, DOECap *doe_cap, uint16_t offset,
+ DOEProtocol *protocols, bool intr, uint16_t vec);
+void pcie_doe_fini(DOECap *doe_cap);
+bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size,
+ uint32_t *buf);
+void pcie_doe_write_config(DOECap *doe_cap, uint32_t addr,
+ uint32_t val, int size);
+uint32_t pcie_doe_build_protocol(DOEProtocol *p);
+void *pcie_doe_get_write_mbox_ptr(DOECap *doe_cap);
+void pcie_doe_set_rsp(DOECap *doe_cap, void *rsp);
+uint32_t pcie_doe_get_obj_len(void *obj);
+#endif /* PCIE_DOE_H */
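/*
 * Illustrative sketch, not part of this patch: registering one vendor
 * protocol with the DOE capability.  The handler, vendor ID and object
 * type are hypothetical; protocol arrays are terminated by an empty entry.
 */
static bool example_doe_handle(DOECap *doe_cap)
{
    /* parse doe_cap->write_mbox here, then queue a reply */
    /* pcie_doe_set_rsp(doe_cap, &reply); */
    return true;
}

static DOEProtocol example_doe_protocols[] = {
    { .vendor_id = 0x1234, .data_obj_type = 0x01,
      .handle_request = example_doe_handle },
    { }
};

/* in realize():
 *   pcie_doe_init(pdev, &s->doe_cap, cap_offset, example_doe_protocols,
 *                 false, 0);
 */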
diff --git a/include/hw/pci/pcie_host.h b/include/hw/pci/pcie_host.h
index 3f7b9886d1..82d92177da 100644
--- a/include/hw/pci/pcie_host.h
+++ b/include/hw/pci/pcie_host.h
@@ -23,10 +23,10 @@
#include "hw/pci/pci_host.h"
#include "exec/memory.h"
+#include "qom/object.h"
#define TYPE_PCIE_HOST_BRIDGE "pcie-host-bridge"
-#define PCIE_HOST_BRIDGE(obj) \
- OBJECT_CHECK(PCIExpressHost, (obj), TYPE_PCIE_HOST_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIExpressHost, PCIE_HOST_BRIDGE)
#define PCIE_HOST_MCFG_BASE "MCFG"
#define PCIE_HOST_MCFG_SIZE "mcfg_size"
@@ -60,15 +60,15 @@ void pcie_host_mmcfg_update(PCIExpressHost *e,
/*
* PCI express ECAM (Enhanced Configuration Address Mapping) format.
* AKA mmcfg address
- * bit 20 - 28: bus number
+ * bit 20 - 27: bus number
* bit 15 - 19: device number
* bit 12 - 14: function number
* bit 0 - 11: offset in configuration space of a given device
*/
-#define PCIE_MMCFG_SIZE_MAX (1ULL << 29)
+#define PCIE_MMCFG_SIZE_MAX (1ULL << 28)
#define PCIE_MMCFG_SIZE_MIN (1ULL << 20)
#define PCIE_MMCFG_BUS_BIT 20
-#define PCIE_MMCFG_BUS_MASK 0x1ff
+#define PCIE_MMCFG_BUS_MASK 0xff
#define PCIE_MMCFG_DEVFN_BIT 12
#define PCIE_MMCFG_DEVFN_MASK 0xff
#define PCIE_MMCFG_CONFOFFSET_MASK 0xfff
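/*
 * Illustrative sketch, not part of this patch: decoding an ECAM offset
 * with the corrected masks (bus number now limited to 8 bits / 256 buses).
 */
static inline void example_decode_mmcfg(hwaddr mmcfg_addr, uint8_t *bus,
                                        uint8_t *devfn, uint16_t *config_off)
{
    *bus = (mmcfg_addr >> PCIE_MMCFG_BUS_BIT) & PCIE_MMCFG_BUS_MASK;
    *devfn = (mmcfg_addr >> PCIE_MMCFG_DEVFN_BIT) & PCIE_MMCFG_DEVFN_MASK;
    *config_off = mmcfg_addr & PCIE_MMCFG_CONFOFFSET_MASK;
}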
diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h
index df242a0caf..90e6cf45b8 100644
--- a/include/hw/pci/pcie_port.h
+++ b/include/hw/pci/pcie_port.h
@@ -23,9 +23,11 @@
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci_device.h"
+#include "qom/object.h"
#define TYPE_PCIE_PORT "pcie-port"
-#define PCIE_PORT(obj) OBJECT_CHECK(PCIEPort, (obj), TYPE_PCIE_PORT)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIEPort, PCIE_PORT)
struct PCIEPort {
/*< private >*/
@@ -38,8 +40,12 @@ struct PCIEPort {
void pcie_port_init_reg(PCIDevice *d);
+PCIDevice *pcie_find_port_by_pn(PCIBus *bus, uint8_t pn);
+PCIDevice *pcie_find_port_first(PCIBus *bus);
+int pcie_count_ds_ports(PCIBus *bus);
+
#define TYPE_PCIE_SLOT "pcie-slot"
-#define PCIE_SLOT(obj) OBJECT_CHECK(PCIESlot, (obj), TYPE_PCIE_SLOT)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIESlot, PCIE_SLOT)
struct PCIESlot {
/*< private >*/
@@ -53,6 +59,15 @@ struct PCIESlot {
PCIExpLinkSpeed speed;
PCIExpLinkWidth width;
+ /* Disable ACS (really for a pcie_root_port) */
+ bool disable_acs;
+
+ /* Indicates whether any type of hot-plug is allowed on the slot */
+ bool hotplug;
+
+    /* compat knob for broken ACPI hotplug, kept to preserve the 6.1 ABI */
+ bool hide_native_hotplug_cap;
+
QLIST_ENTRY(PCIESlot) next;
};
@@ -62,14 +77,14 @@ int pcie_chassis_add_slot(struct PCIESlot *slot);
void pcie_chassis_del_slot(PCIESlot *s);
#define TYPE_PCIE_ROOT_PORT "pcie-root-port-base"
-#define PCIE_ROOT_PORT_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCIERootPortClass, (klass), TYPE_PCIE_ROOT_PORT)
-#define PCIE_ROOT_PORT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCIERootPortClass, (obj), TYPE_PCIE_ROOT_PORT)
+typedef struct PCIERootPortClass PCIERootPortClass;
+DECLARE_CLASS_CHECKERS(PCIERootPortClass, PCIE_ROOT_PORT,
+ TYPE_PCIE_ROOT_PORT)
-typedef struct PCIERootPortClass {
+struct PCIERootPortClass {
PCIDeviceClass parent_class;
DeviceRealize parent_realize;
+ ResettablePhases parent_phases;
uint8_t (*aer_vector)(const PCIDevice *dev);
int (*interrupts_init)(PCIDevice *dev, Error **errp);
@@ -78,7 +93,8 @@ typedef struct PCIERootPortClass {
int exp_offset;
int aer_offset;
int ssvid_offset;
+ int acs_offset; /* If nonzero, optional ACS capability offset */
int ssid;
-} PCIERootPortClass;
+};
#endif /* QEMU_PCIE_PORT_H */
diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h
index ad4e7808b8..9d3b6868dc 100644
--- a/include/hw/pci/pcie_regs.h
+++ b/include/hw/pci/pcie_regs.h
@@ -39,6 +39,8 @@ typedef enum PCIExpLinkSpeed {
QEMU_PCI_EXP_LNK_5GT,
QEMU_PCI_EXP_LNK_8GT,
QEMU_PCI_EXP_LNK_16GT,
+ QEMU_PCI_EXP_LNK_32GT,
+ QEMU_PCI_EXP_LNK_64GT,
} PCIExpLinkSpeed;
#define QEMU_PCI_EXP_LNKCAP_MLS(speed) (speed)
@@ -66,20 +68,6 @@ typedef enum PCIExpLinkWidth {
#define PCI_EXP_SLTCAP_PSN_SHIFT ctz32(PCI_EXP_SLTCAP_PSN)
-#define PCI_EXP_SLTCTL_IND_RESERVED 0x0
-#define PCI_EXP_SLTCTL_IND_ON 0x1
-#define PCI_EXP_SLTCTL_IND_BLINK 0x2
-#define PCI_EXP_SLTCTL_IND_OFF 0x3
-#define PCI_EXP_SLTCTL_AIC_SHIFT ctz32(PCI_EXP_SLTCTL_AIC)
-#define PCI_EXP_SLTCTL_AIC_OFF \
- (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_AIC_SHIFT)
-
-#define PCI_EXP_SLTCTL_PIC_SHIFT ctz32(PCI_EXP_SLTCTL_PIC)
-#define PCI_EXP_SLTCTL_PIC_OFF \
- (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_PIC_SHIFT)
-#define PCI_EXP_SLTCTL_PIC_ON \
- (PCI_EXP_SLTCTL_IND_ON << PCI_EXP_SLTCTL_PIC_SHIFT)
-
#define PCI_EXP_SLTCTL_SUPPORTED \
(PCI_EXP_SLTCTL_ABPE | \
PCI_EXP_SLTCTL_PDCE | \
@@ -155,6 +143,9 @@ typedef enum PCIExpLinkWidth {
PCI_ERR_UNC_ATOP_EBLOCKED | \
PCI_ERR_UNC_TLP_PRF_BLOCKED)
+#define PCI_ERR_UNC_MASK_DEFAULT (PCI_ERR_UNC_INTN | \
+ PCI_ERR_UNC_TLP_PRF_BLOCKED)
+
#define PCI_ERR_UNC_SEVERITY_DEFAULT (PCI_ERR_UNC_DLP | \
PCI_ERR_UNC_SDN | \
PCI_ERR_UNC_FCP | \
@@ -175,4 +166,12 @@ typedef enum PCIExpLinkWidth {
PCI_ERR_COR_INTERNAL | \
PCI_ERR_COR_HL_OVERFLOW)
+/* ACS */
+#define PCI_ACS_VER 0x1
+#define PCI_ACS_SIZEOF 8
+
+/* DOE Capability Register Fields */
+#define PCI_DOE_VER 0x1
+#define PCI_DOE_SIZEOF 24
+
#endif /* QEMU_PCIE_REGS_H */
diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h
new file mode 100644
index 0000000000..b77eb7bf58
--- /dev/null
+++ b/include/hw/pci/pcie_sriov.h
@@ -0,0 +1,82 @@
+/*
+ * pcie_sriov.h:
+ *
+ * Implementation of SR/IOV emulation support.
+ *
+ * Copyright (c) 2015 Knut Omang <knut.omang@oracle.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_PCIE_SRIOV_H
+#define QEMU_PCIE_SRIOV_H
+
+#include "hw/pci/pci.h"
+
+struct PCIESriovPF {
+ uint16_t num_vfs; /* Number of virtual functions created */
+ uint8_t vf_bar_type[PCI_NUM_REGIONS]; /* Store type for each VF bar */
+ const char *vfname; /* Reference to the device type used for the VFs */
+ PCIDevice **vf; /* Pointer to an array of num_vfs VF devices */
+};
+
+struct PCIESriovVF {
+ PCIDevice *pf; /* Pointer back to owner physical function */
+ uint16_t vf_number; /* Logical VF number of this function */
+};
+
+void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
+ const char *vfname, uint16_t vf_dev_id,
+ uint16_t init_vfs, uint16_t total_vfs,
+ uint16_t vf_offset, uint16_t vf_stride);
+void pcie_sriov_pf_exit(PCIDevice *dev);
+
+/* Set up a VF bar in the SR/IOV bar area */
+void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
+ uint8_t type, dma_addr_t size);
+
+/* Instantiate a bar for a VF */
+void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num,
+ MemoryRegion *memory);
+
+/*
+ * Default (minimal) page size support values
+ * as required by the SR/IOV standard:
+ * 0x553 << 12 = 0x553000 = 4K + 8K + 64K + 256K + 1M + 4M
+ */
+#define SRIOV_SUP_PGSIZE_MINREQ 0x553
+
+/*
+ * Optionally add supported page sizes to the mask of supported page sizes
+ * Page size values are interpreted as opt_sup_pgsize << 12.
+ */
+void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize);
+
+/* SR/IOV capability config write handler */
+void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
+ uint32_t val, int len);
+
+/* Reset SR/IOV */
+void pcie_sriov_pf_reset(PCIDevice *dev);
+
+/* Get logical VF number of a VF - only valid for VFs */
+uint16_t pcie_sriov_vf_number(PCIDevice *dev);
+
+/*
+ * Get the physical function that owns this VF.
+ * Returns NULL if dev is not a virtual function
+ */
+PCIDevice *pcie_sriov_get_pf(PCIDevice *dev);
+
+/*
+ * Get the n-th VF of this physical function - only valid for PF.
+ * Returns NULL if index is invalid
+ */
+PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n);
+
+/* Returns the current number of virtual functions. */
+uint16_t pcie_sriov_num_vfs(PCIDevice *dev);
+
+#endif /* QEMU_PCIE_SRIOV_H */
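/*
 * Illustrative sketch, not part of this patch: a physical function exposing
 * four VFs of a hypothetical "example-vf" type, each with one 64-bit
 * prefetchable memory BAR.  Constants such as KiB come from "qemu/units.h".
 */
static void example_pf_init_sriov(PCIDevice *dev, uint16_t sriov_cap_offset)
{
    pcie_sriov_pf_init(dev, sriov_cap_offset, "example-vf",
                       0x5678,              /* VF device id (hypothetical) */
                       4, 4,                /* initial and total VFs */
                       0x80, 1);            /* first VF offset and stride */

    pcie_sriov_pf_init_vf_bar(dev, 0,
                              PCI_BASE_ADDRESS_MEM_TYPE_64 |
                              PCI_BASE_ADDRESS_MEM_PREFETCH,
                              16 * KiB);
}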
diff --git a/include/hw/pci/shpc.h b/include/hw/pci/shpc.h
index 18f6ec1cd5..a0789df153 100644
--- a/include/hw/pci/shpc.h
+++ b/include/hw/pci/shpc.h
@@ -1,10 +1,10 @@
#ifndef SHPC_H
#define SHPC_H
-#include "qemu-common.h"
#include "exec/memory.h"
#include "hw/hotplug.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
+#include "migration/vmstate.h"
struct SHPCDevice {
/* Capability offset in device's config space */
@@ -52,7 +52,7 @@ void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
-extern VMStateInfo shpc_vmstate_info;
+extern const VMStateInfo shpc_vmstate_info;
#define SHPC_VMSTATE(_field, _type, _test) \
VMSTATE_BUFFER_UNSAFE_INFO_TEST(_field, _type, _test, 0, \
shpc_vmstate_info, 0)
diff --git a/include/hw/pci/slotid_cap.h b/include/hw/pci/slotid_cap.h
index a777ea0e49..8b4dc0ce83 100644
--- a/include/hw/pci/slotid_cap.h
+++ b/include/hw/pci/slotid_cap.h
@@ -1,7 +1,6 @@
#ifndef PCI_SLOTID_CAP_H
#define PCI_SLOTID_CAP_H
-#include "qemu-common.h"
int slotid_cap_init(PCIDevice *dev, int nslots,
uint8_t chassis,
diff --git a/include/hw/pcmcia.h b/include/hw/pcmcia.h
index 79cac9c761..ab26802751 100644
--- a/include/hw/pcmcia.h
+++ b/include/hw/pcmcia.h
@@ -3,7 +3,8 @@
/* PCMCIA/Cardbus */
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
typedef struct PCMCIASocket {
qemu_irq irq;
@@ -11,12 +12,7 @@ typedef struct PCMCIASocket {
} PCMCIASocket;
#define TYPE_PCMCIA_CARD "pcmcia-card"
-#define PCMCIA_CARD(obj) \
- OBJECT_CHECK(PCMCIACardState, (obj), TYPE_PCMCIA_CARD)
-#define PCMCIA_CARD_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCMCIACardClass, obj, TYPE_PCMCIA_CARD)
-#define PCMCIA_CARD_CLASS(cls) \
- OBJECT_CLASS_CHECK(PCMCIACardClass, cls, TYPE_PCMCIA_CARD)
+OBJECT_DECLARE_TYPE(PCMCIACardState, PCMCIACardClass, PCMCIA_CARD)
struct PCMCIACardState {
/*< private >*/
@@ -26,7 +22,7 @@ struct PCMCIACardState {
PCMCIASocket *slot;
};
-typedef struct PCMCIACardClass {
+struct PCMCIACardClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
@@ -45,24 +41,24 @@ typedef struct PCMCIACardClass {
uint32_t address, uint16_t value);
uint16_t (*io_read)(PCMCIACardState *card, uint32_t address);
void (*io_write)(PCMCIACardState *card, uint32_t address, uint16_t value);
-} PCMCIACardClass;
+};
-#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */
-#define CISTPL_NO_LINK 0x14 /* No Link Tuple */
-#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */
-#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */
-#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */
-#define CISTPL_CONFIG 0x1a /* Configuration Tuple */
-#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */
-#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */
-#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */
-#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */
-#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */
-#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */
-#define CISTPL_FUNCID 0x21 /* Function ID Tuple */
-#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */
-#define CISTPL_END 0xff /* Tuple End */
-#define CISTPL_ENDMARK 0xff
+#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */
+#define CISTPL_NO_LINK 0x14 /* No Link Tuple */
+#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */
+#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */
+#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */
+#define CISTPL_CONFIG 0x1a /* Configuration Tuple */
+#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */
+#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */
+#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */
+#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */
+#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */
+#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */
+#define CISTPL_FUNCID 0x21 /* Function ID Tuple */
+#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */
+#define CISTPL_END 0xff /* Tuple End */
+#define CISTPL_ENDMARK 0xff
/* dscm1xxxx.c */
PCMCIACardState *dscm1xxxx_init(DriveInfo *bdrv);
diff --git a/include/hw/platform-bus.h b/include/hw/platform-bus.h
index 19e20c57ce..44f30c5353 100644
--- a/include/hw/platform-bus.h
+++ b/include/hw/platform-bus.h
@@ -11,7 +11,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,16 +23,11 @@
*/
#include "hw/sysbus.h"
+#include "qom/object.h"
-typedef struct PlatformBusDevice PlatformBusDevice;
#define TYPE_PLATFORM_BUS_DEVICE "platform-bus-device"
-#define PLATFORM_BUS_DEVICE(obj) \
- OBJECT_CHECK(PlatformBusDevice, (obj), TYPE_PLATFORM_BUS_DEVICE)
-#define PLATFORM_BUS_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PlatformBusDeviceClass, (klass), TYPE_PLATFORM_BUS_DEVICE)
-#define PLATFORM_BUS_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PlatformBusDeviceClass, (obj), TYPE_PLATFORM_BUS_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(PlatformBusDevice, PLATFORM_BUS_DEVICE)
struct PlatformBusDevice {
/*< private >*/
diff --git a/include/hw/ppc/fdt.h b/include/hw/ppc/fdt.h
index a8cd85069f..b56ac2a8cb 100644
--- a/include/hw/ppc/fdt.h
+++ b/include/hw/ppc/fdt.h
@@ -15,10 +15,10 @@
#define _FDT(exp) \
do { \
- int ret = (exp); \
- if (ret < 0) { \
- error_report("error creating device tree: %s: %s", \
- #exp, fdt_strerror(ret)); \
+ int _ret = (exp); \
+ if (_ret < 0) { \
+ error_report("error creating device tree: %s: %s", \
+ #exp, fdt_strerror(_ret)); \
exit(1); \
} \
} while (0)
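/*
 * Illustrative sketch, not part of this patch: the rename to _ret avoids
 * shadowing a "ret" already in scope at call sites such as this one.
 */
static void example_add_node(void *fdt, int offset)
{
    int ret = fdt_setprop_string(fdt, offset, "status", "okay");

    if (ret < 0) {
        /* handle the first error explicitly ... */
    }
    /* ... the wrapper's internal _ret no longer shadows the local ret */
    _FDT(fdt_setprop_cell(fdt, offset, "#address-cells", 2));
}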
diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h
index 26cc469de4..4a3f644516 100644
--- a/include/hw/ppc/mac_dbdma.h
+++ b/include/hw/ppc/mac_dbdma.h
@@ -27,6 +27,7 @@
#include "qemu/iov.h"
#include "sysemu/dma.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
typedef struct DBDMA_io DBDMA_io;
@@ -160,13 +161,14 @@ typedef struct DBDMA_channel {
dbdma_cmd current;
} DBDMA_channel;
-typedef struct {
+struct DBDMAState {
SysBusDevice parent_obj;
MemoryRegion mem;
DBDMA_channel channels[DBDMA_CHANNELS];
QEMUBH *bh;
-} DBDMAState;
+};
+typedef struct DBDMAState DBDMAState;
/* Externally callable functions */
@@ -176,6 +178,6 @@ void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
void DBDMA_kick(DBDMAState *dbdma);
#define TYPE_MAC_DBDMA "mac-dbdma"
-#define MAC_DBDMA(obj) OBJECT_CHECK(DBDMAState, (obj), TYPE_MAC_DBDMA)
+OBJECT_DECLARE_SIMPLE_TYPE(DBDMAState, MAC_DBDMA)
#endif
diff --git a/include/hw/ppc/openpic.h b/include/hw/ppc/openpic.h
index dad08fe9be..9c6af8e207 100644
--- a/include/hw/ppc/openpic.h
+++ b/include/hw/ppc/openpic.h
@@ -1,10 +1,9 @@
#ifndef OPENPIC_H
#define OPENPIC_H
-#include "qemu-common.h"
#include "hw/sysbus.h"
-#include "hw/qdev-core.h"
-#include "qom/cpu.h"
+#include "hw/core/cpu.h"
+#include "qom/object.h"
#define MAX_CPU 32
#define MAX_MSI 8
@@ -15,14 +14,13 @@ enum {
OPENPIC_OUTPUT_INT = 0, /* IRQ */
OPENPIC_OUTPUT_CINT, /* critical IRQ */
OPENPIC_OUTPUT_MCK, /* Machine check event */
- OPENPIC_OUTPUT_DEBUG, /* Inconditional debug event */
+ OPENPIC_OUTPUT_DEBUG, /* Unconditional debug event */
OPENPIC_OUTPUT_RESET, /* Core reset event */
OPENPIC_OUTPUT_NB,
};
typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines;
-#define OPENPIC_MODEL_RAVEN 0
#define OPENPIC_MODEL_FSL_MPIC_20 1
#define OPENPIC_MODEL_FSL_MPIC_42 2
#define OPENPIC_MODEL_KEYLARGO 3
@@ -33,13 +31,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines;
#define OPENPIC_MAX_IRQ (OPENPIC_MAX_SRC + OPENPIC_MAX_IPI + \
OPENPIC_MAX_TMR)
-/* Raven */
-#define RAVEN_MAX_CPU 2
-#define RAVEN_MAX_EXT 48
-#define RAVEN_MAX_IRQ 64
-#define RAVEN_MAX_TMR OPENPIC_MAX_TMR
-#define RAVEN_MAX_IPI OPENPIC_MAX_IPI
-
/* KeyLargo */
#define KEYLARGO_MAX_CPU 4
#define KEYLARGO_MAX_EXT 64
@@ -50,14 +41,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines;
/* Timers don't exist but this makes the code happy... */
#define KEYLARGO_TMR_IRQ (KEYLARGO_IPI_IRQ + KEYLARGO_MAX_IPI)
-/* Interrupt definitions */
-#define RAVEN_FE_IRQ (RAVEN_MAX_EXT) /* Internal functional IRQ */
-#define RAVEN_ERR_IRQ (RAVEN_MAX_EXT + 1) /* Error IRQ */
-#define RAVEN_TMR_IRQ (RAVEN_MAX_EXT + 2) /* First timer IRQ */
-#define RAVEN_IPI_IRQ (RAVEN_TMR_IRQ + RAVEN_MAX_TMR) /* First IPI IRQ */
-/* First doorbell IRQ */
-#define RAVEN_DBL_IRQ (RAVEN_IPI_IRQ + (RAVEN_MAX_CPU * RAVEN_MAX_IPI))
-
typedef struct FslMpicInfo {
int max_ext;
} FslMpicInfo;
@@ -68,10 +51,11 @@ typedef enum IRQType {
IRQ_TYPE_FSLSPECIAL, /* FSL timer/IPI interrupt, edge, no polarity */
} IRQType;
-/* Round up to the nearest 64 IRQs so that the queue length
+/*
+ * Round up to the nearest 64 IRQs so that the queue length
* won't change when moving between 32 and 64 bit hosts.
*/
-#define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63)
+#define IRQQUEUE_SIZE_BITS ROUND_UP(OPENPIC_MAX_IRQ, 64)
typedef struct IRQQueue {
unsigned long *queue;
@@ -118,8 +102,10 @@ typedef struct OpenPICTimer {
    bool qemu_timer_active;      /* Is the qemu_timer running? */
struct QEMUTimer *qemu_timer;
    struct OpenPICState *opp;    /* Device the timer is part of. */
- /* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last
- current_count written or read, only defined if qemu_timer_active. */
+ /*
+ * The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last
+ * current_count written or read, only defined if qemu_timer_active.
+ */
uint64_t origin_time;
} OpenPICTimer;
@@ -138,9 +124,9 @@ typedef struct IRQDest {
} IRQDest;
#define TYPE_OPENPIC "openpic"
-#define OPENPIC(obj) OBJECT_CHECK(OpenPICState, (obj), TYPE_OPENPIC)
+OBJECT_DECLARE_SIMPLE_TYPE(OpenPICState, OPENPIC)
-typedef struct OpenPICState {
+struct OpenPICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -185,6 +171,6 @@ typedef struct OpenPICState {
uint32_t irq_ipi0;
uint32_t irq_tim0;
uint32_t irq_msi;
-} OpenPICState;
+};
#endif /* OPENPIC_H */
diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h
index 6b65397b7e..476b136146 100644
--- a/include/hw/ppc/pnv.h
+++ b/include/hw/ppc/pnv.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,135 +16,72 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_H
-#define _PPC_PNV_H
+#ifndef PPC_PNV_H
+#define PPC_PNV_H
+
+#include "cpu.h"
#include "hw/boards.h"
#include "hw/sysbus.h"
#include "hw/ipmi/ipmi.h"
-#include "hw/ppc/pnv_lpc.h"
-#include "hw/ppc/pnv_psi.h"
-#include "hw/ppc/pnv_occ.h"
+#include "hw/ppc/pnv_pnor.h"
#define TYPE_PNV_CHIP "pnv-chip"
-#define PNV_CHIP(obj) OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP)
-#define PNV_CHIP_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvChipClass, (klass), TYPE_PNV_CHIP)
-#define PNV_CHIP_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvChipClass, (obj), TYPE_PNV_CHIP)
-
-typedef enum PnvChipType {
- PNV_CHIP_POWER8E, /* AKA Murano (default) */
- PNV_CHIP_POWER8, /* AKA Venice */
- PNV_CHIP_POWER8NVL, /* AKA Naples */
- PNV_CHIP_POWER9, /* AKA Nimbus */
-} PnvChipType;
-
-typedef struct PnvChip {
- /*< private >*/
- SysBusDevice parent_obj;
-
- /*< public >*/
- uint32_t chip_id;
- uint64_t ram_start;
- uint64_t ram_size;
-
- uint32_t nr_cores;
- uint64_t cores_mask;
- void *cores;
-
- hwaddr xscom_base;
- MemoryRegion xscom_mmio;
- MemoryRegion xscom;
- AddressSpace xscom_as;
-} PnvChip;
-
-#define TYPE_PNV8_CHIP "pnv8-chip"
-#define PNV8_CHIP(obj) OBJECT_CHECK(Pnv8Chip, (obj), TYPE_PNV8_CHIP)
-
-typedef struct Pnv8Chip {
- /*< private >*/
- PnvChip parent_obj;
- /*< public >*/
- MemoryRegion icp_mmio;
-
- PnvLpcController lpc;
- PnvPsi psi;
- PnvOCC occ;
-} Pnv8Chip;
-
-#define TYPE_PNV9_CHIP "pnv9-chip"
-#define PNV9_CHIP(obj) OBJECT_CHECK(Pnv9Chip, (obj), TYPE_PNV9_CHIP)
-
-typedef struct Pnv9Chip {
- /*< private >*/
- PnvChip parent_obj;
-
- /*< public >*/
-} Pnv9Chip;
-
-typedef struct PnvChipClass {
- /*< private >*/
- SysBusDeviceClass parent_class;
-
- /*< public >*/
- PnvChipType chip_type;
- uint64_t chip_cfam_id;
- uint64_t cores_mask;
-
- hwaddr xscom_base;
-
- DeviceRealize parent_realize;
-
- uint32_t (*core_pir)(PnvChip *chip, uint32_t core_id);
- void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp);
- ISABus *(*isa_create)(PnvChip *chip, Error **errp);
-} PnvChipClass;
+typedef struct PnvCore PnvCore;
+typedef struct PnvChip PnvChip;
+typedef struct Pnv8Chip Pnv8Chip;
+typedef struct Pnv9Chip Pnv9Chip;
+typedef struct Pnv10Chip Pnv10Chip;
#define PNV_CHIP_TYPE_SUFFIX "-" TYPE_PNV_CHIP
#define PNV_CHIP_TYPE_NAME(cpu_model) cpu_model PNV_CHIP_TYPE_SUFFIX
#define TYPE_PNV_CHIP_POWER8E PNV_CHIP_TYPE_NAME("power8e_v2.1")
-#define PNV_CHIP_POWER8E(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER8E)
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8E,
+ TYPE_PNV_CHIP_POWER8E)
#define TYPE_PNV_CHIP_POWER8 PNV_CHIP_TYPE_NAME("power8_v2.0")
-#define PNV_CHIP_POWER8(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER8)
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8,
+ TYPE_PNV_CHIP_POWER8)
#define TYPE_PNV_CHIP_POWER8NVL PNV_CHIP_TYPE_NAME("power8nvl_v1.0")
-#define PNV_CHIP_POWER8NVL(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER8NVL)
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8NVL,
+ TYPE_PNV_CHIP_POWER8NVL)
-#define TYPE_PNV_CHIP_POWER9 PNV_CHIP_TYPE_NAME("power9_v2.0")
-#define PNV_CHIP_POWER9(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER9)
+#define TYPE_PNV_CHIP_POWER9 PNV_CHIP_TYPE_NAME("power9_v2.2")
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER9,
+ TYPE_PNV_CHIP_POWER9)
-/*
- * This generates a HW chip id depending on an index, as found on a
- * two socket system with dual chip modules :
- *
- * 0x0, 0x1, 0x10, 0x11
- *
- * 4 chips should be the maximum
- *
- * TODO: use a machine property to define the chip ids
- */
-#define PNV_CHIP_HWID(i) ((((i) & 0x3e) << 3) | ((i) & 0x1))
+#define TYPE_PNV_CHIP_POWER10 PNV_CHIP_TYPE_NAME("power10_v2.0")
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER10,
+ TYPE_PNV_CHIP_POWER10)
-/*
- * Converts back a HW chip id to an index. This is useful to calculate
- * the MMIO addresses of some controllers which depend on the chip id.
- */
-#define PNV_CHIP_INDEX(chip) \
- (((chip)->chip_id >> 2) * 2 + ((chip)->chip_id & 0x3))
+PnvCore *pnv_chip_find_core(PnvChip *chip, uint32_t core_id);
+PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir);
+
+typedef struct PnvPHB PnvPHB;
#define TYPE_PNV_MACHINE MACHINE_TYPE_NAME("powernv")
-#define PNV_MACHINE(obj) \
- OBJECT_CHECK(PnvMachineState, (obj), TYPE_PNV_MACHINE)
+typedef struct PnvMachineClass PnvMachineClass;
+typedef struct PnvMachineState PnvMachineState;
+DECLARE_OBJ_CHECKERS(PnvMachineState, PnvMachineClass,
+ PNV_MACHINE, TYPE_PNV_MACHINE)
-typedef struct PnvMachineState {
+
+struct PnvMachineClass {
+ /*< private >*/
+ MachineClass parent_class;
+
+ /*< public >*/
+ const char *compat;
+ int compat_size;
+
+ void (*dt_power_mgt)(PnvMachineState *pnv, void *fdt);
+ void (*i2c_init)(PnvMachineState *pnv);
+};
+
+struct PnvMachineState {
/*< private >*/
MachineState parent_obj;
@@ -159,17 +96,14 @@ typedef struct PnvMachineState {
IPMIBmc *bmc;
Notifier powerdown_notifier;
-} PnvMachineState;
-static inline bool pnv_chip_is_power9(const PnvChip *chip)
-{
- return PNV_CHIP_GET_CLASS(chip)->chip_type == PNV_CHIP_POWER9;
-}
+ PnvPnor *pnor;
+
+ hwaddr fw_load_addr;
+};
-static inline bool pnv_is_power9(PnvMachineState *pnv)
-{
- return pnv_chip_is_power9(pnv->chips[0]);
-}
+PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id);
+PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb);
#define PNV_FDT_ADDR 0x01000000
#define PNV_TIMEBASE_FREQ 512000000ULL
@@ -179,13 +113,26 @@ static inline bool pnv_is_power9(PnvMachineState *pnv)
*/
void pnv_dt_bmc_sensors(IPMIBmc *bmc, void *fdt);
void pnv_bmc_powerdown(IPMIBmc *bmc);
+IPMIBmc *pnv_bmc_create(PnvPnor *pnor);
+IPMIBmc *pnv_bmc_find(Error **errp);
+void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
/*
* POWER8 MMIO base addresses
*/
#define PNV_XSCOM_SIZE 0x800000000ull
#define PNV_XSCOM_BASE(chip) \
- (chip->xscom_base + ((uint64_t)(chip)->chip_id) * PNV_XSCOM_SIZE)
+ (0x0003fc0000000000ull + ((uint64_t)(chip)->chip_id) * PNV_XSCOM_SIZE)
+
+#define PNV_OCC_COMMON_AREA_SIZE 0x0000000000800000ull
+#define PNV_OCC_COMMON_AREA_BASE 0x7fff800000ull
+#define PNV_OCC_SENSOR_BASE(chip) (PNV_OCC_COMMON_AREA_BASE + \
+ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
+
+#define PNV_HOMER_SIZE 0x0000000000400000ull
+#define PNV_HOMER_BASE(chip) \
+ (0x7ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE)
+
/*
* XSCOM 0x20109CA defines the ICP BAR:
@@ -203,16 +150,100 @@ void pnv_bmc_powerdown(IPMIBmc *bmc);
*/
#define PNV_ICP_SIZE 0x0000000000100000ull
#define PNV_ICP_BASE(chip) \
- (0x0003ffff80000000ull + (uint64_t) PNV_CHIP_INDEX(chip) * PNV_ICP_SIZE)
+ (0x0003ffff80000000ull + (uint64_t) (chip)->chip_id * PNV_ICP_SIZE)
#define PNV_PSIHB_SIZE 0x0000000000100000ull
#define PNV_PSIHB_BASE(chip) \
- (0x0003fffe80000000ull + (uint64_t)PNV_CHIP_INDEX(chip) * PNV_PSIHB_SIZE)
+ (0x0003fffe80000000ull + (uint64_t)(chip)->chip_id * PNV_PSIHB_SIZE)
#define PNV_PSIHB_FSP_SIZE 0x0000000100000000ull
#define PNV_PSIHB_FSP_BASE(chip) \
- (0x0003ffe000000000ull + (uint64_t)PNV_CHIP_INDEX(chip) * \
+ (0x0003ffe000000000ull + (uint64_t)(chip)->chip_id * \
PNV_PSIHB_FSP_SIZE)
-#endif /* _PPC_PNV_H */
+/*
+ * POWER9 MMIO base addresses
+ */
+#define PNV9_CHIP_BASE(chip, base) \
+ ((base) + ((uint64_t) (chip)->chip_id << 42))
+
+#define PNV9_XIVE_VC_SIZE 0x0000008000000000ull
+#define PNV9_XIVE_VC_BASE(chip) PNV9_CHIP_BASE(chip, 0x0006010000000000ull)
+
+#define PNV9_XIVE_PC_SIZE 0x0000001000000000ull
+#define PNV9_XIVE_PC_BASE(chip) PNV9_CHIP_BASE(chip, 0x0006018000000000ull)
+
+#define PNV9_LPCM_SIZE 0x0000000100000000ull
+#define PNV9_LPCM_BASE(chip) PNV9_CHIP_BASE(chip, 0x0006030000000000ull)
+
+#define PNV9_PSIHB_SIZE 0x0000000000100000ull
+#define PNV9_PSIHB_BASE(chip) PNV9_CHIP_BASE(chip, 0x0006030203000000ull)
+
+#define PNV9_XIVE_IC_SIZE 0x0000000000080000ull
+#define PNV9_XIVE_IC_BASE(chip) PNV9_CHIP_BASE(chip, 0x0006030203100000ull)
+
+#define PNV9_XIVE_TM_SIZE 0x0000000000040000ull
+#define PNV9_XIVE_TM_BASE(chip) PNV9_CHIP_BASE(chip, 0x0006030203180000ull)
+
+#define PNV9_PSIHB_ESB_SIZE 0x0000000000010000ull
+#define PNV9_PSIHB_ESB_BASE(chip) PNV9_CHIP_BASE(chip, 0x00060302031c0000ull)
+
+#define PNV9_XSCOM_SIZE 0x0000000400000000ull
+#define PNV9_XSCOM_BASE(chip) PNV9_CHIP_BASE(chip, 0x00603fc00000000ull)
+
+#define PNV9_OCC_COMMON_AREA_SIZE 0x0000000000800000ull
+#define PNV9_OCC_COMMON_AREA_BASE 0x203fff800000ull
+#define PNV9_OCC_SENSOR_BASE(chip) (PNV9_OCC_COMMON_AREA_BASE + \
+ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
+
+#define PNV9_HOMER_SIZE 0x0000000000400000ull
+#define PNV9_HOMER_BASE(chip) \
+ (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV9_HOMER_SIZE)
+
+/*
+ * POWER10 MMIO base addresses - 16TB stride per chip
+ */
+#define PNV10_CHIP_BASE(chip, base) \
+ ((base) + ((uint64_t) (chip)->chip_id << 44))
+
+#define PNV10_XSCOM_SIZE 0x0000000400000000ull
+#define PNV10_XSCOM_BASE(chip) PNV10_CHIP_BASE(chip, 0x00603fc00000000ull)
+
+#define PNV10_LPCM_SIZE 0x0000000100000000ull
+#define PNV10_LPCM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030000000000ull)
+
+#define PNV10_XIVE2_IC_SIZE 0x0000000002000000ull
+#define PNV10_XIVE2_IC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030200000000ull)
+
+#define PNV10_PSIHB_ESB_SIZE 0x0000000000100000ull
+#define PNV10_PSIHB_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030202000000ull)
+
+#define PNV10_PSIHB_SIZE 0x0000000000100000ull
+#define PNV10_PSIHB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203000000ull)
+
+#define PNV10_XIVE2_TM_SIZE 0x0000000000040000ull
+#define PNV10_XIVE2_TM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203180000ull)
+
+#define PNV10_XIVE2_NVC_SIZE 0x0000000008000000ull
+#define PNV10_XIVE2_NVC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030208000000ull)
+
+#define PNV10_XIVE2_NVPG_SIZE 0x0000010000000000ull
+#define PNV10_XIVE2_NVPG_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006040000000000ull)
+
+#define PNV10_XIVE2_ESB_SIZE 0x0000010000000000ull
+#define PNV10_XIVE2_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006050000000000ull)
+
+#define PNV10_XIVE2_END_SIZE 0x0000020000000000ull
+#define PNV10_XIVE2_END_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006060000000000ull)
+
+#define PNV10_OCC_COMMON_AREA_SIZE 0x0000000000800000ull
+#define PNV10_OCC_COMMON_AREA_BASE 0x300fff800000ull
+#define PNV10_OCC_SENSOR_BASE(chip) (PNV10_OCC_COMMON_AREA_BASE + \
+ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
+
+#define PNV10_HOMER_SIZE 0x0000000000400000ull
+#define PNV10_HOMER_BASE(chip) \
+ (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV10_HOMER_SIZE)
+
+#endif /* PPC_PNV_H */
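The POWER9/POWER10 base-address macros above derive every per-chip MMIO region from a single stride: chip_id shifted left by 42 bits (a 4 TB stride) on POWER9 and by 44 bits (16 TB) on POWER10. A standalone sketch of the computation (DemoChip and the printed values are illustrative only; the constant is copied from PNV9_PSIHB_BASE above):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t chip_id; } DemoChip;   /* stand-in for PnvChip */

    #define DEMO_PNV9_CHIP_BASE(chip, base) \
        ((base) + ((uint64_t)(chip)->chip_id << 42))
    #define DEMO_PNV9_PSIHB_BASE(chip) \
        DEMO_PNV9_CHIP_BASE(chip, 0x0006030203000000ull)

    int main(void)
    {
        DemoChip c0 = { .chip_id = 0 }, c1 = { .chip_id = 1 };
        printf("chip0 PSIHB @ 0x%016" PRIx64 "\n", DEMO_PNV9_PSIHB_BASE(&c0));
        printf("chip1 PSIHB @ 0x%016" PRIx64 "\n", DEMO_PNV9_PSIHB_BASE(&c1));
        /* chip1 lands exactly one 4 TB stride (1ull << 42) above chip0 */
        return 0;
    }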
diff --git a/include/hw/ppc/pnv_chip.h b/include/hw/ppc/pnv_chip.h
new file mode 100644
index 0000000000..8589f3291e
--- /dev/null
+++ b/include/hw/ppc/pnv_chip.h
@@ -0,0 +1,162 @@
+#ifndef PPC_PNV_CHIP_H
+#define PPC_PNV_CHIP_H
+
+#include "hw/pci-host/pnv_phb4.h"
+#include "hw/ppc/pnv_chiptod.h"
+#include "hw/ppc/pnv_core.h"
+#include "hw/ppc/pnv_homer.h"
+#include "hw/ppc/pnv_n1_chiplet.h"
+#include "hw/ppc/pnv_lpc.h"
+#include "hw/ppc/pnv_occ.h"
+#include "hw/ppc/pnv_psi.h"
+#include "hw/ppc/pnv_sbe.h"
+#include "hw/ppc/pnv_xive.h"
+#include "hw/ppc/pnv_i2c.h"
+#include "hw/sysbus.h"
+
+OBJECT_DECLARE_TYPE(PnvChip, PnvChipClass,
+ PNV_CHIP)
+
+struct PnvChip {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ uint32_t chip_id;
+ uint64_t ram_start;
+ uint64_t ram_size;
+
+ uint32_t nr_cores;
+ uint32_t nr_threads;
+ uint64_t cores_mask;
+ PnvCore **cores;
+
+ uint32_t num_pecs;
+
+ MemoryRegion xscom_mmio;
+ MemoryRegion xscom;
+ AddressSpace xscom_as;
+
+ MemoryRegion *fw_mr;
+ gchar *dt_isa_nodename;
+};
+
+#define TYPE_PNV8_CHIP "pnv8-chip"
+DECLARE_INSTANCE_CHECKER(Pnv8Chip, PNV8_CHIP,
+ TYPE_PNV8_CHIP)
+
+struct Pnv8Chip {
+ /*< private >*/
+ PnvChip parent_obj;
+
+ /*< public >*/
+ MemoryRegion icp_mmio;
+
+ PnvLpcController lpc;
+ Pnv8Psi psi;
+ PnvOCC occ;
+ PnvHomer homer;
+
+#define PNV8_CHIP_PHB3_MAX 4
+ /*
+ * The array is used to allow quick access to the phbs by
+ * pnv_ics_get_child() and pnv_ics_resend_child().
+ */
+ PnvPHB *phbs[PNV8_CHIP_PHB3_MAX];
+ uint32_t num_phbs;
+
+ XICSFabric *xics;
+};
+
+#define TYPE_PNV9_CHIP "pnv9-chip"
+DECLARE_INSTANCE_CHECKER(Pnv9Chip, PNV9_CHIP,
+ TYPE_PNV9_CHIP)
+
+struct Pnv9Chip {
+ /*< private >*/
+ PnvChip parent_obj;
+
+ /*< public >*/
+ PnvXive xive;
+ Pnv9Psi psi;
+ PnvLpcController lpc;
+ PnvChipTOD chiptod;
+ PnvOCC occ;
+ PnvSBE sbe;
+ PnvHomer homer;
+
+ uint32_t nr_quads;
+ PnvQuad *quads;
+
+#define PNV9_CHIP_MAX_PEC 3
+ PnvPhb4PecState pecs[PNV9_CHIP_MAX_PEC];
+
+#define PNV9_CHIP_MAX_I2C 4
+ PnvI2C i2c[PNV9_CHIP_MAX_I2C];
+};
+
+/*
+ * A SMT8 fused core is a pair of SMT4 cores.
+ */
+#define PNV9_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
+#define PNV9_PIR2CHIP(pir) (((pir) >> 8) & 0x7f)
+
+#define TYPE_PNV10_CHIP "pnv10-chip"
+DECLARE_INSTANCE_CHECKER(Pnv10Chip, PNV10_CHIP,
+ TYPE_PNV10_CHIP)
+
+struct Pnv10Chip {
+ /*< private >*/
+ PnvChip parent_obj;
+
+ /*< public >*/
+ PnvXive2 xive;
+ Pnv9Psi psi;
+ PnvLpcController lpc;
+ PnvChipTOD chiptod;
+ PnvOCC occ;
+ PnvSBE sbe;
+ PnvHomer homer;
+ PnvN1Chiplet n1_chiplet;
+
+ uint32_t nr_quads;
+ PnvQuad *quads;
+
+#define PNV10_CHIP_MAX_PEC 2
+ PnvPhb4PecState pecs[PNV10_CHIP_MAX_PEC];
+
+#define PNV10_CHIP_MAX_I2C 4
+ PnvI2C i2c[PNV10_CHIP_MAX_I2C];
+};
+
+#define PNV10_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
+#define PNV10_PIR2CHIP(pir) (((pir) >> 8) & 0x7f)
+
+struct PnvChipClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+
+ /*< public >*/
+ uint64_t chip_cfam_id;
+ uint64_t cores_mask;
+ uint32_t num_pecs;
+ uint32_t num_phbs;
+
+ uint32_t i2c_num_engines;
+ const int *i2c_ports_per_engine;
+
+ DeviceRealize parent_realize;
+
+ uint32_t (*chip_pir)(PnvChip *chip, uint32_t core_id, uint32_t thread_id);
+ void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp);
+ void (*intc_reset)(PnvChip *chip, PowerPCCPU *cpu);
+ void (*intc_destroy)(PnvChip *chip, PowerPCCPU *cpu);
+ void (*intc_print_info)(PnvChip *chip, PowerPCCPU *cpu, Monitor *mon);
+ ISABus *(*isa_create)(PnvChip *chip, Error **errp);
+ void (*dt_populate)(PnvChip *chip, void *fdt);
+ void (*pic_print_info)(PnvChip *chip, Monitor *mon);
+ uint64_t (*xscom_core_base)(PnvChip *chip, uint32_t core_id);
+ uint32_t (*xscom_pcba)(PnvChip *chip, uint64_t addr);
+};
+
+#endif
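The PNV9_PIR2CHIP/PNV9_PIR2FUSEDCORE macros above slice the Processor ID Register into a chip number (bits 8..14) and an SMT8 fused-core number (bits 3..6). A quick self-contained check with an illustrative PIR value:

    #include <assert.h>
    #include <stdint.h>

    #define PNV9_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
    #define PNV9_PIR2CHIP(pir)      (((pir) >> 8) & 0x7f)

    int main(void)
    {
        uint32_t pir = 0x0238;                  /* illustrative value only */
        assert(PNV9_PIR2CHIP(pir) == 2);        /* chip 2 */
        assert(PNV9_PIR2FUSEDCORE(pir) == 7);   /* fused core 7 */
        return 0;
    }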
diff --git a/include/hw/ppc/pnv_chiptod.h b/include/hw/ppc/pnv_chiptod.h
new file mode 100644
index 0000000000..fde569bcbf
--- /dev/null
+++ b/include/hw/ppc/pnv_chiptod.h
@@ -0,0 +1,53 @@
+/*
+ * QEMU PowerPC PowerNV Emulation of some CHIPTOD behaviour
+ *
+ * Copyright (c) 2022-2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_CHIPTOD_H
+#define PPC_PNV_CHIPTOD_H
+
+#include "qom/object.h"
+
+#define TYPE_PNV_CHIPTOD "pnv-chiptod"
+OBJECT_DECLARE_TYPE(PnvChipTOD, PnvChipTODClass, PNV_CHIPTOD)
+#define TYPE_PNV9_CHIPTOD TYPE_PNV_CHIPTOD "-POWER9"
+DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV9_CHIPTOD, TYPE_PNV9_CHIPTOD)
+#define TYPE_PNV10_CHIPTOD TYPE_PNV_CHIPTOD "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV10_CHIPTOD, TYPE_PNV10_CHIPTOD)
+
+enum tod_state {
+ tod_error = 0,
+ tod_not_set = 7,
+ tod_running = 2,
+ tod_stopped = 1,
+};
+
+typedef struct PnvCore PnvCore;
+
+struct PnvChipTOD {
+ DeviceState xd;
+
+ PnvChip *chip;
+ MemoryRegion xscom_regs;
+
+ bool primary;
+ bool secondary;
+ enum tod_state tod_state;
+ uint64_t tod_error;
+ uint64_t pss_mss_ctrl_reg;
+ PnvCore *slave_pc_target;
+};
+
+struct PnvChipTODClass {
+ DeviceClass parent_class;
+
+ void (*broadcast_ttype)(PnvChipTOD *sender, uint32_t trigger);
+ PnvCore *(*tx_ttype_target)(PnvChipTOD *chiptod, uint64_t val);
+
+ int xscom_size;
+};
+
+#endif /* PPC_PNV_CHIPTOD_H */
diff --git a/include/hw/ppc/pnv_core.h b/include/hw/ppc/pnv_core.h
index 447ae761f7..c6d62fd145 100644
--- a/include/hw/ppc/pnv_core.h
+++ b/include/hw/ppc/pnv_core.h
@@ -5,7 +5,7 @@
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2 of
+ * as published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful, but
@@ -16,35 +16,74 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_CORE_H
-#define _PPC_PNV_CORE_H
+
+#ifndef PPC_PNV_CORE_H
+#define PPC_PNV_CORE_H
#include "hw/cpu/core.h"
+#include "target/ppc/cpu.h"
+#include "hw/ppc/pnv.h"
+#include "qom/object.h"
#define TYPE_PNV_CORE "powernv-cpu-core"
-#define PNV_CORE(obj) \
- OBJECT_CHECK(PnvCore, (obj), TYPE_PNV_CORE)
-#define PNV_CORE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvCoreClass, (klass), TYPE_PNV_CORE)
-#define PNV_CORE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvCoreClass, (obj), TYPE_PNV_CORE)
-
-typedef struct PnvCore {
+OBJECT_DECLARE_TYPE(PnvCore, PnvCoreClass,
+ PNV_CORE)
+
+struct PnvCore {
/*< private >*/
CPUCore parent_obj;
/*< public >*/
PowerPCCPU **threads;
uint32_t pir;
+ uint32_t hwid;
+ uint64_t hrmor;
+ PnvChip *chip;
MemoryRegion xscom_regs;
-} PnvCore;
+};
-typedef struct PnvCoreClass {
+struct PnvCoreClass {
DeviceClass parent_class;
-} PnvCoreClass;
+
+ const MemoryRegionOps *xscom_ops;
+ uint64_t xscom_size;
+};
#define PNV_CORE_TYPE_SUFFIX "-" TYPE_PNV_CORE
#define PNV_CORE_TYPE_NAME(cpu_model) cpu_model PNV_CORE_TYPE_SUFFIX
-#endif /* _PPC_PNV_CORE_H */
+typedef struct PnvCPUState {
+ Object *intc;
+} PnvCPUState;
+
+static inline PnvCPUState *pnv_cpu_state(PowerPCCPU *cpu)
+{
+ return (PnvCPUState *)cpu->machine_data;
+}
+
+struct PnvQuadClass {
+ DeviceClass parent_class;
+
+ const MemoryRegionOps *xscom_ops;
+ uint64_t xscom_size;
+
+ const MemoryRegionOps *xscom_qme_ops;
+ uint64_t xscom_qme_size;
+};
+
+#define TYPE_PNV_QUAD "powernv-cpu-quad"
+
+#define PNV_QUAD_TYPE_SUFFIX "-" TYPE_PNV_QUAD
+#define PNV_QUAD_TYPE_NAME(cpu_model) cpu_model PNV_QUAD_TYPE_SUFFIX
+
+OBJECT_DECLARE_TYPE(PnvQuad, PnvQuadClass, PNV_QUAD)
+
+struct PnvQuad {
+ DeviceState parent_obj;
+
+ uint32_t quad_id;
+ MemoryRegion xscom_regs;
+ MemoryRegion xscom_qme_regs;
+};
+#endif /* PPC_PNV_CORE_H */
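pnv_cpu_state() above is the PowerNV flavour of the machine_data pattern: the machine hangs its per-CPU state (here just the interrupt-controller presenter) off an opaque pointer in the CPU object. A reduced sketch of the idea, with DemoCPU standing in for PowerPCCPU (not QEMU code):

    #include <stdlib.h>

    typedef struct { void *intc; } DemoPnvCPUState;        /* per-CPU state   */
    typedef struct { void *machine_data; } DemoCPU;        /* PowerPCCPU stub */

    static inline DemoPnvCPUState *demo_cpu_state(DemoCPU *cpu)
    {
        return (DemoPnvCPUState *)cpu->machine_data;
    }

    int main(void)
    {
        DemoCPU cpu = { .machine_data = calloc(1, sizeof(DemoPnvCPUState)) };
        demo_cpu_state(&cpu)->intc = NULL;   /* would be set by the chip's intc_create() hook */
        free(cpu.machine_data);
        return 0;
    }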
diff --git a/include/hw/ppc/pnv_homer.h b/include/hw/ppc/pnv_homer.h
new file mode 100644
index 0000000000..b1c5d498dc
--- /dev/null
+++ b/include/hw/ppc/pnv_homer.h
@@ -0,0 +1,59 @@
+/*
+ * QEMU PowerPC PowerNV Emulation of a few HOMER related registers
+ *
+ * Copyright (c) 2019, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_PNV_HOMER_H
+#define PPC_PNV_HOMER_H
+
+#include "hw/ppc/pnv.h"
+#include "qom/object.h"
+
+#define TYPE_PNV_HOMER "pnv-homer"
+OBJECT_DECLARE_TYPE(PnvHomer, PnvHomerClass,
+ PNV_HOMER)
+#define TYPE_PNV8_HOMER TYPE_PNV_HOMER "-POWER8"
+DECLARE_INSTANCE_CHECKER(PnvHomer, PNV8_HOMER,
+ TYPE_PNV8_HOMER)
+#define TYPE_PNV9_HOMER TYPE_PNV_HOMER "-POWER9"
+DECLARE_INSTANCE_CHECKER(PnvHomer, PNV9_HOMER,
+ TYPE_PNV9_HOMER)
+#define TYPE_PNV10_HOMER TYPE_PNV_HOMER "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvHomer, PNV10_HOMER,
+ TYPE_PNV10_HOMER)
+
+struct PnvHomer {
+ DeviceState parent;
+
+ PnvChip *chip;
+ MemoryRegion pba_regs;
+ MemoryRegion regs;
+};
+
+
+struct PnvHomerClass {
+ DeviceClass parent_class;
+
+ int pba_size;
+ const MemoryRegionOps *pba_ops;
+ int homer_size;
+ const MemoryRegionOps *homer_ops;
+
+ hwaddr core_max_base;
+};
+
+#endif /* PPC_PNV_HOMER_H */
diff --git a/include/hw/ppc/pnv_i2c.h b/include/hw/ppc/pnv_i2c.h
new file mode 100644
index 0000000000..1a37730f1e
--- /dev/null
+++ b/include/hw/ppc/pnv_i2c.h
@@ -0,0 +1,38 @@
+/*
+ * QEMU PowerPC PowerNV Processor I2C model
+ *
+ * Copyright (c) 2019-2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_I2C_H
+#define PPC_PNV_I2C_H
+
+#include "hw/ppc/pnv.h"
+#include "hw/i2c/i2c.h"
+#include "qemu/fifo8.h"
+
+#define TYPE_PNV_I2C "pnv-i2c"
+#define PNV_I2C(obj) OBJECT_CHECK(PnvI2C, (obj), TYPE_PNV_I2C)
+
+#define PNV_I2C_REGS 0x20
+
+typedef struct PnvI2C {
+ DeviceState parent;
+
+ struct PnvChip *chip;
+
+ qemu_irq psi_irq;
+
+ uint64_t regs[PNV_I2C_REGS];
+ uint32_t engine;
+ uint32_t num_busses;
+ I2CBus **busses;
+
+ MemoryRegion xscom_regs;
+
+ Fifo8 fifo;
+} PnvI2C;
+
+#endif /* PPC_PNV_I2C_H */
diff --git a/include/hw/ppc/pnv_lpc.h b/include/hw/ppc/pnv_lpc.h
index d657489b07..5d22c45570 100644
--- a/include/hw/ppc/pnv_lpc.h
+++ b/include/hw/ppc/pnv_lpc.h
@@ -1,12 +1,12 @@
/*
* QEMU PowerPC PowerNV LPC controller
*
- * Copyright (c) 2016, IBM Corporation.
+ * Copyright (c) 2016-2022, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,16 +16,32 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_LPC_H
-#define _PPC_PNV_LPC_H
-#include "hw/ppc/pnv_psi.h"
+#ifndef PPC_PNV_LPC_H
+#define PPC_PNV_LPC_H
+
+#include "exec/memory.h"
+#include "hw/ppc/pnv.h"
+#include "hw/qdev-core.h"
#define TYPE_PNV_LPC "pnv-lpc"
-#define PNV_LPC(obj) \
- OBJECT_CHECK(PnvLpcController, (obj), TYPE_PNV_LPC)
+typedef struct PnvLpcClass PnvLpcClass;
+typedef struct PnvLpcController PnvLpcController;
+DECLARE_OBJ_CHECKERS(PnvLpcController, PnvLpcClass,
+ PNV_LPC, TYPE_PNV_LPC)
+#define TYPE_PNV8_LPC TYPE_PNV_LPC "-POWER8"
+DECLARE_INSTANCE_CHECKER(PnvLpcController, PNV8_LPC,
+ TYPE_PNV8_LPC)
+
+#define TYPE_PNV9_LPC TYPE_PNV_LPC "-POWER9"
+DECLARE_INSTANCE_CHECKER(PnvLpcController, PNV9_LPC,
+ TYPE_PNV9_LPC)
+
+#define TYPE_PNV10_LPC TYPE_PNV_LPC "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvLpcController, PNV10_LPC,
+ TYPE_PNV10_LPC)
-typedef struct PnvLpcController {
+struct PnvLpcController {
DeviceState parent;
uint64_t eccb_stat_reg;
@@ -50,6 +66,8 @@ typedef struct PnvLpcController {
MemoryRegion opb_master_regs;
/* OPB Master LS registers */
+ uint32_t opb_irq_route0;
+ uint32_t opb_irq_route1;
uint32_t opb_irq_stat;
uint32_t opb_irq_mask;
uint32_t opb_irq_pol;
@@ -67,9 +85,17 @@ typedef struct PnvLpcController {
MemoryRegion xscom_regs;
/* PSI to generate interrupts */
- PnvPsi *psi;
-} PnvLpcController;
+ qemu_irq psi_irq;
+};
+
+struct PnvLpcClass {
+ DeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+};
ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp);
+int pnv_dt_lpc(PnvChip *chip, void *fdt, int root_offset,
+ uint64_t lpcm_addr, uint64_t lpcm_size);
-#endif /* _PPC_PNV_LPC_H */
+#endif /* PPC_PNV_LPC_H */
diff --git a/include/hw/ppc/pnv_n1_chiplet.h b/include/hw/ppc/pnv_n1_chiplet.h
new file mode 100644
index 0000000000..a7ad039668
--- /dev/null
+++ b/include/hw/ppc/pnv_n1_chiplet.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU PowerPC N1 chiplet model
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_N1_CHIPLET_H
+#define PPC_PNV_N1_CHIPLET_H
+
+#include "hw/ppc/pnv_nest_pervasive.h"
+
+#define TYPE_PNV_N1_CHIPLET "pnv-N1-chiplet"
+#define PNV_N1_CHIPLET(obj) OBJECT_CHECK(PnvN1Chiplet, (obj), TYPE_PNV_N1_CHIPLET)
+
+typedef struct PnvPbScom {
+ uint64_t mode;
+ uint64_t hp_mode2_curr;
+} PnvPbScom;
+
+typedef struct PnvN1Chiplet {
+ DeviceState parent;
+ MemoryRegion xscom_pb_eq_mr;
+ MemoryRegion xscom_pb_es_mr;
+ PnvNestChipletPervasive nest_pervasive; /* common pervasive chiplet unit */
+#define PNV_PB_SCOM_EQ_SIZE 8
+ PnvPbScom eq[PNV_PB_SCOM_EQ_SIZE];
+#define PNV_PB_SCOM_ES_SIZE 4
+ PnvPbScom es[PNV_PB_SCOM_ES_SIZE];
+} PnvN1Chiplet;
+#endif /* PPC_PNV_N1_CHIPLET_H */
diff --git a/include/hw/ppc/pnv_nest_pervasive.h b/include/hw/ppc/pnv_nest_pervasive.h
new file mode 100644
index 0000000000..73cacf3823
--- /dev/null
+++ b/include/hw/ppc/pnv_nest_pervasive.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU PowerPC nest pervasive common chiplet model
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_NEST_CHIPLET_PERVASIVE_H
+#define PPC_PNV_NEST_CHIPLET_PERVASIVE_H
+
+#define TYPE_PNV_NEST_CHIPLET_PERVASIVE "pnv-nest-chiplet-pervasive"
+#define PNV_NEST_CHIPLET_PERVASIVE(obj) OBJECT_CHECK(PnvNestChipletPervasive, (obj), TYPE_PNV_NEST_CHIPLET_PERVASIVE)
+
+typedef struct PnvPervasiveCtrlRegs {
+#define PNV_CPLT_CTRL_SIZE 6
+ uint64_t cplt_ctrl[PNV_CPLT_CTRL_SIZE];
+ uint64_t cplt_cfg0;
+ uint64_t cplt_cfg1;
+ uint64_t cplt_stat0;
+ uint64_t cplt_mask0;
+ uint64_t ctrl_protect_mode;
+ uint64_t ctrl_atomic_lock;
+} PnvPervasiveCtrlRegs;
+
+typedef struct PnvNestChipletPervasive {
+ DeviceState parent;
+ MemoryRegion xscom_ctrl_regs_mr;
+ PnvPervasiveCtrlRegs control_regs;
+} PnvNestChipletPervasive;
+
+#endif /* PPC_PNV_NEST_CHIPLET_PERVASIVE_H */
diff --git a/include/hw/ppc/pnv_occ.h b/include/hw/ppc/pnv_occ.h
index 82f299dc76..df321244e3 100644
--- a/include/hw/ppc/pnv_occ.h
+++ b/include/hw/ppc/pnv_occ.h
@@ -1,12 +1,12 @@
/*
* QEMU PowerPC PowerNV Emulation of a few OCC related registers
*
- * Copyright (c) 2015-2017, IBM Corporation.
+ * Copyright (c) 2015-2022, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,23 +16,48 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_OCC_H
-#define _PPC_PNV_OCC_H
-#include "hw/ppc/pnv_psi.h"
+#ifndef PPC_PNV_OCC_H
+#define PPC_PNV_OCC_H
+
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
#define TYPE_PNV_OCC "pnv-occ"
-#define PNV_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV_OCC)
+OBJECT_DECLARE_TYPE(PnvOCC, PnvOCCClass,
+ PNV_OCC)
+#define TYPE_PNV8_OCC TYPE_PNV_OCC "-POWER8"
+DECLARE_INSTANCE_CHECKER(PnvOCC, PNV8_OCC,
+ TYPE_PNV8_OCC)
+#define TYPE_PNV9_OCC TYPE_PNV_OCC "-POWER9"
+DECLARE_INSTANCE_CHECKER(PnvOCC, PNV9_OCC,
+ TYPE_PNV9_OCC)
+#define TYPE_PNV10_OCC TYPE_PNV_OCC "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvOCC, PNV10_OCC, TYPE_PNV10_OCC)
+
+#define PNV_OCC_SENSOR_DATA_BLOCK_OFFSET 0x00580000
+#define PNV_OCC_SENSOR_DATA_BLOCK_SIZE 0x00025800
-typedef struct PnvOCC {
+struct PnvOCC {
DeviceState xd;
/* OCC Misc interrupt */
uint64_t occmisc;
- PnvPsi *psi;
+ qemu_irq psi_irq;
MemoryRegion xscom_regs;
-} PnvOCC;
+ MemoryRegion sram_regs;
+};
+
+struct PnvOCCClass {
+ DeviceClass parent_class;
+
+ int xscom_size;
+ const MemoryRegionOps *xscom_ops;
+};
+
+#define PNV_OCC_SENSOR_DATA_BLOCK_BASE(i) \
+ (PNV_OCC_SENSOR_DATA_BLOCK_OFFSET + (i) * PNV_OCC_SENSOR_DATA_BLOCK_SIZE)
-#endif /* _PPC_PNV_OCC_H */
+#endif /* PPC_PNV_OCC_H */
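PNV_OCC_SENSOR_DATA_BLOCK_BASE() above places each chip's sensor data block one fixed-size stride apart inside the OCC common area; pnv.h then adds the generation-specific common-area base on top. A small arithmetic check using the constants defined above:

    #include <assert.h>

    #define PNV_OCC_SENSOR_DATA_BLOCK_OFFSET 0x00580000ull
    #define PNV_OCC_SENSOR_DATA_BLOCK_SIZE   0x00025800ull
    #define PNV_OCC_SENSOR_DATA_BLOCK_BASE(i) \
        (PNV_OCC_SENSOR_DATA_BLOCK_OFFSET + (i) * PNV_OCC_SENSOR_DATA_BLOCK_SIZE)

    int main(void)
    {
        assert(PNV_OCC_SENSOR_DATA_BLOCK_BASE(0) == 0x00580000ull);
        assert(PNV_OCC_SENSOR_DATA_BLOCK_BASE(1) == 0x005a5800ull); /* +1 stride */
        return 0;
    }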
diff --git a/include/hw/ppc/pnv_pnor.h b/include/hw/ppc/pnv_pnor.h
new file mode 100644
index 0000000000..2e37ac88bf
--- /dev/null
+++ b/include/hw/ppc/pnv_pnor.h
@@ -0,0 +1,33 @@
+/*
+ * QEMU PowerNV PNOR simple model
+ *
+ * Copyright (c) 2019, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_PNV_PNOR_H
+#define PPC_PNV_PNOR_H
+
+#include "hw/sysbus.h"
+
+/*
+ * PNOR offset on the LPC FW address space
+ */
+#define PNOR_SPI_OFFSET 0x0c000000UL
+
+#define TYPE_PNV_PNOR "pnv-pnor"
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPnor, PNV_PNOR)
+
+struct PnvPnor {
+ SysBusDevice parent_obj;
+
+ BlockBackend *blk;
+
+ uint8_t *storage;
+ int64_t size;
+ MemoryRegion mmio;
+};
+
+#endif /* PPC_PNV_PNOR_H */
diff --git a/include/hw/ppc/pnv_psi.h b/include/hw/ppc/pnv_psi.h
index 64ac73512e..2a6f715350 100644
--- a/include/hw/ppc/pnv_psi.h
+++ b/include/hw/ppc/pnv_psi.h
@@ -1,12 +1,12 @@
/*
* QEMU PowerPC PowerNV Processor Service Interface (PSI) model
*
- * Copyright (c) 2015-2017, IBM Corporation.
+ * Copyright (c) 2015-2022, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,20 +16,23 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_PSI_H
-#define _PPC_PNV_PSI_H
+
+#ifndef PPC_PNV_PSI_H
+#define PPC_PNV_PSI_H
#include "hw/sysbus.h"
#include "hw/ppc/xics.h"
+#include "hw/ppc/xive.h"
+#include "hw/qdev-core.h"
#define TYPE_PNV_PSI "pnv-psi"
-#define PNV_PSI(obj) \
- OBJECT_CHECK(PnvPsi, (obj), TYPE_PNV_PSI)
+OBJECT_DECLARE_TYPE(PnvPsi, PnvPsiClass,
+ PNV_PSI)
#define PSIHB_XSCOM_MAX 0x20
-typedef struct PnvPsi {
- SysBusDevice parent;
+struct PnvPsi {
+ DeviceState parent;
MemoryRegion regs_mr;
uint64_t bar;
@@ -39,18 +42,47 @@ typedef struct PnvPsi {
uint64_t fsp_bar;
/* Interrupt generation */
- ICSState ics;
qemu_irq *qirqs;
/* Registers */
uint64_t regs[PSIHB_XSCOM_MAX];
MemoryRegion xscom_regs;
-} PnvPsi;
+};
+
+#define TYPE_PNV8_PSI TYPE_PNV_PSI "-POWER8"
+OBJECT_DECLARE_SIMPLE_TYPE(Pnv8Psi, PNV8_PSI)
+
+struct Pnv8Psi {
+ PnvPsi parent;
+
+ ICSState ics;
+};
+
+#define TYPE_PNV9_PSI TYPE_PNV_PSI "-POWER9"
+OBJECT_DECLARE_SIMPLE_TYPE(Pnv9Psi, PNV9_PSI)
+
+struct Pnv9Psi {
+ PnvPsi parent;
+
+ XiveSource source;
+};
+
+#define TYPE_PNV10_PSI TYPE_PNV_PSI "-POWER10"
+
+
+struct PnvPsiClass {
+ SysBusDeviceClass parent_class;
+
+ uint32_t xscom_pcba;
+ uint32_t xscom_size;
+ uint64_t bar_mask;
+ const char *compat;
+ int compat_size;
+};
/* The PSI and FSP interrupts are muxed on the same IRQ number */
typedef enum PnvPsiIrq {
- PSIHB_IRQ_PSI, /* internal use only */
PSIHB_IRQ_FSP, /* internal use only */
PSIHB_IRQ_OCC,
PSIHB_IRQ_FSI,
@@ -61,6 +93,23 @@ typedef enum PnvPsiIrq {
#define PSI_NUM_INTERRUPTS 6
-extern void pnv_psi_irq_set(PnvPsi *psi, PnvPsiIrq irq, bool state);
+/* P9 PSI Interrupts */
+#define PSIHB9_IRQ_PSI 0
+#define PSIHB9_IRQ_OCC 1
+#define PSIHB9_IRQ_FSI 2
+#define PSIHB9_IRQ_LPCHC 3
+#define PSIHB9_IRQ_LOCAL_ERR 4
+#define PSIHB9_IRQ_GLOBAL_ERR 5
+#define PSIHB9_IRQ_TPM 6
+#define PSIHB9_IRQ_LPC_SIRQ0 7
+#define PSIHB9_IRQ_LPC_SIRQ1 8
+#define PSIHB9_IRQ_LPC_SIRQ2 9
+#define PSIHB9_IRQ_LPC_SIRQ3 10
+#define PSIHB9_IRQ_SBE_I2C 11
+#define PSIHB9_IRQ_DIO 12
+#define PSIHB9_IRQ_PSU 13
+#define PSIHB9_NUM_IRQS 14
+
+void pnv_psi_pic_print_info(Pnv9Psi *psi, Monitor *mon);
-#endif /* _PPC_PNV_PSI_H */
+#endif /* PPC_PNV_PSI_H */
diff --git a/include/hw/ppc/pnv_sbe.h b/include/hw/ppc/pnv_sbe.h
new file mode 100644
index 0000000000..b6b378ad14
--- /dev/null
+++ b/include/hw/ppc/pnv_sbe.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU PowerPC PowerNV Emulation of some SBE behaviour
+ *
+ * Copyright (c) 2022, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_PNV_SBE_H
+#define PPC_PNV_SBE_H
+
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_PNV_SBE "pnv-sbe"
+OBJECT_DECLARE_TYPE(PnvSBE, PnvSBEClass, PNV_SBE)
+#define TYPE_PNV9_SBE TYPE_PNV_SBE "-POWER9"
+DECLARE_INSTANCE_CHECKER(PnvSBE, PNV9_SBE, TYPE_PNV9_SBE)
+#define TYPE_PNV10_SBE TYPE_PNV_SBE "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvSBE, PNV10_SBE, TYPE_PNV10_SBE)
+
+struct PnvSBE {
+ DeviceState xd;
+
+ uint64_t mbox[8];
+ uint64_t sbe_doorbell;
+ uint64_t host_doorbell;
+
+ qemu_irq psi_irq;
+ QEMUTimer *timer;
+
+ MemoryRegion xscom_mbox_regs;
+ MemoryRegion xscom_ctrl_regs;
+};
+
+struct PnvSBEClass {
+ DeviceClass parent_class;
+
+ int xscom_ctrl_size;
+ int xscom_mbox_size;
+ const MemoryRegionOps *xscom_ctrl_ops;
+ const MemoryRegionOps *xscom_mbox_ops;
+};
+
+#endif /* PPC_PNV_SBE_H */
diff --git a/include/hw/ppc/pnv_xive.h b/include/hw/ppc/pnv_xive.h
new file mode 100644
index 0000000000..9c48430ee4
--- /dev/null
+++ b/include/hw/ppc/pnv_xive.h
@@ -0,0 +1,168 @@
+/*
+ * QEMU PowerPC XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2019, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_PNV_XIVE_H
+#define PPC_PNV_XIVE_H
+
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/xive.h"
+#include "qom/object.h"
+#include "hw/ppc/xive2.h"
+
+#define TYPE_PNV_XIVE "pnv-xive"
+OBJECT_DECLARE_TYPE(PnvXive, PnvXiveClass,
+ PNV_XIVE)
+
+#define XIVE_BLOCK_MAX 16
+
+#define XIVE_TABLE_BLK_MAX 16 /* Block Scope Table (0-15) */
+#define XIVE_TABLE_MIG_MAX 16 /* Migration Register Table (1-15) */
+#define XIVE_TABLE_VDT_MAX 16 /* VDT Domain Table (0-15) */
+#define XIVE_TABLE_EDT_MAX 64 /* EDT Domain Table (0-63) */
+
+struct PnvXive {
+ XiveRouter parent_obj;
+
+ /* Owning chip */
+ PnvChip *chip;
+
+ /* XSCOM addresses giving access to the controller registers */
+ MemoryRegion xscom_regs;
+
+ /* Main MMIO regions that can be configured by FW */
+ MemoryRegion ic_mmio;
+ MemoryRegion ic_reg_mmio;
+ MemoryRegion ic_notify_mmio;
+ MemoryRegion ic_lsi_mmio;
+ MemoryRegion tm_indirect_mmio;
+ MemoryRegion vc_mmio;
+ MemoryRegion pc_mmio;
+ MemoryRegion tm_mmio;
+
+ /*
+ * IPI and END address spaces modeling the EDT segmentation in the
+ * VC region
+ */
+ AddressSpace ipi_as;
+ MemoryRegion ipi_mmio;
+ MemoryRegion ipi_edt_mmio;
+
+ AddressSpace end_as;
+ MemoryRegion end_mmio;
+ MemoryRegion end_edt_mmio;
+
+ /* Shortcut values for the Main MMIO regions */
+ hwaddr ic_base;
+ uint32_t ic_shift;
+ hwaddr vc_base;
+ uint32_t vc_shift;
+ hwaddr pc_base;
+ uint32_t pc_shift;
+ hwaddr tm_base;
+ uint32_t tm_shift;
+
+ /* Our XIVE source objects for IPIs and ENDs */
+ XiveSource ipi_source;
+ XiveENDSource end_source;
+
+ /* Interrupt controller registers */
+ uint64_t regs[0x300];
+
+ /*
+ * Virtual Structure Descriptor tables: EAT, SBE, ENDT, NVTT, IRQ
+ * These are in a SRAM protected by ECC.
+ */
+ uint64_t vsds[5][XIVE_BLOCK_MAX];
+
+ /* Translation tables */
+ uint64_t blk[XIVE_TABLE_BLK_MAX];
+ uint64_t mig[XIVE_TABLE_MIG_MAX];
+ uint64_t vdt[XIVE_TABLE_VDT_MAX];
+ uint64_t edt[XIVE_TABLE_EDT_MAX];
+};
+
+struct PnvXiveClass {
+ XiveRouterClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon);
+
+/*
+ * XIVE2 interrupt controller (POWER10)
+ */
+#define TYPE_PNV_XIVE2 "pnv-xive2"
+OBJECT_DECLARE_TYPE(PnvXive2, PnvXive2Class, PNV_XIVE2);
+
+typedef struct PnvXive2 {
+ Xive2Router parent_obj;
+
+ /* Owning chip */
+ PnvChip *chip;
+
+ /* XSCOM addresses giving access to the controller registers */
+ MemoryRegion xscom_regs;
+
+ MemoryRegion ic_mmio;
+ MemoryRegion ic_mmios[8];
+ MemoryRegion esb_mmio;
+ MemoryRegion end_mmio;
+ MemoryRegion nvc_mmio;
+ MemoryRegion nvpg_mmio;
+ MemoryRegion tm_mmio;
+
+ /* Shortcut values for the Main MMIO regions */
+ hwaddr ic_base;
+ uint32_t ic_shift;
+ hwaddr esb_base;
+ uint32_t esb_shift;
+ hwaddr end_base;
+ uint32_t end_shift;
+ hwaddr nvc_base;
+ uint32_t nvc_shift;
+ hwaddr nvpg_base;
+ uint32_t nvpg_shift;
+ hwaddr tm_base;
+ uint32_t tm_shift;
+
+ /* Interrupt controller registers */
+ uint64_t cq_regs[0x40];
+ uint64_t vc_regs[0x100];
+ uint64_t pc_regs[0x100];
+ uint64_t tctxt_regs[0x30];
+
+ /* To change default behavior */
+ uint64_t capabilities;
+ uint64_t config;
+
+ /* Our XIVE source objects for IPIs and ENDs */
+ XiveSource ipi_source;
+ Xive2EndSource end_source;
+
+ /*
+ * Virtual Structure Descriptor tables
+ * These are in a SRAM protected by ECC.
+ */
+ uint64_t vsds[9][XIVE_BLOCK_MAX];
+
+ /* Translation tables */
+ uint64_t tables[8][XIVE_BLOCK_MAX];
+
+} PnvXive2;
+
+typedef struct PnvXive2Class {
+ Xive2RouterClass parent_class;
+
+ DeviceRealize parent_realize;
+} PnvXive2Class;
+
+void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon);
+
+#endif /* PPC_PNV_XIVE_H */
diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h
index 255b26a5aa..6209e18492 100644
--- a/include/hw/ppc/pnv_xscom.h
+++ b/include/hw/ppc/pnv_xscom.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,28 +16,26 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _PPC_PNV_XSCOM_H
-#define _PPC_PNV_XSCOM_H
-#include "qom/object.h"
+#ifndef PPC_PNV_XSCOM_H
+#define PPC_PNV_XSCOM_H
-typedef struct PnvXScomInterface {
- Object parent;
-} PnvXScomInterface;
+#include "exec/memory.h"
+#include "hw/ppc/pnv.h"
+
+typedef struct PnvXScomInterface PnvXScomInterface;
#define TYPE_PNV_XSCOM_INTERFACE "pnv-xscom-interface"
#define PNV_XSCOM_INTERFACE(obj) \
- OBJECT_CHECK(PnvXScomInterface, (obj), TYPE_PNV_XSCOM_INTERFACE)
-#define PNV_XSCOM_INTERFACE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvXScomInterfaceClass, (klass), \
+ INTERFACE_CHECK(PnvXScomInterface, (obj), TYPE_PNV_XSCOM_INTERFACE)
+typedef struct PnvXScomInterfaceClass PnvXScomInterfaceClass;
+DECLARE_CLASS_CHECKERS(PnvXScomInterfaceClass, PNV_XSCOM_INTERFACE,
TYPE_PNV_XSCOM_INTERFACE)
-#define PNV_XSCOM_INTERFACE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvXScomInterfaceClass, (obj), TYPE_PNV_XSCOM_INTERFACE)
-typedef struct PnvXScomInterfaceClass {
+struct PnvXScomInterfaceClass {
InterfaceClass parent;
int (*dt_xscom)(PnvXScomInterface *dev, void *fdt, int offset);
-} PnvXScomInterfaceClass;
+};
/*
* Layout of the XSCOM PCB addresses of EX core 1 (POWER 8)
@@ -60,29 +58,154 @@ typedef struct PnvXScomInterfaceClass {
(PNV_XSCOM_EX_CORE_BASE | ((uint64_t)(core) << 24))
#define PNV_XSCOM_EX_SIZE 0x100000
-#define PNV_XSCOM_P9_EC_BASE(core) \
- ((uint64_t)(((core) & 0x1F) + 0x20) << 24)
-#define PNV_XSCOM_P9_EC_SIZE 0x100000
-
#define PNV_XSCOM_LPC_BASE 0xb0020
#define PNV_XSCOM_LPC_SIZE 0x4
#define PNV_XSCOM_PSIHB_BASE 0x2010900
#define PNV_XSCOM_PSIHB_SIZE 0x20
+#define PNV_XSCOM_CHIPTOD_BASE 0x0040000
+#define PNV_XSCOM_CHIPTOD_SIZE 0x31
+
#define PNV_XSCOM_OCC_BASE 0x0066000
#define PNV_XSCOM_OCC_SIZE 0x6000
-extern void pnv_xscom_realize(PnvChip *chip, Error **errp);
-extern int pnv_dt_xscom(PnvChip *chip, void *fdt, int offset);
+#define PNV_XSCOM_PBA_BASE 0x2013f00
+#define PNV_XSCOM_PBA_SIZE 0x40
+
+#define PNV_XSCOM_PBCQ_NEST_BASE 0x2012000
+#define PNV_XSCOM_PBCQ_NEST_SIZE 0x46
+
+#define PNV_XSCOM_PBCQ_PCI_BASE 0x9012000
+#define PNV_XSCOM_PBCQ_PCI_SIZE 0x15
+
+#define PNV_XSCOM_PBCQ_SPCI_BASE 0x9013c00
+#define PNV_XSCOM_PBCQ_SPCI_SIZE 0x5
+
+/*
+ * Layout of the XSCOM PCB addresses (POWER 9)
+ */
+#define PNV9_XSCOM_EC_BASE(core) \
+ ((uint64_t)(((core) & 0x1F) + 0x20) << 24)
+#define PNV9_XSCOM_EC_SIZE 0x100000
+
+#define PNV9_XSCOM_EQ_BASE(core) \
+ ((uint64_t)(((core) & 0x1C) + 0x40) << 22)
+#define PNV9_XSCOM_EQ_SIZE 0x100000
+
+#define PNV9_XSCOM_I2CM_BASE 0xa0000
+#define PNV9_XSCOM_I2CM_SIZE 0x1000
+
+#define PNV9_XSCOM_CHIPTOD_BASE PNV_XSCOM_CHIPTOD_BASE
+#define PNV9_XSCOM_CHIPTOD_SIZE PNV_XSCOM_CHIPTOD_SIZE
+
+#define PNV9_XSCOM_OCC_BASE PNV_XSCOM_OCC_BASE
+#define PNV9_XSCOM_OCC_SIZE 0x8000
+
+#define PNV9_XSCOM_SBE_CTRL_BASE 0x00050008
+#define PNV9_XSCOM_SBE_CTRL_SIZE 0x1
+
+#define PNV9_XSCOM_SBE_MBOX_BASE 0x000D0050
+#define PNV9_XSCOM_SBE_MBOX_SIZE 0x16
+
+#define PNV9_XSCOM_PBA_BASE 0x5012b00
+#define PNV9_XSCOM_PBA_SIZE 0x40
+
+#define PNV9_XSCOM_PSIHB_BASE 0x5012900
+#define PNV9_XSCOM_PSIHB_SIZE 0x100
+
+#define PNV9_XSCOM_XIVE_BASE 0x5013000
+#define PNV9_XSCOM_XIVE_SIZE 0x300
+
+#define PNV9_XSCOM_PEC_NEST_BASE 0x4010c00
+#define PNV9_XSCOM_PEC_NEST_SIZE 0x100
+
+#define PNV9_XSCOM_PEC_PCI_BASE 0xd010800
+#define PNV9_XSCOM_PEC_PCI_SIZE 0x200
+
+/* XSCOM PCI "pass-through" window to PHB SCOM */
+#define PNV9_XSCOM_PEC_PCI_STK0 0x100
+#define PNV9_XSCOM_PEC_PCI_STK1 0x140
+#define PNV9_XSCOM_PEC_PCI_STK2 0x180
+
+/*
+ * Layout of the XSCOM PCB addresses (POWER 10)
+ */
+#define PNV10_XSCOM_EQ_CHIPLET(core) (0x20 + ((core) >> 2))
+#define PNV10_XSCOM_EQ(chiplet) ((chiplet) << 24)
+#define PNV10_XSCOM_EC(proc) \
+ ((0x2 << 16) | ((1 << (3 - (proc))) << 12))
+
+#define PNV10_XSCOM_QME(chiplet) \
+ (PNV10_XSCOM_EQ(chiplet) | (0xE << 16))
+
+/*
+ * Make the region larger by 0x1000 (instead of starting at an offset) so the
+ * modelled addresses start from 0
+ */
+#define PNV10_XSCOM_QME_BASE(core) \
+ ((uint64_t) PNV10_XSCOM_QME(PNV10_XSCOM_EQ_CHIPLET(core)))
+#define PNV10_XSCOM_QME_SIZE (0x8000 + 0x1000)
+
+#define PNV10_XSCOM_EQ_BASE(core) \
+ ((uint64_t) PNV10_XSCOM_EQ(PNV10_XSCOM_EQ_CHIPLET(core)))
+#define PNV10_XSCOM_EQ_SIZE 0x20000
+
+#define PNV10_XSCOM_EC_BASE(core) \
+ ((uint64_t) PNV10_XSCOM_EQ_BASE(core) | PNV10_XSCOM_EC(core & 0x3))
+#define PNV10_XSCOM_EC_SIZE 0x1000
+
+#define PNV10_XSCOM_PSIHB_BASE 0x3011D00
+#define PNV10_XSCOM_PSIHB_SIZE 0x100
+
+#define PNV10_XSCOM_I2CM_BASE PNV9_XSCOM_I2CM_BASE
+#define PNV10_XSCOM_I2CM_SIZE PNV9_XSCOM_I2CM_SIZE
+
+#define PNV10_XSCOM_CHIPTOD_BASE PNV9_XSCOM_CHIPTOD_BASE
+#define PNV10_XSCOM_CHIPTOD_SIZE PNV9_XSCOM_CHIPTOD_SIZE
+
+#define PNV10_XSCOM_OCC_BASE PNV9_XSCOM_OCC_BASE
+#define PNV10_XSCOM_OCC_SIZE PNV9_XSCOM_OCC_SIZE
+
+#define PNV10_XSCOM_SBE_CTRL_BASE PNV9_XSCOM_SBE_CTRL_BASE
+#define PNV10_XSCOM_SBE_CTRL_SIZE PNV9_XSCOM_SBE_CTRL_SIZE
+
+#define PNV10_XSCOM_SBE_MBOX_BASE PNV9_XSCOM_SBE_MBOX_BASE
+#define PNV10_XSCOM_SBE_MBOX_SIZE PNV9_XSCOM_SBE_MBOX_SIZE
+
+#define PNV10_XSCOM_PBA_BASE 0x01010CDA
+#define PNV10_XSCOM_PBA_SIZE 0x40
+
+#define PNV10_XSCOM_XIVE2_BASE 0x2010800
+#define PNV10_XSCOM_XIVE2_SIZE 0x400
+
+#define PNV10_XSCOM_N1_CHIPLET_CTRL_REGS_BASE 0x3000000
+#define PNV10_XSCOM_CHIPLET_CTRL_REGS_SIZE 0x400
+
+#define PNV10_XSCOM_N1_PB_SCOM_EQ_BASE 0x3011000
+#define PNV10_XSCOM_N1_PB_SCOM_EQ_SIZE 0x200
+
+#define PNV10_XSCOM_N1_PB_SCOM_ES_BASE 0x3011300
+#define PNV10_XSCOM_N1_PB_SCOM_ES_SIZE 0x100
+
+#define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... */
+#define PNV10_XSCOM_PEC_NEST_SIZE 0x100
+
+#define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... */
+#define PNV10_XSCOM_PEC_PCI_SIZE 0x200
+
+void pnv_xscom_init(PnvChip *chip, uint64_t size, hwaddr addr);
+int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset,
+ uint64_t xscom_base, uint64_t xscom_size,
+ const char *compat, int compat_size);
-extern void pnv_xscom_add_subregion(PnvChip *chip, hwaddr offset,
- MemoryRegion *mr);
-extern void pnv_xscom_region_init(MemoryRegion *mr,
- struct Object *owner,
- const MemoryRegionOps *ops,
- void *opaque,
- const char *name,
- uint64_t size);
+void pnv_xscom_add_subregion(PnvChip *chip, hwaddr offset,
+ MemoryRegion *mr);
+void pnv_xscom_region_init(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
-#endif /* _PPC_PNV_XSCOM_H */
+#endif /* PPC_PNV_XSCOM_H */
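PNV9_XSCOM_EC_BASE() above maps POWER9 core ("EC") chiplets onto 16 MB-aligned XSCOM windows starting at chiplet 0x20, so core N's registers begin at (0x20 + N) << 24. A standalone check of that layout:

    #include <assert.h>
    #include <stdint.h>

    #define PNV9_XSCOM_EC_BASE(core) \
        ((uint64_t)(((core) & 0x1F) + 0x20) << 24)

    int main(void)
    {
        assert(PNV9_XSCOM_EC_BASE(0) == 0x20000000ull);   /* chiplet 0x20 */
        assert(PNV9_XSCOM_EC_BASE(3) == 0x23000000ull);   /* chiplet 0x23 */
        return 0;
    }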
diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h
index 298ec354a8..d5d119ea7f 100644
--- a/include/hw/ppc/ppc.h
+++ b/include/hw/ppc/ppc.h
@@ -1,9 +1,12 @@
#ifndef HW_PPC_H
#define HW_PPC_H
-#include "target/ppc/cpu-qom.h"
+#include "target/ppc/cpu.h"
void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level);
+PowerPCCPU *ppc_get_vcpu_by_pir(int pir);
+int ppc_cpu_pir(PowerPCCPU *cpu);
+int ppc_cpu_tir(PowerPCCPU *cpu);
/* PowerPC hardware exceptions management helpers */
typedef void (*clk_setup_cb)(void *opaque, uint32_t freq);
@@ -22,6 +25,7 @@ struct ppc_tb_t {
/* Time base management */
int64_t tb_offset; /* Compensation */
int64_t atb_offset; /* Compensation */
+ int64_t vtb_offset;
uint32_t tb_freq; /* TB frequency */
/* Decrementer management */
uint64_t decr_next; /* Tick for next decr interrupt */
@@ -30,8 +34,7 @@ struct ppc_tb_t {
/* Hypervisor decrementer management */
uint64_t hdecr_next; /* Tick for next hdecr interrupt */
QEMUTimer *hdecr_timer;
- uint64_t purr_load;
- uint64_t purr_start;
+ int64_t purr_offset;
void *opaque;
uint32_t flags;
};
@@ -51,7 +54,12 @@ struct ppc_tb_t {
*/
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset);
-clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq);
+void cpu_ppc_tb_init(CPUPPCState *env, uint32_t freq);
+void cpu_ppc_tb_reset(CPUPPCState *env);
+void cpu_ppc_tb_free(CPUPPCState *env);
+void cpu_ppc_hdecr_init(CPUPPCState *env);
+void cpu_ppc_hdecr_exit(CPUPPCState *env);
+
/* Embedded PowerPC DCR management */
typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn);
typedef void (*dcr_write_cb)(void *opaque, int dcrn, uint32_t val);
@@ -66,20 +74,23 @@ clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
void ppc40x_core_reset(PowerPCCPU *cpu);
void ppc40x_chip_reset(PowerPCCPU *cpu);
void ppc40x_system_reset(PowerPCCPU *cpu);
-void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val);
#if defined(CONFIG_USER_ONLY)
static inline void ppc40x_irq_init(PowerPCCPU *cpu) {}
static inline void ppc6xx_irq_init(PowerPCCPU *cpu) {}
static inline void ppc970_irq_init(PowerPCCPU *cpu) {}
static inline void ppcPOWER7_irq_init(PowerPCCPU *cpu) {}
+static inline void ppcPOWER9_irq_init(PowerPCCPU *cpu) {}
static inline void ppce500_irq_init(PowerPCCPU *cpu) {}
+static inline void ppc_irq_reset(PowerPCCPU *cpu) {}
#else
void ppc40x_irq_init(PowerPCCPU *cpu);
void ppce500_irq_init(PowerPCCPU *cpu);
void ppc6xx_irq_init(PowerPCCPU *cpu);
void ppc970_irq_init(PowerPCCPU *cpu);
void ppcPOWER7_irq_init(PowerPCCPU *cpu);
+void ppcPOWER9_irq_init(PowerPCCPU *cpu);
+void ppc_irq_reset(PowerPCCPU *cpu);
#endif
/* PPC machines for OpenBIOS */
@@ -90,11 +101,11 @@ enum {
ARCH_MAC99_U3,
};
-#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00)
-#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01)
-#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02)
-#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03)
-#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04)
+#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00)
+#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01)
+#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02)
+#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03)
+#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04)
#define FW_CFG_PPC_IS_KVM (FW_CFG_ARCH_LOCAL + 0x05)
#define FW_CFG_PPC_KVM_HC (FW_CFG_ARCH_LOCAL + 0x06)
#define FW_CFG_PPC_KVM_PID (FW_CFG_ARCH_LOCAL + 0x07)
diff --git a/include/hw/ppc/ppc4xx.h b/include/hw/ppc/ppc4xx.h
index 3a2a04c8ce..1bd9b8821b 100644
--- a/include/hw/ppc/ppc4xx.h
+++ b/include/hw/ppc/ppc4xx.h
@@ -25,35 +25,129 @@
#ifndef PPC4XX_H
#define PPC4XX_H
-/* PowerPC 4xx core initialization */
-PowerPCCPU *ppc4xx_init(const char *cpu_model,
- clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
- uint32_t sysclk);
-
-/* PowerPC 4xx universal interrupt controller */
-enum {
- PPCUIC_OUTPUT_INT = 0,
- PPCUIC_OUTPUT_CINT = 1,
- PPCUIC_OUTPUT_NB,
+#include "hw/ppc/ppc.h"
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/*
+ * Generic DCR device
+ */
+#define TYPE_PPC4xx_DCR_DEVICE "ppc4xx-dcr-device"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxDcrDeviceState, PPC4xx_DCR_DEVICE);
+struct Ppc4xxDcrDeviceState {
+ SysBusDevice parent_obj;
+
+ PowerPCCPU *cpu;
+};
+
+void ppc4xx_dcr_register(Ppc4xxDcrDeviceState *dev, int dcrn, void *opaque,
+ dcr_read_cb dcr_read, dcr_write_cb dcr_write);
+bool ppc4xx_dcr_realize(Ppc4xxDcrDeviceState *dev, PowerPCCPU *cpu,
+ Error **errp);
+
+/* Memory Access Layer (MAL) */
+#define TYPE_PPC4xx_MAL "ppc4xx-mal"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxMalState, PPC4xx_MAL);
+struct Ppc4xxMalState {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ qemu_irq irqs[4];
+ uint32_t cfg;
+ uint32_t esr;
+ uint32_t ier;
+ uint32_t txcasr;
+ uint32_t txcarr;
+ uint32_t txeobisr;
+ uint32_t txdeir;
+ uint32_t rxcasr;
+ uint32_t rxcarr;
+ uint32_t rxeobisr;
+ uint32_t rxdeir;
+ uint32_t *txctpr;
+ uint32_t *rxctpr;
+ uint32_t *rcbs;
+ uint8_t txcnum;
+ uint8_t rxcnum;
+};
+
+/* Peripheral local bus arbiter */
+#define TYPE_PPC4xx_PLB "ppc4xx-plb"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxPlbState, PPC4xx_PLB);
+struct Ppc4xxPlbState {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ uint32_t acr;
+ uint32_t bear;
+ uint32_t besr;
};
-qemu_irq *ppcuic_init (CPUPPCState *env, qemu_irq *irqs,
- uint32_t dcr_base, int has_ssr, int has_vr);
-ram_addr_t ppc4xx_sdram_adjust(ram_addr_t ram_size, int nr_banks,
- MemoryRegion ram_memories[],
- hwaddr ram_bases[],
- hwaddr ram_sizes[],
- const unsigned int sdram_bank_sizes[]);
+/* Peripheral controller */
+#define TYPE_PPC4xx_EBC "ppc4xx-ebc"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxEbcState, PPC4xx_EBC);
+struct Ppc4xxEbcState {
+ Ppc4xxDcrDeviceState parent_obj;
-void ppc4xx_sdram_init (CPUPPCState *env, qemu_irq irq, int nbanks,
- MemoryRegion ram_memories[],
- hwaddr *ram_bases,
- hwaddr *ram_sizes,
- int do_init);
+ uint32_t addr;
+ uint32_t bcr[8];
+ uint32_t bap[8];
+ uint32_t bear;
+ uint32_t besr0;
+ uint32_t besr1;
+ uint32_t cfg;
+};
+
+/* SDRAM DDR controller */
+typedef struct {
+ MemoryRegion ram;
+ MemoryRegion container; /* used for clipping */
+ hwaddr base;
+ hwaddr size;
+ uint32_t bcr;
+} Ppc4xxSdramBank;
+
+#define SDR0_DDR0_DDRM_ENCODE(n) ((((unsigned long)(n)) & 0x03) << 29)
+#define SDR0_DDR0_DDRM_DDR1 0x20000000
+#define SDR0_DDR0_DDRM_DDR2 0x40000000
-void ppc4xx_mal_init(CPUPPCState *env, uint8_t txcnum, uint8_t rxcnum,
- qemu_irq irqs[4]);
+#define TYPE_PPC4xx_SDRAM_DDR "ppc4xx-sdram-ddr"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxSdramDdrState, PPC4xx_SDRAM_DDR);
+struct Ppc4xxSdramDdrState {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ MemoryRegion *dram_mr;
+ uint32_t nbanks; /* Banks to use from 4, e.g. when the board has fewer slots */
+ Ppc4xxSdramBank bank[4];
+ qemu_irq irq;
+
+ uint32_t addr;
+ uint32_t besr0;
+ uint32_t besr1;
+ uint32_t bear;
+ uint32_t cfg;
+ uint32_t status;
+ uint32_t rtr;
+ uint32_t pmit;
+ uint32_t tr;
+ uint32_t ecccfg;
+ uint32_t eccesr;
+};
+
+void ppc4xx_sdram_ddr_enable(Ppc4xxSdramDdrState *s);
+
+/* SDRAM DDR2 controller */
+#define TYPE_PPC4xx_SDRAM_DDR2 "ppc4xx-sdram-ddr2"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxSdramDdr2State, PPC4xx_SDRAM_DDR2);
+struct Ppc4xxSdramDdr2State {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ MemoryRegion *dram_mr;
+ uint32_t nbanks; /* Banks to use from 4, e.g. when the board has fewer slots */
+ Ppc4xxSdramBank bank[4];
+
+ uint32_t addr;
+ uint32_t mcopt2;
+};
-#define TYPE_PPC4xx_PCI_HOST_BRIDGE "ppc4xx-pcihost"
+void ppc4xx_sdram_ddr2_enable(Ppc4xxSdramDdr2State *s);
#endif /* PPC4XX_H */
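SDR0_DDR0_DDRM_ENCODE() above left-shifts the two-bit memory type to bit position 29, which is exactly how the DDR1/DDR2 constants beside it are formed. A standalone check:

    #include <assert.h>

    #define SDR0_DDR0_DDRM_ENCODE(n) ((((unsigned long)(n)) & 0x03) << 29)

    int main(void)
    {
        assert(SDR0_DDR0_DDRM_ENCODE(1) == 0x20000000UL);  /* SDR0_DDR0_DDRM_DDR1 */
        assert(SDR0_DDR0_DDRM_ENCODE(2) == 0x40000000UL);  /* SDR0_DDR0_DDRM_DDR2 */
        return 0;
    }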
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 9e01a5a12e..4aaf23d28f 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -8,15 +8,21 @@
#include "hw/mem/pc-dimm.h"
#include "hw/ppc/spapr_ovec.h"
#include "hw/ppc/spapr_irq.h"
+#include "qom/object.h"
+#include "hw/ppc/spapr_xive.h" /* For SpaprXive */
+#include "hw/ppc/xics.h" /* For ICSState */
+#include "hw/ppc/spapr_tpm_proxy.h"
+#include "hw/ppc/spapr_nested.h" /* For SpaprMachineStateNested */
-struct VIOsPAPRBus;
-struct sPAPRPHBState;
-struct sPAPRNVRAM;
-typedef struct sPAPREventLogEntry sPAPREventLogEntry;
-typedef struct sPAPREventSource sPAPREventSource;
-typedef struct sPAPRPendingHPT sPAPRPendingHPT;
-typedef struct ICSState ICSState;
-typedef struct sPAPRXive sPAPRXive;
+struct SpaprVioBus;
+struct SpaprPhbState;
+struct SpaprNvram;
+
+typedef struct SpaprEventLogEntry SpaprEventLogEntry;
+typedef struct SpaprEventSource SpaprEventSource;
+typedef struct SpaprPendingHpt SpaprPendingHpt;
+
+typedef struct Vof Vof;
#define HPTE64_V_HPTE_DIRTY 0x0000000000000040ULL
#define SPAPR_ENTRY_POINT 0x100
@@ -25,33 +31,25 @@ typedef struct sPAPRXive sPAPRXive;
#define TYPE_SPAPR_RTC "spapr-rtc"
-#define SPAPR_RTC(obj) \
- OBJECT_CHECK(sPAPRRTCState, (obj), TYPE_SPAPR_RTC)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprRtcState, SPAPR_RTC)
-typedef struct sPAPRRTCState sPAPRRTCState;
-struct sPAPRRTCState {
+struct SpaprRtcState {
/*< private >*/
DeviceState parent_obj;
int64_t ns_offset;
};
-typedef struct sPAPRDIMMState sPAPRDIMMState;
-typedef struct sPAPRMachineClass sPAPRMachineClass;
+typedef struct SpaprDimmState SpaprDimmState;
#define TYPE_SPAPR_MACHINE "spapr-machine"
-#define SPAPR_MACHINE(obj) \
- OBJECT_CHECK(sPAPRMachineState, (obj), TYPE_SPAPR_MACHINE)
-#define SPAPR_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRMachineClass, obj, TYPE_SPAPR_MACHINE)
-#define SPAPR_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRMachineClass, klass, TYPE_SPAPR_MACHINE)
+OBJECT_DECLARE_TYPE(SpaprMachineState, SpaprMachineClass, SPAPR_MACHINE)
typedef enum {
SPAPR_RESIZE_HPT_DEFAULT = 0,
SPAPR_RESIZE_HPT_DISABLED,
SPAPR_RESIZE_HPT_ENABLED,
SPAPR_RESIZE_HPT_REQUIRED,
-} sPAPRResizeHPT;
+} SpaprResizeHpt;
/**
* Capabilities
@@ -73,8 +71,20 @@ typedef enum {
#define SPAPR_CAP_HPT_MAXPAGESIZE 0x06
/* Nested KVM-HV */
#define SPAPR_CAP_NESTED_KVM_HV 0x07
+/* Large Decrementer */
+#define SPAPR_CAP_LARGE_DECREMENTER 0x08
+/* Count Cache Flush Assist HW Instruction */
+#define SPAPR_CAP_CCF_ASSIST 0x09
+/* Implements PAPR FWNMI option */
+#define SPAPR_CAP_FWNMI 0x0A
+/* Support H_RPT_INVALIDATE */
+#define SPAPR_CAP_RPT_INVALIDATE 0x0B
+/* Support for AIL modes */
+#define SPAPR_CAP_AIL_MODE_3 0x0C
+/* Nested PAPR */
+#define SPAPR_CAP_NESTED_PAPR 0x0D
/* Num Caps */
-#define SPAPR_CAP_NUM (SPAPR_CAP_NESTED_KVM_HV + 1)
+#define SPAPR_CAP_NUM (SPAPR_CAP_NESTED_PAPR + 1)
/*
* Capability Values
@@ -82,86 +92,141 @@ typedef enum {
/* Bool Caps */
#define SPAPR_CAP_OFF 0x00
#define SPAPR_CAP_ON 0x01
+
/* Custom Caps */
+
+/* Generic */
#define SPAPR_CAP_BROKEN 0x00
#define SPAPR_CAP_WORKAROUND 0x01
#define SPAPR_CAP_FIXED 0x02
+/* SPAPR_CAP_IBS (cap-ibs) */
#define SPAPR_CAP_FIXED_IBS 0x02
#define SPAPR_CAP_FIXED_CCD 0x03
+#define SPAPR_CAP_FIXED_NA 0x10 /* Let's leave a bit of a gap... */
+
+#define FDT_MAX_SIZE 0x200000
+
+/* Max number of NUMA nodes */
+#define NUMA_NODES_MAX_NUM (MAX_NODES)
-typedef struct sPAPRCapabilities sPAPRCapabilities;
-struct sPAPRCapabilities {
+/*
+ * NUMA FORM1 macros. FORM1_DIST_REF_POINTS was taken from
+ * MAX_DISTANCE_REF_POINTS in arch/powerpc/mm/numa.h from Linux
+ * kernel source. It represents the number of associativity domains
+ * for non-CPU resources.
+ *
+ * FORM1_NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
+ * array for any non-CPU resource.
+ */
+#define FORM1_DIST_REF_POINTS 4
+#define FORM1_NUMA_ASSOC_SIZE (FORM1_DIST_REF_POINTS + 1)
+
+/*
+ * FORM2 NUMA affinity has a single associativity domain, giving
+ * us an assoc size of 2.
+ */
+#define FORM2_DIST_REF_POINTS 1
+#define FORM2_NUMA_ASSOC_SIZE (FORM2_DIST_REF_POINTS + 1)
+
+typedef struct SpaprCapabilities SpaprCapabilities;
+struct SpaprCapabilities {
uint8_t caps[SPAPR_CAP_NUM];
};
/**
- * sPAPRMachineClass:
+ * SpaprMachineClass:
*/
-struct sPAPRMachineClass {
+struct SpaprMachineClass {
/*< private >*/
MachineClass parent_class;
/*< public >*/
bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */
+ bool dr_phb_enabled; /* enable dynamic-reconfig/hotplug of PHBs */
bool update_dt_enabled; /* enable KVMPPC_H_UPDATE_DT */
bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */
bool pre_2_10_has_unused_icps;
bool legacy_irq_allocation;
-
- void (*phb_placement)(sPAPRMachineState *spapr, uint32_t index,
- uint64_t *buid, hwaddr *pio,
+ uint32_t nr_xirqs;
+ bool broken_host_serial_model; /* present real host info to the guest */
+ bool pre_4_1_migration; /* don't migrate hpt-max-page-size */
+ bool linux_pci_probe;
+ bool smp_threads_vsmt; /* set VSMT to smp_threads by default */
+ hwaddr rma_limit; /* clamp the RMA to this size */
+ bool pre_5_1_assoc_refpoints;
+ bool pre_5_2_numa_associativity;
+ bool pre_6_2_numa_affinity;
+
+ bool (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
+ uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns, Error **errp);
- sPAPRResizeHPT resize_hpt_default;
- sPAPRCapabilities default_caps;
- sPAPRIrq *irq;
+ SpaprResizeHpt resize_hpt_default;
+ SpaprCapabilities default_caps;
+ SpaprIrq *irq;
};
+#define WDT_MAX_WATCHDOGS 4 /* Maximum number of watchdog devices */
+
+#define TYPE_SPAPR_WDT "spapr-wdt"
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprWatchdog, SPAPR_WDT)
+
+typedef struct SpaprWatchdog {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ QEMUTimer timer;
+ uint8_t action; /* One of PSERIES_WDTF_ACTION_xxx */
+ uint8_t leave_others; /* leaveOtherWatchdogsRunningOnTimeout */
+} SpaprWatchdog;
+
/**
- * sPAPRMachineState:
+ * SpaprMachineState:
*/
-struct sPAPRMachineState {
+struct SpaprMachineState {
/*< private >*/
MachineState parent_obj;
- struct VIOsPAPRBus *vio_bus;
- QLIST_HEAD(, sPAPRPHBState) phbs;
- struct sPAPRNVRAM *nvram;
- ICSState *ics;
- sPAPRRTCState rtc;
+ struct SpaprVioBus *vio_bus;
+ QLIST_HEAD(, SpaprPhbState) phbs;
+ struct SpaprNvram *nvram;
+ SpaprRtcState rtc;
- sPAPRResizeHPT resize_hpt;
+ SpaprResizeHpt resize_hpt;
void *htab;
uint32_t htab_shift;
- uint64_t patb_entry; /* Process tbl registed in H_REGISTER_PROCESS_TABLE */
- sPAPRPendingHPT *pending_hpt; /* in-progress resize */
+ uint64_t patb_entry; /* Process tbl registered in H_REGISTER_PROC_TBL */
+ SpaprPendingHpt *pending_hpt; /* in-progress resize */
hwaddr rma_size;
- int vrma_adjust;
- ssize_t rtas_size;
- void *rtas_blob;
uint32_t fdt_size;
uint32_t fdt_initial_size;
void *fdt_blob;
+ uint8_t fdt_rng_seed[32];
long kernel_size;
bool kernel_le;
+ uint64_t kernel_addr;
uint32_t initrd_base;
long initrd_size;
+ Vof *vof;
uint64_t rtc_offset; /* Now used only during incoming migration */
struct PPCTimebase tb;
- bool has_graphics;
+ bool want_stdout_path;
uint32_t vsmt; /* Virtual SMT mode (KVM's "core stride") */
+ /* Nested HV support (TCG only) */
+ SpaprMachineStateNested nested;
+
Notifier epow_notifier;
- QTAILQ_HEAD(, sPAPREventLogEntry) pending_events;
+ QTAILQ_HEAD(, SpaprEventLogEntry) pending_events;
bool use_hotplug_event_source;
- sPAPREventSource *event_sources;
+ SpaprEventSource *event_sources;
/* ibm,client-architecture-support option negotiation */
- bool cas_reboot;
- bool cas_legacy_guest_workaround;
- sPAPROptionVector *ov5; /* QEMU-supported option vectors */
- sPAPROptionVector *ov5_cas; /* negotiated (via CAS) option vectors */
+ bool cas_pre_isa3_guest;
+ SpaprOptionVector *ov5; /* QEMU-supported option vectors */
+ SpaprOptionVector *ov5_cas; /* negotiated (via CAS) option vectors */
uint32_t max_compat_pvr;
/* Migration state */
@@ -172,20 +237,52 @@ struct sPAPRMachineState {
/* Pending DIMM unplug cache. It is populated when a LMB
* unplug starts. It can be regenerated if a migration
* occurs during the unplug process. */
- QTAILQ_HEAD(, sPAPRDIMMState) pending_dimm_unplugs;
+ QTAILQ_HEAD(, SpaprDimmState) pending_dimm_unplugs;
+
+ /* State related to FWNMI option */
+
+ /* System Reset and Machine Check Notification Routine addresses
+ * registered by "ibm,nmi-register" RTAS call.
+ */
+ target_ulong fwnmi_system_reset_addr;
+ target_ulong fwnmi_machine_check_addr;
+
+ /* Machine Check FWNMI synchronization, fwnmi_machine_check_interlock is
+ * set to -1 if a FWNMI machine check is not in progress, else is set to
+ * the CPU that was delivered the machine check, and is set back to -1
+ * when that CPU makes an "ibm,nmi-interlock" RTAS call. The cond is used
+ * to synchronize other CPUs.
+ */
+ int fwnmi_machine_check_interlock;
+ QemuCond fwnmi_machine_check_interlock_cond;
+
+ /* Set by -boot */
+ char *boot_device;
/*< public >*/
char *kvm_type;
+ char *host_model;
+ char *host_serial;
- const char *icp_type;
int32_t irq_map_nr;
unsigned long *irq_map;
- sPAPRXive *xive;
- sPAPRIrq *irq;
+ SpaprIrq *irq;
qemu_irq *qirqs;
+ SpaprInterruptController *active_intc;
+ ICSState *ics;
+ SpaprXive *xive;
bool cmd_line_caps[SPAPR_CAP_NUM];
- sPAPRCapabilities def, eff, mig;
+ SpaprCapabilities def, eff, mig;
+
+ SpaprTpmProxy *tpm_proxy;
+
+ uint32_t FORM1_assoc_array[NUMA_NODES_MAX_NUM][FORM1_NUMA_ASSOC_SIZE];
+ uint32_t FORM2_assoc_array[NUMA_NODES_MAX_NUM][FORM2_NUMA_ASSOC_SIZE];
+
+ Error *fwnmi_migration_blocker;
+
+ SpaprWatchdog wds[WDT_MAX_WATCHDOGS];
};
#define H_SUCCESS 0
@@ -266,6 +363,12 @@ struct sPAPRMachineState {
#define H_P7 -60
#define H_P8 -61
#define H_P9 -62
+#define H_NOOP -63
+#define H_UNSUPPORTED -67
+#define H_OVERLAP -68
+#define H_STATE -75
+#define H_IN_USE -77
+#define H_INVALID_ELEMENT_VALUE -81
#define H_UNSUPPORTED_FLAG -256
#define H_MULTI_THREADS_ACTIVE -9005
@@ -303,7 +406,7 @@ struct sPAPRMachineState {
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
-#define H_SET_MODE_RESOURCE_SET_DAWR 2
+#define H_SET_MODE_RESOURCE_SET_DAWR0 2
#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
#define H_SET_MODE_RESOURCE_LE 4
@@ -334,9 +437,14 @@ struct sPAPRMachineState {
#define H_CPU_CHAR_HON_BRANCH_HINTS PPC_BIT(5)
#define H_CPU_CHAR_THR_RECONF_TRIG PPC_BIT(6)
#define H_CPU_CHAR_CACHE_COUNT_DIS PPC_BIT(7)
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST PPC_BIT(9)
+
#define H_CPU_BEHAV_FAVOUR_SECURITY PPC_BIT(0)
#define H_CPU_BEHAV_L1D_FLUSH_PR PPC_BIT(1)
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR PPC_BIT(2)
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE PPC_BIT(5)
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY PPC_BIT(7)
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS PPC_BIT(8)
/* Each control block has to be on a 4K boundary */
#define H_CB_ALIGNMENT 4096
@@ -471,8 +579,25 @@ struct sPAPRMachineState {
#define H_INT_ESB 0x3C8
#define H_INT_SYNC 0x3CC
#define H_INT_RESET 0x3D0
-
-#define MAX_HCALL_OPCODE H_INT_RESET
+#define H_SCM_READ_METADATA 0x3E4
+#define H_SCM_WRITE_METADATA 0x3E8
+#define H_SCM_BIND_MEM 0x3EC
+#define H_SCM_UNBIND_MEM 0x3F0
+#define H_SCM_UNBIND_ALL 0x3FC
+#define H_SCM_HEALTH 0x400
+#define H_RPT_INVALIDATE 0x448
+#define H_SCM_FLUSH 0x44C
+#define H_WATCHDOG 0x45C
+#define H_GUEST_GET_CAPABILITIES 0x460
+#define H_GUEST_SET_CAPABILITIES 0x464
+#define H_GUEST_CREATE 0x470
+#define H_GUEST_CREATE_VCPU 0x474
+#define H_GUEST_GET_STATE 0x478
+#define H_GUEST_SET_STATE 0x47C
+#define H_GUEST_RUN_VCPU 0x480
+#define H_GUEST_DELETE 0x488
+
+#define MAX_HCALL_OPCODE H_GUEST_DELETE
/* The hcalls above are standardized in PAPR and implemented by pHyp
* as well.
@@ -487,25 +612,60 @@ struct sPAPRMachineState {
/* Client Architecture support */
#define KVMPPC_H_CAS (KVMPPC_HCALL_BASE + 0x2)
#define KVMPPC_H_UPDATE_DT (KVMPPC_HCALL_BASE + 0x3)
-#define KVMPPC_HCALL_MAX KVMPPC_H_UPDATE_DT
+/* 0x4 was used for KVMPPC_H_UPDATE_PHANDLE in SLOF */
+#define KVMPPC_H_VOF_CLIENT (KVMPPC_HCALL_BASE + 0x5)
-typedef struct sPAPRDeviceTreeUpdateHeader {
+/* Platform-specific hcalls used for nested HV KVM */
+#define KVMPPC_H_SET_PARTITION_TABLE (KVMPPC_HCALL_BASE + 0x800)
+#define KVMPPC_H_ENTER_NESTED (KVMPPC_HCALL_BASE + 0x804)
+#define KVMPPC_H_TLB_INVALIDATE (KVMPPC_HCALL_BASE + 0x808)
+#define KVMPPC_H_COPY_TOFROM_GUEST (KVMPPC_HCALL_BASE + 0x80C)
+
+#define KVMPPC_HCALL_MAX KVMPPC_H_COPY_TOFROM_GUEST
+
+/*
+ * The hcall range 0xEF00 to 0xEF80 is reserved for use in facilitating
+ * Secure VM mode via an Ultravisor / Protected Execution Facility
+ */
+#define SVM_HCALL_BASE 0xEF00
+#define SVM_H_TPM_COMM 0xEF10
+#define SVM_HCALL_MAX SVM_H_TPM_COMM
+
+typedef struct SpaprDeviceTreeUpdateHeader {
uint32_t version_id;
-} sPAPRDeviceTreeUpdateHeader;
+} SpaprDeviceTreeUpdateHeader;
#define hcall_dprintf(fmt, ...) \
do { \
qemu_log_mask(LOG_GUEST_ERROR, "%s: " fmt, __func__, ## __VA_ARGS__); \
} while (0)
-typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, sPAPRMachineState *sm,
+typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
target_ulong opcode,
target_ulong *args);
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
+void spapr_unregister_hypercall(target_ulong opcode);
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
target_ulong *args);
+target_ulong vhyp_mmu_resize_hpt_prepare(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong shift);
+target_ulong vhyp_mmu_resize_hpt_commit(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong flags,
+ target_ulong shift);
+bool is_ram_address(SpaprMachineState *spapr, hwaddr addr);
+void push_sregs_to_kvm_pr(SpaprMachineState *spapr);
+
+/* Virtual Processor Area structure constants */
+#define VPA_MIN_SIZE 640
+#define VPA_SIZE_OFFSET 0x4
+#define VPA_SHARED_PROC_OFFSET 0x9
+#define VPA_SHARED_PROC_VAL 0x2
+#define VPA_DISPATCH_COUNTER 0x100
+
/* ibm,set-eeh-option */
#define RTAS_EEH_DISABLE 0
#define RTAS_EEH_ENABLE 1
@@ -559,6 +719,7 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
#define RTAS_DDW_PGSIZE_128M 0x20
#define RTAS_DDW_PGSIZE_256M 0x40
#define RTAS_DDW_PGSIZE_16G 0x80
+#define RTAS_DDW_PGSIZE_2M 0x100
/* RTAS tokens */
#define RTAS_TOKEN_BASE 0x2000
@@ -605,8 +766,11 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
#define RTAS_IBM_CREATE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x27)
#define RTAS_IBM_REMOVE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x28)
#define RTAS_IBM_RESET_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x29)
+#define RTAS_IBM_SUSPEND_ME (RTAS_TOKEN_BASE + 0x2A)
+#define RTAS_IBM_NMI_REGISTER (RTAS_TOKEN_BASE + 0x2B)
+#define RTAS_IBM_NMI_INTERLOCK (RTAS_TOKEN_BASE + 0x2C)
-#define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x2A)
+#define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x2D)
/* RTAS ibm,get-system-parameter token values */
#define RTAS_SYSPARM_SPLPAR_CHARACTERISTICS 20
@@ -639,7 +803,8 @@ static inline uint64_t ppc64_phys_to_real(uint64_t addr)
static inline uint32_t rtas_ld(target_ulong phys, int n)
{
- return ldl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n));
+ return ldl_be_phys(&address_space_memory,
+ ppc64_phys_to_real(phys + 4 * n));
}
static inline uint64_t rtas_ldq(target_ulong phys, int n)
@@ -649,19 +814,19 @@ static inline uint64_t rtas_ldq(target_ulong phys, int n)
static inline void rtas_st(target_ulong phys, int n, uint32_t val)
{
- stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n), val);
+ stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4 * n), val);
}
-typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, sPAPRMachineState *sm,
+typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
uint32_t token,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets);
void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn);
-target_ulong spapr_rtas_call(PowerPCCPU *cpu, sPAPRMachineState *sm,
+target_ulong spapr_rtas_call(PowerPCCPU *cpu, SpaprMachineState *sm,
uint32_t token, uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets);
void spapr_dt_rtas_tokens(void *fdt, int rtas);
-void spapr_load_rtas(sPAPRMachineState *spapr, void *fdt, hwaddr addr);
+void spapr_load_rtas(SpaprMachineState *spapr, void *fdt, hwaddr addr);
#define SPAPR_TCE_PAGE_SHIFT 12
#define SPAPR_TCE_PAGE_SIZE (1ULL << SPAPR_TCE_PAGE_SHIFT)
@@ -674,31 +839,33 @@ void spapr_load_rtas(sPAPRMachineState *spapr, void *fdt, hwaddr addr);
#define SPAPR_IS_PCI_LIOBN(liobn) (!!((liobn) & 0x80000000))
#define SPAPR_PCI_DMA_WINDOW_NUM(liobn) ((liobn) & 0xff)
+#define RTAS_MIN_SIZE 20 /* hv_rtas_size in SLOF */
#define RTAS_ERROR_LOG_MAX 2048
+/* Offset from rtas-base where error log is placed */
+#define RTAS_ERROR_LOG_OFFSET 0x30
+
#define RTAS_EVENT_SCAN_RATE 1
/* This helper should be used to encode interrupt specifiers when the related
* "interrupt-controller" node has its "#interrupt-cells" property set to 2 (ie,
* VIO devices, RTAS event sources and PHBs).
*/
-static inline void spapr_dt_xics_irq(uint32_t *intspec, int irq, bool is_lsi)
+static inline void spapr_dt_irq(uint32_t *intspec, int irq, bool is_lsi)
{
intspec[0] = cpu_to_be32(irq);
intspec[1] = is_lsi ? cpu_to_be32(1) : 0;
}
-typedef struct sPAPRTCETable sPAPRTCETable;
#define TYPE_SPAPR_TCE_TABLE "spapr-tce-table"
-#define SPAPR_TCE_TABLE(obj) \
- OBJECT_CHECK(sPAPRTCETable, (obj), TYPE_SPAPR_TCE_TABLE)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprTceTable, SPAPR_TCE_TABLE)
#define TYPE_SPAPR_IOMMU_MEMORY_REGION "spapr-iommu-memory-region"
-#define SPAPR_IOMMU_MEMORY_REGION(obj) \
- OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_SPAPR_IOMMU_MEMORY_REGION)
+DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, SPAPR_IOMMU_MEMORY_REGION,
+ TYPE_SPAPR_IOMMU_MEMORY_REGION)
-struct sPAPRTCETable {
+struct SpaprTceTable {
DeviceState parent;
uint32_t liobn;
uint32_t nb_table;
@@ -709,69 +876,81 @@ struct sPAPRTCETable {
uint64_t *mig_table;
bool bypass;
bool need_vfio;
+ bool skipping_replay;
+ bool def_win;
int fd;
MemoryRegion root;
IOMMUMemoryRegion iommu;
- struct VIOsPAPRDevice *vdev; /* for @bypass migration compatibility only */
- QLIST_ENTRY(sPAPRTCETable) list;
+ struct SpaprVioDevice *vdev; /* for @bypass migration compatibility only */
+ QLIST_ENTRY(SpaprTceTable) list;
};
-sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn);
+SpaprTceTable *spapr_tce_find_by_liobn(target_ulong liobn);
-struct sPAPREventLogEntry {
+struct SpaprEventLogEntry {
uint32_t summary;
uint32_t extended_length;
void *extended_log;
- QTAILQ_ENTRY(sPAPREventLogEntry) next;
+ QTAILQ_ENTRY(SpaprEventLogEntry) next;
};
-void spapr_events_init(sPAPRMachineState *sm);
-void spapr_dt_events(sPAPRMachineState *sm, void *fdt);
-int spapr_h_cas_compose_response(sPAPRMachineState *sm,
- target_ulong addr, target_ulong size,
- sPAPROptionVector *ov5_updates);
-void close_htab_fd(sPAPRMachineState *spapr);
-void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr);
-void spapr_free_hpt(sPAPRMachineState *spapr);
-sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn);
-void spapr_tce_table_enable(sPAPRTCETable *tcet,
+void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space);
+void spapr_events_init(SpaprMachineState *sm);
+void spapr_dt_events(SpaprMachineState *sm, void *fdt);
+void close_htab_fd(SpaprMachineState *spapr);
+void spapr_setup_hpt(SpaprMachineState *spapr);
+void spapr_free_hpt(SpaprMachineState *spapr);
+void spapr_check_mmu_mode(bool guest_radix);
+SpaprTceTable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn);
+void spapr_tce_table_enable(SpaprTceTable *tcet,
uint32_t page_shift, uint64_t bus_offset,
uint32_t nb_table);
-void spapr_tce_table_disable(sPAPRTCETable *tcet);
-void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio);
+void spapr_tce_table_disable(SpaprTceTable *tcet);
+void spapr_tce_set_need_vfio(SpaprTceTable *tcet, bool need_vfio);
-MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet);
+MemoryRegion *spapr_tce_get_iommu(SpaprTceTable *tcet);
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
uint32_t liobn, uint64_t window, uint32_t size);
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
- sPAPRTCETable *tcet);
-void spapr_pci_switch_vga(bool big_endian);
-void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc);
-void spapr_hotplug_req_remove_by_index(sPAPRDRConnector *drc);
-void spapr_hotplug_req_add_by_count(sPAPRDRConnectorType drc_type,
+ SpaprTceTable *tcet);
+void spapr_pci_switch_vga(SpaprMachineState *spapr, bool big_endian);
+void spapr_hotplug_req_add_by_index(SpaprDrc *drc);
+void spapr_hotplug_req_remove_by_index(SpaprDrc *drc);
+void spapr_hotplug_req_add_by_count(SpaprDrcType drc_type,
uint32_t count);
-void spapr_hotplug_req_remove_by_count(sPAPRDRConnectorType drc_type,
+void spapr_hotplug_req_remove_by_count(SpaprDrcType drc_type,
uint32_t count);
-void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
+void spapr_hotplug_req_add_by_count_indexed(SpaprDrcType drc_type,
uint32_t count, uint32_t index);
-void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
+void spapr_hotplug_req_remove_by_count_indexed(SpaprDrcType drc_type,
uint32_t count, uint32_t index);
int spapr_hpt_shift_for_ramsize(uint64_t ramsize);
-void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
- Error **errp);
-void spapr_clear_pending_events(sPAPRMachineState *spapr);
-int spapr_max_server_number(sPAPRMachineState *spapr);
-
-/* CPU and LMB DRC release callbacks. */
+int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp);
+void spapr_clear_pending_events(SpaprMachineState *spapr);
+void spapr_clear_pending_hotplug_events(SpaprMachineState *spapr);
+void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev);
+int spapr_max_server_number(SpaprMachineState *spapr);
+void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
+ uint64_t pte0, uint64_t pte1);
+void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered);
+
+/* DRC callbacks. */
void spapr_core_release(DeviceState *dev);
+int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
+ void *fdt, int *fdt_start_offset, Error **errp);
void spapr_lmb_release(DeviceState *dev);
+int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
+ void *fdt, int *fdt_start_offset, Error **errp);
+void spapr_phb_release(DeviceState *dev);
+int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
+ void *fdt, int *fdt_start_offset, Error **errp);
-void spapr_rtc_read(sPAPRRTCState *rtc, struct tm *tm, uint32_t *ns);
-int spapr_rtc_import_offset(sPAPRRTCState *rtc, int64_t legacy_offset);
+void spapr_rtc_read(SpaprRtcState *rtc, struct tm *tm, uint32_t *ns);
+int spapr_rtc_import_offset(SpaprRtcState *rtc, int64_t legacy_offset);
#define TYPE_SPAPR_RNG "spapr-rng"
-#define SPAPR_MEMORY_BLOCK_SIZE (1 << 28) /* 256MB */
+#define SPAPR_MEMORY_BLOCK_SIZE ((hwaddr)1 << 28) /* 256MB */
/*
* This defines the maximum number of DIMM slots we can have for sPAPR
@@ -796,13 +975,14 @@ int spapr_rtc_import_offset(sPAPRRTCState *rtc, int64_t legacy_offset);
#define SPAPR_LMB_FLAGS_ASSIGNED 0x00000008
#define SPAPR_LMB_FLAGS_DRC_INVALID 0x00000020
#define SPAPR_LMB_FLAGS_RESERVED 0x00000080
+#define SPAPR_LMB_FLAGS_HOTREMOVABLE 0x00000100
void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg);
#define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))
int spapr_get_vcpu_id(PowerPCCPU *cpu);
-void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp);
+bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp);
PowerPCCPU *spapr_find_cpu(int vcpu_id);
int spapr_caps_pre_load(void *opaque);
@@ -817,20 +997,27 @@ extern const VMStateDescription vmstate_spapr_cap_dfp;
extern const VMStateDescription vmstate_spapr_cap_cfpc;
extern const VMStateDescription vmstate_spapr_cap_sbbc;
extern const VMStateDescription vmstate_spapr_cap_ibs;
+extern const VMStateDescription vmstate_spapr_cap_hpt_maxpagesize;
extern const VMStateDescription vmstate_spapr_cap_nested_kvm_hv;
-
-static inline uint8_t spapr_get_cap(sPAPRMachineState *spapr, int cap)
+extern const VMStateDescription vmstate_spapr_cap_nested_papr;
+extern const VMStateDescription vmstate_spapr_cap_large_decr;
+extern const VMStateDescription vmstate_spapr_cap_ccf_assist;
+extern const VMStateDescription vmstate_spapr_cap_fwnmi;
+extern const VMStateDescription vmstate_spapr_cap_rpt_invalidate;
+extern const VMStateDescription vmstate_spapr_wdt;
+
+static inline uint8_t spapr_get_cap(SpaprMachineState *spapr, int cap)
{
return spapr->eff.caps[cap];
}
-void spapr_caps_init(sPAPRMachineState *spapr);
-void spapr_caps_apply(sPAPRMachineState *spapr);
-void spapr_caps_cpu_apply(sPAPRMachineState *spapr, PowerPCCPU *cpu);
-void spapr_caps_add_properties(sPAPRMachineClass *smc, Error **errp);
-int spapr_caps_post_migration(sPAPRMachineState *spapr);
+void spapr_caps_init(SpaprMachineState *spapr);
+void spapr_caps_apply(SpaprMachineState *spapr);
+void spapr_caps_cpu_apply(SpaprMachineState *spapr, PowerPCCPU *cpu);
+void spapr_caps_add_properties(SpaprMachineClass *smc);
+int spapr_caps_post_migration(SpaprMachineState *spapr);
-void spapr_check_pagesize(sPAPRMachineState *spapr, hwaddr pagesize,
+bool spapr_check_pagesize(SpaprMachineState *spapr, hwaddr pagesize,
Error **errp);
/*
* XIVE definitions
@@ -839,4 +1026,28 @@ void spapr_check_pagesize(sPAPRMachineState *spapr, hwaddr pagesize,
#define SPAPR_OV5_XIVE_EXPLOIT 0x40
#define SPAPR_OV5_XIVE_BOTH 0x80 /* Only to advertise on the platform */
+void spapr_set_all_lpcrs(target_ulong value, target_ulong mask);
+void spapr_init_all_lpcrs(target_ulong value, target_ulong mask);
+hwaddr spapr_get_rtas_addr(void);
+bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr);
+
+void spapr_vof_reset(SpaprMachineState *spapr, void *fdt, Error **errp);
+void spapr_vof_quiesce(MachineState *ms);
+bool spapr_vof_setprop(MachineState *ms, const char *path, const char *propname,
+ void *val, int vallen);
+target_ulong spapr_h_vof_client(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args);
+target_ulong spapr_vof_client_architecture_support(MachineState *ms,
+ CPUState *cs,
+ target_ulong ovec_addr);
+void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt);
+
+/* H_WATCHDOG */
+void spapr_watchdog_init(SpaprMachineState *spapr);
+void spapr_register_nested_hv(void);
+void spapr_unregister_nested_hv(void);
+void spapr_nested_reset(SpaprMachineState *spapr);
+void spapr_register_nested_papr(void);
+void spapr_unregister_nested_papr(void);
+
#endif /* HW_SPAPR_H */
diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index 9e2821e4b3..69a52e39b8 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -9,48 +9,56 @@
#ifndef HW_SPAPR_CPU_CORE_H
#define HW_SPAPR_CPU_CORE_H
-#include "hw/qdev.h"
#include "hw/cpu/core.h"
+#include "hw/qdev-core.h"
#include "target/ppc/cpu-qom.h"
#include "target/ppc/cpu.h"
+#include "qom/object.h"
#define TYPE_SPAPR_CPU_CORE "spapr-cpu-core"
-#define SPAPR_CPU_CORE(obj) \
- OBJECT_CHECK(sPAPRCPUCore, (obj), TYPE_SPAPR_CPU_CORE)
-#define SPAPR_CPU_CORE_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRCPUCoreClass, (klass), TYPE_SPAPR_CPU_CORE)
-#define SPAPR_CPU_CORE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRCPUCoreClass, (obj), TYPE_SPAPR_CPU_CORE)
+OBJECT_DECLARE_TYPE(SpaprCpuCore, SpaprCpuCoreClass,
+ SPAPR_CPU_CORE)
#define SPAPR_CPU_CORE_TYPE_NAME(model) model "-" TYPE_SPAPR_CPU_CORE
-typedef struct sPAPRCPUCore {
+struct SpaprCpuCore {
/*< private >*/
CPUCore parent_obj;
/*< public >*/
PowerPCCPU **threads;
int node_id;
- bool pre_3_0_migration; /* older machine don't know about sPAPRCPUState */
-} sPAPRCPUCore;
+ bool pre_3_0_migration; /* older machines don't know about SpaprCpuState */
+};
-typedef struct sPAPRCPUCoreClass {
+struct SpaprCpuCoreClass {
DeviceClass parent_class;
const char *cpu_type;
-} sPAPRCPUCoreClass;
+};
const char *spapr_get_cpu_core_type(const char *cpu_type);
-void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip, target_ulong r3);
+void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip,
+ target_ulong r1, target_ulong r3,
+ target_ulong r4);
-typedef struct sPAPRCPUState {
+struct nested_ppc_state;
+
+typedef struct SpaprCpuState {
uint64_t vpa_addr;
uint64_t slb_shadow_addr, slb_shadow_size;
uint64_t dtl_addr, dtl_size;
-} sPAPRCPUState;
+ bool prod; /* not migrated, only used to improve dispatch latencies */
+ struct ICPState *icp;
+ struct XiveTCTX *tctx;
+
+ /* Fields for nested-HV support */
+ bool in_nested; /* true while the L2 is executing */
+ struct nested_ppc_state *nested_host_state; /* holds the L1 state while L2 executes */
+} SpaprCpuState;
-static inline sPAPRCPUState *spapr_cpu_state(PowerPCCPU *cpu)
+static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)
{
- return (sPAPRCPUState *)cpu->machine_data;
+ return (SpaprCpuState *)cpu->machine_data;
}
#endif
diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h
index f6ff32e7e2..02a63b3666 100644
--- a/include/hw/ppc/spapr_drc.h
+++ b/include/hw/ppc/spapr_drc.h
@@ -14,64 +14,38 @@
#define HW_SPAPR_DRC_H
#include <libfdt.h>
-#include "qapi/qapi-types-run-state.h"
#include "qom/object.h"
-#include "sysemu/sysemu.h"
-#include "hw/qdev.h"
+#include "sysemu/runstate.h"
+#include "hw/qdev-core.h"
+#include "qapi/error.h"
#define TYPE_SPAPR_DR_CONNECTOR "spapr-dr-connector"
#define SPAPR_DR_CONNECTOR_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DR_CONNECTOR)
+ OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DR_CONNECTOR)
#define SPAPR_DR_CONNECTOR_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, \
+ OBJECT_CLASS_CHECK(SpaprDrcClass, klass, \
TYPE_SPAPR_DR_CONNECTOR)
-#define SPAPR_DR_CONNECTOR(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
+#define SPAPR_DR_CONNECTOR(obj) OBJECT_CHECK(SpaprDrc, (obj), \
TYPE_SPAPR_DR_CONNECTOR)
#define TYPE_SPAPR_DRC_PHYSICAL "spapr-drc-physical"
-#define SPAPR_DRC_PHYSICAL_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_PHYSICAL)
-#define SPAPR_DRC_PHYSICAL_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, \
- TYPE_SPAPR_DRC_PHYSICAL)
-#define SPAPR_DRC_PHYSICAL(obj) OBJECT_CHECK(sPAPRDRCPhysical, (obj), \
+#define SPAPR_DRC_PHYSICAL(obj) OBJECT_CHECK(SpaprDrcPhysical, (obj), \
TYPE_SPAPR_DRC_PHYSICAL)
#define TYPE_SPAPR_DRC_LOGICAL "spapr-drc-logical"
-#define SPAPR_DRC_LOGICAL_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_LOGICAL)
-#define SPAPR_DRC_LOGICAL_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, \
- TYPE_SPAPR_DRC_LOGICAL)
-#define SPAPR_DRC_LOGICAL(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
- TYPE_SPAPR_DRC_LOGICAL)
#define TYPE_SPAPR_DRC_CPU "spapr-drc-cpu"
-#define SPAPR_DRC_CPU_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_CPU)
-#define SPAPR_DRC_CPU_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, TYPE_SPAPR_DRC_CPU)
-#define SPAPR_DRC_CPU(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
- TYPE_SPAPR_DRC_CPU)
#define TYPE_SPAPR_DRC_PCI "spapr-drc-pci"
-#define SPAPR_DRC_PCI_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_PCI)
-#define SPAPR_DRC_PCI_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, TYPE_SPAPR_DRC_PCI)
-#define SPAPR_DRC_PCI(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
- TYPE_SPAPR_DRC_PCI)
#define TYPE_SPAPR_DRC_LMB "spapr-drc-lmb"
-#define SPAPR_DRC_LMB_GET_CLASS(obj) \
- OBJECT_GET_CLASS(sPAPRDRConnectorClass, obj, TYPE_SPAPR_DRC_LMB)
-#define SPAPR_DRC_LMB_CLASS(klass) \
- OBJECT_CLASS_CHECK(sPAPRDRConnectorClass, klass, TYPE_SPAPR_DRC_LMB)
-#define SPAPR_DRC_LMB(obj) OBJECT_CHECK(sPAPRDRConnector, (obj), \
- TYPE_SPAPR_DRC_LMB)
+
+#define TYPE_SPAPR_DRC_PHB "spapr-drc-phb"
+
+#define TYPE_SPAPR_DRC_PMEM "spapr-drc-pmem"
/*
- * Various hotplug types managed by sPAPRDRConnector
+ * Various hotplug types managed by SpaprDrc
*
* these are somewhat arbitrary, but to make things easier
* when generating DRC indexes later we've aligned the bit
@@ -87,7 +61,8 @@ typedef enum {
SPAPR_DR_CONNECTOR_TYPE_SHIFT_VIO = 3,
SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI = 4,
SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB = 8,
-} sPAPRDRConnectorTypeShift;
+ SPAPR_DR_CONNECTOR_TYPE_SHIFT_PMEM = 9,
+} SpaprDrcTypeShift;
typedef enum {
SPAPR_DR_CONNECTOR_TYPE_ANY = ~0,
@@ -96,7 +71,8 @@ typedef enum {
SPAPR_DR_CONNECTOR_TYPE_VIO = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_VIO,
SPAPR_DR_CONNECTOR_TYPE_PCI = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI,
SPAPR_DR_CONNECTOR_TYPE_LMB = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB,
-} sPAPRDRConnectorType;
+ SPAPR_DR_CONNECTOR_TYPE_PMEM = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_PMEM,
+} SpaprDrcType;
/*
* set via set-indicator RTAS calls
@@ -108,7 +84,7 @@ typedef enum {
typedef enum {
SPAPR_DR_ISOLATION_STATE_ISOLATED = 0,
SPAPR_DR_ISOLATION_STATE_UNISOLATED = 1
-} sPAPRDRIsolationState;
+} SpaprDRIsolationState;
/*
* set via set-indicator RTAS calls
@@ -124,7 +100,7 @@ typedef enum {
SPAPR_DR_ALLOCATION_STATE_USABLE = 1,
SPAPR_DR_ALLOCATION_STATE_EXCHANGE = 2,
SPAPR_DR_ALLOCATION_STATE_RECOVER = 3
-} sPAPRDRAllocationState;
+} SpaprDRAllocationState;
/*
* DR-indicator (LED/visual indicator)
@@ -143,7 +119,7 @@ typedef enum {
SPAPR_DR_INDICATOR_ACTIVE = 1,
SPAPR_DR_INDICATOR_IDENTIFY = 2,
SPAPR_DR_INDICATOR_ACTION = 3,
-} sPAPRDRIndicatorState;
+} SpaprDRIndicatorState;
/*
* returned via get-sensor-state RTAS calls
@@ -161,7 +137,7 @@ typedef enum {
SPAPR_DR_ENTITY_SENSE_UNUSABLE = 2,
SPAPR_DR_ENTITY_SENSE_EXCHANGE = 3,
SPAPR_DR_ENTITY_SENSE_RECOVER = 4,
-} sPAPRDREntitySense;
+} SpaprDREntitySense;
typedef enum {
SPAPR_DR_CC_RESPONSE_NEXT_SIB = 1, /* currently unused */
@@ -172,7 +148,7 @@ typedef enum {
SPAPR_DR_CC_RESPONSE_ERROR = -1,
SPAPR_DR_CC_RESPONSE_CONTINUE = -2,
SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE = -9003,
-} sPAPRDRCCResponse;
+} SpaprDRCCResponse;
typedef enum {
/*
@@ -190,9 +166,9 @@ typedef enum {
SPAPR_DRC_STATE_PHYSICAL_POWERON = 6,
SPAPR_DRC_STATE_PHYSICAL_UNISOLATE = 7,
SPAPR_DRC_STATE_PHYSICAL_CONFIGURED = 8,
-} sPAPRDRCState;
+} SpaprDrcState;
-typedef struct sPAPRDRConnector {
+typedef struct SpaprDrc {
/*< private >*/
DeviceState parent;
@@ -211,56 +187,71 @@ typedef struct sPAPRDRConnector {
bool unplug_requested;
void *fdt;
int fdt_start_offset;
-} sPAPRDRConnector;
+} SpaprDrc;
+
+struct SpaprMachineState;
-typedef struct sPAPRDRConnectorClass {
+typedef struct SpaprDrcClass {
/*< private >*/
DeviceClass parent;
- sPAPRDRCState empty_state;
- sPAPRDRCState ready_state;
+ SpaprDrcState empty_state;
+ SpaprDrcState ready_state;
/*< public >*/
- sPAPRDRConnectorTypeShift typeshift;
+ SpaprDrcTypeShift typeshift;
const char *typename; /* used in device tree, PAPR 13.5.2.6 & C.6.1 */
const char *drc_name_prefix; /* used other places in device tree */
- sPAPRDREntitySense (*dr_entity_sense)(sPAPRDRConnector *drc);
- uint32_t (*isolate)(sPAPRDRConnector *drc);
- uint32_t (*unisolate)(sPAPRDRConnector *drc);
+ SpaprDREntitySense (*dr_entity_sense)(SpaprDrc *drc);
+ uint32_t (*isolate)(SpaprDrc *drc);
+ uint32_t (*unisolate)(SpaprDrc *drc);
void (*release)(DeviceState *dev);
-} sPAPRDRConnectorClass;
-typedef struct sPAPRDRCPhysical {
+ int (*dt_populate)(SpaprDrc *drc, struct SpaprMachineState *spapr,
+ void *fdt, int *fdt_start_offset, Error **errp);
+} SpaprDrcClass;
+
+typedef struct SpaprDrcPhysical {
/*< private >*/
- sPAPRDRConnector parent;
+ SpaprDrc parent;
/* DR-indicator */
uint32_t dr_indicator;
-} sPAPRDRCPhysical;
+} SpaprDrcPhysical;
static inline bool spapr_drc_hotplugged(DeviceState *dev)
{
return dev->hotplugged && !runstate_check(RUN_STATE_INMIGRATE);
}
-void spapr_drc_reset(sPAPRDRConnector *drc);
+/* Returns true if an unplug request completed */
+bool spapr_drc_reset(SpaprDrc *drc);
-uint32_t spapr_drc_index(sPAPRDRConnector *drc);
-sPAPRDRConnectorType spapr_drc_type(sPAPRDRConnector *drc);
+uint32_t spapr_drc_index(SpaprDrc *drc);
+SpaprDrcType spapr_drc_type(SpaprDrc *drc);
-sPAPRDRConnector *spapr_dr_connector_new(Object *owner, const char *type,
+SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type,
uint32_t id);
-sPAPRDRConnector *spapr_drc_by_index(uint32_t index);
-sPAPRDRConnector *spapr_drc_by_id(const char *type, uint32_t id);
-int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
- uint32_t drc_type_mask);
+SpaprDrc *spapr_drc_by_index(uint32_t index);
+SpaprDrc *spapr_drc_by_id(const char *type, uint32_t id);
+int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask);
-void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
- int fdt_start_offset, Error **errp);
-void spapr_drc_detach(sPAPRDRConnector *drc);
-bool spapr_drc_needed(void *opaque);
+/*
+ * These functions respectively abort if called with a device already
+ * attached or no device attached. In the case of spapr_drc_attach(),
+ * this means that the attachability of the DRC *must* be checked
+ * beforehand (eg. check drc->dev at pre-plug).
+ */
+void spapr_drc_attach(SpaprDrc *drc, DeviceState *d);
+void spapr_drc_unplug_request(SpaprDrc *drc);
+
+/*
+ * Reset all DRCs, causing pending hot-plug/unplug requests to complete.
+ * Safely handles potential DRC removal (eg. PHBs or PCI bridges).
+ */
+void spapr_drc_reset_all(struct SpaprMachineState *spapr);
-static inline bool spapr_drc_unplug_requested(sPAPRDRConnector *drc)
+static inline bool spapr_drc_unplug_requested(SpaprDrc *drc)
{
return drc->unplug_requested;
}
diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h
index 14b02c3aca..4fd2d5853d 100644
--- a/include/hw/ppc/spapr_irq.h
+++ b/include/hw/ppc/spapr_irq.h
@@ -10,61 +10,120 @@
#ifndef HW_SPAPR_IRQ_H
#define HW_SPAPR_IRQ_H
+#include "target/ppc/cpu-qom.h"
+#include "qom/object.h"
+
/*
- * IRQ range offsets per device type
+ * The XIVE IRQ backend uses the same layout as the XICS backend but
+ * covers the full range of the IRQ number space. The IRQ numbers for
+ * the CPU IPIs are allocated at the bottom of this space, below 4K,
+ * to preserve compatibility with XICS which does not use that range.
+ */
+
+/*
+ * CPU IPI range (XIVE only)
*/
#define SPAPR_IRQ_IPI 0x0
-#define SPAPR_IRQ_EPOW 0x1000 /* XICS_IRQ_BASE offset */
-#define SPAPR_IRQ_HOTPLUG 0x1001
-#define SPAPR_IRQ_VIO 0x1100 /* 256 VIO devices */
-#define SPAPR_IRQ_PCI_LSI 0x1200 /* 32+ PHBs devices */
+#define SPAPR_IRQ_NR_IPIS 0x1000
+
+/*
+ * IRQ range offsets per device type
+ */
+
+#define SPAPR_XIRQ_BASE XICS_IRQ_BASE /* 0x1000 */
+#define SPAPR_IRQ_EPOW (SPAPR_XIRQ_BASE + 0x0000)
+#define SPAPR_IRQ_HOTPLUG (SPAPR_XIRQ_BASE + 0x0001)
+#define SPAPR_IRQ_VIO (SPAPR_XIRQ_BASE + 0x0100) /* 256 VIO devices */
+#define SPAPR_IRQ_PCI_LSI (SPAPR_XIRQ_BASE + 0x0200) /* 32+ PHBs devices */
+
+/* Offset of the dynamic range covered by the bitmap allocator */
+#define SPAPR_IRQ_MSI (SPAPR_XIRQ_BASE + 0x0300)
+
+#define SPAPR_NR_XIRQS 0x1000
-#define SPAPR_IRQ_MSI 0x1300 /* Offset of the dynamic range covered
- * by the bitmap allocator */
+struct SpaprMachineState;
-typedef struct sPAPRMachineState sPAPRMachineState;
+typedef struct SpaprInterruptController SpaprInterruptController;
-void spapr_irq_msi_init(sPAPRMachineState *spapr, uint32_t nr_msis);
-int spapr_irq_msi_alloc(sPAPRMachineState *spapr, uint32_t num, bool align,
+#define TYPE_SPAPR_INTC "spapr-interrupt-controller"
+#define SPAPR_INTC(obj) \
+ INTERFACE_CHECK(SpaprInterruptController, (obj), TYPE_SPAPR_INTC)
+typedef struct SpaprInterruptControllerClass SpaprInterruptControllerClass;
+DECLARE_CLASS_CHECKERS(SpaprInterruptControllerClass, SPAPR_INTC,
+ TYPE_SPAPR_INTC)
+
+struct SpaprInterruptControllerClass {
+ InterfaceClass parent;
+
+ int (*activate)(SpaprInterruptController *intc, uint32_t nr_servers,
+ Error **errp);
+ void (*deactivate)(SpaprInterruptController *intc);
+
+ /*
+ * These methods will typically be called on all intcs, active and
+ * inactive
+ */
+ int (*cpu_intc_create)(SpaprInterruptController *intc,
+ PowerPCCPU *cpu, Error **errp);
+ void (*cpu_intc_reset)(SpaprInterruptController *intc, PowerPCCPU *cpu);
+ void (*cpu_intc_destroy)(SpaprInterruptController *intc, PowerPCCPU *cpu);
+ int (*claim_irq)(SpaprInterruptController *intc, int irq, bool lsi,
+ Error **errp);
+ void (*free_irq)(SpaprInterruptController *intc, int irq);
+
+ /* These methods should only be called on the active intc */
+ void (*set_irq)(SpaprInterruptController *intc, int irq, int val);
+ void (*print_info)(SpaprInterruptController *intc, Monitor *mon);
+ void (*dt)(SpaprInterruptController *intc, uint32_t nr_servers,
+ void *fdt, uint32_t phandle);
+ int (*post_load)(SpaprInterruptController *intc, int version_id);
+};
+
+void spapr_irq_update_active_intc(struct SpaprMachineState *spapr);
+
+int spapr_irq_cpu_intc_create(struct SpaprMachineState *spapr,
+ PowerPCCPU *cpu, Error **errp);
+void spapr_irq_cpu_intc_reset(struct SpaprMachineState *spapr, PowerPCCPU *cpu);
+void spapr_irq_cpu_intc_destroy(struct SpaprMachineState *spapr, PowerPCCPU *cpu);
+void spapr_irq_print_info(struct SpaprMachineState *spapr, Monitor *mon);
+void spapr_irq_dt(struct SpaprMachineState *spapr, uint32_t nr_servers,
+ void *fdt, uint32_t phandle);
+
+uint32_t spapr_irq_nr_msis(struct SpaprMachineState *spapr);
+int spapr_irq_msi_alloc(struct SpaprMachineState *spapr, uint32_t num, bool align,
Error **errp);
-void spapr_irq_msi_free(sPAPRMachineState *spapr, int irq, uint32_t num);
-void spapr_irq_msi_reset(sPAPRMachineState *spapr);
-
-typedef struct sPAPRIrq {
- uint32_t nr_irqs;
- uint32_t nr_msis;
- uint8_t ov5;
-
- void (*init)(sPAPRMachineState *spapr, Error **errp);
- int (*claim)(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp);
- void (*free)(sPAPRMachineState *spapr, int irq, int num);
- qemu_irq (*qirq)(sPAPRMachineState *spapr, int irq);
- void (*print_info)(sPAPRMachineState *spapr, Monitor *mon);
- void (*dt_populate)(sPAPRMachineState *spapr, uint32_t nr_servers,
- void *fdt, uint32_t phandle);
- void (*cpu_intc_create)(sPAPRMachineState *spapr, PowerPCCPU *cpu,
- Error **errp);
- int (*post_load)(sPAPRMachineState *spapr, int version_id);
- void (*reset)(sPAPRMachineState *spapr, Error **errp);
- void (*set_irq)(void *opaque, int srcno, int val);
-} sPAPRIrq;
-
-extern sPAPRIrq spapr_irq_xics;
-extern sPAPRIrq spapr_irq_xics_legacy;
-extern sPAPRIrq spapr_irq_xive;
-extern sPAPRIrq spapr_irq_dual;
-
-void spapr_irq_init(sPAPRMachineState *spapr, Error **errp);
-int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp);
-void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num);
-qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq);
-int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id);
-void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp);
+void spapr_irq_msi_free(struct SpaprMachineState *spapr, int irq, uint32_t num);
+
+typedef struct SpaprIrq {
+ bool xics;
+ bool xive;
+} SpaprIrq;
+
+extern SpaprIrq spapr_irq_xics;
+extern SpaprIrq spapr_irq_xics_legacy;
+extern SpaprIrq spapr_irq_xive;
+extern SpaprIrq spapr_irq_dual;
+
+void spapr_irq_init(struct SpaprMachineState *spapr, Error **errp);
+int spapr_irq_claim(struct SpaprMachineState *spapr, int irq, bool lsi, Error **errp);
+void spapr_irq_free(struct SpaprMachineState *spapr, int irq, int num);
+qemu_irq spapr_qirq(struct SpaprMachineState *spapr, int irq);
+int spapr_irq_post_load(struct SpaprMachineState *spapr, int version_id);
+void spapr_irq_reset(struct SpaprMachineState *spapr, Error **errp);
+int spapr_irq_get_phandle(struct SpaprMachineState *spapr, void *fdt, Error **errp);
+
+typedef int (*SpaprInterruptControllerInitKvm)(SpaprInterruptController *,
+ uint32_t, Error **);
+
+int spapr_irq_init_kvm(SpaprInterruptControllerInitKvm fn,
+ SpaprInterruptController *intc,
+ uint32_t nr_servers,
+ Error **errp);
/*
* XICS legacy routines
*/
-int spapr_irq_find(sPAPRMachineState *spapr, int num, bool align, Error **errp);
+int spapr_irq_find(struct SpaprMachineState *spapr, int num, bool align, Error **errp);
#define spapr_irq_findone(spapr, errp) spapr_irq_find(spapr, 1, false, errp)
#endif
diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h
new file mode 100644
index 0000000000..93ef14adcc
--- /dev/null
+++ b/include/hw/ppc/spapr_nested.h
@@ -0,0 +1,524 @@
+#ifndef HW_SPAPR_NESTED_H
+#define HW_SPAPR_NESTED_H
+
+#include "target/ppc/cpu.h"
+
+/* Guest State Buffer Element IDs */
+#define GSB_HV_VCPU_IGNORED_ID 0x0000 /* An element whose value is ignored */
+#define GSB_HV_VCPU_STATE_SIZE 0x0001 /* HV internal format VCPU state size */
+#define GSB_VCPU_OUT_BUF_MIN_SZ 0x0002 /* Min size of the Run VCPU o/p buffer */
+#define GSB_VCPU_LPVR 0x0003 /* Logical PVR */
+#define GSB_TB_OFFSET 0x0004 /* Timebase Offset */
+#define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */
+#define GSB_PROCESS_TBL 0x0006 /* Process Table */
+ /* RESERVED 0x0007 - 0x0BFF */
+#define GSB_VCPU_IN_BUFFER 0x0C00 /* Run VCPU Input Buffer */
+#define GSB_VCPU_OUT_BUFFER 0x0C01 /* Run VCPU Out Buffer */
+#define GSB_VCPU_VPA 0x0C02 /* HRA to Guest VCPU VPA */
+ /* RESERVED 0x0C03 - 0x0FFF */
+#define GSB_VCPU_GPR0 0x1000
+#define GSB_VCPU_GPR1 0x1001
+#define GSB_VCPU_GPR2 0x1002
+#define GSB_VCPU_GPR3 0x1003
+#define GSB_VCPU_GPR4 0x1004
+#define GSB_VCPU_GPR5 0x1005
+#define GSB_VCPU_GPR6 0x1006
+#define GSB_VCPU_GPR7 0x1007
+#define GSB_VCPU_GPR8 0x1008
+#define GSB_VCPU_GPR9 0x1009
+#define GSB_VCPU_GPR10 0x100A
+#define GSB_VCPU_GPR11 0x100B
+#define GSB_VCPU_GPR12 0x100C
+#define GSB_VCPU_GPR13 0x100D
+#define GSB_VCPU_GPR14 0x100E
+#define GSB_VCPU_GPR15 0x100F
+#define GSB_VCPU_GPR16 0x1010
+#define GSB_VCPU_GPR17 0x1011
+#define GSB_VCPU_GPR18 0x1012
+#define GSB_VCPU_GPR19 0x1013
+#define GSB_VCPU_GPR20 0x1014
+#define GSB_VCPU_GPR21 0x1015
+#define GSB_VCPU_GPR22 0x1016
+#define GSB_VCPU_GPR23 0x1017
+#define GSB_VCPU_GPR24 0x1018
+#define GSB_VCPU_GPR25 0x1019
+#define GSB_VCPU_GPR26 0x101A
+#define GSB_VCPU_GPR27 0x101B
+#define GSB_VCPU_GPR28 0x101C
+#define GSB_VCPU_GPR29 0x101D
+#define GSB_VCPU_GPR30 0x101E
+#define GSB_VCPU_GPR31 0x101F
+#define GSB_VCPU_HDEC_EXPIRY_TB 0x1020
+#define GSB_VCPU_SPR_NIA 0x1021
+#define GSB_VCPU_SPR_MSR 0x1022
+#define GSB_VCPU_SPR_LR 0x1023
+#define GSB_VCPU_SPR_XER 0x1024
+#define GSB_VCPU_SPR_CTR 0x1025
+#define GSB_VCPU_SPR_CFAR 0x1026
+#define GSB_VCPU_SPR_SRR0 0x1027
+#define GSB_VCPU_SPR_SRR1 0x1028
+#define GSB_VCPU_SPR_DAR 0x1029
+#define GSB_VCPU_DEC_EXPIRE_TB 0x102A
+#define GSB_VCPU_SPR_VTB 0x102B
+#define GSB_VCPU_SPR_LPCR 0x102C
+#define GSB_VCPU_SPR_HFSCR 0x102D
+#define GSB_VCPU_SPR_FSCR 0x102E
+#define GSB_VCPU_SPR_FPSCR 0x102F
+#define GSB_VCPU_SPR_DAWR0 0x1030
+#define GSB_VCPU_SPR_DAWR1 0x1031
+#define GSB_VCPU_SPR_CIABR 0x1032
+#define GSB_VCPU_SPR_PURR 0x1033
+#define GSB_VCPU_SPR_SPURR 0x1034
+#define GSB_VCPU_SPR_IC 0x1035
+#define GSB_VCPU_SPR_SPRG0 0x1036
+#define GSB_VCPU_SPR_SPRG1 0x1037
+#define GSB_VCPU_SPR_SPRG2 0x1038
+#define GSB_VCPU_SPR_SPRG3 0x1039
+#define GSB_VCPU_SPR_PPR 0x103A
+#define GSB_VCPU_SPR_MMCR0 0x103B
+#define GSB_VCPU_SPR_MMCR1 0x103C
+#define GSB_VCPU_SPR_MMCR2 0x103D
+#define GSB_VCPU_SPR_MMCR3 0x103E
+#define GSB_VCPU_SPR_MMCRA 0x103F
+#define GSB_VCPU_SPR_SIER 0x1040
+#define GSB_VCPU_SPR_SIER2 0x1041
+#define GSB_VCPU_SPR_SIER3 0x1042
+#define GSB_VCPU_SPR_BESCR 0x1043
+#define GSB_VCPU_SPR_EBBHR 0x1044
+#define GSB_VCPU_SPR_EBBRR 0x1045
+#define GSB_VCPU_SPR_AMR 0x1046
+#define GSB_VCPU_SPR_IAMR 0x1047
+#define GSB_VCPU_SPR_AMOR 0x1048
+#define GSB_VCPU_SPR_UAMOR 0x1049
+#define GSB_VCPU_SPR_SDAR 0x104A
+#define GSB_VCPU_SPR_SIAR 0x104B
+#define GSB_VCPU_SPR_DSCR 0x104C
+#define GSB_VCPU_SPR_TAR 0x104D
+#define GSB_VCPU_SPR_DEXCR 0x104E
+#define GSB_VCPU_SPR_HDEXCR 0x104F
+#define GSB_VCPU_SPR_HASHKEYR 0x1050
+#define GSB_VCPU_SPR_HASHPKEYR 0x1051
+#define GSB_VCPU_SPR_CTRL 0x1052
+ /* RESERVED 0x1053 - 0x1FFF */
+#define GSB_VCPU_SPR_CR 0x2000
+#define GSB_VCPU_SPR_PIDR 0x2001
+#define GSB_VCPU_SPR_DSISR 0x2002
+#define GSB_VCPU_SPR_VSCR 0x2003
+#define GSB_VCPU_SPR_VRSAVE 0x2004
+#define GSB_VCPU_SPR_DAWRX0 0x2005
+#define GSB_VCPU_SPR_DAWRX1 0x2006
+#define GSB_VCPU_SPR_PMC1 0x2007
+#define GSB_VCPU_SPR_PMC2 0x2008
+#define GSB_VCPU_SPR_PMC3 0x2009
+#define GSB_VCPU_SPR_PMC4 0x200A
+#define GSB_VCPU_SPR_PMC5 0x200B
+#define GSB_VCPU_SPR_PMC6 0x200C
+#define GSB_VCPU_SPR_WORT 0x200D
+#define GSB_VCPU_SPR_PSPB 0x200E
+ /* RESERVED 0x200F - 0x2FFF */
+#define GSB_VCPU_SPR_VSR0 0x3000
+#define GSB_VCPU_SPR_VSR1 0x3001
+#define GSB_VCPU_SPR_VSR2 0x3002
+#define GSB_VCPU_SPR_VSR3 0x3003
+#define GSB_VCPU_SPR_VSR4 0x3004
+#define GSB_VCPU_SPR_VSR5 0x3005
+#define GSB_VCPU_SPR_VSR6 0x3006
+#define GSB_VCPU_SPR_VSR7 0x3007
+#define GSB_VCPU_SPR_VSR8 0x3008
+#define GSB_VCPU_SPR_VSR9 0x3009
+#define GSB_VCPU_SPR_VSR10 0x300A
+#define GSB_VCPU_SPR_VSR11 0x300B
+#define GSB_VCPU_SPR_VSR12 0x300C
+#define GSB_VCPU_SPR_VSR13 0x300D
+#define GSB_VCPU_SPR_VSR14 0x300E
+#define GSB_VCPU_SPR_VSR15 0x300F
+#define GSB_VCPU_SPR_VSR16 0x3010
+#define GSB_VCPU_SPR_VSR17 0x3011
+#define GSB_VCPU_SPR_VSR18 0x3012
+#define GSB_VCPU_SPR_VSR19 0x3013
+#define GSB_VCPU_SPR_VSR20 0x3014
+#define GSB_VCPU_SPR_VSR21 0x3015
+#define GSB_VCPU_SPR_VSR22 0x3016
+#define GSB_VCPU_SPR_VSR23 0x3017
+#define GSB_VCPU_SPR_VSR24 0x3018
+#define GSB_VCPU_SPR_VSR25 0x3019
+#define GSB_VCPU_SPR_VSR26 0x301A
+#define GSB_VCPU_SPR_VSR27 0x301B
+#define GSB_VCPU_SPR_VSR28 0x301C
+#define GSB_VCPU_SPR_VSR29 0x301D
+#define GSB_VCPU_SPR_VSR30 0x301E
+#define GSB_VCPU_SPR_VSR31 0x301F
+#define GSB_VCPU_SPR_VSR32 0x3020
+#define GSB_VCPU_SPR_VSR33 0x3021
+#define GSB_VCPU_SPR_VSR34 0x3022
+#define GSB_VCPU_SPR_VSR35 0x3023
+#define GSB_VCPU_SPR_VSR36 0x3024
+#define GSB_VCPU_SPR_VSR37 0x3025
+#define GSB_VCPU_SPR_VSR38 0x3026
+#define GSB_VCPU_SPR_VSR39 0x3027
+#define GSB_VCPU_SPR_VSR40 0x3028
+#define GSB_VCPU_SPR_VSR41 0x3029
+#define GSB_VCPU_SPR_VSR42 0x302A
+#define GSB_VCPU_SPR_VSR43 0x302B
+#define GSB_VCPU_SPR_VSR44 0x302C
+#define GSB_VCPU_SPR_VSR45 0x302D
+#define GSB_VCPU_SPR_VSR46 0x302E
+#define GSB_VCPU_SPR_VSR47 0x302F
+#define GSB_VCPU_SPR_VSR48 0x3030
+#define GSB_VCPU_SPR_VSR49 0x3031
+#define GSB_VCPU_SPR_VSR50 0x3032
+#define GSB_VCPU_SPR_VSR51 0x3033
+#define GSB_VCPU_SPR_VSR52 0x3034
+#define GSB_VCPU_SPR_VSR53 0x3035
+#define GSB_VCPU_SPR_VSR54 0x3036
+#define GSB_VCPU_SPR_VSR55 0x3037
+#define GSB_VCPU_SPR_VSR56 0x3038
+#define GSB_VCPU_SPR_VSR57 0x3039
+#define GSB_VCPU_SPR_VSR58 0x303A
+#define GSB_VCPU_SPR_VSR59 0x303B
+#define GSB_VCPU_SPR_VSR60 0x303C
+#define GSB_VCPU_SPR_VSR61 0x303D
+#define GSB_VCPU_SPR_VSR62 0x303E
+#define GSB_VCPU_SPR_VSR63 0x303F
+ /* RESERVED 0x3040 - 0xEFFF */
+#define GSB_VCPU_SPR_HDAR 0xF000
+#define GSB_VCPU_SPR_HDSISR 0xF001
+#define GSB_VCPU_SPR_HEIR 0xF002
+#define GSB_VCPU_SPR_ASDR 0xF003
+/* End of list of Guest State Buffer Element IDs */
+#define GSB_LAST GSB_VCPU_SPR_ASDR
+
+typedef struct SpaprMachineStateNested {
+ uint64_t ptcr;
+ uint8_t api;
+#define NESTED_API_KVM_HV 1
+#define NESTED_API_PAPR 2
+ bool capabilities_set;
+ uint32_t pvr_base;
+ GHashTable *guests;
+} SpaprMachineStateNested;
+
+typedef struct SpaprMachineStateNestedGuest {
+ uint32_t pvr_logical;
+ unsigned long nr_vcpus;
+ uint64_t parttbl[2];
+ uint64_t tb_offset;
+ struct SpaprMachineStateNestedGuestVcpu *vcpus;
+} SpaprMachineStateNestedGuest;
+
+/* Nested PAPR API related macros */
+#define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000
+#define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000
+#define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000
+#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \
+ H_GUEST_CAPABILITIES_P9_MODE)
+#define H_GUEST_CAP_COPY_MEM_BMAP 0
+#define H_GUEST_CAP_P9_MODE_BMAP 1
+#define H_GUEST_CAP_P10_MODE_BMAP 2
+#define PAPR_NESTED_GUEST_MAX 4096
+#define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL
+#define PAPR_NESTED_GUEST_VCPU_MAX 2048
+#define VCPU_OUT_BUF_MIN_SZ 0x80ULL
+#define HVMASK_DEFAULT 0xffffffffffffffff
+#define HVMASK_LPCR 0x0070000003820800
+#define HVMASK_MSR 0xEBFFFFFFFFBFEFFF
+#define HVMASK_HDEXCR 0x00000000FFFFFFFF
+#define HVMASK_TB_OFFSET 0x000000FFFFFFFFFF
+#define GSB_MAX_BUF_SIZE (1024 * 1024)
+#define H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE 0x8000000000000000
+#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1
+#define GUEST_STATE_REQUEST_SET 0x2
+
+/*
+ * As per ISA v3.1B, the following bits are reserved:
+ * 0:2
+ * 4:57 (ISA mentions bit 58 as well but it should be used for P10)
+ * 61:63 (hence, haven't included PCR bits for v2.06 and v2.05
+ * in LOW BITS)
+ */
+#define PCR_LOW_BITS (PCR_COMPAT_3_10 | PCR_COMPAT_3_00)
+#define HVMASK_PCR (~PCR_LOW_BITS)
+
+#define GUEST_STATE_ELEMENT(i, sz, s, f, ptr, c) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = ptr, \
+ .offset = offsetof(struct s, f), \
+ .copy = (c) \
+}
+
+#define GSBE_NESTED(i, sz, f, c) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = get_guest_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+}
+
+#define GSBE_NESTED_MSK(i, sz, f, c, m) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = get_guest_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\
+ .copy = (c), \
+ .mask = (m) \
+}
+
+#define GSBE_NESTED_VCPU(i, sz, f, c) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = get_vcpu_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNestedGuestVcpu, f),\
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+}
+
+#define GUEST_STATE_ELEMENT_NOP(i, sz) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = NULL, \
+ .offset = 0, \
+ .copy = NULL, \
+ .mask = HVMASK_DEFAULT \
+}
+
+#define GUEST_STATE_ELEMENT_NOP_DW(i) \
+ GUEST_STATE_ELEMENT_NOP(i, 8)
+#define GUEST_STATE_ELEMENT_NOP_W(i) \
+ GUEST_STATE_ELEMENT_NOP(i, 4)
+
+#define GUEST_STATE_ELEMENT_BASE(i, s, c) { \
+ .id = (i), \
+ .size = (s), \
+ .location = get_vcpu_state_ptr, \
+ .offset = 0, \
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+ }
+
+#define GUEST_STATE_ELEMENT_OFF(i, s, f, c) { \
+ .id = (i), \
+ .size = (s), \
+ .location = get_vcpu_state_ptr, \
+ .offset = offsetof(struct nested_ppc_state, f), \
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+ }
+
+#define GUEST_STATE_ELEMENT_MSK(i, s, f, c, m) { \
+ .id = (i), \
+ .size = (s), \
+ .location = get_vcpu_state_ptr, \
+ .offset = offsetof(struct nested_ppc_state, f), \
+ .copy = (c), \
+ .mask = (m) \
+ }
+
+#define GUEST_STATE_ELEMENT_ENV_QW(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 16, f, copy_state_16to16)
+#define GUEST_STATE_ELEMENT_ENV_DW(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 8, f, copy_state_8to8)
+#define GUEST_STATE_ELEMENT_ENV_W(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to8)
+#define GUEST_STATE_ELEMENT_ENV_WW(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to4)
+#define GSE_ENV_DWM(i, f, m) \
+ GUEST_STATE_ELEMENT_MSK(i, 8, f, copy_state_8to8, m)
+
+struct guest_state_element {
+ uint16_t id;
+ uint16_t size;
+ uint8_t value[];
+} QEMU_PACKED;
+
+struct guest_state_buffer {
+ uint32_t num_elements;
+ struct guest_state_element elements[];
+} QEMU_PACKED;
+
+/* Actual buffer plus some metadata about the request */
+struct guest_state_request {
+ struct guest_state_buffer *gsb;
+ int64_t buf;
+ int64_t len;
+ uint16_t flags;
+};
+
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New member must be added at the end.
+ */
+struct kvmppc_hv_guest_state {
+ uint64_t version; /* version of this structure layout, must be first */
+ uint32_t lpid;
+ uint32_t vcpu_token;
+ /* These registers are hypervisor privileged (at least for writing) */
+ uint64_t lpcr;
+ uint64_t pcr;
+ uint64_t amor;
+ uint64_t dpdes;
+ uint64_t hfscr;
+ int64_t tb_offset;
+ uint64_t dawr0;
+ uint64_t dawrx0;
+ uint64_t ciabr;
+ uint64_t hdec_expiry;
+ uint64_t purr;
+ uint64_t spurr;
+ uint64_t ic;
+ uint64_t vtb;
+ uint64_t hdar;
+ uint64_t hdsisr;
+ uint64_t heir;
+ uint64_t asdr;
+ /* These are OS privileged but need to be set late in guest entry */
+ uint64_t srr0;
+ uint64_t srr1;
+ uint64_t sprg[4];
+ uint64_t pidr;
+ uint64_t cfar;
+ uint64_t ppr;
+ /* Version 1 ends here */
+ uint64_t dawr1;
+ uint64_t dawrx1;
+ /* Version 2 ends here */
+};
+
+/* Latest version of hv_guest_state structure */
+#define HV_GUEST_STATE_VERSION 2
+
+/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
+struct kvmppc_pt_regs {
+ uint64_t gpr[32];
+ uint64_t nip;
+ uint64_t msr;
+ uint64_t orig_gpr3; /* Used for restarting system calls */
+ uint64_t ctr;
+ uint64_t link;
+ uint64_t xer;
+ uint64_t ccr;
+ uint64_t softe; /* Soft enabled/disabled */
+ uint64_t trap; /* Reason for being here */
+ uint64_t dar; /* Fault registers */
+ uint64_t dsisr; /* on 4xx/Book-E used for ESR */
+ uint64_t result; /* Result of a system call */
+};
+
+/*
+ * nested_ppc_state is used to save the host CPU state before switching it to
+ * the guest CPU state, to be restored on H_ENTER_NESTED exit.
+ */
+struct nested_ppc_state {
+ uint64_t gpr[32];
+ uint64_t lr;
+ uint64_t ctr;
+ uint64_t cfar;
+ uint64_t msr;
+ uint64_t nip;
+ uint32_t cr;
+
+ uint64_t xer;
+
+ uint64_t lpcr;
+ uint64_t lpidr;
+ uint64_t pidr;
+ uint64_t pcr;
+ uint64_t dpdes;
+ uint64_t hfscr;
+ uint64_t srr0;
+ uint64_t srr1;
+ uint64_t sprg0;
+ uint64_t sprg1;
+ uint64_t sprg2;
+ uint64_t sprg3;
+ uint64_t ppr;
+
+ int64_t tb_offset;
+ /* Nested PAPR API */
+ uint64_t amor;
+ uint64_t dawr0;
+ uint64_t dawrx0;
+ uint64_t ciabr;
+ uint64_t purr;
+ uint64_t spurr;
+ uint64_t ic;
+ uint64_t vtb;
+ uint64_t hdar;
+ uint64_t hdsisr;
+ uint64_t heir;
+ uint64_t asdr;
+ uint64_t dawr1;
+ uint64_t dawrx1;
+ uint64_t dexcr;
+ uint64_t hdexcr;
+ uint64_t hashkeyr;
+ uint64_t hashpkeyr;
+ ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
+ uint64_t ebbhr;
+ uint64_t tar;
+ uint64_t ebbrr;
+ uint64_t bescr;
+ uint64_t iamr;
+ uint64_t amr;
+ uint64_t uamor;
+ uint64_t dscr;
+ uint64_t fscr;
+ uint64_t pspb;
+ uint64_t ctrl;
+ uint64_t vrsave;
+ uint64_t dar;
+ uint64_t dsisr;
+ uint64_t pmc1;
+ uint64_t pmc2;
+ uint64_t pmc3;
+ uint64_t pmc4;
+ uint64_t pmc5;
+ uint64_t pmc6;
+ uint64_t mmcr0;
+ uint64_t mmcr1;
+ uint64_t mmcr2;
+ uint64_t mmcra;
+ uint64_t sdar;
+ uint64_t siar;
+ uint64_t sier;
+ uint32_t vscr;
+ uint64_t fpscr;
+ int64_t dec_expiry_tb;
+};
+
+struct SpaprMachineStateNestedGuestVcpuRunBuf {
+ uint64_t addr;
+ uint64_t size;
+};
+
+typedef struct SpaprMachineStateNestedGuestVcpu {
+ bool enabled;
+ struct nested_ppc_state state;
+ struct SpaprMachineStateNestedGuestVcpuRunBuf runbufin;
+ struct SpaprMachineStateNestedGuestVcpuRunBuf runbufout;
+ int64_t tb_offset;
+ uint64_t hdecr_expiry_tb;
+} SpaprMachineStateNestedGuestVcpu;
+
+struct guest_state_element_type {
+ uint16_t id;
+ int size;
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2
+ uint16_t flags;
+ void *(*location)(SpaprMachineStateNestedGuest *, target_ulong);
+ size_t offset;
+ void (*copy)(void *, void *, bool);
+ uint64_t mask;
+};
+
+void spapr_exit_nested(PowerPCCPU *cpu, int excp);
+typedef struct SpaprMachineState SpaprMachineState;
+bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
+ target_ulong lpid, ppc_v3_pate_t *entry);
+uint8_t spapr_nested_api(SpaprMachineState *spapr);
+void spapr_nested_gsb_init(void);
+bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
+ target_ulong lpid, ppc_v3_pate_t *entry);
+#endif /* HW_SPAPR_NESTED_H */
diff --git a/include/hw/ppc/spapr_numa.h b/include/hw/ppc/spapr_numa.h
new file mode 100644
index 0000000000..7cb3367400
--- /dev/null
+++ b/include/hw/ppc/spapr_numa.h
@@ -0,0 +1,37 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition NUMA associativity handling
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Authors:
+ * Daniel Henrique Barboza <danielhb413@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_SPAPR_NUMA_H
+#define HW_SPAPR_NUMA_H
+
+#include "hw/boards.h"
+#include "hw/ppc/spapr.h"
+
+/*
+ * Having both SpaprMachineState and MachineState as arguments
+ * feels odd, but it will spare a MACHINE() call inside the
+ * function. spapr_machine_init() is the only caller for it, and
+ * it has both pointers resolved already.
+ */
+void spapr_numa_associativity_init(SpaprMachineState *spapr,
+ MachineState *machine);
+void spapr_numa_associativity_check(SpaprMachineState *spapr);
+void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas);
+void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
+ int offset, int nodeid);
+int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
+ int offset, PowerPCCPU *cpu);
+int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
+ int offset);
+unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine);
+
+#endif /* HW_SPAPR_NUMA_H */
diff --git a/include/hw/ppc/spapr_nvdimm.h b/include/hw/ppc/spapr_nvdimm.h
new file mode 100644
index 0000000000..e9436cb6ef
--- /dev/null
+++ b/include/hw/ppc/spapr_nvdimm.h
@@ -0,0 +1,26 @@
+/*
+ * QEMU PowerPC PAPR SCM backend definitions
+ *
+ * Copyright (c) 2020, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef HW_SPAPR_NVDIMM_H
+#define HW_SPAPR_NVDIMM_H
+
+#include "hw/mem/nvdimm.h"
+
+typedef struct SpaprDrc SpaprDrc;
+typedef struct SpaprMachineState SpaprMachineState;
+
+int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
+ void *fdt, int *fdt_start_offset, Error **errp);
+void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt);
+bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
+ uint64_t size, Error **errp);
+void spapr_add_nvdimm(DeviceState *dev, uint64_t slot);
+void spapr_nvdimm_finish_flushes(void);
+
+#endif
diff --git a/include/hw/ppc/spapr_ovec.h b/include/hw/ppc/spapr_ovec.h
index 0f2d8d715d..c3e8b98e7e 100644
--- a/include/hw/ppc/spapr_ovec.h
+++ b/include/hw/ppc/spapr_ovec.h
@@ -33,13 +33,13 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
-#ifndef _SPAPR_OVEC_H
-#define _SPAPR_OVEC_H
+
+#ifndef SPAPR_OVEC_H
+#define SPAPR_OVEC_H
#include "cpu.h"
-#include "migration/vmstate.h"
-typedef struct sPAPROptionVector sPAPROptionVector;
+typedef struct SpaprOptionVector SpaprOptionVector;
#define OV_BIT(byte, bit) ((byte - 1) * BITS_PER_BYTE + bit)
@@ -49,6 +49,7 @@ typedef struct sPAPROptionVector sPAPROptionVector;
/* option vector 5 */
#define OV5_DRCONF_MEMORY OV_BIT(2, 2)
#define OV5_FORM1_AFFINITY OV_BIT(5, 0)
+#define OV5_FORM2_AFFINITY OV_BIT(5, 2)
#define OV5_HP_EVT OV_BIT(6, 5)
#define OV5_HPT_RESIZE OV_BIT(6, 7)
#define OV5_DRMEM_V2 OV_BIT(22, 0)
@@ -61,23 +62,22 @@ typedef struct sPAPROptionVector sPAPROptionVector;
#define OV5_MMU_RADIX_GTSE OV_BIT(26, 1) /* Radix GTSE */
/* interfaces */
-sPAPROptionVector *spapr_ovec_new(void);
-sPAPROptionVector *spapr_ovec_clone(sPAPROptionVector *ov_orig);
-void spapr_ovec_intersect(sPAPROptionVector *ov,
- sPAPROptionVector *ov1,
- sPAPROptionVector *ov2);
-bool spapr_ovec_diff(sPAPROptionVector *ov,
- sPAPROptionVector *ov_old,
- sPAPROptionVector *ov_new);
-void spapr_ovec_cleanup(sPAPROptionVector *ov);
-void spapr_ovec_set(sPAPROptionVector *ov, long bitnr);
-void spapr_ovec_clear(sPAPROptionVector *ov, long bitnr);
-bool spapr_ovec_test(sPAPROptionVector *ov, long bitnr);
-sPAPROptionVector *spapr_ovec_parse_vector(target_ulong table_addr, int vector);
-int spapr_ovec_populate_dt(void *fdt, int fdt_offset,
- sPAPROptionVector *ov, const char *name);
+SpaprOptionVector *spapr_ovec_new(void);
+SpaprOptionVector *spapr_ovec_clone(SpaprOptionVector *ov_orig);
+void spapr_ovec_intersect(SpaprOptionVector *ov,
+ SpaprOptionVector *ov1,
+ SpaprOptionVector *ov2);
+bool spapr_ovec_subset(SpaprOptionVector *ov1, SpaprOptionVector *ov2);
+void spapr_ovec_cleanup(SpaprOptionVector *ov);
+void spapr_ovec_set(SpaprOptionVector *ov, long bitnr);
+void spapr_ovec_clear(SpaprOptionVector *ov, long bitnr);
+bool spapr_ovec_test(SpaprOptionVector *ov, long bitnr);
+bool spapr_ovec_empty(SpaprOptionVector *ov);
+SpaprOptionVector *spapr_ovec_parse_vector(target_ulong table_addr, int vector);
+int spapr_dt_ovec(void *fdt, int fdt_offset,
+ SpaprOptionVector *ov, const char *name);
/* migration */
extern const VMStateDescription vmstate_spapr_ovec;
-#endif /* !defined (_SPAPR_OVEC_H) */
+#endif /* SPAPR_OVEC_H */
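As a worked example of the OV_BIT() encoding, OV5_FORM2_AFFINITY = OV_BIT(5, 2) = (5 - 1) * 8 + 2 = bit 34 of option vector 5. A minimal sketch of setting and testing that capability with the interfaces declared above (illustrative only, not part of the patch):

/* Sketch: advertise FORM2 NUMA affinity in a freshly built option vector */
static void advertise_form2(void)
{
    SpaprOptionVector *ov5 = spapr_ovec_new();

    spapr_ovec_set(ov5, OV5_FORM2_AFFINITY);         /* byte 5, bit 2 -> bit 34 */
    assert(spapr_ovec_test(ov5, OV5_FORM2_AFFINITY));
    spapr_ovec_cleanup(ov5);
}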
diff --git a/include/hw/ppc/spapr_rtas.h b/include/hw/ppc/spapr_rtas.h
deleted file mode 100644
index 383611f10f..0000000000
--- a/include/hw/ppc/spapr_rtas.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef HW_SPAPR_RTAS_H
-#define HW_SPAPR_RTAS_H
-/*
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args,
- uint32_t nret, uint64_t rets);
-#endif /* HW_SPAPR_RTAS_H */
diff --git a/include/hw/ppc/spapr_tpm_proxy.h b/include/hw/ppc/spapr_tpm_proxy.h
new file mode 100644
index 0000000000..96d2a9697e
--- /dev/null
+++ b/include/hw/ppc/spapr_tpm_proxy.h
@@ -0,0 +1,30 @@
+/*
+ * SPAPR TPM Proxy/Hypercall
+ *
+ * Copyright IBM Corp. 2019
+ *
+ * Authors:
+ * Michael Roth <mdroth@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_SPAPR_TPM_PROXY_H
+#define HW_SPAPR_TPM_PROXY_H
+
+#include "qom/object.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_SPAPR_TPM_PROXY "spapr-tpm-proxy"
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprTpmProxy, SPAPR_TPM_PROXY)
+
+struct SpaprTpmProxy {
+ /*< private >*/
+ DeviceState parent;
+
+ char *host_path;
+ int host_fd;
+};
+
+#endif /* HW_SPAPR_TPM_PROXY_H */
diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
index e8b006d18f..7eae1a4847 100644
--- a/include/hw/ppc/spapr_vio.h
+++ b/include/hw/ppc/spapr_vio.h
@@ -11,7 +11,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,122 +22,132 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "hw/ppc/spapr.h"
#include "sysemu/dma.h"
+#include "hw/irq.h"
+#include "qom/object.h"
#define TYPE_VIO_SPAPR_DEVICE "vio-spapr-device"
-#define VIO_SPAPR_DEVICE(obj) \
- OBJECT_CHECK(VIOsPAPRDevice, (obj), TYPE_VIO_SPAPR_DEVICE)
-#define VIO_SPAPR_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VIOsPAPRDeviceClass, (klass), TYPE_VIO_SPAPR_DEVICE)
-#define VIO_SPAPR_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VIOsPAPRDeviceClass, (obj), TYPE_VIO_SPAPR_DEVICE)
+OBJECT_DECLARE_TYPE(SpaprVioDevice, SpaprVioDeviceClass,
+ VIO_SPAPR_DEVICE)
#define TYPE_SPAPR_VIO_BUS "spapr-vio-bus"
-#define SPAPR_VIO_BUS(obj) OBJECT_CHECK(VIOsPAPRBus, (obj), TYPE_SPAPR_VIO_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprVioBus, SPAPR_VIO_BUS)
#define TYPE_SPAPR_VIO_BRIDGE "spapr-vio-bridge"
-typedef struct VIOsPAPR_CRQ {
+typedef struct SpaprVioCrq {
uint64_t qladdr;
uint32_t qsize;
uint32_t qnext;
- int(*SendFunc)(struct VIOsPAPRDevice *vdev, uint8_t *crq);
-} VIOsPAPR_CRQ;
+ int(*SendFunc)(struct SpaprVioDevice *vdev, uint8_t *crq);
+} SpaprVioCrq;
-typedef struct VIOsPAPRDevice VIOsPAPRDevice;
-typedef struct VIOsPAPRBus VIOsPAPRBus;
-typedef struct VIOsPAPRDeviceClass {
+struct SpaprVioDeviceClass {
DeviceClass parent_class;
const char *dt_name, *dt_type, *dt_compatible;
target_ulong signal_mask;
uint32_t rtce_window_size;
- void (*realize)(VIOsPAPRDevice *dev, Error **errp);
- void (*reset)(VIOsPAPRDevice *dev);
- int (*devnode)(VIOsPAPRDevice *dev, void *fdt, int node_off);
-} VIOsPAPRDeviceClass;
+ void (*realize)(SpaprVioDevice *dev, Error **errp);
+ void (*reset)(SpaprVioDevice *dev);
+ int (*devnode)(SpaprVioDevice *dev, void *fdt, int node_off);
+ const char *(*get_dt_compatible)(SpaprVioDevice *dev);
+};
-struct VIOsPAPRDevice {
+struct SpaprVioDevice {
DeviceState qdev;
uint32_t reg;
uint32_t irq;
uint64_t signal_state;
- VIOsPAPR_CRQ crq;
+ SpaprVioCrq crq;
AddressSpace as;
MemoryRegion mrroot;
MemoryRegion mrbypass;
- sPAPRTCETable *tcet;
+ SpaprTceTable *tcet;
};
#define DEFINE_SPAPR_PROPERTIES(type, field) \
DEFINE_PROP_UINT32("reg", type, field.reg, -1)
-struct VIOsPAPRBus {
+struct SpaprVioBus {
BusState bus;
uint32_t next_reg;
};
-extern VIOsPAPRBus *spapr_vio_bus_init(void);
-extern VIOsPAPRDevice *spapr_vio_find_by_reg(VIOsPAPRBus *bus, uint32_t reg);
-void spapr_dt_vdevice(VIOsPAPRBus *bus, void *fdt);
-extern gchar *spapr_vio_stdout_path(VIOsPAPRBus *bus);
+SpaprVioBus *spapr_vio_bus_init(void);
+SpaprVioDevice *spapr_vio_find_by_reg(SpaprVioBus *bus, uint32_t reg);
+void spapr_dt_vdevice(SpaprVioBus *bus, void *fdt);
+gchar *spapr_vio_stdout_path(SpaprVioBus *bus);
-static inline qemu_irq spapr_vio_qirq(VIOsPAPRDevice *dev)
+static inline void spapr_vio_irq_pulse(SpaprVioDevice *dev)
{
- sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
- return spapr_qirq(spapr, dev->irq);
+ qemu_irq_pulse(spapr_qirq(spapr, dev->irq));
}
-static inline bool spapr_vio_dma_valid(VIOsPAPRDevice *dev, uint64_t taddr,
+static inline bool spapr_vio_dma_valid(SpaprVioDevice *dev, uint64_t taddr,
uint32_t size, DMADirection dir)
{
- return dma_memory_valid(&dev->as, taddr, size, dir);
+ return dma_memory_valid(&dev->as, taddr, size, dir, MEMTXATTRS_UNSPECIFIED);
}
-static inline int spapr_vio_dma_read(VIOsPAPRDevice *dev, uint64_t taddr,
+static inline int spapr_vio_dma_read(SpaprVioDevice *dev, uint64_t taddr,
void *buf, uint32_t size)
{
- return (dma_memory_read(&dev->as, taddr, buf, size) != 0) ?
+ return (dma_memory_read(&dev->as, taddr,
+ buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
H_DEST_PARM : H_SUCCESS;
}
-static inline int spapr_vio_dma_write(VIOsPAPRDevice *dev, uint64_t taddr,
+static inline int spapr_vio_dma_write(SpaprVioDevice *dev, uint64_t taddr,
const void *buf, uint32_t size)
{
- return (dma_memory_write(&dev->as, taddr, buf, size) != 0) ?
+ return (dma_memory_write(&dev->as, taddr,
+ buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
H_DEST_PARM : H_SUCCESS;
}
-static inline int spapr_vio_dma_set(VIOsPAPRDevice *dev, uint64_t taddr,
+static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr,
uint8_t c, uint32_t size)
{
- return (dma_memory_set(&dev->as, taddr, c, size) != 0) ?
+ return (dma_memory_set(&dev->as, taddr,
+ c, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
H_DEST_PARM : H_SUCCESS;
}
-#define vio_stb(_dev, _addr, _val) (stb_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_sth(_dev, _addr, _val) (stw_be_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_stl(_dev, _addr, _val) (stl_be_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_stq(_dev, _addr, _val) (stq_be_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_ldq(_dev, _addr) (ldq_be_dma(&(_dev)->as, (_addr)))
-
-int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq);
-
-VIOsPAPRDevice *vty_lookup(sPAPRMachineState *spapr, target_ulong reg);
-void vty_putchars(VIOsPAPRDevice *sdev, uint8_t *buf, int len);
-void spapr_vty_create(VIOsPAPRBus *bus, Chardev *chardev);
-void spapr_vlan_create(VIOsPAPRBus *bus, NICInfo *nd);
-void spapr_vscsi_create(VIOsPAPRBus *bus);
-
-VIOsPAPRDevice *spapr_vty_get_default(VIOsPAPRBus *bus);
+#define vio_stb(_dev, _addr, _val) \
+ (stb_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_sth(_dev, _addr, _val) \
+ (stw_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_stl(_dev, _addr, _val) \
+ (stl_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_stq(_dev, _addr, _val) \
+ (stq_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_ldq(_dev, _addr) \
+ ({ \
+ uint64_t _val; \
+ ldq_be_dma(&(_dev)->as, (_addr), &_val, MEMTXATTRS_UNSPECIFIED); \
+ _val; \
+ })
+
+int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq);
+
+SpaprVioDevice *vty_lookup(SpaprMachineState *spapr, target_ulong reg);
+void vty_putchars(SpaprVioDevice *sdev, uint8_t *buf, int len);
+void spapr_vty_create(SpaprVioBus *bus, Chardev *chardev);
+void spapr_vlan_create(SpaprVioBus *bus, NICInfo *nd);
+void spapr_vscsi_create(SpaprVioBus *bus);
+
+SpaprVioDevice *spapr_vty_get_default(SpaprVioBus *bus);
extern const VMStateDescription vmstate_spapr_vio;
#define VMSTATE_SPAPR_VIO(_f, _s) \
- VMSTATE_STRUCT(_f, _s, 0, vmstate_spapr_vio, VIOsPAPRDevice)
+ VMSTATE_STRUCT(_f, _s, 0, vmstate_spapr_vio, SpaprVioDevice)
-void spapr_vio_set_bypass(VIOsPAPRDevice *dev, bool bypass);
+void spapr_vio_set_bypass(SpaprVioDevice *dev, bool bypass);
#endif /* HW_SPAPR_VIO_H */
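To show how the reworked DMA helpers above are meant to be used, here is a small sketch (the helper itself is hypothetical; the queue layout is an assumption, and the H_* return codes come from the sPAPR headers this file includes):

/* Sketch: load a queue address with vio_ldq(), then DMA-read a CRQ entry */
static target_ulong fetch_crq(SpaprVioDevice *dev, uint64_t taddr)
{
    uint8_t crq[16];
    uint64_t qladdr = vio_ldq(dev, taddr);  /* statement expression yields the value */

    if (spapr_vio_dma_read(dev, qladdr, crq, sizeof(crq)) != H_SUCCESS) {
        return H_DEST_PARM;                 /* DMA helpers fold MemTxResult into hcall codes */
    }
    return H_SUCCESS;
}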
diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h
index 7fdc250574..b282960ad9 100644
--- a/include/hw/ppc/spapr_xive.h
+++ b/include/hw/ppc/spapr_xive.h
@@ -10,12 +10,17 @@
#ifndef PPC_SPAPR_XIVE_H
#define PPC_SPAPR_XIVE_H
+#include "hw/ppc/spapr_irq.h"
#include "hw/ppc/xive.h"
#define TYPE_SPAPR_XIVE "spapr-xive"
-#define SPAPR_XIVE(obj) OBJECT_CHECK(sPAPRXive, (obj), TYPE_SPAPR_XIVE)
+#define SPAPR_XIVE(obj) OBJECT_CHECK(SpaprXive, (obj), TYPE_SPAPR_XIVE)
+#define SPAPR_XIVE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SpaprXiveClass, (klass), TYPE_SPAPR_XIVE)
+#define SPAPR_XIVE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SpaprXiveClass, (obj), TYPE_SPAPR_XIVE)
-typedef struct sPAPRXive {
+typedef struct SpaprXive {
XiveRouter parent;
/* Internal interrupt source for IPIs and virtual devices */
@@ -26,6 +31,9 @@ typedef struct sPAPRXive {
XiveENDSource end_source;
hwaddr end_base;
+ /* DT */
+ gchar *nodename;
+
/* Routing table */
XiveEAS *eat;
uint32_t nr_irqs;
@@ -35,18 +43,57 @@ typedef struct sPAPRXive {
/* TIMA mapping address */
hwaddr tm_base;
MemoryRegion tm_mmio;
-} sPAPRXive;
-bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi);
-bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn);
-void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon);
+ /* KVM support */
+ int fd;
+ void *tm_mmap;
+ MemoryRegion tm_mmio_kvm;
+ VMChangeStateEntry *change;
+
+ uint8_t hv_prio;
+} SpaprXive;
+
+typedef struct SpaprXiveClass {
+ XiveRouterClass parent;
+
+ DeviceRealize parent_realize;
+} SpaprXiveClass;
+
+/*
+ * The sPAPR machine has a unique XIVE IC device. Assign a fixed value
+ * to the controller block id. It can nevertheless be changed for
+ * testing purposes.
+ */
+#define SPAPR_XIVE_BLOCK_ID 0x0
+
+struct SpaprMachineState;
+void spapr_xive_hcall_init(struct SpaprMachineState *spapr);
+void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable);
+void spapr_xive_map_mmio(SpaprXive *xive);
-typedef struct sPAPRMachineState sPAPRMachineState;
+int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
+ uint32_t *out_server, uint8_t *out_prio);
-void spapr_xive_hcall_init(sPAPRMachineState *spapr);
-void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
- uint32_t phandle);
-void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx);
-void spapr_xive_mmio_set_enabled(sPAPRXive *xive, bool enable);
+/*
+ * KVM XIVE device helpers
+ */
+int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
+ Error **errp);
+void kvmppc_xive_disconnect(SpaprInterruptController *intc);
+void kvmppc_xive_reset(SpaprXive *xive, Error **errp);
+int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
+ Error **errp);
+void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp);
+uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
+ uint64_t data, bool write);
+int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ Error **errp);
+int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ Error **errp);
+void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp);
+int kvmppc_xive_pre_save(SpaprXive *xive);
+int kvmppc_xive_post_load(SpaprXive *xive, int version_id);
#endif /* PPC_SPAPR_XIVE_H */
diff --git a/include/hw/ppc/vof.h b/include/hw/ppc/vof.h
new file mode 100644
index 0000000000..d3f293da8b
--- /dev/null
+++ b/include/hw/ppc/vof.h
@@ -0,0 +1,65 @@
+/*
+ * Virtual Open Firmware
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_VOF_H
+#define HW_VOF_H
+
+#include "qom/object.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+#include "exec/cpu-defs.h"
+
+typedef struct Vof {
+ uint64_t top_addr; /* copied from rma_size */
+ GArray *claimed; /* array of SpaprOfClaimed */
+ uint64_t claimed_base;
+ GHashTable *of_instances; /* ihandle -> SpaprOfInstance */
+ uint32_t of_instance_last;
+ char *bootargs;
+ long fw_size;
+} Vof;
+
+int vof_client_call(MachineState *ms, Vof *vof, void *fdt,
+ target_ulong args_real);
+uint64_t vof_claim(Vof *vof, uint64_t virt, uint64_t size, uint64_t align);
+void vof_init(Vof *vof, uint64_t top_addr, Error **errp);
+void vof_cleanup(Vof *vof);
+void vof_build_dt(void *fdt, Vof *vof);
+uint32_t vof_client_open_store(void *fdt, Vof *vof, const char *nodename,
+ const char *prop, const char *path);
+
+#define TYPE_VOF_MACHINE_IF "vof-machine-if"
+
+typedef struct VofMachineIfClass VofMachineIfClass;
+DECLARE_CLASS_CHECKERS(VofMachineIfClass, VOF_MACHINE, TYPE_VOF_MACHINE_IF)
+
+struct VofMachineIfClass {
+ InterfaceClass parent;
+ target_ulong (*client_architecture_support)(MachineState *ms, CPUState *cs,
+ target_ulong vec);
+ void (*quiesce)(MachineState *ms);
+ bool (*setprop)(MachineState *ms, const char *path, const char *propname,
+ void *val, int vallen);
+};
+
+/*
+ * Initial stack size is from
+ * https://www.devicetree.org/open-firmware/bindings/ppc/release/ppc-2_1.html#REF27292
+ *
+ * "Client programs shall be invoked with a valid stack pointer (r1) with
+ * at least 32K bytes of memory available for stack growth".
+ */
+#define VOF_STACK_SIZE 0x8000
+
+#define VOF_MEM_READ(pa, buf, size) \
+ address_space_read(&address_space_memory, \
+ (pa), MEMTXATTRS_UNSPECIFIED, (buf), (size))
+#define VOF_MEM_WRITE(pa, buf, size) \
+ address_space_write(&address_space_memory, \
+ (pa), MEMTXATTRS_UNSPECIFIED, (buf), (size))
+
+#define PROM_ERROR (~0U)
+
+#endif /* HW_VOF_H */
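A short sketch of how the VOF_MEM_READ()/PROM_ERROR pair above is typically combined when fetching a client-interface cell from guest memory. The helper is illustrative; the 32-bit big-endian cell layout is an assumption, not something this header mandates.

/* Sketch: read one 32-bit client-interface cell from guest memory */
static uint32_t vof_read_cell(uint64_t pa)
{
    uint32_t cell;

    if (VOF_MEM_READ(pa, &cell, sizeof(cell)) != MEMTX_OK) {
        return PROM_ERROR;          /* report the failure to the client */
    }
    return be32_to_cpu(cell);       /* OF cells are big endian */
}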
diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h
index 07508cbd21..95ead0dd7c 100644
--- a/include/hw/ppc/xics.h
+++ b/include/hw/ppc/xics.h
@@ -28,7 +28,9 @@
#ifndef XICS_H
#define XICS_H
-#include "hw/qdev.h"
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
#define XICS_IPI 0x2
#define XICS_BUID 0x1
@@ -39,8 +41,6 @@
* (the kernel implementation supports more but we don't exploit
* that yet)
*/
-typedef struct ICPStateClass ICPStateClass;
-typedef struct ICPState ICPState;
typedef struct PnvICPState PnvICPState;
typedef struct ICSStateClass ICSStateClass;
typedef struct ICSState ICSState;
@@ -48,28 +48,18 @@ typedef struct ICSIRQState ICSIRQState;
typedef struct XICSFabric XICSFabric;
#define TYPE_ICP "icp"
-#define ICP(obj) OBJECT_CHECK(ICPState, (obj), TYPE_ICP)
-
-#define TYPE_KVM_ICP "icp-kvm"
-#define KVM_ICP(obj) OBJECT_CHECK(ICPState, (obj), TYPE_KVM_ICP)
+OBJECT_DECLARE_TYPE(ICPState, ICPStateClass,
+ ICP)
#define TYPE_PNV_ICP "pnv-icp"
-#define PNV_ICP(obj) OBJECT_CHECK(PnvICPState, (obj), TYPE_PNV_ICP)
+DECLARE_INSTANCE_CHECKER(PnvICPState, PNV_ICP,
+ TYPE_PNV_ICP)
-#define ICP_CLASS(klass) \
- OBJECT_CLASS_CHECK(ICPStateClass, (klass), TYPE_ICP)
-#define ICP_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ICPStateClass, (obj), TYPE_ICP)
struct ICPStateClass {
DeviceClass parent_class;
DeviceRealize parent_realize;
- DeviceReset parent_reset;
-
- void (*pre_save)(ICPState *icp);
- int (*post_load)(ICPState *icp, int version_id);
- void (*synchronize_state)(ICPState *icp);
};
struct ICPState {
@@ -96,33 +86,19 @@ struct PnvICPState {
uint32_t links[3];
};
-#define TYPE_ICS_BASE "ics-base"
-#define ICS_BASE(obj) OBJECT_CHECK(ICSState, (obj), TYPE_ICS_BASE)
-
-/* Retain ics for sPAPR for migration from existing sPAPR guests */
-#define TYPE_ICS_SIMPLE "ics"
-#define ICS_SIMPLE(obj) OBJECT_CHECK(ICSState, (obj), TYPE_ICS_SIMPLE)
+#define TYPE_ICS "ics"
+DECLARE_OBJ_CHECKERS(ICSState, ICSStateClass,
+ ICS, TYPE_ICS)
-#define TYPE_ICS_KVM "icskvm"
-#define ICS_KVM(obj) OBJECT_CHECK(ICSState, (obj), TYPE_ICS_KVM)
-
-#define ICS_BASE_CLASS(klass) \
- OBJECT_CLASS_CHECK(ICSStateClass, (klass), TYPE_ICS_BASE)
-#define ICS_BASE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ICSStateClass, (obj), TYPE_ICS_BASE)
struct ICSStateClass {
DeviceClass parent_class;
DeviceRealize parent_realize;
- DeviceReset parent_reset;
+ ResettablePhases parent_phases;
- void (*pre_save)(ICSState *s);
- int (*post_load)(ICSState *s, int version_id);
void (*reject)(ICSState *s, uint32_t irq);
void (*resend)(ICSState *s);
- void (*eoi)(ICSState *s, uint32_t irq);
- void (*synchronize_state)(ICSState *s);
};
struct ICSState {
@@ -160,24 +136,19 @@ struct ICSIRQState {
uint8_t flags;
};
-struct XICSFabric {
- Object parent;
-};
-
#define TYPE_XICS_FABRIC "xics-fabric"
#define XICS_FABRIC(obj) \
- OBJECT_CHECK(XICSFabric, (obj), TYPE_XICS_FABRIC)
-#define XICS_FABRIC_CLASS(klass) \
- OBJECT_CLASS_CHECK(XICSFabricClass, (klass), TYPE_XICS_FABRIC)
-#define XICS_FABRIC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XICSFabricClass, (obj), TYPE_XICS_FABRIC)
+ INTERFACE_CHECK(XICSFabric, (obj), TYPE_XICS_FABRIC)
+typedef struct XICSFabricClass XICSFabricClass;
+DECLARE_CLASS_CHECKERS(XICSFabricClass, XICS_FABRIC,
+ TYPE_XICS_FABRIC)
-typedef struct XICSFabricClass {
+struct XICSFabricClass {
InterfaceClass parent;
ICSState *(*ics_get)(XICSFabric *xi, int irq);
void (*ics_resend)(XICSFabric *xi);
ICPState *(*icp_get)(XICSFabric *xi, int server);
-} XICSFabricClass;
+};
ICPState *xics_icp_get(XICSFabric *xi, int server);
@@ -187,11 +158,17 @@ void icp_set_mfrr(ICPState *icp, uint8_t mfrr);
uint32_t icp_accept(ICPState *ss);
uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr);
void icp_eoi(ICPState *icp, uint32_t xirr);
+void icp_irq(ICSState *ics, int server, int nr, uint8_t priority);
+void icp_reset(ICPState *icp);
+
+void ics_write_xive(ICSState *ics, int nr, int server,
+ uint8_t priority, uint8_t saved_priority);
+void ics_set_irq(void *opaque, int srcno, int val);
-void ics_simple_write_xive(ICSState *ics, int nr, int server,
- uint8_t priority, uint8_t saved_priority);
-void ics_simple_set_irq(void *opaque, int srcno, int val);
-void ics_kvm_set_irq(void *opaque, int srcno, int val);
+static inline bool ics_irq_free(ICSState *ics, uint32_t srcno)
+{
+ return !(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK);
+}
void ics_set_irq_type(ICSState *ics, int srcno, bool lsi);
void icp_pic_print_info(ICPState *icp, Monitor *mon);
@@ -200,14 +177,20 @@ void ics_pic_print_info(ICSState *ics, Monitor *mon);
void ics_resend(ICSState *ics);
void icp_resend(ICPState *ss);
-typedef struct sPAPRMachineState sPAPRMachineState;
-
-void spapr_dt_xics(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
- uint32_t phandle);
-int xics_kvm_init(sPAPRMachineState *spapr, Error **errp);
-void xics_spapr_init(sPAPRMachineState *spapr);
-
Object *icp_create(Object *cpu, const char *type, XICSFabric *xi,
Error **errp);
+void icp_destroy(ICPState *icp);
+
+/* KVM */
+void icp_get_kvm_state(ICPState *icp);
+int icp_set_kvm_state(ICPState *icp, Error **errp);
+void icp_synchronize_state(ICPState *icp);
+void icp_kvm_realize(DeviceState *dev, Error **errp);
+
+void ics_get_kvm_state(ICSState *ics);
+int ics_set_kvm_state_one(ICSState *ics, int srcno, Error **errp);
+int ics_set_kvm_state(ICSState *ics, Error **errp);
+void ics_synchronize_state(ICSState *ics);
+void ics_kvm_set_irq(ICSState *ics, int srcno, int val);
#endif /* XICS_H */
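A minimal sketch of how the new ics_irq_free() helper can be used to claim a source. The loop is illustrative; the nr_irqs and offset fields of ICSState are assumed from the rest of this header rather than shown in this hunk.

/* Sketch: find an unclaimed source, claim it as an MSI, return its global irq */
static int ics_find_free(ICSState *ics)
{
    uint32_t srcno;

    for (srcno = 0; srcno < ics->nr_irqs; srcno++) {
        if (ics_irq_free(ics, srcno)) {
            ics_set_irq_type(ics, srcno, false);  /* false = MSI, true = LSI */
            return srcno + ics->offset;
        }
    }
    return -1;
}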
diff --git a/include/hw/ppc/xics_spapr.h b/include/hw/ppc/xics_spapr.h
new file mode 100644
index 0000000000..de752c0d2c
--- /dev/null
+++ b/include/hw/ppc/xics_spapr.h
@@ -0,0 +1,43 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
+ *
+ * Copyright (c) 2010, 2011 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XICS_SPAPR_H
+#define XICS_SPAPR_H
+
+#include "hw/ppc/spapr.h"
+#include "qom/object.h"
+
+#define TYPE_ICS_SPAPR "ics-spapr"
+/* This is reusing the ICSState typedef from TYPE_ICS */
+DECLARE_INSTANCE_CHECKER(ICSState, ICS_SPAPR,
+ TYPE_ICS_SPAPR)
+
+int xics_kvm_connect(SpaprInterruptController *intc, uint32_t nr_servers,
+ Error **errp);
+void xics_kvm_disconnect(SpaprInterruptController *intc);
+bool xics_kvm_has_broken_disconnect(void);
+
+#endif /* XICS_SPAPR_H */
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index ec23253ba4..f120874e0f 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -140,37 +140,35 @@
#ifndef PPC_XIVE_H
#define PPC_XIVE_H
-#include "hw/qdev-core.h"
+#include "sysemu/kvm.h"
#include "hw/sysbus.h"
#include "hw/ppc/xive_regs.h"
+#include "qom/object.h"
/*
- * XIVE Fabric (Interface between Source and Router)
+ * XIVE Notifier (Interface between Source and Router)
*/
-typedef struct XiveNotifier {
- Object parent;
-} XiveNotifier;
+typedef struct XiveNotifier XiveNotifier;
#define TYPE_XIVE_NOTIFIER "xive-notifier"
#define XIVE_NOTIFIER(obj) \
- OBJECT_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER)
-#define XIVE_NOTIFIER_CLASS(klass) \
- OBJECT_CLASS_CHECK(XiveNotifierClass, (klass), TYPE_XIVE_NOTIFIER)
-#define XIVE_NOTIFIER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XiveNotifierClass, (obj), TYPE_XIVE_NOTIFIER)
+ INTERFACE_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER)
+typedef struct XiveNotifierClass XiveNotifierClass;
+DECLARE_CLASS_CHECKERS(XiveNotifierClass, XIVE_NOTIFIER,
+ TYPE_XIVE_NOTIFIER)
-typedef struct XiveNotifierClass {
+struct XiveNotifierClass {
InterfaceClass parent;
- void (*notify)(XiveNotifier *xn, uint32_t lisn);
-} XiveNotifierClass;
+ void (*notify)(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
+};
/*
* XIVE Interrupt Source
*/
#define TYPE_XIVE_SOURCE "xive-source"
-#define XIVE_SOURCE(obj) OBJECT_CHECK(XiveSource, (obj), TYPE_XIVE_SOURCE)
+OBJECT_DECLARE_SIMPLE_TYPE(XiveSource, XIVE_SOURCE)
/*
* XIVE Interrupt Source characteristics, which define how the ESB are
@@ -178,8 +176,9 @@ typedef struct XiveNotifierClass {
*/
#define XIVE_SRC_H_INT_ESB 0x1 /* ESB managed with hcall H_INT_ESB */
#define XIVE_SRC_STORE_EOI 0x2 /* Store EOI supported */
+#define XIVE_SRC_PQ_DISABLE 0x4 /* Disable check on the PQ state bits */
-typedef struct XiveSource {
+struct XiveSource {
DeviceState parent;
/* IRQs */
@@ -188,14 +187,20 @@ typedef struct XiveSource {
/* PQ bits and LSI assertion bit */
uint8_t *status;
+ uint8_t reset_pq; /* PQ state on reset */
/* ESB memory region */
uint64_t esb_flags;
uint32_t esb_shift;
MemoryRegion esb_mmio;
+ MemoryRegion esb_mmio_emulated;
+
+ /* KVM support */
+ void *esb_mmap;
+ MemoryRegion esb_mmio_kvm;
XiveNotifier *xive;
-} XiveSource;
+};
/*
* ESB MMIO setting. Can be one page, for both source triggering and
@@ -213,6 +218,11 @@ static inline bool xive_source_esb_has_2page(XiveSource *xsrc)
xsrc->esb_shift == XIVE_ESB_4K_2PAGE;
}
+static inline size_t xive_source_esb_len(XiveSource *xsrc)
+{
+ return (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
+}
+
/* The trigger page is always the first/even page */
static inline hwaddr xive_source_esb_page(XiveSource *xsrc, uint32_t srcno)
{
@@ -253,6 +263,10 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno)
#define XIVE_ESB_QUEUED (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q)
#define XIVE_ESB_OFF XIVE_ESB_VAL_Q
+bool xive_esb_trigger(uint8_t *pq);
+bool xive_esb_eoi(uint8_t *pq);
+uint8_t xive_esb_set(uint8_t *pq, uint8_t value);
+
/*
* "magic" Event State Buffer (ESB) MMIO offsets.
*
@@ -266,6 +280,7 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno)
#define XIVE_ESB_STORE_EOI 0x400 /* Store */
#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_INJECT 0x800 /* Store */
#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
@@ -274,6 +289,30 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno)
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno);
uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq);
+/*
+ * Source status helpers
+ */
+static inline void xive_source_set_status(XiveSource *xsrc, uint32_t srcno,
+ uint8_t status, bool enable)
+{
+ if (enable) {
+ xsrc->status[srcno] |= status;
+ } else {
+ xsrc->status[srcno] &= ~status;
+ }
+}
+
+static inline void xive_source_set_asserted(XiveSource *xsrc, uint32_t srcno,
+ bool enable)
+{
+ xive_source_set_status(xsrc, srcno, XIVE_STATUS_ASSERTED, enable);
+}
+
+static inline bool xive_source_is_asserted(XiveSource *xsrc, uint32_t srcno)
+{
+ return xsrc->status[srcno] & XIVE_STATUS_ASSERTED;
+}
+
void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset,
Monitor *mon);
@@ -283,39 +322,76 @@ static inline bool xive_source_irq_is_lsi(XiveSource *xsrc, uint32_t srcno)
return test_bit(srcno, xsrc->lsi_map);
}
-static inline void xive_source_irq_set(XiveSource *xsrc, uint32_t srcno,
- bool lsi)
+static inline void xive_source_irq_set_lsi(XiveSource *xsrc, uint32_t srcno)
{
assert(srcno < xsrc->nr_irqs);
- if (lsi) {
- bitmap_set(xsrc->lsi_map, srcno, 1);
- }
+ bitmap_set(xsrc->lsi_map, srcno, 1);
}
void xive_source_set_irq(void *opaque, int srcno, int val);
/*
+ * XIVE Thread interrupt Management (TM) context
+ */
+
+#define TYPE_XIVE_TCTX "xive-tctx"
+OBJECT_DECLARE_SIMPLE_TYPE(XiveTCTX, XIVE_TCTX)
+
+/*
+ * XIVE Thread interrupt Management register rings :
+ *
+ * QW-0 User event-based exception state
+ * QW-1 O/S OS context for priority management, interrupt acks
+ * QW-2 Pool hypervisor pool context for virtual processors dispatched
+ * QW-3 Physical physical thread context and security context
+ */
+#define XIVE_TM_RING_COUNT 4
+#define XIVE_TM_RING_SIZE 0x10
+
+typedef struct XivePresenter XivePresenter;
+
+struct XiveTCTX {
+ DeviceState parent_obj;
+
+ CPUState *cs;
+ qemu_irq hv_output;
+ qemu_irq os_output;
+
+ uint8_t regs[XIVE_TM_RING_COUNT * XIVE_TM_RING_SIZE];
+
+ XivePresenter *xptr;
+};
+
+static inline uint32_t xive_tctx_word2(uint8_t *ring)
+{
+ return *((uint32_t *) &ring[TM_WORD2]);
+}
+
+/*
* XIVE Router
*/
+typedef struct XiveFabric XiveFabric;
-typedef struct XiveRouter {
+struct XiveRouter {
SysBusDevice parent;
-} XiveRouter;
+
+ XiveFabric *xfb;
+};
#define TYPE_XIVE_ROUTER "xive-router"
-#define XIVE_ROUTER(obj) \
- OBJECT_CHECK(XiveRouter, (obj), TYPE_XIVE_ROUTER)
-#define XIVE_ROUTER_CLASS(klass) \
- OBJECT_CLASS_CHECK(XiveRouterClass, (klass), TYPE_XIVE_ROUTER)
-#define XIVE_ROUTER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XiveRouterClass, (obj), TYPE_XIVE_ROUTER)
-
-typedef struct XiveRouterClass {
+OBJECT_DECLARE_TYPE(XiveRouter, XiveRouterClass,
+ XIVE_ROUTER)
+
+struct XiveRouterClass {
SysBusDeviceClass parent;
/* XIVE table accessors */
int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
XiveEAS *eas);
+ int (*get_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
+ int (*set_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
XiveEND *end);
int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
@@ -324,9 +400,9 @@ typedef struct XiveRouterClass {
XiveNVT *nvt);
int (*write_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt, uint8_t word_number);
-} XiveRouterClass;
-
-void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon);
+ uint8_t (*get_block_id)(XiveRouter *xrtr);
+ void (*end_notify)(XiveRouter *xrtr, XiveEAS *eas);
+};
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
XiveEAS *eas);
@@ -338,28 +414,83 @@ int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt);
int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt, uint8_t word_number);
+void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
+void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas);
+
+/*
+ * XIVE Presenter
+ */
+
+typedef struct XiveTCTXMatch {
+ XiveTCTX *tctx;
+ uint8_t ring;
+} XiveTCTXMatch;
+
+#define TYPE_XIVE_PRESENTER "xive-presenter"
+#define XIVE_PRESENTER(obj) \
+ INTERFACE_CHECK(XivePresenter, (obj), TYPE_XIVE_PRESENTER)
+typedef struct XivePresenterClass XivePresenterClass;
+DECLARE_CLASS_CHECKERS(XivePresenterClass, XIVE_PRESENTER,
+ TYPE_XIVE_PRESENTER)
+#define XIVE_PRESENTER_GEN1_TIMA_OS 0x1
+
+struct XivePresenterClass {
+ InterfaceClass parent;
+ int (*match_nvt)(XivePresenter *xptr, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, XiveTCTXMatch *match);
+ bool (*in_kernel)(const XivePresenter *xptr);
+ uint32_t (*get_config)(XivePresenter *xptr);
+};
+
+int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
+ uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint32_t logic_serv);
+bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv);
+
+/*
+ * XIVE Fabric (Interface between Interrupt Controller and Machine)
+ */
+
+#define TYPE_XIVE_FABRIC "xive-fabric"
+#define XIVE_FABRIC(obj) \
+ INTERFACE_CHECK(XiveFabric, (obj), TYPE_XIVE_FABRIC)
+typedef struct XiveFabricClass XiveFabricClass;
+DECLARE_CLASS_CHECKERS(XiveFabricClass, XIVE_FABRIC,
+ TYPE_XIVE_FABRIC)
+
+struct XiveFabricClass {
+ InterfaceClass parent;
+ int (*match_nvt)(XiveFabric *xfb, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, XiveTCTXMatch *match);
+};
/*
* XIVE END ESBs
*/
#define TYPE_XIVE_END_SOURCE "xive-end-source"
-#define XIVE_END_SOURCE(obj) \
- OBJECT_CHECK(XiveENDSource, (obj), TYPE_XIVE_END_SOURCE)
+OBJECT_DECLARE_SIMPLE_TYPE(XiveENDSource, XIVE_END_SOURCE)
-typedef struct XiveENDSource {
+struct XiveENDSource {
DeviceState parent;
uint32_t nr_ends;
- uint8_t block_id;
/* ESB memory region */
uint32_t esb_shift;
MemoryRegion esb_mmio;
XiveRouter *xrtr;
-} XiveENDSource;
+};
/*
* For legacy compatibility, the exceptions define up to 256 different
@@ -368,35 +499,16 @@ typedef struct XiveENDSource {
*/
#define XIVE_PRIORITY_MAX 7
-void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
-void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);
-
/*
- * XIVE Thread interrupt Management (TM) context
+ * Convert a priority number to an Interrupt Pending Buffer (IPB)
+ * register, which indicates a pending interrupt at the priority
+ * corresponding to the bit number.
*/
-
-#define TYPE_XIVE_TCTX "xive-tctx"
-#define XIVE_TCTX(obj) OBJECT_CHECK(XiveTCTX, (obj), TYPE_XIVE_TCTX)
-
-/*
- * XIVE Thread interrupt Management register rings :
- *
- * QW-0 User event-based exception state
- * QW-1 O/S OS context for priority management, interrupt acks
- * QW-2 Pool hypervisor pool context for virtual processors dispatched
- * QW-3 Physical physical thread context and security context
- */
-#define XIVE_TM_RING_COUNT 4
-#define XIVE_TM_RING_SIZE 0x10
-
-typedef struct XiveTCTX {
- DeviceState parent_obj;
-
- CPUState *cs;
- qemu_irq output;
-
- uint8_t regs[XIVE_TM_RING_COUNT * XIVE_TM_RING_SIZE];
-} XiveTCTX;
+static inline uint8_t xive_priority_to_ipb(uint8_t priority)
+{
+ return priority > XIVE_PRIORITY_MAX ?
+ 0 : 1 << (XIVE_PRIORITY_MAX - priority);
+}
/*
* XIVE Thread Interrupt Management Area (TIMA)
@@ -411,14 +523,27 @@ typedef struct XiveTCTX {
#define XIVE_TM_OS_PAGE 0x2
#define XIVE_TM_USER_PAGE 0x3
-extern const MemoryRegionOps xive_tm_ops;
+void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ uint64_t value, unsigned size);
+uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ unsigned size);
void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon);
-Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp);
+Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
+void xive_tctx_reset(XiveTCTX *tctx);
+void xive_tctx_destroy(XiveTCTX *tctx);
+void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
+void xive_tctx_reset_os_signal(XiveTCTX *tctx);
-static inline uint32_t xive_nvt_cam_line(uint8_t nvt_blk, uint32_t nvt_idx)
-{
- return (nvt_blk << 19) | nvt_idx;
-}
+/*
+ * KVM XIVE device helpers
+ */
+
+int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp);
+void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val);
+int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp);
+int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp);
+int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp);
+int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp);
#endif /* PPC_XIVE_H */
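As a concrete illustration of xive_priority_to_ipb() above: priority 0 maps to IPB bit 0x80, priority 5 to 1 << (7 - 5) = 0x04, and anything above XIVE_PRIORITY_MAX collapses to 0. A sketched use when an interrupt becomes pending on the OS ring (the surrounding function is hypothetical; TM_QW1_OS comes from xive_regs.h):

/* Sketch: mark a priority-5 interrupt pending on the OS ring of 'tctx' */
static void os_irq_pending(XiveTCTX *tctx)
{
    uint8_t ipb = xive_priority_to_ipb(5);   /* 1 << (7 - 5) == 0x04 */

    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}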
diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h
new file mode 100644
index 0000000000..ab68f8d157
--- /dev/null
+++ b/include/hw/ppc/xive2.h
@@ -0,0 +1,111 @@
+/*
+ * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
+ *
+ * Copyright (c) 2019-2022, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PPC_XIVE2_H
+#define PPC_XIVE2_H
+
+#include "hw/ppc/xive.h"
+#include "hw/ppc/xive2_regs.h"
+#include "hw/sysbus.h"
+
+/*
+ * XIVE2 Router (POWER10)
+ */
+typedef struct Xive2Router {
+ SysBusDevice parent;
+
+ XiveFabric *xfb;
+} Xive2Router;
+
+#define TYPE_XIVE2_ROUTER "xive2-router"
+OBJECT_DECLARE_TYPE(Xive2Router, Xive2RouterClass, XIVE2_ROUTER);
+
+/*
+ * Configuration flags
+ */
+
+#define XIVE2_GEN1_TIMA_OS 0x00000001
+#define XIVE2_VP_SAVE_RESTORE 0x00000002
+#define XIVE2_THREADID_8BITS 0x00000004
+
+typedef struct Xive2RouterClass {
+ SysBusDeviceClass parent;
+
+ /* XIVE table accessors */
+ int (*get_eas)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ Xive2Eas *eas);
+ int (*get_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
+ int (*set_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
+ int (*get_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end);
+ int (*write_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end, uint8_t word_number);
+ int (*get_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp);
+ int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp, uint8_t word_number);
+ uint8_t (*get_block_id)(Xive2Router *xrtr);
+ uint32_t (*get_config)(Xive2Router *xrtr);
+} Xive2RouterClass;
+
+int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ Xive2Eas *eas);
+int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end);
+int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end, uint8_t word_number);
+int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp);
+int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp, uint8_t word_number);
+uint32_t xive2_router_get_config(Xive2Router *xrtr);
+
+void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
+
+/*
+ * XIVE2 Presenter (POWER10)
+ */
+
+int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
+ uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint32_t logic_serv);
+
+/*
+ * XIVE2 END ESBs (POWER10)
+ */
+
+#define TYPE_XIVE2_END_SOURCE "xive2-end-source"
+OBJECT_DECLARE_SIMPLE_TYPE(Xive2EndSource, XIVE2_END_SOURCE)
+
+typedef struct Xive2EndSource {
+ DeviceState parent;
+
+ uint32_t nr_ends;
+
+ /* ESB memory region */
+ uint32_t esb_shift;
+ MemoryRegion esb_mmio;
+
+ Xive2Router *xrtr;
+} Xive2EndSource;
+
+/*
+ * XIVE2 Thread Interrupt Management Area (POWER10)
+ */
+
+void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ uint64_t value, unsigned size);
+uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size);
+
+#endif /* PPC_XIVE2_H */
diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h
new file mode 100644
index 0000000000..816f5d0e84
--- /dev/null
+++ b/include/hw/ppc/xive2_regs.h
@@ -0,0 +1,212 @@
+/*
+ * QEMU PowerPC XIVE2 internal structure definitions (POWER10)
+ *
+ * Copyright (c) 2019-2022, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_XIVE2_REGS_H
+#define PPC_XIVE2_REGS_H
+
+#include "qemu/bswap.h"
+
+/*
+ * Thread Interrupt Management Area (TIMA)
+ *
+ * In Gen1 mode (P9 compat mode) word 2 is the same. However, in Gen2
+ * mode (P10), the CAM line is slightly different as the VP space was
+ * increased.
+ */
+#define TM2_QW0W2_VU PPC_BIT32(0)
+#define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31)
+#define TM2_QW1W2_VO PPC_BIT32(0)
+#define TM2_QW1W2_HO PPC_BIT32(1)
+#define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31)
+#define TM2_QW2W2_VP PPC_BIT32(0)
+#define TM2_QW2W2_HP PPC_BIT32(1)
+#define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31)
+#define TM2_QW3W2_VT PPC_BIT32(0)
+#define TM2_QW3W2_HT PPC_BIT32(1)
+#define TM2_QW3W2_LP PPC_BIT32(6)
+#define TM2_QW3W2_LE PPC_BIT32(7)
+
+/*
+ * Event Assignment Structure (EAS)
+ */
+
+typedef struct Xive2Eas {
+ uint64_t w;
+#define EAS2_VALID PPC_BIT(0)
+#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */
+#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */
+#define EAS2_MASKED PPC_BIT(32) /* Masked */
+#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */
+} Xive2Eas;
+
+#define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID)
+#define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED)
+
+void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon);
+
+/*
+ * Event Notification Descriptor (END)
+ */
+
+typedef struct Xive2End {
+ uint32_t w0;
+#define END2_W0_VALID PPC_BIT32(0) /* "v" bit */
+#define END2_W0_ENQUEUE PPC_BIT32(5) /* "q" bit */
+#define END2_W0_UCOND_NOTIFY PPC_BIT32(6) /* "n" bit */
+#define END2_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit */
+#define END2_W0_BACKLOG PPC_BIT32(8) /* "b" bit */
+#define END2_W0_PRECL_ESC_CTL PPC_BIT32(9) /* "p" bit */
+#define END2_W0_UNCOND_ESCALATE PPC_BIT32(10) /* "u" bit */
+#define END2_W0_ESCALATE_CTL PPC_BIT32(11) /* "e" bit */
+#define END2_W0_ADAPTIVE_ESC PPC_BIT32(12) /* "a" bit */
+#define END2_W0_ESCALATE_END PPC_BIT32(13) /* "N" bit */
+#define END2_W0_FIRMWARE1 PPC_BIT32(16) /* Owned by FW */
+#define END2_W0_FIRMWARE2 PPC_BIT32(17) /* Owned by FW */
+#define END2_W0_AEC_SIZE PPC_BITMASK32(18, 19)
+#define END2_W0_AEG_SIZE PPC_BITMASK32(20, 23)
+#define END2_W0_EQ_VG_PREDICT PPC_BITMASK32(24, 31) /* Owned by HW */
+ uint32_t w1;
+#define END2_W1_ESn PPC_BITMASK32(0, 1)
+#define END2_W1_ESn_P PPC_BIT32(0)
+#define END2_W1_ESn_Q PPC_BIT32(1)
+#define END2_W1_ESe PPC_BITMASK32(2, 3)
+#define END2_W1_ESe_P PPC_BIT32(2)
+#define END2_W1_ESe_Q PPC_BIT32(3)
+#define END2_W1_GEN_FLIPPED PPC_BIT32(8)
+#define END2_W1_GENERATION PPC_BIT32(9)
+#define END2_W1_PAGE_OFF PPC_BITMASK32(10, 31)
+ uint32_t w2;
+#define END2_W2_RESERVED PPC_BITMASK32(4, 7)
+#define END2_W2_EQ_ADDR_HI PPC_BITMASK32(8, 31)
+ uint32_t w3;
+#define END2_W3_EQ_ADDR_LO PPC_BITMASK32(0, 24)
+#define END2_W3_QSIZE PPC_BITMASK32(28, 31)
+ uint32_t w4;
+#define END2_W4_END_BLOCK PPC_BITMASK32(4, 7)
+#define END2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31)
+#define END2_W4_ESB_BLOCK PPC_BITMASK32(0, 3)
+#define END2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31)
+ uint32_t w5;
+#define END2_W5_ESC_END_DATA PPC_BITMASK32(1, 31)
+ uint32_t w6;
+#define END2_W6_FORMAT_BIT PPC_BIT32(0)
+#define END2_W6_IGNORE PPC_BIT32(1)
+#define END2_W6_VP_BLOCK PPC_BITMASK32(4, 7)
+#define END2_W6_VP_OFFSET PPC_BITMASK32(8, 31)
+#define END2_W6_VP_OFFSET_GEN1 PPC_BITMASK32(13, 31)
+ uint32_t w7;
+#define END2_W7_TOPO PPC_BITMASK32(0, 3) /* Owned by HW */
+#define END2_W7_F0_PRIORITY PPC_BITMASK32(8, 15)
+#define END2_W7_F1_LOG_SERVER_ID PPC_BITMASK32(4, 31)
+} Xive2End;
+
+#define xive2_end_is_valid(end) (be32_to_cpu((end)->w0) & END2_W0_VALID)
+#define xive2_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END2_W0_ENQUEUE)
+#define xive2_end_is_notify(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_UCOND_NOTIFY)
+#define xive2_end_is_backlog(end) (be32_to_cpu((end)->w0) & END2_W0_BACKLOG)
+#define xive2_end_is_escalate(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_CTL)
+#define xive2_end_is_uncond_escalation(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_UNCOND_ESCALATE)
+#define xive2_end_is_silent_escalation(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_SILENT_ESCALATE)
+#define xive2_end_is_escalate_end(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_END)
+#define xive2_end_is_firmware1(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE1)
+#define xive2_end_is_firmware2(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE2)
+
+static inline uint64_t xive2_end_qaddr(Xive2End *end)
+{
+ return ((uint64_t) be32_to_cpu(end->w2) & END2_W2_EQ_ADDR_HI) << 32 |
+ (be32_to_cpu(end->w3) & END2_W3_EQ_ADDR_LO);
+}
+
+void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon);
+void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
+ Monitor *mon);
+void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
+ Monitor *mon);
+
+/*
+ * Notification Virtual Processor (NVP)
+ */
+typedef struct Xive2Nvp {
+ uint32_t w0;
+#define NVP2_W0_VALID PPC_BIT32(0)
+#define NVP2_W0_HW PPC_BIT32(7)
+#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
+ uint32_t w1;
+#define NVP2_W1_CO PPC_BIT32(13)
+#define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15)
+#define NVP2_W1_CO_THRID_VALID PPC_BIT32(16)
+#define NVP2_W1_CO_THRID PPC_BITMASK32(17, 31)
+ uint32_t w2;
+#define NVP2_W2_CPPR PPC_BITMASK32(0, 7)
+#define NVP2_W2_IPB PPC_BITMASK32(8, 15)
+#define NVP2_W2_LSMFB PPC_BITMASK32(16, 23)
+ uint32_t w3;
+ uint32_t w4;
+#define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */
+#define NVP2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31) /* N:0 */
+#define NVP2_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7) /* N:1 */
+#define NVP2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31) /* N:1 */
+ uint32_t w5;
+#define NVP2_W5_PSIZE PPC_BITMASK32(0, 1)
+#define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7)
+#define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31)
+ uint32_t w6;
+ uint32_t w7;
+} Xive2Nvp;
+
+#define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID)
+#define xive2_nvp_is_hw(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_HW)
+#define xive2_nvp_is_co(nvp) (be32_to_cpu((nvp)->w1) & NVP2_W1_CO)
+
+/*
+ * The VP number space in a block is defined by the END2_W6_VP_OFFSET
+ * field of the XIVE END. When running in Gen1 mode (P9 compat mode),
+ * the VP space is reduced to (1 << 19) VPs per block.
+ */
+#define XIVE2_NVP_SHIFT 24
+#define XIVE2_NVP_COUNT (1 << XIVE2_NVP_SHIFT)
+
+static inline uint32_t xive2_nvp_cam_line(uint8_t nvp_blk, uint32_t nvp_idx)
+{
+ return (nvp_blk << XIVE2_NVP_SHIFT) | nvp_idx;
+}
+
+static inline uint32_t xive2_nvp_idx(uint32_t cam_line)
+{
+ return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1);
+}
+
+static inline uint32_t xive2_nvp_blk(uint32_t cam_line)
+{
+ return (cam_line >> XIVE2_NVP_SHIFT) & 0xf;
+}
+
+/*
+ * Notification Virtual Group or Crowd (NVG/NVC)
+ */
+typedef struct Xive2Nvgc {
+ uint32_t w0;
+#define NVGC2_W0_VALID PPC_BIT32(0)
+ uint32_t w1;
+ uint32_t w2;
+ uint32_t w3;
+ uint32_t w4;
+ uint32_t w5;
+ uint32_t w6;
+ uint32_t w7;
+} Xive2Nvgc;
+
+#endif /* PPC_XIVE2_REGS_H */
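For example, the NVP CAM line helpers above compose and decompose as follows (a small sketch; the block and index values are arbitrary):

/* Sketch: the CAM line helpers round-trip a (block, index) pair */
static void nvp_cam_example(void)
{
    uint32_t cam = xive2_nvp_cam_line(0x1, 0x123);  /* (1 << 24) | 0x123 */

    assert(xive2_nvp_blk(cam) == 0x1);
    assert(xive2_nvp_idx(cam) == 0x123);
}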
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index bf36678a24..4a3c9badd3 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -16,15 +16,54 @@
#ifndef PPC_XIVE_REGS_H
#define PPC_XIVE_REGS_H
+#include "qemu/bswap.h"
+#include "qemu/host-utils.h"
+
/*
* Interrupt source number encoding on PowerBUS
*/
-#define XIVE_SRCNO_BLOCK(srcno) (((srcno) >> 28) & 0xf)
-#define XIVE_SRCNO_INDEX(srcno) ((srcno) & 0x0fffffff)
-#define XIVE_SRCNO(blk, idx) ((uint32_t)(blk) << 28 | (idx))
+/*
+ * Trigger data definition
+ *
+ * The trigger definition is used both for triggers from HW source
+ * interrupts (PHB, PSI) and for rerouting interrupts between
+ * Interrupt Controllers.
+ *
+ * HW source controllers set bit0 of word0 to '0' as they provide EAS
+ * information (EAS block + EAS index) in the 8-byte data rather than END
+ * information, which is used for rerouting interrupts.
+ *
+ * Setting bit1 of word0 to '1' signals that the state bit check has been
+ * performed.
+ */
+#define XIVE_TRIGGER_END PPC_BIT(0)
+#define XIVE_TRIGGER_PQ PPC_BIT(1)
+
+/*
+ * QEMU macros to manipulate the trigger payload in native endian
+ */
+#define XIVE_EAS_BLOCK(n) (((n) >> 28) & 0xf)
+#define XIVE_EAS_INDEX(n) ((n) & 0x0fffffff)
+#define XIVE_EAS(blk, idx) ((uint32_t)(blk) << 28 | (idx))
#define TM_SHIFT 16
+/*
+ * TIMA addresses are 12 bits wide (4k page).
+ * The MSB indicates a special op with a side effect, which can be
+ * refined with bit 10 (see below).
+ * The registers, logically grouped in 4 rings (a quad-word each), are
+ * defined on the 6 LSBs (offset below 0x40).
+ * In between, we can add a cache line index from 0..3 (i.e. 0, 0x80,
+ * 0x100, 0x180) to select a specific snooper. Those 'snoop port
+ * address' bits should be dropped when processing the operations as
+ * they are all equivalent.
+ */
+#define TM_ADDRESS_MASK 0xC3F
+#define TM_SPECIAL_OP 0x800
+#define TM_RING_OFFSET 0x30
+#define TM_REG_OFFSET 0x3F
+
/* TM register offsets */
#define TM_QW0_USER 0x000 /* All rings */
#define TM_QW1_OS 0x010 /* Ring 0..2 */
@@ -48,7 +87,7 @@
* QW word 2 contains the valid bit at the top and other fields
* depending on the QW.
*/
-#define TM_WORD2 0x8
+#define TM_WORD2 0x8
#define TM_QW0W2_VU PPC_BIT32(0)
#define TM_QW0W2_LOGIC_SERV PPC_BITMASK32(1, 31) /* XX 2,31 ? */
#define TM_QW1W2_VO PPC_BIT32(0)
@@ -128,6 +167,8 @@ typedef struct XiveEAS {
#define xive_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS_VALID)
#define xive_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS_MASKED)
+void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon);
+
static inline uint64_t xive_get_field64(uint64_t mask, uint64_t word)
{
return (be64_to_cpu(word) & mask) >> ctz64(mask);
@@ -207,15 +248,34 @@ typedef struct XiveEND {
#define xive_end_is_notify(end) (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY)
#define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
#define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL)
+#define xive_end_is_uncond_escalation(end) \
+ (be32_to_cpu((end)->w0) & END_W0_UNCOND_ESCALATE)
+#define xive_end_is_silent_escalation(end) \
+ (be32_to_cpu((end)->w0) & END_W0_SILENT_ESCALATE)
+#define xive_end_is_firmware(end) \
+ (be32_to_cpu((end)->w0) & END_W0_FIRMWARE)
+
+static inline uint64_t xive_end_qaddr(XiveEND *end)
+{
+ return ((uint64_t) be32_to_cpu(end->w2) & 0x0fffffff) << 32 |
+ be32_to_cpu(end->w3);
+}
+
+void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
+void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);
+void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
/* Notification Virtual Target (NVT) */
typedef struct XiveNVT {
uint32_t w0;
#define NVT_W0_VALID PPC_BIT32(0)
uint32_t w1;
+#define NVT_W1_EQ_BLOCK PPC_BITMASK32(0, 3)
+#define NVT_W1_EQ_INDEX PPC_BITMASK32(4, 31)
uint32_t w2;
uint32_t w3;
uint32_t w4;
+#define NVT_W4_IPB PPC_BITMASK32(16, 23)
uint32_t w5;
uint32_t w6;
uint32_t w7;
@@ -232,4 +292,26 @@ typedef struct XiveNVT {
#define xive_nvt_is_valid(nvt) (be32_to_cpu((nvt)->w0) & NVT_W0_VALID)
+/*
+ * The VP number space in a block is defined by the END_W6_NVT_INDEX
+ * field of the XIVE END.
+ */
+#define XIVE_NVT_SHIFT 19
+#define XIVE_NVT_COUNT (1 << XIVE_NVT_SHIFT)
+
+static inline uint32_t xive_nvt_cam_line(uint8_t nvt_blk, uint32_t nvt_idx)
+{
+ return (nvt_blk << XIVE_NVT_SHIFT) | nvt_idx;
+}
+
+static inline uint32_t xive_nvt_idx(uint32_t cam_line)
+{
+ return cam_line & ((1 << XIVE_NVT_SHIFT) - 1);
+}
+
+static inline uint32_t xive_nvt_blk(uint32_t cam_line)
+{
+ return (cam_line >> XIVE_NVT_SHIFT) & 0xf;
+}
+
#endif /* PPC_XIVE_REGS_H */
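Two small worked examples for the macros above (the values are illustrative): XIVE_EAS() packs a block and index into the 32-bit source number, and TM_ADDRESS_MASK strips the 'snoop port' bits from a TIMA offset so that aliased addresses decode identically.

/* Sketch: EAS number packing and TIMA snoop-bit masking */
static void xive_regs_examples(void)
{
    uint32_t srcno = XIVE_EAS(0x2, 0x1000);     /* (2 << 28) | 0x1000 */

    assert(XIVE_EAS_BLOCK(srcno) == 0x2);
    assert(XIVE_EAS_INDEX(srcno) == 0x1000);

    /* 0x438, 0x4B8, 0x538 and 0x5B8 all decode to the same register */
    assert((0x5B8 & TM_ADDRESS_MASK) == 0x438);
}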
diff --git a/include/hw/ptimer.h b/include/hw/ptimer.h
index 0731d9aef1..4dc02b0de4 100644
--- a/include/hw/ptimer.h
+++ b/include/hw/ptimer.h
@@ -8,27 +8,42 @@
#ifndef PTIMER_H
#define PTIMER_H
-#include "qemu-common.h"
#include "qemu/timer.h"
-#include "migration/vmstate.h"
-/* The ptimer API implements a simple periodic countdown timer.
+/*
+ * The ptimer API implements a simple periodic countdown timer.
* The countdown timer has a value (which can be read and written via
* ptimer_get_count() and ptimer_set_count()). When it is enabled
* using ptimer_run(), the value will count downwards at the frequency
* which has been configured using ptimer_set_period() or ptimer_set_freq().
- * When it reaches zero it will trigger a QEMU bottom half handler, and
+ * When it reaches zero it will trigger a callback function, and
* can be set to either reload itself from a specified limit value
* and keep counting down, or to stop (as a one-shot timer).
*
+ * A transaction-based API is used for modifying ptimer state: all calls
+ * to functions which modify ptimer state must be between matched calls to
+ * ptimer_transaction_begin() and ptimer_transaction_commit().
+ * When ptimer_transaction_commit() is called it will evaluate the state
+ * of the timer after all the changes in the transaction, and call the
+ * callback if necessary. (See the ptimer_init() documentation for the full
+ * list of state-modifying functions and detailed semantics of the callback.)
+ *
* Forgetting to set the period/frequency (or setting it to zero) is a
* bug in the QEMU device and will cause warning messages to be printed
* to stderr when the guest attempts to enable the timer.
*/
-/* The default ptimer policy retains backward compatibility with the legacy
- * timers. Custom policies are adjusting the default one. Consider providing
- * a correct policy for your timer.
+/*
+ * The 'legacy' ptimer policy retains backward compatibility with the
+ * traditional ptimer behaviour from before policy flags were introduced.
+ * It has several weird behaviours which don't match typical hardware
+ * timer behaviour. For a new device using ptimers, you should not
+ * use PTIMER_POLICY_LEGACY, but instead check the actual behaviour
+ * that you need and specify the right set of policy flags to get that.
+ *
+ * If you are overhauling an existing device that uses PTIMER_POLICY_LEGACY
+ * and are in a position to check or test the real hardware behaviour,
+ * consider updating it to specify the right policy flags.
*
* The rough edges of the default policy:
* - Starting to run with a period = 0 emits error message and stops the
@@ -47,7 +62,7 @@
* since the last period, effectively restarting the timer with a
* counter = counter value at the moment of change (i.e. one less).
*/
-#define PTIMER_POLICY_DEFAULT 0
+#define PTIMER_POLICY_LEGACY 0
/* Periodic timer counter stays with "0" for a one period before wrapping
* around. */
@@ -84,25 +99,67 @@ typedef void (*ptimer_cb)(void *opaque);
/**
* ptimer_init - Allocate and return a new ptimer
- * @bh: QEMU bottom half which is run on timer expiry
+ * @callback: function to call on ptimer expiry
+ * @callback_opaque: opaque pointer passed to @callback
* @policy: PTIMER_POLICY_* bits specifying behaviour
*
* The ptimer returned must be freed using ptimer_free().
- * The ptimer takes ownership of @bh and will delete it
- * when the ptimer is eventually freed.
+ *
+ * If a ptimer is created using this API then it will use the
+ * transaction-based API for modifying ptimer state: all calls
+ * to functions which modify ptimer state:
+ * - ptimer_set_period()
+ * - ptimer_set_freq()
+ * - ptimer_set_limit()
+ * - ptimer_set_count()
+ * - ptimer_run()
+ * - ptimer_stop()
+ * must be between matched calls to ptimer_transaction_begin()
+ * and ptimer_transaction_commit(). When ptimer_transaction_commit()
+ * is called it will evaluate the state of the timer after all the
+ * changes in the transaction, and call the callback if necessary.
+ *
+ * The callback function is always called from within a transaction
+ * begin/commit block, so the callback should not call the
+ * ptimer_transaction_begin() function itself. If the callback changes
+ * the ptimer state such that another ptimer expiry is triggered, then
+ * the callback will be called a second time after the first call returns.
*/
-ptimer_state *ptimer_init(QEMUBH *bh, uint8_t policy_mask);
+ptimer_state *ptimer_init(ptimer_cb callback,
+ void *callback_opaque,
+ uint8_t policy_mask);
/**
* ptimer_free - Free a ptimer
* @s: timer to free
*
- * Free a ptimer created using ptimer_init() (including
- * deleting the bottom half which it is using).
+ * Free a ptimer created using ptimer_init().
*/
void ptimer_free(ptimer_state *s);
/**
+ * ptimer_transaction_begin() - Start a ptimer modification transaction
+ *
+ * This function must be called before making any calls to functions
+ * which modify the ptimer's state (see the ptimer_init() documentation
+ * for a list of these), and must always have a matched call to
+ * ptimer_transaction_commit().
+ * It is an error to call this function for a BH-based ptimer;
+ * attempting to do this will trigger an assert.
+ */
+void ptimer_transaction_begin(ptimer_state *s);
+
+/**
+ * ptimer_transaction_commit() - Commit a ptimer modification transaction
+ *
+ * This function must be called after calls to functions which modify
+ * the ptimer's state, and completes the update of the ptimer. If the
+ * ptimer state now means that we should trigger the timer expiry
+ * callback, it will be called directly.
+ */
+void ptimer_transaction_commit(ptimer_state *s);
+
+/**
* ptimer_set_period - Set counter increment interval in nanoseconds
* @s: ptimer to configure
* @period: period of the counter in nanoseconds
@@ -110,10 +167,35 @@ void ptimer_free(ptimer_state *s);
* Note that if your counter behaviour is specified as having a
* particular frequency rather than a period then ptimer_set_freq()
* may be more appropriate.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
*/
void ptimer_set_period(ptimer_state *s, int64_t period);
/**
+ * ptimer_set_period_from_clock - Set counter increment from a Clock
+ * @s: ptimer to configure
+ * @clock: pointer to Clock object to take period from
+ * @divisor: value to scale the clock frequency down by
+ *
+ * If the ptimer is being driven from a Clock, this is the preferred
+ * way to tell the ptimer about the period, because it avoids any
+ * possible rounding errors that might happen if the internal
+ * representation of the Clock period was converted to either a period
+ * in ns or a frequency in Hz.
+ *
+ * If the ptimer should run at the same frequency as the clock,
+ * pass 1 as the @divisor; if the ptimer should run at half the
+ * frequency, pass 2, and so on.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
+ */
+void ptimer_set_period_from_clock(ptimer_state *s, const Clock *clock,
+ unsigned int divisor);
+
+/**
* ptimer_set_freq - Set counter frequency in Hz
* @s: ptimer to configure
* @freq: counter frequency in Hz
@@ -123,6 +205,9 @@ void ptimer_set_period(ptimer_state *s, int64_t period);
* as setting the frequency then this function is more appropriate,
* because it allows specifying an effective period which is
* precise to fractions of a nanosecond, avoiding rounding errors.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
*/
void ptimer_set_freq(ptimer_state *s, uint32_t freq);
@@ -150,6 +235,9 @@ uint64_t ptimer_get_limit(ptimer_state *s);
* Set the limit value of the down-counter. The @reload flag can
* be used to emulate the behaviour of timers which immediately
* reload the counter when their reload register is written to.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
*/
void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload);
@@ -171,6 +259,9 @@ uint64_t ptimer_get_count(ptimer_state *s);
* Set the value of the down-counter. If the counter is currently
* enabled this will arrange for a timer callback at the appropriate
* point in the future.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
*/
void ptimer_set_count(ptimer_state *s, uint64_t count);
@@ -179,11 +270,15 @@ void ptimer_set_count(ptimer_state *s, uint64_t count);
* @s: ptimer
* @oneshot: non-zero if this timer should only count down once
*
- * Start a ptimer counting down; when it reaches zero the bottom half
- * passed to ptimer_init() will be invoked. If the @oneshot argument is zero,
+ * Start a ptimer counting down; when it reaches zero the callback function
+ * passed to ptimer_init() will be invoked.
+ * If the @oneshot argument is zero,
* the counter value will then be reloaded from the limit and it will
* start counting down again. If @oneshot is non-zero, then the counter
* will disable itself when it reaches zero.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
*/
void ptimer_run(ptimer_state *s, int oneshot);
@@ -196,6 +291,9 @@ void ptimer_run(ptimer_state *s, int oneshot);
*
* Note that this can cause it to "lose" time, even if it is immediately
* restarted.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
*/
void ptimer_stop(ptimer_state *s);
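For illustration, a minimal sketch of how a device model might drive the transaction-based ptimer API above. MyTimerState, mytimer_expired and reload_value are assumptions, not part of this patch; PTIMER_POLICY_NO_IMMEDIATE_TRIGGER is one of the policy flags defined elsewhere in this header:

    static void mytimer_expired(void *opaque)
    {
        MyTimerState *s = opaque;
        /* raise an interrupt, set a status bit, ... */
    }

    /* during realize: */
    s->timer = ptimer_init(mytimer_expired, s, PTIMER_POLICY_NO_IMMEDIATE_TRIGGER);

    /* when the guest programs the timer: */
    ptimer_transaction_begin(s->timer);
    ptimer_set_freq(s->timer, 1000000);          /* count at 1 MHz */
    ptimer_set_limit(s->timer, reload_value, 1); /* also reload the counter now */
    ptimer_run(s->timer, 0);                     /* 0 = periodic, non-zero = one-shot */
    ptimer_transaction_commit(s->timer);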
diff --git a/include/hw/qdev-clock.h b/include/hw/qdev-clock.h
new file mode 100644
index 0000000000..ffa0f7ba09
--- /dev/null
+++ b/include/hw/qdev-clock.h
@@ -0,0 +1,164 @@
+/*
+ * Device's clock input and output
+ *
+ * Copyright GreenSocs 2016-2020
+ *
+ * Authors:
+ * Frederic Konrad
+ * Damien Hedde
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QDEV_CLOCK_H
+#define QDEV_CLOCK_H
+
+#include "hw/clock.h"
+
+/**
+ * qdev_init_clock_in:
+ * @dev: the device to add an input clock to
+ * @name: the name of the clock (can't be NULL).
+ * @callback: optional callback (or NULL) to be called on update.
+ * @opaque: argument for the callback
+ * @events: the events the callback should be called for
+ * (logical OR of ClockEvent enum values)
+ * @returns: a pointer to the newly added clock
+ *
+ * Add an input clock to device @dev as a clock named @name.
+ * This adds a child<> property.
+ * The callback will be called with @opaque as its opaque parameter.
+ */
+Clock *qdev_init_clock_in(DeviceState *dev, const char *name,
+ ClockCallback *callback, void *opaque,
+ unsigned int events);
+
+/**
+ * qdev_init_clock_out:
+ * @dev: the device to add an output clock to
+ * @name: the name of the clock (can't be NULL).
+ * @returns: a pointer to the newly added clock
+ *
+ * Add an output clock to device @dev as a clock named @name.
+ * This adds a child<> property.
+ */
+Clock *qdev_init_clock_out(DeviceState *dev, const char *name);
+
+/**
+ * qdev_get_clock_in:
+ * @dev: the device which has the clock
+ * @name: the name of the clock (can't be NULL).
+ * @returns: a pointer to the clock
+ *
+ * Get the input clock @name from @dev, or NULL if it does not exist.
+ */
+Clock *qdev_get_clock_in(DeviceState *dev, const char *name);
+
+/**
+ * qdev_get_clock_out:
+ * @dev: the device which has the clock
+ * @name: the name of the clock (can't be NULL).
+ * @returns: a pointer to the clock
+ *
+ * Get the output clock @name from @dev, or NULL if it does not exist.
+ */
+Clock *qdev_get_clock_out(DeviceState *dev, const char *name);
+
+/**
+ * qdev_connect_clock_in:
+ * @dev: a device
+ * @name: the name of an input clock in @dev
+ * @source: the source clock (an output clock of another device for example)
+ *
+ * Set the source clock of input clock @name of device @dev to @source.
+ * Period updates of @source will be propagated to the @name clock.
+ *
+ * Must be called before @dev is realized.
+ */
+void qdev_connect_clock_in(DeviceState *dev, const char *name, Clock *source);
+
+/**
+ * qdev_alias_clock:
+ * @dev: the device which has the clock
+ * @name: the name of the clock in @dev (can't be NULL)
+ * @alias_dev: the device to add the clock
+ * @alias_name: the name of the clock in @alias_dev
+ * @returns: a pointer to the clock
+ *
+ * Add a clock @alias_name in @alias_dev which is an alias of the clock @name
+ * in @dev. The direction, _in_ or _out_, will be the same as the original's.
+ * An alias clock must not be modified or used by @alias_dev and should
+ * typically be used only for device composition purposes.
+ */
+Clock *qdev_alias_clock(DeviceState *dev, const char *name,
+ DeviceState *alias_dev, const char *alias_name);
+
+/**
+ * qdev_finalize_clocklist:
+ * @dev: the device being finalized
+ *
+ * Clear the clocklist from @dev. Only used internally in qdev.
+ */
+void qdev_finalize_clocklist(DeviceState *dev);
+
+/**
+ * ClockPortInitElem:
+ * @name: name of the clock (can't be NULL)
+ * @is_output: indicates whether the clock is an input or an output
+ * @callback: for inputs, optional callback to be called on clock's update
+ * with device as opaque
+ * @callback_events: mask of ClockEvent values for when callback is called
+ * @offset: optional offset at which to store the Clock pointer in the device
+ * state structure (0 means unused)
+ */
+struct ClockPortInitElem {
+ const char *name;
+ bool is_output;
+ ClockCallback *callback;
+ unsigned int callback_events;
+ size_t offset;
+};
+
+#define clock_offset_value(devstate, field) \
+ (offsetof(devstate, field) + \
+ type_check(Clock *, typeof_field(devstate, field)))
+
+#define QDEV_CLOCK(out_not_in, devstate, field, cb, cbevents) { \
+ .name = (stringify(field)), \
+ .is_output = out_not_in, \
+ .callback = cb, \
+ .callback_events = cbevents, \
+ .offset = clock_offset_value(devstate, field), \
+}
+
+/**
+ * QDEV_CLOCK_(IN|OUT):
+ * @devstate: structure type. @dev argument of qdev_init_clocks below must be
+ * a pointer to that same type.
+ * @field: a field in @devstate (must be of type Clock *)
+ * @callback: (for input only) callback (or NULL) to be called with the device
+ * state as argument
+ * @cbevents: (for input only) ClockEvent mask for when callback is called
+ *
+ * The name of the clock will be derived from @field
+ */
+#define QDEV_CLOCK_IN(devstate, field, callback, cbevents) \
+ QDEV_CLOCK(false, devstate, field, callback, cbevents)
+
+#define QDEV_CLOCK_OUT(devstate, field) \
+ QDEV_CLOCK(true, devstate, field, NULL, 0)
+
+#define QDEV_CLOCK_END { .name = NULL }
+
+typedef struct ClockPortInitElem ClockPortInitArray[];
+
+/**
+ * qdev_init_clocks:
+ * @dev: the device to add clocks to
+ * @clocks: a QDEV_CLOCK_END-terminated array describing the clocks.
+ */
+void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks);
+
+#endif /* QDEV_CLOCK_H */
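For illustration, a minimal sketch of the declarative clock setup provided by this new header. MyDevState, mydev_clk_update, source_clock and the field names are assumptions, not part of this patch; ClockEvent and ClockUpdate come from hw/clock.h:

    typedef struct MyDevState {
        DeviceState parent_obj;
        Clock *clk_in;
        Clock *clk_out;
    } MyDevState;

    static void mydev_clk_update(void *opaque, ClockEvent event)
    {
        /* react to the new period of the input clock */
    }

    static const ClockPortInitArray mydev_clocks = {
        QDEV_CLOCK_IN(MyDevState, clk_in, mydev_clk_update, ClockUpdate),
        QDEV_CLOCK_OUT(MyDevState, clk_out),
        QDEV_CLOCK_END
    };

    /* in instance_init: */
    qdev_init_clocks(DEVICE(obj), mydev_clocks);

    /* board code, before realizing the device: */
    qdev_connect_clock_in(dev, "clk_in", source_clock);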
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index 9614f76ae6..9228e96c87 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -1,20 +1,80 @@
#ifndef QDEV_CORE_H
#define QDEV_CORE_H
+#include "qemu/atomic.h"
#include "qemu/queue.h"
#include "qemu/bitmap.h"
+#include "qemu/rcu.h"
+#include "qemu/rcu_queue.h"
#include "qom/object.h"
-#include "hw/irq.h"
#include "hw/hotplug.h"
+#include "hw/resettable.h"
+
+/**
+ * DOC: The QEMU Device API
+ *
+ * All modern devices should be represented as a derived QOM class of
+ * TYPE_DEVICE. The device API introduces the additional methods of
+ * @realize and @unrealize to represent additional stages in a device
+ * object's life cycle.
+ *
+ * Realization
+ * -----------
+ *
+ * Devices are constructed in two stages:
+ *
+ * 1) object instantiation via object_initialize() and
+ * 2) device realization via the #DeviceState.realized property
+ *
+ * The former may not fail (and must not abort or exit, since it is called
+ * during device introspection already), and the latter may return error
+ * information to the caller and must be re-entrant.
+ * Trivial field initializations should go into #TypeInfo.instance_init.
+ * Operations depending on @props static properties should go into @realize.
+ * After successful realization, setting static properties will fail.
+ *
+ * As an interim step, the #DeviceState.realized property can also be
+ * set with qdev_realize(). In the future, devices will propagate this
+ * state change to their children and along busses they expose. The
+ * point in time will be deferred to machine creation, so that values
+ * set in @realize will not be introspectable beforehand. Therefore
+ * devices must not create children during @realize; they should
+ * initialize them via object_initialize() in their own
+ * #TypeInfo.instance_init and forward the realization events
+ * appropriately.
+ *
+ * Any type may override the @realize and/or @unrealize callbacks but needs
+ * to call the parent type's implementation if keeping their functionality
+ * is desired. Refer to QOM documentation for further discussion and examples.
+ *
+ * .. note::
+ * Since TYPE_DEVICE doesn't implement @realize and @unrealize, types
+ * derived directly from it need not call their parent's @realize and
+ * @unrealize. For other types consult the documentation and
+ * implementation of the respective parent types.
+ *
+ * Hiding a device
+ * ---------------
+ *
+ * To hide a device, a DeviceListener function hide_device() needs to
+ * be registered. It can be used to defer adding a device and
+ * therefore hide it from the guest. The handler registered with this
+ * DeviceListener can save the options passed to it for re-use
+ * later. It must indicate whether it wants the device to be hidden or
+ * visible. When the handler decides the device shall be
+ * visible it will be added with qdev_device_add() and realized as any
+ * other device. Otherwise qdev_device_add() will return early without
+ * adding the device. The guest will not see a "hidden" device until
+ * it is marked visible and qdev_device_add() is called again.
+ *
+ */
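As a sketch of the two-stage pattern described above, a container device might initialize a child in its instance_init and forward realization from its own realize. MySoCState, MY_SOC and TYPE_MY_UART are assumptions, not part of this patch, and the child is assumed to be bus-less:

    static void mysoc_init(Object *obj)
    {
        MySoCState *s = MY_SOC(obj);

        /* instance_init: must not fail, only set the child up */
        object_initialize_child(obj, "uart", &s->uart, TYPE_MY_UART);
    }

    static void mysoc_realize(DeviceState *dev, Error **errp)
    {
        MySoCState *s = MY_SOC(dev);

        /* realize: forward realization to the child, may fail */
        if (!qdev_realize(DEVICE(&s->uart), NULL, errp)) {
            return;
        }
    }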
enum {
DEV_NVECTORS_UNSPECIFIED = -1,
};
#define TYPE_DEVICE "device"
-#define DEVICE(obj) OBJECT_CHECK(DeviceState, (obj), TYPE_DEVICE)
-#define DEVICE_CLASS(klass) OBJECT_CLASS_CHECK(DeviceClass, (klass), TYPE_DEVICE)
-#define DEVICE_GET_CLASS(obj) OBJECT_GET_CLASS(DeviceClass, (obj), TYPE_DEVICE)
+OBJECT_DECLARE_TYPE(DeviceState, DeviceClass, DEVICE)
typedef enum DeviceCategory {
DEVICE_CATEGORY_BRIDGE,
@@ -26,19 +86,18 @@ typedef enum DeviceCategory {
DEVICE_CATEGORY_SOUND,
DEVICE_CATEGORY_MISC,
DEVICE_CATEGORY_CPU,
+ DEVICE_CATEGORY_WATCHDOG,
DEVICE_CATEGORY_MAX
} DeviceCategory;
typedef void (*DeviceRealize)(DeviceState *dev, Error **errp);
-typedef void (*DeviceUnrealize)(DeviceState *dev, Error **errp);
+typedef void (*DeviceUnrealize)(DeviceState *dev);
typedef void (*DeviceReset)(DeviceState *dev);
typedef void (*BusRealize)(BusState *bus, Error **errp);
-typedef void (*BusUnrealize)(BusState *bus, Error **errp);
-
-struct VMStateDescription;
+typedef void (*BusUnrealize)(BusState *bus);
/**
- * DeviceClass:
+ * struct DeviceClass - The base class for all devices.
* @props: Properties accessing state fields.
* @realize: Callback function invoked when the #DeviceState:realized
* property is changed to %true.
@@ -47,53 +106,37 @@ struct VMStateDescription;
* @hotpluggable: indicates if #DeviceClass is hotpluggable, available
* as readonly "hotpluggable" property of #DeviceState instance
*
- * # Realization #
- * Devices are constructed in two stages,
- * 1) object instantiation via object_initialize() and
- * 2) device realization via #DeviceState:realized property.
- * The former may not fail (and must not abort or exit, since it is called
- * during device introspection already), and the latter may return error
- * information to the caller and must be re-entrant.
- * Trivial field initializations should go into #TypeInfo.instance_init.
- * Operations depending on @props static properties should go into @realize.
- * After successful realization, setting static properties will fail.
- *
- * As an interim step, the #DeviceState:realized property can also be
- * set with qdev_init_nofail().
- * In the future, devices will propagate this state change to their children
- * and along busses they expose.
- * The point in time will be deferred to machine creation, so that values
- * set in @realize will not be introspectable beforehand. Therefore devices
- * must not create children during @realize; they should initialize them via
- * object_initialize() in their own #TypeInfo.instance_init and forward the
- * realization events appropriately.
- *
- * Any type may override the @realize and/or @unrealize callbacks but needs
- * to call the parent type's implementation if keeping their functionality
- * is desired. Refer to QOM documentation for further discussion and examples.
- *
- * <note>
- * <para>
- * Since TYPE_DEVICE doesn't implement @realize and @unrealize, types
- * derived directly from it need not call their parent's @realize and
- * @unrealize.
- * For other types consult the documentation and implementation of the
- * respective parent types.
- * </para>
- * </note>
- */
-typedef struct DeviceClass {
- /*< private >*/
+ */
+struct DeviceClass {
+ /* private: */
ObjectClass parent_class;
- /*< public >*/
+ /* public: */
+
+ /**
+ * @categories: device categories the device belongs to
+ */
DECLARE_BITMAP(categories, DEVICE_CATEGORY_MAX);
+ /**
+ * @fw_name: name used to identify device to firmware interfaces
+ */
const char *fw_name;
+ /**
+ * @desc: human readable description of device
+ */
const char *desc;
- Property *props;
- /*
- * Can this device be instantiated with -device / device_add?
+ /**
+ * @props_: properties associated with device, should only be
+ * assigned by using device_class_set_props(). The underscore
+ * ensures a compile-time error if someone attempts to assign
+ * dc->props directly.
+ */
+ Property *props_;
+
+ /**
+ * @user_creatable: Can user instantiate with -device / device_add?
+ *
* All devices should support instantiation with device_add, and
* this flag should not exist. But we're not there, yet. Some
* devices fail to instantiate with cryptic error messages.
@@ -101,22 +144,37 @@ typedef struct DeviceClass {
* behavior would be cruel; clearing this flag will protect them.
* It should never be cleared without a comment explaining why it
* is cleared.
+ *
* TODO remove once we're there
*/
bool user_creatable;
bool hotpluggable;
/* callbacks */
+ /**
+ * @reset: deprecated device reset method pointer
+ *
+ * Modern code should use the ResettableClass interface to
+ * implement a multi-phase reset.
+ *
+ * TODO: remove once every reset callback is unused
+ */
DeviceReset reset;
DeviceRealize realize;
DeviceUnrealize unrealize;
- /* device state */
- const struct VMStateDescription *vmsd;
+ /**
+ * @vmsd: device state serialisation description for
+ * migration/save/restore
+ */
+ const VMStateDescription *vmsd;
- /* Private to qdev / bus. */
+ /**
+ * @bus_type: bus type
+ * private: to qdev / bus.
+ */
const char *bus_type;
-} DeviceClass;
+};
typedef struct NamedGPIOList NamedGPIOList;
@@ -128,42 +186,134 @@ struct NamedGPIOList {
QLIST_ENTRY(NamedGPIOList) node;
};
+typedef struct Clock Clock;
+typedef struct NamedClockList NamedClockList;
+
+struct NamedClockList {
+ char *name;
+ Clock *clock;
+ bool output;
+ bool alias;
+ QLIST_ENTRY(NamedClockList) node;
+};
+
+typedef struct {
+ bool engaged_in_io;
+} MemReentrancyGuard;
+
+
+typedef QLIST_HEAD(, NamedGPIOList) NamedGPIOListHead;
+typedef QLIST_HEAD(, NamedClockList) NamedClockListHead;
+typedef QLIST_HEAD(, BusState) BusStateHead;
+
/**
- * DeviceState:
- * @realized: Indicates whether the device has been fully constructed.
+ * struct DeviceState - common device state, accessed with qdev helpers
*
* This structure should not be accessed directly. We declare it here
* so that it can be embedded in individual device state structures.
*/
struct DeviceState {
- /*< private >*/
+ /* private: */
Object parent_obj;
- /*< public >*/
+ /* public: */
- const char *id;
+ /**
+ * @id: global device id
+ */
+ char *id;
+ /**
+ * @canonical_path: canonical path of realized device in the QOM tree
+ */
char *canonical_path;
+ /**
+ * @realized: has device been realized?
+ */
bool realized;
+ /**
+ * @pending_deleted_event: track pending deletion events during unplug
+ */
bool pending_deleted_event;
- QemuOpts *opts;
+ /**
+ * @pending_deleted_expires_ms: optional timeout for deletion events
+ */
+ int64_t pending_deleted_expires_ms;
+ /**
+ * @opts: QDict of options for the device
+ */
+ QDict *opts;
+ /**
+ * @hotplugged: was device added after PHASE_MACHINE_READY?
+ */
int hotplugged;
+ /**
+ * @allow_unplug_during_migration: can device be unplugged during migration
+ */
+ bool allow_unplug_during_migration;
+ /**
+ * @parent_bus: bus this device belongs to
+ */
BusState *parent_bus;
- QLIST_HEAD(, NamedGPIOList) gpios;
- QLIST_HEAD(, BusState) child_bus;
+ /**
+ * @gpios: QLIST of named GPIOs the device provides.
+ */
+ NamedGPIOListHead gpios;
+ /**
+ * @clocks: QLIST of named clocks the device provides.
+ */
+ NamedClockListHead clocks;
+ /**
+ * @child_bus: QLIST of child buses
+ */
+ BusStateHead child_bus;
+ /**
+ * @num_child_bus: number of @child_bus entries
+ */
int num_child_bus;
+ /**
+ * @instance_id_alias: device alias for handling legacy migration setups
+ */
int instance_id_alias;
+ /**
+ * @alias_required_for_version: indicates @instance_id_alias is
+ * needed for migration
+ */
int alias_required_for_version;
+ /**
+ * @reset: ResettableState for the device; handled by Resettable interface.
+ */
+ ResettableState reset;
+ /**
+ * @unplug_blockers: list of reasons to block unplugging of device
+ */
+ GSList *unplug_blockers;
+ /**
+ * @mem_reentrancy_guard: Is the device currently in mmio/pio/dma?
+ *
+ * Used to prevent re-entrancy confusing things.
+ */
+ MemReentrancyGuard mem_reentrancy_guard;
};
struct DeviceListener {
void (*realize)(DeviceListener *listener, DeviceState *dev);
void (*unrealize)(DeviceListener *listener, DeviceState *dev);
+ /*
+ * This callback is called upon init of the DeviceState and
+ * informs qdev if a device should be visible or hidden. We can
+ * hide a failover device depending, for example, on the device
+ * options.
+ *
+ * On errors, it returns false and errp is set. Device creation
+ * should fail in this case.
+ */
+ bool (*hide_device)(DeviceListener *listener, const QDict *device_opts,
+ bool from_json, Error **errp);
QTAILQ_ENTRY(DeviceListener) link;
};
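A minimal sketch of a hide_device handler registration, assuming the decision is based on a hypothetical "failover_pair_id" option; my_hide_device and my_listener are assumptions, not part of this patch:

    static bool my_hide_device(DeviceListener *listener, const QDict *device_opts,
                               bool from_json, Error **errp)
    {
        /* defer (hide) the device for now if it carries the option we care about */
        return qdict_haskey(device_opts, "failover_pair_id");
    }

    static DeviceListener my_listener = {
        .hide_device = my_hide_device,
    };

    /* during setup: */
    device_listener_register(&my_listener);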
#define TYPE_BUS "bus"
-#define BUS(obj) OBJECT_CHECK(BusState, (obj), TYPE_BUS)
-#define BUS_CLASS(klass) OBJECT_CLASS_CHECK(BusClass, (klass), TYPE_BUS)
-#define BUS_GET_CLASS(obj) OBJECT_GET_CLASS(BusClass, (obj), TYPE_BUS)
+DECLARE_OBJ_CHECKERS(BusState, BusClass,
+ BUS, TYPE_BUS)
struct BusClass {
ObjectClass parent_class;
@@ -171,13 +321,22 @@ struct BusClass {
/* FIXME first arg should be BusState */
void (*print_dev)(Monitor *mon, DeviceState *dev, int indent);
char *(*get_dev_path)(DeviceState *dev);
+
/*
* This callback is used to create Open Firmware device path in accordance
* with OF spec http://forthworks.com/standards/of1275.pdf. Individual bus
* bindings can be found at http://playground.sun.com/1275/bindings/.
*/
char *(*get_fw_dev_path)(DeviceState *dev);
- void (*reset)(BusState *bus);
+
+ /*
+ * Return whether the device can be added to @bus,
+ * based on the address that was set (via device properties)
+ * before realize. If not, on return @errp contains the
+ * human-readable error message.
+ */
+ bool (*check_address)(BusState *bus, DeviceState *dev, Error **errp);
+
BusRealize realize;
BusUnrealize unrealize;
@@ -188,6 +347,7 @@ struct BusClass {
};
typedef struct BusChild {
+ struct rcu_head rcu;
DeviceState *child;
int index;
QTAILQ_ENTRY(BusChild) sibling;
@@ -195,61 +355,53 @@ typedef struct BusChild {
#define QDEV_HOTPLUG_HANDLER_PROPERTY "hotplug-handler"
+typedef QTAILQ_HEAD(, BusChild) BusChildHead;
+typedef QLIST_ENTRY(BusState) BusStateEntry;
+
/**
- * BusState:
+ * struct BusState:
+ * @obj: parent object
+ * @parent: parent Device
+ * @name: name of bus
* @hotplug_handler: link to a hotplug handler associated with bus.
+ * @max_index: max number of child buses
+ * @realized: is the bus itself realized?
+ * @full: is the bus full?
+ * @num_children: current number of child buses
*/
struct BusState {
+ /* private: */
Object obj;
+ /* public: */
DeviceState *parent;
char *name;
HotplugHandler *hotplug_handler;
int max_index;
bool realized;
- QTAILQ_HEAD(, BusChild) children;
- QLIST_ENTRY(BusState) sibling;
-};
-
-/**
- * Property:
- * @set_default: true if the default value should be set from @defval,
- * in which case @info->set_default_value must not be NULL
- * (if false then no default value is set by the property system
- * and the field retains whatever value it was given by instance_init).
- * @defval: default value for the property. This is used only if @set_default
- * is true.
- */
-struct Property {
- const char *name;
- const PropertyInfo *info;
- ptrdiff_t offset;
- uint8_t bitnr;
- bool set_default;
- union {
- int64_t i;
- uint64_t u;
- } defval;
- int arrayoffset;
- const PropertyInfo *arrayinfo;
- int arrayfieldsize;
- const char *link_type;
-};
+ bool full;
+ int num_children;
-struct PropertyInfo {
- const char *name;
- const char *description;
- const QEnumLookup *enum_table;
- int (*print)(DeviceState *dev, Property *prop, char *dest, size_t len);
- void (*set_default_value)(Object *obj, const Property *prop);
- void (*create)(Object *obj, Property *prop, Error **errp);
- ObjectPropertyAccessor *get;
- ObjectPropertyAccessor *set;
- ObjectPropertyRelease *release;
+ /**
+ * @children: an RCU protected QTAILQ, thus readers must use RCU
+ * to access it, and writers must hold the big qemu lock
+ */
+ BusChildHead children;
+ /**
+ * @sibling: next bus
+ */
+ BusStateEntry sibling;
+ /**
+ * @reset: ResettableState for the bus; handled by Resettable interface.
+ */
+ ResettableState reset;
};
/**
- * GlobalProperty:
+ * typedef GlobalProperty - a global property type
+ *
* @used: Set to true if property was used when initializing a device.
+ * @optional: If set to true, GlobalProperty will be skipped without errors
+ * if the property doesn't exist.
*
* An error is fatal for non-hotplugged devices, when the global is applied.
*/
@@ -258,6 +410,7 @@ typedef struct GlobalProperty {
const char *property;
const char *value;
bool used;
+ bool optional;
} GlobalProperty;
static inline void
@@ -272,12 +425,125 @@ compat_props_add(GPtrArray *arr,
/*** Board API. This should go away once we have a machine config file. ***/
-DeviceState *qdev_create(BusState *bus, const char *name);
-DeviceState *qdev_try_create(BusState *bus, const char *name);
-void qdev_init_nofail(DeviceState *dev);
+/**
+ * qdev_new: Create a device on the heap
+ * @name: device type to create (we assert() that this type exists)
+ *
+ * This only allocates the memory and initializes the device state
+ * structure, ready for the caller to set properties if they wish.
+ * The device still needs to be realized.
+ *
+ * Return: a derived DeviceState object with a reference count of 1.
+ */
+DeviceState *qdev_new(const char *name);
+
+/**
+ * qdev_try_new: Try to create a device on the heap
+ * @name: device type to create
+ *
+ * This is like qdev_new(), except it returns %NULL when type @name
+ * does not exist, rather than asserting.
+ *
+ * Return: a derived DeviceState object with a reference count of 1 or
+ * NULL if type @name does not exist.
+ */
+DeviceState *qdev_try_new(const char *name);
+
+/**
+ * qdev_is_realized() - check if device is realized
+ * @dev: The device to check.
+ *
+ * Context: May be called outside big qemu lock.
+ * Return: true if the device has been fully constructed, false otherwise.
+ */
+static inline bool qdev_is_realized(DeviceState *dev)
+{
+ return qatomic_load_acquire(&dev->realized);
+}
+
+/**
+ * qdev_realize: Realize @dev.
+ * @dev: device to realize
+ * @bus: bus to plug it into (may be NULL)
+ * @errp: pointer to error object
+ *
+ * "Realize" the device, i.e. perform the second phase of device
+ * initialization.
+ * @dev must not be plugged into a bus already.
+ * If @bus, plug @dev into @bus. This takes a reference to @dev.
+ * If @dev has no QOM parent, make one up, taking another reference.
+ *
+ * If you created @dev using qdev_new(), you probably want to use
+ * qdev_realize_and_unref() instead.
+ *
+ * Return: true on success, else false setting @errp with error
+ */
+bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp);
+
+/**
+ * qdev_realize_and_unref: Realize @dev and drop a reference
+ * @dev: device to realize
+ * @bus: bus to plug it into (may be NULL)
+ * @errp: pointer to error object
+ *
+ * Realize @dev and drop a reference.
+ * This is like qdev_realize(), except the caller must hold a
+ * (private) reference, which is dropped on return regardless of
+ * success or failure. Intended use::
+ *
+ * dev = qdev_new();
+ * [...]
+ * qdev_realize_and_unref(dev, bus, errp);
+ *
+ * Now @dev can go away without further ado.
+ *
+ * If you are embedding the device into some other QOM device and
+ * initialized it via some variant on object_initialize_child() then
+ * do not use this function, because that family of functions arrange
+ * for the only reference to the child device to be held by the parent
+ * via the child<> property, and so the reference-count-drop done here
+ * would be incorrect. For that use case you want qdev_realize().
+ *
+ * Return: true on success, else false setting @errp with error
+ */
+bool qdev_realize_and_unref(DeviceState *dev, BusState *bus, Error **errp);
+
+/**
+ * qdev_unrealize: Unrealize a device
+ * @dev: device to unrealize
+ *
+ * This function will "unrealize" a device, which is the first phase
+ * of correctly destroying a device that has been realized. It will:
+ *
+ * - unrealize any child buses by calling qbus_unrealize()
+ * (this will recursively unrealize any devices on those buses)
+ * - call the unrealize method of @dev
+ *
+ * The device can then be freed by causing its reference count to go
+ * to zero.
+ *
+ * Warning: most devices in QEMU do not expect to be unrealized. Only
+ * devices which are hot-unpluggable should be unrealized (as part of
+ * the unplugging process); all other devices are expected to last for
+ * the life of the simulation and should not be unrealized and freed.
+ */
+void qdev_unrealize(DeviceState *dev);
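For illustration, the typical board-code sequence using the functions above. The type name "my-device", the property name and the bus are assumptions, not part of this patch; error_fatal comes from qapi/error.h:

    DeviceState *dev;

    dev = qdev_new("my-device");
    qdev_prop_set_uint32(dev, "num-lines", 4);       /* optional property setup */
    qdev_realize_and_unref(dev, bus, &error_fatal);  /* plug into @bus and realize */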
void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id,
int required_for_version);
+HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev);
HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev);
+bool qdev_hotplug_allowed(DeviceState *dev, Error **errp);
+
+/**
+ * qdev_get_hotplug_handler() - Get handler responsible for device wiring
+ * @dev: the device we want the HOTPLUG_HANDLER for.
+ *
+ * Note: if @dev has a parent bus, the bus's hotplug handler will be returned
+ * unless the machine hotplug handler overrides it.
+ *
+ * Return: pointer to object that implements TYPE_HOTPLUG_HANDLER interface
+ * or NULL if there aren't any.
+ */
HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev);
void qdev_unplug(DeviceState *dev, Error **errp);
void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev,
@@ -285,13 +551,190 @@ void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev,
void qdev_machine_creation_done(void);
bool qdev_machine_modified(void);
+/**
+ * qdev_add_unplug_blocker: Add an unplug blocker to a device
+ *
+ * @dev: Device to be blocked from unplug
+ * @reason: Reason for blocking
+ */
+void qdev_add_unplug_blocker(DeviceState *dev, Error *reason);
+
+/**
+ * qdev_del_unplug_blocker: Remove an unplug blocker from a device
+ *
+ * @dev: Device to be unblocked
+ * @reason: Pointer to the Error used with qdev_add_unplug_blocker.
+ * Used as a handle to lookup the blocker for deletion.
+ */
+void qdev_del_unplug_blocker(DeviceState *dev, Error *reason);
+
+/**
+ * qdev_unplug_blocked: Confirm if a device is blocked from unplug
+ *
+ * @dev: Device to be tested
+ * @errp: The reasons why the device is blocked, if any
+ *
+ * Returns: true (also setting @errp) if device is blocked from unplug,
+ * false otherwise
+ */
+bool qdev_unplug_blocked(DeviceState *dev, Error **errp);
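A minimal sketch of the unplug-blocker calls above, assuming the device keeps the Error handle in its state structure (s->unplug_blocker is an assumption, not part of this patch):

    /* block unplug while some long-running operation is in flight */
    error_setg(&s->unplug_blocker, "operation in progress");
    qdev_add_unplug_blocker(DEVICE(s), s->unplug_blocker);

    /* ... later, when the operation has finished: */
    qdev_del_unplug_blocker(DEVICE(s), s->unplug_blocker);
    error_free(s->unplug_blocker);
    s->unplug_blocker = NULL;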
+
+/**
+ * typedef GpioPolarity - Polarity of a GPIO line
+ *
+ * GPIO lines use either positive (active-high) logic,
+ * or negative (active-low) logic.
+ *
+ * In active-high logic (%GPIO_POLARITY_ACTIVE_HIGH), a pin is
+ * active when the voltage on the pin is high (relative to ground);
+ * whereas in active-low logic (%GPIO_POLARITY_ACTIVE_LOW), a pin
+ * is active when the voltage on the pin is low (or grounded).
+ */
+typedef enum {
+ GPIO_POLARITY_ACTIVE_LOW,
+ GPIO_POLARITY_ACTIVE_HIGH
+} GpioPolarity;
+
+/**
+ * qdev_get_gpio_in: Get one of a device's anonymous input GPIO lines
+ * @dev: Device whose GPIO we want
+ * @n: Number of the anonymous GPIO line (which must be in range)
+ *
+ * Returns the qemu_irq corresponding to an anonymous input GPIO line
+ * (which the device has set up with qdev_init_gpio_in()). The index
+ * @n of the GPIO line must be valid (i.e. be at least 0 and less than
+ * the total number of anonymous input GPIOs the device has); this
+ * function will assert() if passed an invalid index.
+ *
+ * This function is intended to be used by board code or SoC "container"
+ * device models to wire up the GPIO lines; usually the return value
+ * will be passed to qdev_connect_gpio_out() or a similar function to
+ * connect another device's output GPIO line to this input.
+ *
+ * For named input GPIO lines, use qdev_get_gpio_in_named().
+ *
+ * Return: qemu_irq corresponding to anonymous input GPIO line
+ */
qemu_irq qdev_get_gpio_in(DeviceState *dev, int n);
+
+/**
+ * qdev_get_gpio_in_named: Get one of a device's named input GPIO lines
+ * @dev: Device whose GPIO we want
+ * @name: Name of the input GPIO array
+ * @n: Number of the GPIO line in that array (which must be in range)
+ *
+ * Returns the qemu_irq corresponding to a named input GPIO line
+ * (which the device has set up with qdev_init_gpio_in_named()).
+ * The @name string must correspond to an input GPIO array which exists on
+ * the device, and the index @n of the GPIO line must be valid (i.e.
+ * be at least 0 and less than the total number of input GPIOs in that
+ * array); this function will assert() if passed an invalid name or index.
+ *
+ * For anonymous input GPIO lines, use qdev_get_gpio_in().
+ *
+ * Return: qemu_irq corresponding to named input GPIO line
+ */
qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n);
+/**
+ * qdev_connect_gpio_out: Connect one of a device's anonymous output GPIO lines
+ * @dev: Device whose GPIO to connect
+ * @n: Number of the anonymous output GPIO line (which must be in range)
+ * @pin: qemu_irq to connect the output line to
+ *
+ * This function connects an anonymous output GPIO line on a device
+ * up to an arbitrary qemu_irq, so that when the device asserts that
+ * output GPIO line, the qemu_irq's callback is invoked.
+ * The index @n of the GPIO line must be valid (i.e. be at least 0 and
+ * less than the total number of anonymous output GPIOs the device has
+ * created with qdev_init_gpio_out()); otherwise this function will assert().
+ *
+ * Outbound GPIO lines can be connected to any qemu_irq, but the common
+ * case is connecting them to another device's inbound GPIO line, using
+ * the qemu_irq returned by qdev_get_gpio_in() or qdev_get_gpio_in_named().
+ *
+ * It is not valid to try to connect one outbound GPIO to multiple
+ * qemu_irqs at once, or to connect multiple outbound GPIOs to the
+ * same qemu_irq. (Warning: there is no assertion or other guard to
+ * catch this error: the model will just not do the right thing.)
+ * Instead, for fan-out you can use the TYPE_SPLIT_IRQ device: connect
+ * a device's outbound GPIO to the splitter's input, and connect each
+ * of the splitter's outputs to a different device. For fan-in you
+ * can use the TYPE_OR_IRQ device, which is a model of a logical OR
+ * gate with multiple inputs and one output.
+ *
+ * For named output GPIO lines, use qdev_connect_gpio_out_named().
+ */
void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin);
+
+/**
+ * qdev_connect_gpio_out_named: Connect one of a device's named output
+ * GPIO lines
+ * @dev: Device whose GPIO to connect
+ * @name: Name of the output GPIO array
+ * @n: Number of the output GPIO line within that array (which must be in range)
+ * @input_pin: qemu_irq to connect the output line to
+ *
+ * This function connects a named output GPIO line on a device
+ * up to an arbitrary qemu_irq, so that when the device asserts that
+ * output GPIO line, the qemu_irq's callback is invoked.
+ * The @name string must correspond to an output GPIO array which exists on
+ * the device, and the index @n of the GPIO line must be valid (i.e.
+ * be at least 0 and less than the total number of output GPIOs in that
+ * array); this function will assert() if passed an invalid name or index.
+ *
+ * Outbound GPIO lines can be connected to any qemu_irq, but the common
+ * case is connecting them to another device's inbound GPIO line, using
+ * the qemu_irq returned by qdev_get_gpio_in() or qdev_get_gpio_in_named().
+ *
+ * It is not valid to try to connect one outbound GPIO to multiple
+ * qemu_irqs at once, or to connect multiple outbound GPIOs to the
+ * same qemu_irq; see qdev_connect_gpio_out() for details.
+ *
+ * For anonymous output GPIO lines, use qdev_connect_gpio_out().
+ */
void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n,
- qemu_irq pin);
+ qemu_irq input_pin);
+
+/**
+ * qdev_get_gpio_out_connector: Get the qemu_irq connected to an output GPIO
+ * @dev: Device whose output GPIO we are interested in
+ * @name: Name of the output GPIO array
+ * @n: Number of the output GPIO line within that array
+ *
+ * Returns whatever qemu_irq is currently connected to the specified
+ * output GPIO line of @dev. This will be NULL if the output GPIO line
+ * has never been wired up to anything. Note that the qemu_irq
+ * returned does not belong to @dev -- it will be the input GPIO or
+ * IRQ of whichever device the board code has connected up to @dev's
+ * output GPIO.
+ *
+ * You probably don't need to use this function -- it is used only
+ * by the platform-bus subsystem.
+ *
+ * Return: qemu_irq associated with GPIO or NULL if un-wired.
+ */
qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n);
+
+/**
+ * qdev_intercept_gpio_out: Intercept an existing GPIO connection
+ * @dev: Device to intercept the outbound GPIO line from
+ * @icpt: New qemu_irq to connect instead
+ * @name: Name of the output GPIO array
+ * @n: Number of the GPIO line in the array
+ *
+ * .. note::
+ * This function is provided only for use by the qtest testing framework
+ * and is not suitable for use in non-testing parts of QEMU.
+ *
+ * This function breaks an existing connection of an outbound GPIO
+ * line from @dev, and replaces it with the new qemu_irq @icpt, as if
+ * ``qdev_connect_gpio_out_named(dev, name, n, icpt)`` had been called.
+ * The previously connected qemu_irq is returned, so it can be restored
+ * by a second call to qdev_intercept_gpio_out() if desired.
+ *
+ * Return: old disconnected qemu_irq if one existed
+ */
qemu_irq qdev_intercept_gpio_out(DeviceState *dev, qemu_irq icpt,
const char *name, int n);
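For illustration, the usual board-code wiring built from the getters and connectors above. dev_a, dev_b and the "irq" array name are assumptions, not part of this patch:

    /* route device A's anonymous output GPIO 0 into line 1 of
     * device B's "irq" input GPIO array
     */
    qdev_connect_gpio_out(dev_a, 0, qdev_get_gpio_in_named(dev_b, "irq", 1));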
@@ -299,16 +742,70 @@ BusState *qdev_get_child_bus(DeviceState *dev, const char *name);
/*** Device API. ***/
-/* Register device properties. */
-/* GPIO inputs also double as IRQ sinks. */
+/**
+ * qdev_init_gpio_in: create an array of anonymous input GPIO lines
+ * @dev: Device to create input GPIOs for
+ * @handler: Function to call when GPIO line value is set
+ * @n: Number of GPIO lines to create
+ *
+ * Devices should use functions in the qdev_init_gpio_in* family in
+ * their instance_init or realize methods to create any input GPIO
+ * lines they need. There is no functional difference between
+ * anonymous and named GPIO lines. Stylistically, named GPIOs are
+ * preferable (easier to understand at callsites) unless a device
+ * has exactly one uniform kind of GPIO input whose purpose is obvious.
+ * Note that input GPIO lines can serve as 'sinks' for IRQ lines.
+ *
+ * See qdev_get_gpio_in() for how code that uses such a device can get
+ * hold of an input GPIO line to manipulate it.
+ */
void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n);
+
+/**
+ * qdev_init_gpio_out: create an array of anonymous output GPIO lines
+ * @dev: Device to create output GPIOs for
+ * @pins: Pointer to qemu_irq or qemu_irq array for the GPIO lines
+ * @n: Number of GPIO lines to create
+ *
+ * Devices should use functions in the qdev_init_gpio_out* family
+ * in their instance_init or realize methods to create any output
+ * GPIO lines they need. There is no functional difference between
+ * anonymous and named GPIO lines. Stylistically, named GPIOs are
+ * preferable (easier to understand at callsites) unless a device
+ * has exactly one uniform kind of GPIO output whose purpose is obvious.
+ *
+ * The @pins argument should be a pointer to either a "qemu_irq"
+ * (if @n == 1) or a "qemu_irq []" array (if @n > 1) in the device's
+ * state structure. The device implementation can then raise and
+ * lower the GPIO line by calling qemu_set_irq(). (If anything is
+ * connected to the other end of the GPIO this will cause the handler
+ * function for that input GPIO to be called.)
+ *
+ * See qdev_connect_gpio_out() for how code that uses such a device
+ * can connect to one of its output GPIO lines.
+ *
+ * There is no need to release the @pins allocated array because it
+ * will be automatically released when @dev calls its instance_finalize()
+ * handler.
+ */
void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n);
+
+/**
+ * qdev_init_gpio_out_named: create an array of named output GPIO lines
+ * @dev: Device to create output GPIOs for
+ * @pins: Pointer to qemu_irq or qemu_irq array for the GPIO lines
+ * @name: Name to give this array of GPIO lines
+ * @n: Number of GPIO lines to create
+ *
+ * Like qdev_init_gpio_out(), but creates an array of GPIO output lines
+ * with a name. Code using the device can then connect these GPIO lines
+ * using qdev_connect_gpio_out_named().
+ */
void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins,
const char *name, int n);
+
/**
- * qdev_init_gpio_in_named_with_opaque: create an array of input GPIO lines
- * for the specified device
- *
+ * qdev_init_gpio_in_named_with_opaque() - create an array of input GPIO lines
* @dev: Device to create input GPIOs for
* @handler: Function to call when GPIO line value is set
* @opaque: Opaque data pointer to pass to @handler
@@ -321,8 +818,11 @@ void qdev_init_gpio_in_named_with_opaque(DeviceState *dev,
const char *name, int n);
/**
- * qdev_init_gpio_in_named: create an array of input GPIO lines
- * for the specified device
+ * qdev_init_gpio_in_named() - create an array of input GPIO lines
+ * @dev: device to add array to
+ * @handler: a &typedef qemu_irq_handler function to call when GPIO is set
+ * @name: Name of the GPIO input (must be unique for this device)
+ * @n: Number of GPIO lines in this input set
*
* Like qdev_init_gpio_in_named_with_opaque(), but the opaque pointer
* passed to the handler is @dev (which is the most commonly desired behaviour).
@@ -334,10 +834,29 @@ static inline void qdev_init_gpio_in_named(DeviceState *dev,
qdev_init_gpio_in_named_with_opaque(dev, handler, dev, name, n);
}
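A minimal sketch of the device-side GPIO setup described above, as it might appear in an instance_init function. MyDevState, MY_DEV, mydev_set_input and the s->irq field are assumptions, not part of this patch:

    static void mydev_set_input(void *opaque, int line, int level)
    {
        MyDevState *s = opaque;
        /* react to the new @level of input @line */
    }

    static void mydev_init(Object *obj)
    {
        MyDevState *s = MY_DEV(obj);

        qdev_init_gpio_in(DEVICE(obj), mydev_set_input, 2);        /* two anonymous inputs */
        qdev_init_gpio_out_named(DEVICE(obj), &s->irq, "irq", 1);  /* one named output */
    }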
+/**
+ * qdev_pass_gpios: create GPIO lines on container which pass through to device
+ * @dev: Device which has GPIO lines
+ * @container: Container device which needs to expose them
+ * @name: Name of GPIO array to pass through (NULL for the anonymous GPIO array)
+ *
+ * In QEMU, complicated devices like SoCs are often modelled with a
+ * "container" QOM device which itself contains other QOM devices and
+ * which wires them up appropriately. This function allows the container
+ * to create GPIO arrays on itself which simply pass through to a GPIO
+ * array of one of its internal devices.
+ *
+ * If @dev has both input and output GPIOs named @name then both will
+ * be passed through. It is not possible to pass a subset of the array
+ * with this function.
+ *
+ * To users of the container device, the GPIO array created on @container
+ * behaves exactly like any other.
+ */
void qdev_pass_gpios(DeviceState *dev, DeviceState *container,
const char *name);
-BusState *qdev_get_parent_bus(DeviceState *dev);
+BusState *qdev_get_parent_bus(const DeviceState *dev);
/*** BUS API. ***/
@@ -347,9 +866,12 @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id);
typedef int (qbus_walkerfn)(BusState *bus, void *opaque);
typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque);
-void qbus_create_inplace(void *bus, size_t size, const char *typename,
- DeviceState *parent, const char *name);
-BusState *qbus_create(const char *typename, DeviceState *parent, const char *name);
+void qbus_init(void *bus, size_t size, const char *typename,
+ DeviceState *parent, const char *name);
+BusState *qbus_new(const char *typename, DeviceState *parent, const char *name);
+bool qbus_realize(BusState *bus, Error **errp);
+void qbus_unrealize(BusState *bus);
+
/* Returns > 0 if either devfn or busfn skip walk somewhere in cursion,
* < 0 if either devfn or busfn terminate walk somewhere in cursion,
* 0 otherwise. */
@@ -362,21 +884,39 @@ int qdev_walk_children(DeviceState *dev,
qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn,
void *opaque);
-void qdev_reset_all(DeviceState *dev);
-void qdev_reset_all_fn(void *opaque);
+/**
+ * device_cold_reset() - perform a recursive cold reset on a device
+ * @dev: device to reset.
+ *
+ * Reset device @dev and perform a recursive processing using the resettable
+ * interface. It triggers a RESET_TYPE_COLD.
+ */
+void device_cold_reset(DeviceState *dev);
+
+/**
+ * bus_cold_reset() - perform a recursive cold reset on a bus
+ * @bus: bus to reset
+ *
+ * Reset bus @bus and perform a recursive processing using the resettable
+ * interface. It triggers a RESET_TYPE_COLD.
+ */
+void bus_cold_reset(BusState *bus);
+
+/**
+ * device_is_in_reset() - check device reset state
+ * @dev: device to check
+ *
+ * Return: true if the device @dev is currently being reset.
+ */
+bool device_is_in_reset(DeviceState *dev);
/**
- * @qbus_reset_all:
- * @bus: Bus to be reset.
+ * bus_is_in_reset() - check bus reset state
+ * @bus: bus to check
*
- * Reset @bus and perform a bus-level ("hard") reset of all devices connected
- * to it, including recursive processing of all buses below @bus itself. A
- * hard reset means that qbus_reset_all will reset all state of the device.
- * For PCI devices, for example, this will include the base address registers
- * or configuration space.
+ * Return: true if the bus @bus is currently being reset.
*/
-void qbus_reset_all(BusState *bus);
-void qbus_reset_all_fn(void *opaque);
+bool bus_is_in_reset(BusState *bus);
/* This should go away once we get rid of the NULL bus hack */
BusState *sysbus_get_default(void);
@@ -385,59 +925,185 @@ char *qdev_get_fw_dev_path(DeviceState *dev);
char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev);
/**
- * @qdev_machine_init
+ * device_class_set_props(): add a set of properties to a device
+ * @dc: the DeviceClass to add the properties to
+ * @props: an array of properties, terminated by DEFINE_PROP_END_OF_LIST()
*
- * Initialize platform devices before machine init. This is a hack until full
- * support for composition is added.
+ * This will add a set of properties to the object. It will fault if
+ * you attempt to add an existing property defined by a parent class.
+ * To modify an inherited property you need to use????
*/
-void qdev_machine_init(void);
+void device_class_set_props(DeviceClass *dc, Property *props);
/**
- * @device_reset
+ * device_class_set_parent_reset() - legacy helper to chain device reset handlers
+ * @dc: device class
+ * @dev_reset: function pointer to reset handler
+ * @parent_reset: function pointer to the parent's reset handler
+ *
+ * Modern code should use the ResettableClass interface to
+ * implement a multi-phase reset instead.
*
- * Reset a single device (by calling the reset method).
+ * TODO: remove the function when DeviceClass's reset method
+ * is not used anymore.
*/
-void device_reset(DeviceState *dev);
-
void device_class_set_parent_reset(DeviceClass *dc,
DeviceReset dev_reset,
DeviceReset *parent_reset);
+
+/**
+ * device_class_set_parent_realize() - set up for chaining realize fns
+ * @dc: The device class
+ * @dev_realize: the device realize function
+ * @parent_realize: somewhere to save the parent's realize function
+ *
+ * This is intended to be used when the new realize function will
+ * eventually call its parent realization function during creation.
+ * This requires storing the parent's function pointer somewhere
+ * (usually in the class structure) so you can eventually call
+ * dc->parent_realize(dev, errp)
+ */
void device_class_set_parent_realize(DeviceClass *dc,
DeviceRealize dev_realize,
DeviceRealize *parent_realize);
+
+
+/**
+ * device_class_set_parent_unrealize() - set up for chaining unrealize fns
+ * @dc: The device class
+ * @dev_unrealize: the device unrealize function
+ * @parent_unrealize: somewhere to save the parent's unrealize function
+ *
+ * This is intended to be used when the new unrealize function will
+ * eventually call its parent unrealization function during the
+ * unrealize phase. This requires storing the parent's function pointer
+ * somewhere (usually in the class structure) so you can eventually call
+ * dc->parent_unrealize(dev);
+ */
void device_class_set_parent_unrealize(DeviceClass *dc,
DeviceUnrealize dev_unrealize,
DeviceUnrealize *parent_unrealize);
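For illustration, the usual chaining pattern with device_class_set_parent_realize(). MyDevClass (assumed to carry a DeviceRealize parent_realize field) and the MY_DEV_* macros are assumptions, not part of this patch:

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        MyDevClass *mdc = MY_DEV_GET_CLASS(dev);
        Error *local_err = NULL;

        mdc->parent_realize(dev, &local_err);    /* chain to the parent first */
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        /* subclass-specific realization follows */
    }

    static void mydev_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);
        MyDevClass *mdc = MY_DEV_CLASS(klass);

        device_class_set_parent_realize(dc, mydev_realize, &mdc->parent_realize);
    }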
-const struct VMStateDescription *qdev_get_vmsd(DeviceState *dev);
+const VMStateDescription *qdev_get_vmsd(DeviceState *dev);
const char *qdev_fw_name(DeviceState *dev);
+void qdev_assert_realized_properly(void);
Object *qdev_get_machine(void);
-void object_apply_compat_props(Object *obj);
+/**
+ * qdev_get_human_name() - Return a human-readable name for a device
+ * @dev: The device. Must be a valid and non-NULL pointer.
+ *
+ * .. note::
+ * This function is intended for user friendly error messages.
+ *
+ * Returns: a newly allocated string containing the device id if it is set,
+ * else the object's canonical path.
+ *
+ * Use g_free() to free it.
+ */
+char *qdev_get_human_name(DeviceState *dev);
/* FIXME: make this a link<> */
-void qdev_set_parent_bus(DeviceState *dev, BusState *bus);
+bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp);
-extern bool qdev_hotplug;
extern bool qdev_hot_removed;
char *qdev_get_dev_path(DeviceState *dev);
-GSList *qdev_build_hotpluggable_device_list(Object *peripheral);
+void qbus_set_hotplug_handler(BusState *bus, Object *handler);
+void qbus_set_bus_hotplug_handler(BusState *bus);
-void qbus_set_hotplug_handler(BusState *bus, DeviceState *handler,
- Error **errp);
+static inline bool qbus_is_hotpluggable(BusState *bus)
+{
+ HotplugHandler *plug_handler = bus->hotplug_handler;
+ bool ret = !!plug_handler;
-void qbus_set_bus_hotplug_handler(BusState *bus, Error **errp);
+ if (plug_handler) {
+ HotplugHandlerClass *hdc;
-static inline bool qbus_is_hotpluggable(BusState *bus)
+ hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler);
+ if (hdc->is_hotpluggable_bus) {
+ ret = hdc->is_hotpluggable_bus(plug_handler, bus);
+ }
+ }
+ return ret;
+}
+
+/**
+ * qbus_mark_full: Mark this bus as full, so no more devices can be attached
+ * @bus: Bus to mark as full
+ *
+ * By default, QEMU will allow devices to be plugged into a bus up
+ * to the bus class's device count limit. Calling this function
+ * marks a particular bus as full, so that no more devices can be
+ * plugged into it. In particular this means that the bus will not
+ * be considered as a candidate for plugging in devices created by
+ * the user on the commandline or via the monitor.
+ * If a machine has multiple buses of a given type, such as I2C,
+ * where some of those buses in the real hardware are used only for
+ * internal devices and some are exposed via expansion ports, you
+ * can use this function to mark the internal-only buses as full
+ * after you have created all their internal devices. Then user
+ * created devices will appear on the expansion-port bus where
+ * guest software expects them.
+ */
+static inline void qbus_mark_full(BusState *bus)
{
- return bus->hotplug_handler;
+ bus->full = true;
}
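A one-line sketch of the qbus_mark_full() use case above, assuming the board has already created its internal-only bus and populated it (internal_bus is an assumption, not part of this patch):

    /* no user-created devices may be plugged into this bus from now on */
    qbus_mark_full(internal_bus);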
void device_listener_register(DeviceListener *listener);
void device_listener_unregister(DeviceListener *listener);
+/**
+ * qdev_should_hide_device() - check if device should be hidden
+ *
+ * @opts: options QDict
+ * @from_json: true if @opts entries are typed, false for all strings
+ * @errp: pointer to error object
+ *
+ * When a device is added via qdev_device_add() this will be called.
+ *
+ * Return: true if the device should be hidden (not added now), false otherwise.
+ */
+bool qdev_should_hide_device(const QDict *opts, bool from_json, Error **errp);
+
+typedef enum MachineInitPhase {
+ /* current_machine is NULL. */
+ PHASE_NO_MACHINE,
+
+ /* current_machine is not NULL, but current_machine->accel is NULL. */
+ PHASE_MACHINE_CREATED,
+
+ /*
+ * current_machine->accel is not NULL, but the machine properties have
+ * not been validated and machine_class->init has not yet been called.
+ */
+ PHASE_ACCEL_CREATED,
+
+ /*
+ * Late backend objects have been created and initialized.
+ */
+ PHASE_LATE_BACKENDS_CREATED,
+
+ /*
+ * machine_class->init has been called, thus creating any embedded
+ * devices and validating machine properties. Devices created at
+ * this time are considered to be cold-plugged.
+ */
+ PHASE_MACHINE_INITIALIZED,
+
+ /*
+ * QEMU is ready to start CPUs and devices created at this time
+ * are considered to be hot-plugged. The monitor is not restricted
+ * to "preconfig" commands.
+ */
+ PHASE_MACHINE_READY,
+} MachineInitPhase;
+
+bool phase_check(MachineInitPhase phase);
+void phase_advance(MachineInitPhase phase);
+
#endif
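For illustration, a sketch of how the phase machinery above is typically consulted; the surrounding logic is an assumption, not part of this patch:

    /* devices created after the machine is ready count as hot-plugged */
    if (phase_check(PHASE_MACHINE_READY)) {
        dev->hotplugged = 1;
    }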
diff --git a/include/hw/qdev-dma.h b/include/hw/qdev-dma.h
index 8cfb0f348e..b00391aa0c 100644
--- a/include/hw/qdev-dma.h
+++ b/include/hw/qdev-dma.h
@@ -6,5 +6,11 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
+
+#ifndef HW_QDEV_DMA_H
+#define HW_QDEV_DMA_H
+
#define DEFINE_PROP_DMAADDR(_n, _s, _f, _d) \
DEFINE_PROP_UINT64(_n, _s, _f, _d)
+
+#endif
diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h
new file mode 100644
index 0000000000..438f65389f
--- /dev/null
+++ b/include/hw/qdev-properties-system.h
@@ -0,0 +1,97 @@
+#ifndef HW_QDEV_PROPERTIES_SYSTEM_H
+#define HW_QDEV_PROPERTIES_SYSTEM_H
+
+#include "hw/qdev-properties.h"
+
+extern const PropertyInfo qdev_prop_chr;
+extern const PropertyInfo qdev_prop_macaddr;
+extern const PropertyInfo qdev_prop_reserved_region;
+extern const PropertyInfo qdev_prop_multifd_compression;
+extern const PropertyInfo qdev_prop_mig_mode;
+extern const PropertyInfo qdev_prop_granule_mode;
+extern const PropertyInfo qdev_prop_zero_page_detection;
+extern const PropertyInfo qdev_prop_losttickpolicy;
+extern const PropertyInfo qdev_prop_blockdev_on_error;
+extern const PropertyInfo qdev_prop_bios_chs_trans;
+extern const PropertyInfo qdev_prop_fdc_drive_type;
+extern const PropertyInfo qdev_prop_drive;
+extern const PropertyInfo qdev_prop_drive_iothread;
+extern const PropertyInfo qdev_prop_netdev;
+extern const PropertyInfo qdev_prop_pci_devfn;
+extern const PropertyInfo qdev_prop_blocksize;
+extern const PropertyInfo qdev_prop_pci_host_devaddr;
+extern const PropertyInfo qdev_prop_uuid;
+extern const PropertyInfo qdev_prop_audiodev;
+extern const PropertyInfo qdev_prop_off_auto_pcibar;
+extern const PropertyInfo qdev_prop_pcie_link_speed;
+extern const PropertyInfo qdev_prop_pcie_link_width;
+extern const PropertyInfo qdev_prop_cpus390entitlement;
+extern const PropertyInfo qdev_prop_iothread_vq_mapping_list;
+
+#define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t)
+
+#define DEFINE_PROP_CHR(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_chr, CharBackend)
+#define DEFINE_PROP_NETDEV(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_netdev, NICPeers)
+#define DEFINE_PROP_DRIVE(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_drive, BlockBackend *)
+#define DEFINE_PROP_DRIVE_IOTHREAD(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_drive_iothread, BlockBackend *)
+#define DEFINE_PROP_MACADDR(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_macaddr, MACAddr)
+#define DEFINE_PROP_RESERVED_REGION(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_reserved_region, ReservedRegion)
+#define DEFINE_PROP_MULTIFD_COMPRESSION(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_multifd_compression, \
+ MultiFDCompression)
+#define DEFINE_PROP_MIG_MODE(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_mig_mode, \
+ MigMode)
+#define DEFINE_PROP_GRANULE_MODE(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_granule_mode, GranuleMode)
+#define DEFINE_PROP_ZERO_PAGE_DETECTION(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_zero_page_detection, \
+ ZeroPageDetection)
+#define DEFINE_PROP_LOSTTICKPOLICY(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_losttickpolicy, \
+ LostTickPolicy)
+#define DEFINE_PROP_BLOCKDEV_ON_ERROR(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_blockdev_on_error, \
+ BlockdevOnError)
+#define DEFINE_PROP_BIOS_CHS_TRANS(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_bios_chs_trans, int)
+#define DEFINE_PROP_BLOCKSIZE(_n, _s, _f) \
+ DEFINE_PROP_UNSIGNED(_n, _s, _f, 0, qdev_prop_blocksize, uint32_t)
+#define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_pci_host_devaddr, PCIHostDeviceAddress)
+#define DEFINE_PROP_OFF_AUTO_PCIBAR(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_off_auto_pcibar, \
+ OffAutoPCIBAR)
+#define DEFINE_PROP_PCIE_LINK_SPEED(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_speed, \
+ PCIExpLinkSpeed)
+#define DEFINE_PROP_PCIE_LINK_WIDTH(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_width, \
+ PCIExpLinkWidth)
+
+#define DEFINE_PROP_UUID(_name, _state, _field) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_uuid, QemuUUID, \
+ .set_default = true)
+
+#define DEFINE_PROP_AUDIODEV(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_audiodev, QEMUSoundCard)
+
+#define DEFINE_PROP_UUID_NODEFAULT(_name, _state, _field) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_uuid, QemuUUID)
+
+#define DEFINE_PROP_CPUS390ENTITLEMENT(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \
+ CpuS390Entitlement)
+
+#define DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST(_name, _state, _field) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \
+ IOThreadVirtQueueMappingList *)
+
+#endif
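
A hedged sketch of how a device might use these system-level property types in its property list; DemoState and its field names are invented, and the list would typically be registered from class_init with device_class_set_props():

    typedef struct DemoState {
        DeviceState parent_obj;
        CharBackend chr;
        BlockBackend *blk;
        MACAddr mac;
    } DemoState;

    static Property demo_properties[] = {
        DEFINE_PROP_CHR("chardev", DemoState, chr),
        DEFINE_PROP_DRIVE("drive", DemoState, blk),
        DEFINE_PROP_MACADDR("mac", DemoState, mac),
        DEFINE_PROP_END_OF_LIST(),
    };
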
diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h
index b6758c852e..09aa04ca1e 100644
--- a/include/hw/qdev-properties.h
+++ b/include/hw/qdev-properties.h
@@ -1,113 +1,119 @@
#ifndef QEMU_QDEV_PROPERTIES_H
#define QEMU_QDEV_PROPERTIES_H
-#include "qapi/qapi-types-block.h"
-#include "qapi/qapi-types-misc.h"
#include "hw/qdev-core.h"
+/**
+ * Property:
+ * @set_default: true if the default value should be set from @defval,
+ * in which case @info->set_default_value must not be NULL
+ * (if false then no default value is set by the property system
+ * and the field retains whatever value it was given by instance_init).
+ * @defval: default value for the property. This is used only if @set_default
+ * is true.
+ */
+struct Property {
+ const char *name;
+ const PropertyInfo *info;
+ ptrdiff_t offset;
+ uint8_t bitnr;
+ uint64_t bitmask;
+ bool set_default;
+ union {
+ int64_t i;
+ uint64_t u;
+ } defval;
+ int arrayoffset;
+ const PropertyInfo *arrayinfo;
+ int arrayfieldsize;
+ const char *link_type;
+};
+
+struct PropertyInfo {
+ const char *name;
+ const char *description;
+ const QEnumLookup *enum_table;
+ bool realized_set_allowed; /* allow setting property on realized device */
+ int (*print)(Object *obj, Property *prop, char *dest, size_t len);
+ void (*set_default_value)(ObjectProperty *op, const Property *prop);
+ ObjectProperty *(*create)(ObjectClass *oc, const char *name,
+ Property *prop);
+ ObjectPropertyAccessor *get;
+ ObjectPropertyAccessor *set;
+ ObjectPropertyRelease *release;
+};
+
+
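
To illustrate the @set_default/@defval split documented above (DemoState and the property names are placeholders): the plain helpers record a default in @defval, while the *_NODEFAULT variants leave the field with whatever instance_init assigned:

    static Property demo_props[] = {
        /* set_default = true, defval.u = 16: the field starts out as 16 */
        DEFINE_PROP_UINT32("queue-size", DemoState, queue_size, 16),
        /* no default: queue_depth keeps the value set by instance_init */
        DEFINE_PROP_UNSIGNED_NODEFAULT("queue-depth", DemoState, queue_depth,
                                       qdev_prop_uint32, uint32_t),
        DEFINE_PROP_END_OF_LIST(),
    };
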
/*** qdev-properties.c ***/
extern const PropertyInfo qdev_prop_bit;
extern const PropertyInfo qdev_prop_bit64;
extern const PropertyInfo qdev_prop_bool;
+extern const PropertyInfo qdev_prop_enum;
extern const PropertyInfo qdev_prop_uint8;
extern const PropertyInfo qdev_prop_uint16;
extern const PropertyInfo qdev_prop_uint32;
extern const PropertyInfo qdev_prop_int32;
extern const PropertyInfo qdev_prop_uint64;
+extern const PropertyInfo qdev_prop_uint64_checkmask;
extern const PropertyInfo qdev_prop_int64;
extern const PropertyInfo qdev_prop_size;
extern const PropertyInfo qdev_prop_string;
-extern const PropertyInfo qdev_prop_chr;
-extern const PropertyInfo qdev_prop_tpm;
-extern const PropertyInfo qdev_prop_ptr;
-extern const PropertyInfo qdev_prop_macaddr;
extern const PropertyInfo qdev_prop_on_off_auto;
-extern const PropertyInfo qdev_prop_losttickpolicy;
-extern const PropertyInfo qdev_prop_blockdev_on_error;
-extern const PropertyInfo qdev_prop_bios_chs_trans;
-extern const PropertyInfo qdev_prop_fdc_drive_type;
-extern const PropertyInfo qdev_prop_drive;
-extern const PropertyInfo qdev_prop_netdev;
-extern const PropertyInfo qdev_prop_pci_devfn;
-extern const PropertyInfo qdev_prop_blocksize;
-extern const PropertyInfo qdev_prop_pci_host_devaddr;
-extern const PropertyInfo qdev_prop_uuid;
-extern const PropertyInfo qdev_prop_arraylen;
+extern const PropertyInfo qdev_prop_size32;
+extern const PropertyInfo qdev_prop_array;
extern const PropertyInfo qdev_prop_link;
-extern const PropertyInfo qdev_prop_off_auto_pcibar;
-extern const PropertyInfo qdev_prop_pcie_link_speed;
-extern const PropertyInfo qdev_prop_pcie_link_width;
-#define DEFINE_PROP(_name, _state, _field, _prop, _type) { \
+#define DEFINE_PROP(_name, _state, _field, _prop, _type, ...) { \
.name = (_name), \
.info = &(_prop), \
.offset = offsetof(_state, _field) \
+ type_check(_type, typeof_field(_state, _field)), \
+ __VA_ARGS__ \
}
-#define DEFINE_PROP_SIGNED(_name, _state, _field, _defval, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type,typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.i = (_type)_defval, \
- }
+#define DEFINE_PROP_SIGNED(_name, _state, _field, _defval, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type, \
+ .set_default = true, \
+ .defval.i = (_type)_defval)
-#define DEFINE_PROP_SIGNED_NODEFAULT(_name, _state, _field, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type, typeof_field(_state, _field)), \
- }
+#define DEFINE_PROP_SIGNED_NODEFAULT(_name, _state, _field, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type)
-#define DEFINE_PROP_BIT(_name, _state, _field, _bit, _defval) { \
- .name = (_name), \
- .info = &(qdev_prop_bit), \
- .bitnr = (_bit), \
- .offset = offsetof(_state, _field) \
- + type_check(uint32_t,typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (bool)_defval, \
- }
+#define DEFINE_PROP_BIT(_name, _state, _field, _bit, _defval) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_bit, uint32_t, \
+ .bitnr = (_bit), \
+ .set_default = true, \
+ .defval.u = (bool)_defval)
-#define DEFINE_PROP_UNSIGNED(_name, _state, _field, _defval, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type, typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (_type)_defval, \
- }
+#define DEFINE_PROP_UNSIGNED(_name, _state, _field, _defval, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type, \
+ .set_default = true, \
+ .defval.u = (_type)_defval)
-#define DEFINE_PROP_UNSIGNED_NODEFAULT(_name, _state, _field, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type, typeof_field(_state, _field)), \
- }
+#define DEFINE_PROP_UNSIGNED_NODEFAULT(_name, _state, _field, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type)
-#define DEFINE_PROP_BIT64(_name, _state, _field, _bit, _defval) { \
- .name = (_name), \
- .info = &(qdev_prop_bit64), \
- .bitnr = (_bit), \
- .offset = offsetof(_state, _field) \
- + type_check(uint64_t, typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (bool)_defval, \
- }
+#define DEFINE_PROP_BIT64(_name, _state, _field, _bit, _defval) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_bit64, uint64_t, \
+ .bitnr = (_bit), \
+ .set_default = true, \
+ .defval.u = (bool)_defval)
-#define DEFINE_PROP_BOOL(_name, _state, _field, _defval) { \
- .name = (_name), \
- .info = &(qdev_prop_bool), \
- .offset = offsetof(_state, _field) \
- + type_check(bool, typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (bool)_defval, \
- }
+#define DEFINE_PROP_BOOL(_name, _state, _field, _defval) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_bool, bool, \
+ .set_default = true, \
+ .defval.u = (bool)_defval)
-#define PROP_ARRAY_LEN_PREFIX "len-"
+/**
+ * The DEFINE_PROP_UINT64_CHECKMASK macro checks a user-supplied value
+ * against the corresponding bitmask and rejects any value that sets bits
+ * outside that mask. The default value must be set in instance_init().
+ */
+#define DEFINE_PROP_UINT64_CHECKMASK(_name, _state, _field, _bitmask) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_uint64_checkmask, uint64_t, \
+ .bitmask = (_bitmask), \
+ .set_default = false)
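
For example (field name and mask invented), a property that only accepts bits 0-3 and bit 8 could be declared as follows; any other bit set in the supplied value is rejected:

    DEFINE_PROP_UINT64_CHECKMASK("features", DemoState, features, 0x10f),
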
/**
* DEFINE_PROP_ARRAY:
@@ -119,40 +125,30 @@ extern const PropertyInfo qdev_prop_pcie_link_width;
* @_arrayprop: PropertyInfo defining what property the array elements have
* @_arraytype: C type of the array elements
*
- * Define device properties for a variable-length array _name. A
- * static property "len-arrayname" is defined. When the device creator
- * sets this property to the desired length of array, further dynamic
- * properties "arrayname[0]", "arrayname[1]", ... are defined so the
- * device creator can set the array element values. Setting the
- * "len-arrayname" property more than once is an error.
+ * Define device properties for a variable-length array _name. The array is
+ * represented as a list in the visitor interface.
+ *
+ * @_arraytype is required to be movable with memcpy().
*
- * When the array length is set, the @_field member of the device
+ * When the array property is set, the @_field member of the device
* struct is set to the array length, and @_arrayfield is set to point
- * to (zero-initialised) memory allocated for the array. For a zero
- * length array, @_field will be set to 0 and @_arrayfield to NULL.
+ * to the memory allocated for the array.
+ *
* It is the responsibility of the device deinit code to free the
* @_arrayfield memory.
*/
#define DEFINE_PROP_ARRAY(_name, _state, _field, \
- _arrayfield, _arrayprop, _arraytype) { \
- .name = (PROP_ARRAY_LEN_PREFIX _name), \
- .info = &(qdev_prop_arraylen), \
- .set_default = true, \
- .defval.u = 0, \
- .offset = offsetof(_state, _field) \
- + type_check(uint32_t, typeof_field(_state, _field)), \
- .arrayinfo = &(_arrayprop), \
- .arrayfieldsize = sizeof(_arraytype), \
- .arrayoffset = offsetof(_state, _arrayfield), \
- }
+ _arrayfield, _arrayprop, _arraytype) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_array, uint32_t, \
+ .set_default = true, \
+ .defval.u = 0, \
+ .arrayinfo = &(_arrayprop), \
+ .arrayfieldsize = sizeof(_arraytype), \
+ .arrayoffset = offsetof(_state, _arrayfield))
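
A sketch of an array property under the list-based scheme (names invented): the uint32_t @_field receives the element count and @_arrayfield points at the allocated elements. The qdev_prop_set_array() sketch further below shows how such a property can be filled from board code:

    static Property demo_props[] = {
        DEFINE_PROP_ARRAY("ports", DemoState, num_ports, ports,
                          qdev_prop_uint16, uint16_t),
        DEFINE_PROP_END_OF_LIST(),
    };
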
-#define DEFINE_PROP_LINK(_name, _state, _field, _type, _ptr_type) { \
- .name = (_name), \
- .info = &(qdev_prop_link), \
- .offset = offsetof(_state, _field) \
- + type_check(_ptr_type, typeof_field(_state, _field)), \
- .link_type = _type, \
- }
+#define DEFINE_PROP_LINK(_name, _state, _field, _type, _ptr_type) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_link, _ptr_type, \
+ .link_type = _type)
#define DEFINE_PROP_UINT8(_n, _s, _f, _d) \
DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_uint8, uint8_t)
@@ -168,77 +164,28 @@ extern const PropertyInfo qdev_prop_pcie_link_width;
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_int64, int64_t)
#define DEFINE_PROP_SIZE(_n, _s, _f, _d) \
DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size, uint64_t)
-#define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t)
-
-/*
- * Please avoid pointer properties. If you must use them, you must
- * cover them in their device's class init function as follows:
- *
- * - If the property must be set, the device cannot be used with
- * device_add, so add code like this:
- * |* Reason: pointer property "NAME-OF-YOUR-PROP" *|
- * DeviceClass *dc = DEVICE_CLASS(class);
- * dc->user_creatable = false;
- *
- * - If the property may safely remain null, document it like this:
- * |*
- * * Note: pointer property "interrupt_vector" may remain null, thus
- * * no need for dc->user_creatable = false;
- * *|
- */
-#define DEFINE_PROP_PTR(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_ptr, void*)
-
-#define DEFINE_PROP_CHR(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_chr, CharBackend)
#define DEFINE_PROP_STRING(_n, _s, _f) \
DEFINE_PROP(_n, _s, _f, qdev_prop_string, char*)
-#define DEFINE_PROP_NETDEV(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_netdev, NICPeers)
-#define DEFINE_PROP_DRIVE(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_drive, BlockBackend *)
-#define DEFINE_PROP_MACADDR(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_macaddr, MACAddr)
#define DEFINE_PROP_ON_OFF_AUTO(_n, _s, _f, _d) \
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_on_off_auto, OnOffAuto)
-#define DEFINE_PROP_LOSTTICKPOLICY(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_losttickpolicy, \
- LostTickPolicy)
-#define DEFINE_PROP_BLOCKDEV_ON_ERROR(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_blockdev_on_error, \
- BlockdevOnError)
-#define DEFINE_PROP_BIOS_CHS_TRANS(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_bios_chs_trans, int)
-#define DEFINE_PROP_BLOCKSIZE(_n, _s, _f) \
- DEFINE_PROP_UNSIGNED(_n, _s, _f, 0, qdev_prop_blocksize, uint16_t)
-#define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_pci_host_devaddr, PCIHostDeviceAddress)
-#define DEFINE_PROP_MEMORY_REGION(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_ptr, MemoryRegion *)
-#define DEFINE_PROP_OFF_AUTO_PCIBAR(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_off_auto_pcibar, \
- OffAutoPCIBAR)
-#define DEFINE_PROP_PCIE_LINK_SPEED(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_speed, \
- PCIExpLinkSpeed)
-#define DEFINE_PROP_PCIE_LINK_WIDTH(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_width, \
- PCIExpLinkWidth)
-
-#define DEFINE_PROP_UUID(_name, _state, _field) { \
- .name = (_name), \
- .info = &qdev_prop_uuid, \
- .offset = offsetof(_state, _field) \
- + type_check(QemuUUID, typeof_field(_state, _field)), \
- .set_default = true, \
- }
+#define DEFINE_PROP_SIZE32(_n, _s, _f, _d) \
+ DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size32, uint32_t)
#define DEFINE_PROP_END_OF_LIST() \
{}
-/* Set properties between creation and init. */
-void *qdev_get_prop_ptr(DeviceState *dev, Property *prop);
+/*
+ * Set properties between creation and realization.
+ *
+ * Returns: %true on success, %false on error.
+ */
+bool qdev_prop_set_drive_err(DeviceState *dev, const char *name,
+ BlockBackend *value, Error **errp);
+
+/*
+ * Set properties between creation and realization.
+ * @value must be valid. Each property may be set at most once.
+ */
void qdev_prop_set_bit(DeviceState *dev, const char *name, bool value);
void qdev_prop_set_uint8(DeviceState *dev, const char *name, uint8_t value);
void qdev_prop_set_uint16(DeviceState *dev, const char *name, uint16_t value);
@@ -249,31 +196,48 @@ void qdev_prop_set_string(DeviceState *dev, const char *name, const char *value)
void qdev_prop_set_chr(DeviceState *dev, const char *name, Chardev *value);
void qdev_prop_set_netdev(DeviceState *dev, const char *name, NetClientState *value);
void qdev_prop_set_drive(DeviceState *dev, const char *name,
- BlockBackend *value, Error **errp);
+ BlockBackend *value);
void qdev_prop_set_macaddr(DeviceState *dev, const char *name,
const uint8_t *value);
void qdev_prop_set_enum(DeviceState *dev, const char *name, int value);
-/* FIXME: Remove opaque pointer properties. */
-void qdev_prop_set_ptr(DeviceState *dev, const char *name, void *value);
+
+/* Takes ownership of @values */
+void qdev_prop_set_array(DeviceState *dev, const char *name, QList *values);
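
A hedged sketch of the creation-time flow these setters are meant for; "demo-device", the property names, blk and bus are placeholders. The scalar setters abort on failure, qdev_prop_set_drive_err() reports errors through @errp, and qdev_prop_set_array() consumes the QList:

    DeviceState *dev = qdev_new("demo-device");
    QList *ports = qlist_new();

    qdev_prop_set_uint32(dev, "queue-size", 64);
    qlist_append_int(ports, 8080);
    qlist_append_int(ports, 8081);
    qdev_prop_set_array(dev, "ports", ports);        /* takes ownership */

    if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
        object_unref(OBJECT(dev));
        return;
    }
    qdev_realize_and_unref(dev, bus, errp);          /* bus: a BusState * */
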
+
+void *object_field_prop_ptr(Object *obj, Property *prop);
void qdev_prop_register_global(GlobalProperty *prop);
+const GlobalProperty *qdev_find_global_prop(Object *obj,
+ const char *name);
int qdev_prop_check_globals(void);
void qdev_prop_set_globals(DeviceState *dev);
-void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev,
- Property *prop, const char *value);
+void error_set_from_qdev_prop_error(Error **errp, int ret, Object *obj,
+ const char *name, const char *value);
/**
* qdev_property_add_static:
* @dev: Device to add the property to.
* @prop: The qdev property definition.
- * @errp: location to store error information.
*
* Add a static QOM property to @dev for qdev property @prop.
 * Static properties access data in a struct.
* The type of the QOM property is derived from prop->info.
*/
-void qdev_property_add_static(DeviceState *dev, Property *prop, Error **errp);
+void qdev_property_add_static(DeviceState *dev, Property *prop);
+/**
+ * qdev_alias_all_properties: Create aliases on source for all target properties
+ * @target: Device which has properties to be aliased
+ * @source: Object to add alias properties to
+ *
+ * Add alias properties to the @source object for all properties on the @target
+ * DeviceState.
+ *
+ * This is useful when @target is an internal implementation object
+ * owned by @source, and you want to expose all the properties of that
+ * implementation object as properties on the @source object so that users
+ * of @source can set them.
+ */
void qdev_alias_all_properties(DeviceState *target, Object *source);
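
For instance (TYPE_DEMO_UART and the uart field are invented), an SoC object embedding a UART can forward the UART's properties to itself:

    object_initialize_child(obj, "uart", &s->uart, TYPE_DEMO_UART);
    /* users of the SoC object can now set e.g. "chardev" directly on it */
    qdev_alias_all_properties(DEVICE(&s->uart), obj);
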
/**
diff --git a/include/hw/qdev.h b/include/hw/qdev.h
deleted file mode 100644
index 5cb8b080a6..0000000000
--- a/include/hw/qdev.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef QDEV_H
-#define QDEV_H
-
-#include "hw/hw.h"
-#include "hw/qdev-core.h"
-#include "hw/qdev-properties.h"
-
-#endif
diff --git a/include/hw/register.h b/include/hw/register.h
index 5796584588..6a076cfcdf 100644
--- a/include/hw/register.h
+++ b/include/hw/register.h
@@ -14,6 +14,7 @@
#include "hw/qdev-core.h"
#include "exec/memory.h"
#include "hw/registerfields.h"
+#include "qom/object.h"
typedef struct RegisterInfo RegisterInfo;
typedef struct RegisterAccessInfo RegisterAccessInfo;
@@ -86,8 +87,9 @@ struct RegisterInfo {
void *opaque;
};
-#define TYPE_REGISTER "qemu,register"
-#define REGISTER(obj) OBJECT_CHECK(RegisterInfo, (obj), TYPE_REGISTER)
+#define TYPE_REGISTER "qemu-register"
+DECLARE_INSTANCE_CHECKER(RegisterInfo, REGISTER,
+ TYPE_REGISTER)
/**
* This structure is used to group all of the individual registers which are
@@ -181,10 +183,19 @@ uint64_t register_read_memory(void *opaque, hwaddr addr, unsigned size);
* @data: Array to use for register data, must already be allocated
* @ops: Memory region ops to access registers.
 * @debug_enabled: turn on/off verbose debug information
+ * @memory_size: Size of the memory region
* returns: A structure containing all of the registers and an initialized
* memory region (r_array->mem) the caller should add to a container.
*/
+RegisterInfoArray *register_init_block8(DeviceState *owner,
+ const RegisterAccessInfo *rae,
+ int num, RegisterInfo *ri,
+ uint8_t *data,
+ const MemoryRegionOps *ops,
+ bool debug_enabled,
+ uint64_t memory_size);
+
RegisterInfoArray *register_init_block32(DeviceState *owner,
const RegisterAccessInfo *rae,
int num, RegisterInfo *ri,
@@ -193,6 +204,14 @@ RegisterInfoArray *register_init_block32(DeviceState *owner,
bool debug_enabled,
uint64_t memory_size);
+RegisterInfoArray *register_init_block64(DeviceState *owner,
+ const RegisterAccessInfo *rae,
+ int num, RegisterInfo *ri,
+ uint64_t *data,
+ const MemoryRegionOps *ops,
+ bool debug_enabled,
+ uint64_t memory_size);
+
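
A hedged sketch of wiring up a 32-bit register block in a device's instance_init; the demo_* names, the MemoryRegionOps and the sizes are placeholders:

    s->reg_array = register_init_block32(DEVICE(obj), demo_regs_info,
                                         ARRAY_SIZE(demo_regs_info),
                                         s->regs_info, s->regs,
                                         &demo_reg_ops,
                                         DEMO_ERR_DEBUG,
                                         DEMO_REG_REGION_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->reg_array->mem);
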
/**
* This function should be called to cleanup the registers that were initialized
* when calling register_init_block32(). This function should only be called
diff --git a/include/hw/registerfields.h b/include/hw/registerfields.h
index 2659a58737..1330ca77de 100644
--- a/include/hw/registerfields.h
+++ b/include/hw/registerfields.h
@@ -22,6 +22,18 @@
enum { A_ ## reg = (addr) }; \
enum { R_ ## reg = (addr) / 4 };
+#define REG8(reg, addr) \
+ enum { A_ ## reg = (addr) }; \
+ enum { R_ ## reg = (addr) };
+
+#define REG16(reg, addr) \
+ enum { A_ ## reg = (addr) }; \
+ enum { R_ ## reg = (addr) / 2 };
+
+#define REG64(reg, addr) \
+ enum { A_ ## reg = (addr) }; \
+ enum { R_ ## reg = (addr) / 8 };
+
/* Define SHIFT, LENGTH and MASK constants for a field within a register */
/* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH
@@ -34,6 +46,12 @@
MAKE_64BIT_MASK(shift, length)};
/* Extract a field from a register */
+#define FIELD_EX8(storage, reg, field) \
+ extract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_EX16(storage, reg, field) \
+ extract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
#define FIELD_EX32(storage, reg, field) \
extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
@@ -41,33 +59,169 @@
extract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX8(storage, reg, field) \
+ sextract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX16(storage, reg, field) \
+ sextract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX32(storage, reg, field) \
+ sextract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX64(storage, reg, field) \
+ sextract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+
/* Extract a field from an array of registers */
#define ARRAY_FIELD_EX32(regs, reg, field) \
FIELD_EX32((regs)[R_ ## reg], reg, field)
+#define ARRAY_FIELD_EX64(regs, reg, field) \
+ FIELD_EX64((regs)[R_ ## reg], reg, field)
/* Deposit a register field.
 * Assigning values larger than the target field will result in
* compilation warnings.
*/
+#define FIELD_DP8(storage, reg, field, val) ({ \
+ struct { \
+ unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint8_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+#define FIELD_DP16(storage, reg, field, val) ({ \
+ struct { \
+ unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint16_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
#define FIELD_DP32(storage, reg, field, val) ({ \
struct { \
unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
- } v = { .v = val }; \
- uint32_t d; \
- d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
- R_ ## reg ## _ ## field ## _LENGTH, v.v); \
- d; })
+ } _v = { .v = val }; \
+ uint32_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
#define FIELD_DP64(storage, reg, field, val) ({ \
struct { \
- unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
- } v = { .v = val }; \
- uint64_t d; \
- d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
- R_ ## reg ## _ ## field ## _LENGTH, v.v); \
- d; })
+ uint64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint64_t _d; \
+ _d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+
+#define FIELD_SDP8(storage, reg, field, val) ({ \
+ struct { \
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint8_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+#define FIELD_SDP16(storage, reg, field, val) ({ \
+ struct { \
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint16_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+#define FIELD_SDP32(storage, reg, field, val) ({ \
+ struct { \
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint32_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+#define FIELD_SDP64(storage, reg, field, val) ({ \
+ struct { \
+ int64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint64_t _d; \
+ _d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
/* Deposit a field to array of registers. */
#define ARRAY_FIELD_DP32(regs, reg, field, val) \
(regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val);
+#define ARRAY_FIELD_DP64(regs, reg, field, val) \
+ (regs)[R_ ## reg] = FIELD_DP64((regs)[R_ ## reg], reg, field, val);
+
+
+/*
+ * These macros can be used for defining and extracting fields that have the
+ * same bit position across multiple registers.
+ */
+
+/* Define shared SHIFT, LENGTH, and MASK constants */
+#define SHARED_FIELD(name, shift, length) \
+ enum { name ## _ ## SHIFT = (shift)}; \
+ enum { name ## _ ## LENGTH = (length)}; \
+ enum { name ## _ ## MASK = MAKE_64BIT_MASK(shift, length)};
+
+/* Extract a shared field */
+#define SHARED_FIELD_EX8(storage, field) \
+ extract8((storage), field ## _SHIFT, field ## _LENGTH)
+
+#define SHARED_FIELD_EX16(storage, field) \
+ extract16((storage), field ## _SHIFT, field ## _LENGTH)
+
+#define SHARED_FIELD_EX32(storage, field) \
+ extract32((storage), field ## _SHIFT, field ## _LENGTH)
+
+#define SHARED_FIELD_EX64(storage, field) \
+ extract64((storage), field ## _SHIFT, field ## _LENGTH)
+
+/* Extract a shared field from a register array */
+#define SHARED_ARRAY_FIELD_EX32(regs, offset, field) \
+ SHARED_FIELD_EX32((regs)[(offset)], field)
+#define SHARED_ARRAY_FIELD_EX64(regs, offset, field) \
+ SHARED_FIELD_EX64((regs)[(offset)], field)
+
+/* Deposit a shared field */
+#define SHARED_FIELD_DP8(storage, field, val) ({ \
+ struct { \
+ unsigned int v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint8_t _d; \
+ _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+#define SHARED_FIELD_DP16(storage, field, val) ({ \
+ struct { \
+ unsigned int v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint16_t _d; \
+ _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+#define SHARED_FIELD_DP32(storage, field, val) ({ \
+ struct { \
+ unsigned int v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint32_t _d; \
+ _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+#define SHARED_FIELD_DP64(storage, field, val) ({ \
+ struct { \
+ uint64_t v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint64_t _d; \
+ _d = deposit64((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+/* Deposit a shared field to a register array */
+#define SHARED_ARRAY_FIELD_DP32(regs, offset, field, val) \
+ (regs)[(offset)] = SHARED_FIELD_DP32((regs)[(offset)], field, val);
+#define SHARED_ARRAY_FIELD_DP64(regs, offset, field, val) \
+ (regs)[(offset)] = SHARED_FIELD_DP64((regs)[(offset)], field, val);
#endif
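
A small sketch showing the per-register and shared-field flavours side by side; the register layout, s->regs and ch_offset are invented:

    REG32(CTRL, 0x00)
    FIELD(CTRL, EN, 0, 1)
    FIELD(CTRL, MODE, 1, 3)
    SHARED_FIELD(CH_ENABLE, 0, 1)     /* bit 0 in every channel register */

    /* in an MMIO handler: */
    uint32_t ctrl = 0;
    ctrl = FIELD_DP32(ctrl, CTRL, MODE, 5);          /* deposit MODE = 5 */
    unsigned mode = FIELD_EX32(ctrl, CTRL, MODE);    /* extract -> 5 */
    ARRAY_FIELD_DP32(s->regs, CTRL, EN, 1);          /* via the regs[] array */
    SHARED_ARRAY_FIELD_DP32(s->regs, ch_offset, CH_ENABLE, 1);
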
diff --git a/include/hw/remote/iohub.h b/include/hw/remote/iohub.h
new file mode 100644
index 0000000000..6a8444f9a9
--- /dev/null
+++ b/include/hw/remote/iohub.h
@@ -0,0 +1,42 @@
+/*
+ * IO Hub for remote device
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_IOHUB_H
+#define REMOTE_IOHUB_H
+
+#include "hw/pci/pci_device.h"
+#include "qemu/event_notifier.h"
+#include "qemu/thread-posix.h"
+#include "hw/remote/mpqemu-link.h"
+
+#define REMOTE_IOHUB_NB_PIRQS PCI_DEVFN_MAX
+
+typedef struct ResampleToken {
+ void *iohub;
+ int pirq;
+} ResampleToken;
+
+typedef struct RemoteIOHubState {
+ PCIDevice d;
+ EventNotifier irqfds[REMOTE_IOHUB_NB_PIRQS];
+ EventNotifier resamplefds[REMOTE_IOHUB_NB_PIRQS];
+ unsigned int irq_level[REMOTE_IOHUB_NB_PIRQS];
+ ResampleToken token[REMOTE_IOHUB_NB_PIRQS];
+ QemuMutex irq_level_lock[REMOTE_IOHUB_NB_PIRQS];
+} RemoteIOHubState;
+
+int remote_iohub_map_irq(PCIDevice *pci_dev, int intx);
+void remote_iohub_set_irq(void *opaque, int pirq, int level);
+void process_set_irqfd_msg(PCIDevice *pci_dev, MPQemuMsg *msg);
+
+void remote_iohub_init(RemoteIOHubState *iohub);
+void remote_iohub_finalize(RemoteIOHubState *iohub);
+
+#endif
diff --git a/include/hw/remote/iommu.h b/include/hw/remote/iommu.h
new file mode 100644
index 0000000000..33b68a8f4b
--- /dev/null
+++ b/include/hw/remote/iommu.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright © 2022 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_IOMMU_H
+#define REMOTE_IOMMU_H
+
+#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci.h"
+
+#ifndef INT2VOIDP
+#define INT2VOIDP(i) (void *)(uintptr_t)(i)
+#endif
+
+typedef struct RemoteIommuElem {
+ MemoryRegion *mr;
+
+ AddressSpace as;
+} RemoteIommuElem;
+
+#define TYPE_REMOTE_IOMMU "x-remote-iommu"
+OBJECT_DECLARE_SIMPLE_TYPE(RemoteIommu, REMOTE_IOMMU)
+
+struct RemoteIommu {
+ Object parent;
+
+ GHashTable *elem_by_devfn;
+
+ QemuMutex lock;
+};
+
+void remote_iommu_setup(PCIBus *pci_bus);
+
+void remote_iommu_unplug_dev(PCIDevice *pci_dev);
+
+#endif
diff --git a/include/hw/remote/machine.h b/include/hw/remote/machine.h
new file mode 100644
index 0000000000..ac32fda387
--- /dev/null
+++ b/include/hw/remote/machine.h
@@ -0,0 +1,42 @@
+/*
+ * Remote machine configuration
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_MACHINE_H
+#define REMOTE_MACHINE_H
+
+#include "qom/object.h"
+#include "hw/boards.h"
+#include "hw/pci-host/remote.h"
+#include "io/channel.h"
+#include "hw/remote/iohub.h"
+
+struct RemoteMachineState {
+ MachineState parent_obj;
+
+ RemotePCIHost *host;
+ RemoteIOHubState iohub;
+
+ bool vfio_user;
+
+ bool auto_shutdown;
+};
+
+/* Used to pass the device and ioc to the co-routine. */
+typedef struct RemoteCommDev {
+ PCIDevice *dev;
+ QIOChannel *ioc;
+} RemoteCommDev;
+
+#define TYPE_REMOTE_MACHINE "x-remote-machine"
+OBJECT_DECLARE_SIMPLE_TYPE(RemoteMachineState, REMOTE_MACHINE)
+
+void coroutine_fn mpqemu_remote_msg_loop_co(void *data);
+
+#endif
diff --git a/include/hw/remote/memory.h b/include/hw/remote/memory.h
new file mode 100644
index 0000000000..bc2e30945f
--- /dev/null
+++ b/include/hw/remote/memory.h
@@ -0,0 +1,19 @@
+/*
+ * Memory manager for remote device
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_MEMORY_H
+#define REMOTE_MEMORY_H
+
+#include "exec/hwaddr.h"
+#include "hw/remote/mpqemu-link.h"
+
+void remote_sysmem_reconfig(MPQemuMsg *msg, Error **errp);
+
+#endif
diff --git a/include/hw/remote/mpqemu-link.h b/include/hw/remote/mpqemu-link.h
new file mode 100644
index 0000000000..4ec0915885
--- /dev/null
+++ b/include/hw/remote/mpqemu-link.h
@@ -0,0 +1,99 @@
+/*
+ * Communication channel between QEMU and remote device process
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MPQEMU_LINK_H
+#define MPQEMU_LINK_H
+
+#include "qom/object.h"
+#include "qemu/thread.h"
+#include "io/channel.h"
+#include "exec/hwaddr.h"
+#include "io/channel-socket.h"
+#include "hw/remote/proxy.h"
+
+#define REMOTE_MAX_FDS 8
+
+#define MPQEMU_MSG_HDR_SIZE offsetof(MPQemuMsg, data.u64)
+
+/**
+ * MPQemuCmd:
+ *
+ * MPQemuCmd is the enum type used to specify the command to be executed on
+ * the remote device.
+ *
+ * This uses a private protocol between QEMU and the remote process. The
+ * vfio-user protocol is expected to supersede it in the future.
+ *
+ */
+typedef enum {
+ MPQEMU_CMD_SYNC_SYSMEM,
+ MPQEMU_CMD_RET,
+ MPQEMU_CMD_PCI_CFGWRITE,
+ MPQEMU_CMD_PCI_CFGREAD,
+ MPQEMU_CMD_BAR_WRITE,
+ MPQEMU_CMD_BAR_READ,
+ MPQEMU_CMD_SET_IRQFD,
+ MPQEMU_CMD_DEVICE_RESET,
+ MPQEMU_CMD_MAX,
+} MPQemuCmd;
+
+typedef struct {
+ hwaddr gpas[REMOTE_MAX_FDS];
+ uint64_t sizes[REMOTE_MAX_FDS];
+ off_t offsets[REMOTE_MAX_FDS];
+} SyncSysmemMsg;
+
+typedef struct {
+ uint32_t addr;
+ uint32_t val;
+ int len;
+} PciConfDataMsg;
+
+typedef struct {
+ hwaddr addr;
+ uint64_t val;
+ unsigned size;
+ bool memory;
+} BarAccessMsg;
+
+/**
+ * MPQemuMsg:
+ * @cmd: The remote command
+ * @size: Size of the data to be shared
+ * @data: Structured data
+ * @fds: File descriptors to be shared with remote device
+ *
+ * MPQemuMsg is the format of the message sent to the remote device from QEMU.
+ *
+ */
+
+typedef struct {
+ int cmd;
+ size_t size;
+
+ union {
+ uint64_t u64;
+ PciConfDataMsg pci_conf_data;
+ SyncSysmemMsg sync_sysmem;
+ BarAccessMsg bar_access;
+ } data;
+
+ int fds[REMOTE_MAX_FDS];
+ int num_fds;
+} MPQemuMsg;
+
+bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
+bool mpqemu_msg_recv(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
+
+uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev,
+ Error **errp);
+bool mpqemu_msg_valid(MPQemuMsg *msg);
+
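
A sketch of building and sending a BAR-write message over this channel; the address, value and pdev are placeholders and error handling is abbreviated:

    MPQemuMsg msg = {
        .cmd = MPQEMU_CMD_BAR_WRITE,
        .size = sizeof(msg.data.bar_access),
        .data.bar_access = {
            .addr   = 0x1000,
            .val    = 0xff,
            .size   = 4,
            .memory = true,
        },
    };
    Error *local_err = NULL;

    uint64_t ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
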
+#endif
diff --git a/include/hw/remote/proxy-memory-listener.h b/include/hw/remote/proxy-memory-listener.h
new file mode 100644
index 0000000000..c4f3efb928
--- /dev/null
+++ b/include/hw/remote/proxy-memory-listener.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PROXY_MEMORY_LISTENER_H
+#define PROXY_MEMORY_LISTENER_H
+
+#include "exec/memory.h"
+#include "io/channel.h"
+
+typedef struct ProxyMemoryListener {
+ MemoryListener listener;
+
+ int n_mr_sections;
+ MemoryRegionSection *mr_sections;
+
+ QIOChannel *ioc;
+} ProxyMemoryListener;
+
+void proxy_memory_listener_configure(ProxyMemoryListener *proxy_listener,
+ QIOChannel *ioc);
+void proxy_memory_listener_deconfigure(ProxyMemoryListener *proxy_listener);
+
+#endif
diff --git a/include/hw/remote/proxy.h b/include/hw/remote/proxy.h
new file mode 100644
index 0000000000..0cfd9665be
--- /dev/null
+++ b/include/hw/remote/proxy.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PROXY_H
+#define PROXY_H
+
+#include "hw/pci/pci_device.h"
+#include "io/channel.h"
+#include "hw/remote/proxy-memory-listener.h"
+#include "qemu/event_notifier.h"
+
+#define TYPE_PCI_PROXY_DEV "x-pci-proxy-dev"
+OBJECT_DECLARE_SIMPLE_TYPE(PCIProxyDev, PCI_PROXY_DEV)
+
+typedef struct ProxyMemoryRegion {
+ PCIProxyDev *dev;
+ MemoryRegion mr;
+ bool memory;
+ bool present;
+ uint8_t type;
+} ProxyMemoryRegion;
+
+struct PCIProxyDev {
+ PCIDevice parent_dev;
+ char *fd;
+
+ /*
+     * Mutex used to protect the QIOChannel fd from
+     * concurrent access by the VCPUs, since the proxy
+     * blocks while awaiting replies from the remote
+     * process.
+ */
+ QemuMutex io_mutex;
+ QIOChannel *ioc;
+ Error *migration_blocker;
+ ProxyMemoryListener proxy_listener;
+ int virq;
+ EventNotifier intr;
+ EventNotifier resample;
+ ProxyMemoryRegion region[PCI_NUM_REGIONS];
+};
+
+#endif /* PROXY_H */
diff --git a/include/hw/remote/vfio-user-obj.h b/include/hw/remote/vfio-user-obj.h
new file mode 100644
index 0000000000..87ab78b875
--- /dev/null
+++ b/include/hw/remote/vfio-user-obj.h
@@ -0,0 +1,6 @@
+#ifndef VFIO_USER_OBJ_H
+#define VFIO_USER_OBJ_H
+
+void vfu_object_set_bus_irq(PCIBus *pci_bus);
+
+#endif
diff --git a/include/hw/resettable.h b/include/hw/resettable.h
new file mode 100644
index 0000000000..7e249deb8b
--- /dev/null
+++ b/include/hw/resettable.h
@@ -0,0 +1,247 @@
+/*
+ * Resettable interface header.
+ *
+ * Copyright (c) 2019 GreenSocs SAS
+ *
+ * Authors:
+ * Damien Hedde
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_RESETTABLE_H
+#define HW_RESETTABLE_H
+
+#include "qom/object.h"
+
+#define TYPE_RESETTABLE_INTERFACE "resettable"
+
+typedef struct ResettableClass ResettableClass;
+DECLARE_CLASS_CHECKERS(ResettableClass, RESETTABLE,
+ TYPE_RESETTABLE_INTERFACE)
+
+
+typedef struct ResettableState ResettableState;
+
+/**
+ * ResetType:
+ * Types of reset.
+ *
+ * + Cold: reset resulting from a power cycle of the object.
+ *
+ * TODO: Support has to be added to handle more types. In particular,
+ * ResettableState structure needs to be expanded.
+ */
+typedef enum ResetType {
+ RESET_TYPE_COLD,
+ RESET_TYPE_SNAPSHOT_LOAD,
+} ResetType;
+
+/*
+ * ResettableClass:
+ * Interface for resettable objects.
+ *
+ * See docs/devel/reset.rst for more detailed information about how QEMU models
+ * reset. This whole API must only be used when holding the iothread mutex.
+ *
+ * All objects which can be reset must implement this interface;
+ * it is usually provided by a base class such as DeviceClass or BusClass.
+ * Every Resettable object must maintain some state tracking the
+ * progress of a reset operation by providing a ResettableState structure.
+ * The functions defined in this module take care of updating the
+ * state of the reset.
+ * The base class implementation of the interface provides this
+ * state and implements the associated method: get_state.
+ *
+ * Concrete object implementations (typically specific devices
+ * such as a UART model) should provide the functions
+ * for the phases.enter, phases.hold and phases.exit methods, which
+ * they can set in their class init function, either directly or
+ * by calling resettable_class_set_parent_phases().
+ * The phase methods are guaranteed to only ever be called once
+ * for any reset event, in the order 'enter', 'hold', 'exit'.
+ * An object will always move quickly from 'enter' to 'hold'
+ * but might remain in 'hold' for an arbitrary period of time
+ * before eventually reset is deasserted and the 'exit' phase is called.
+ * Object implementations should be prepared for functions handling
+ * inbound connections from other devices (such as qemu_irq handler
+ * functions) to be called at any point during reset after their
+ * 'enter' method has been called.
+ *
+ * Users of a resettable object should not call these methods
+ * directly, but instead use the function resettable_reset().
+ *
+ * @phases.enter: This phase is called when the object enters reset. It
+ * should reset local state of the object, but it must not do anything that
+ * has a side-effect on other objects, such as raising or lowering a qemu_irq
+ * line or reading or writing guest memory. It takes the reset's type as
+ * argument.
+ *
+ * @phases.hold: This phase is called for entry into reset, once every object
+ * in the system which is being reset has had its @phases.enter method called.
+ * At this point devices can do actions that affect other objects.
+ *
+ * @phases.exit: This phase is called when the object leaves the reset state.
+ * Actions affecting other objects are permitted.
+ *
+ * @get_state: Mandatory method which must return a pointer to a
+ * ResettableState.
+ *
+ * @get_transitional_function: transitional method to handle Resettable objects
+ * not yet fully moved to this interface. It will be removed as soon as it is
+ * not needed anymore. This method is optional and may return a pointer to a
+ * function to be used instead of the phases. If the method exists and returns
+ * a non-NULL function pointer then that function is executed as a replacement
+ * of the 'hold' phase method taking the object as argument. The two other phase
+ * methods are not executed.
+ *
+ * @child_foreach: Executes a given callback on every Resettable child. Child
+ * in this context means a child in the qbus tree, so the children of a qbus
+ * are the devices on it, and the children of a device are all the buses it
+ * owns. This is not the same as the QOM object hierarchy. The function takes
+ * additional opaque and ResetType arguments which must be passed unmodified to
+ * the callback.
+ */
+typedef void (*ResettableEnterPhase)(Object *obj, ResetType type);
+typedef void (*ResettableHoldPhase)(Object *obj, ResetType type);
+typedef void (*ResettableExitPhase)(Object *obj, ResetType type);
+typedef ResettableState * (*ResettableGetState)(Object *obj);
+typedef void (*ResettableTrFunction)(Object *obj);
+typedef ResettableTrFunction (*ResettableGetTrFunction)(Object *obj);
+typedef void (*ResettableChildCallback)(Object *, void *opaque,
+ ResetType type);
+typedef void (*ResettableChildForeach)(Object *obj,
+ ResettableChildCallback cb,
+ void *opaque, ResetType type);
+typedef struct ResettablePhases {
+ ResettableEnterPhase enter;
+ ResettableHoldPhase hold;
+ ResettableExitPhase exit;
+} ResettablePhases;
+struct ResettableClass {
+ InterfaceClass parent_class;
+
+ /* Phase methods */
+ ResettablePhases phases;
+
+ /* State access method */
+ ResettableGetState get_state;
+
+ /* Transitional method for legacy reset compatibility */
+ ResettableGetTrFunction get_transitional_function;
+
+ /* Hierarchy handling method */
+ ResettableChildForeach child_foreach;
+};
+
+/**
+ * ResettableState:
+ * Structure holding reset related state. The fields should not be accessed
+ * directly; the definition is here to allow further inclusion into other
+ * objects.
+ *
+ * @count: Number of reset levels the object is currently in. It is incremented when
+ * the reset operation starts and decremented when it finishes.
+ * @hold_phase_pending: flag which indicates that we need to invoke the 'hold'
+ * phase handler for this object.
+ * @exit_phase_in_progress: true if we are currently in the exit phase
+ */
+struct ResettableState {
+ unsigned count;
+ bool hold_phase_pending;
+ bool exit_phase_in_progress;
+};
+
+/**
+ * resettable_state_clear:
+ * Clear the state. It puts the state to the initial (zeroed) state required
+ * to reuse an object. Typically used in realize step of base classes
+ * implementing the interface.
+ */
+static inline void resettable_state_clear(ResettableState *state)
+{
+ memset(state, 0, sizeof(ResettableState));
+}
+
+/**
+ * resettable_reset:
+ * Trigger a reset on an object @obj of type @type. @obj must implement
+ * Resettable interface.
+ *
+ * Calling this function is equivalent to calling @resettable_assert_reset()
+ * then @resettable_release_reset().
+ */
+void resettable_reset(Object *obj, ResetType type);
+
+/**
+ * resettable_assert_reset:
+ * Put an object @obj into reset. @obj must implement Resettable interface.
+ *
+ * @resettable_release_reset() must eventually be called after this call.
+ * There must be one call to @resettable_release_reset() per call of
+ * @resettable_assert_reset(), with the same type argument.
+ *
+ * NOTE: Until support for migration is added, the @resettable_release_reset()
+ * must not be delayed. It must occur just after @resettable_assert_reset() so
+ * that migration cannot be triggered in between. Prefer using
+ * @resettable_reset() for now.
+ */
+void resettable_assert_reset(Object *obj, ResetType type);
+
+/**
+ * resettable_release_reset:
+ * Release the object @obj from reset. @obj must implement Resettable interface.
+ *
+ * See @resettable_assert_reset() description for details.
+ */
+void resettable_release_reset(Object *obj, ResetType type);
+
+/**
+ * resettable_is_in_reset:
+ * Return true if @obj is under reset.
+ *
+ * @obj must implement Resettable interface.
+ */
+bool resettable_is_in_reset(Object *obj);
+
+/**
+ * resettable_change_parent:
+ * Indicate that the parent of Resettable @obj is changing from @oldp to @newp.
+ * All 3 objects must implement resettable interface. @oldp or @newp may be
+ * NULL.
+ *
+ * This function will adapt the reset state of @obj so that it is coherent
+ * with the reset state of @newp. It may trigger @resettable_assert_reset()
+ * or @resettable_release_reset(). It will do such things only if the reset
+ * state of @newp and @oldp are different.
+ *
+ * When using this function during reset, it must only be called during
+ * a hold phase method. Calling this during enter or exit phase is an error.
+ */
+void resettable_change_parent(Object *obj, Object *newp, Object *oldp);
+
+/**
+ * resettable_cold_reset_fn:
+ * Helper to call resettable_reset((Object *) opaque, RESET_TYPE_COLD).
+ *
+ * This function is typically useful to register a reset handler with
+ * qemu_register_reset.
+ */
+void resettable_cold_reset_fn(void *opaque);
+
+/**
+ * resettable_class_set_parent_phases:
+ *
+ * Save @rc current reset phases into @parent_phases and override @rc phases
+ * by the given new methods (@enter, @hold and @exit).
+ * Each phase is overridden only if the new one is not NULL, which allows
+ * overriding only a subset of the phases.
+ */
+void resettable_class_set_parent_phases(ResettableClass *rc,
+ ResettableEnterPhase enter,
+ ResettableHoldPhase hold,
+ ResettableExitPhase exit,
+ ResettablePhases *parent_phases);
+
+#endif
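
A hedged sketch of a device class wiring up its reset phases; DemoState, DemoClass, DEMO()/DEMO_CLASS() and the parent_phases field are assumed to exist in the example device:

    static void demo_reset_enter(Object *obj, ResetType type)
    {
        DemoState *s = DEMO(obj);

        s->ctrl = 0;                /* local state only, no side effects */
    }

    static void demo_reset_hold(Object *obj, ResetType type)
    {
        DemoState *s = DEMO(obj);

        qemu_set_irq(s->irq, 0);    /* side effects are allowed from 'hold' on */
    }

    static void demo_class_init(ObjectClass *klass, void *data)
    {
        ResettableClass *rc = RESETTABLE_CLASS(klass);
        DemoClass *dc = DEMO_CLASS(klass);

        resettable_class_set_parent_phases(rc, demo_reset_enter,
                                           demo_reset_hold, NULL,
                                           &dc->parent_phases);
    }
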
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
new file mode 100644
index 0000000000..a2e4ae9cb0
--- /dev/null
+++ b/include/hw/riscv/boot.h
@@ -0,0 +1,66 @@
+/*
+ * QEMU RISC-V Boot Helper
+ *
+ * Copyright (c) 2017 SiFive, Inc.
+ * Copyright (c) 2019 Alistair Francis <alistair.francis@wdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_BOOT_H
+#define RISCV_BOOT_H
+
+#include "exec/cpu-defs.h"
+#include "hw/loader.h"
+#include "hw/riscv/riscv_hart.h"
+
+#define RISCV32_BIOS_BIN "opensbi-riscv32-generic-fw_dynamic.bin"
+#define RISCV64_BIOS_BIN "opensbi-riscv64-generic-fw_dynamic.bin"
+
+bool riscv_is_32bit(RISCVHartArrayState *harts);
+
+char *riscv_plic_hart_config_string(int hart_count);
+
+target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts,
+ target_ulong firmware_end_addr);
+target_ulong riscv_find_and_load_firmware(MachineState *machine,
+ const char *default_machine_firmware,
+ hwaddr firmware_load_addr,
+ symbol_fn_t sym_cb);
+const char *riscv_default_firmware_name(RISCVHartArrayState *harts);
+char *riscv_find_firmware(const char *firmware_filename,
+ const char *default_machine_firmware);
+target_ulong riscv_load_firmware(const char *firmware_filename,
+ hwaddr firmware_load_addr,
+ symbol_fn_t sym_cb);
+target_ulong riscv_load_kernel(MachineState *machine,
+ RISCVHartArrayState *harts,
+ target_ulong firmware_end_addr,
+ bool load_initrd,
+ symbol_fn_t sym_cb);
+uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size,
+ MachineState *ms);
+void riscv_load_fdt(hwaddr fdt_addr, void *fdt);
+void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts,
+ hwaddr saddr,
+ hwaddr rom_base, hwaddr rom_size,
+ uint64_t kernel_entry,
+ uint64_t fdt_load_addr);
+void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base,
+ hwaddr rom_size,
+ uint32_t reset_vec_size,
+ uint64_t kernel_entry);
+void riscv_setup_direct_kernel(hwaddr kernel_addr, hwaddr fdt_addr);
+void riscv_setup_firmware_boot(MachineState *machine);
+
+#endif /* RISCV_BOOT_H */
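
A hedged sketch of how a RISC-V board init might chain these helpers; memmap[DEMO_DRAM] and s->harts are board-specific placeholders:

    target_ulong fw_end, kernel_entry;
    uint64_t fdt_addr;

    fw_end = riscv_find_and_load_firmware(machine,
                                          riscv_default_firmware_name(&s->harts),
                                          memmap[DEMO_DRAM].base, NULL);
    kernel_entry = riscv_load_kernel(machine, &s->harts, fw_end,
                                     true /* load_initrd */, NULL);
    fdt_addr = riscv_compute_fdt_addr(memmap[DEMO_DRAM].base,
                                      memmap[DEMO_DRAM].size, machine);
    riscv_load_fdt(fdt_addr, machine->fdt);
    /* kernel_entry and fdt_addr would then feed riscv_setup_rom_reset_vec() */
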
diff --git a/include/hw/riscv/boot_opensbi.h b/include/hw/riscv/boot_opensbi.h
new file mode 100644
index 0000000000..1b749663dc
--- /dev/null
+++ b/include/hw/riscv/boot_opensbi.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Based on include/sbi/{fw_dynamic.h,sbi_scratch.h} from the OpenSBI project.
+ */
+
+#ifndef RISCV_BOOT_OPENSBI_H
+#define RISCV_BOOT_OPENSBI_H
+
+#include "exec/cpu-defs.h"
+
+/** Expected value of info magic ('OSBI' ascii string in hex) */
+#define FW_DYNAMIC_INFO_MAGIC_VALUE 0x4942534f
+
+/** Maximum supported info version */
+#define FW_DYNAMIC_INFO_VERSION 0x2
+
+/** Possible next mode values */
+#define FW_DYNAMIC_INFO_NEXT_MODE_U 0x0
+#define FW_DYNAMIC_INFO_NEXT_MODE_S 0x1
+#define FW_DYNAMIC_INFO_NEXT_MODE_M 0x3
+
+enum sbi_scratch_options {
+ /** Disable prints during boot */
+ SBI_SCRATCH_NO_BOOT_PRINTS = (1 << 0),
+ /** Enable runtime debug prints */
+ SBI_SCRATCH_DEBUG_PRINTS = (1 << 1),
+};
+
+/** Representation of the dynamic info passed by the previous booting stage */
+struct fw_dynamic_info {
+ /** Info magic */
+ target_long magic;
+ /** Info version */
+ target_long version;
+ /** Next booting stage address */
+ target_long next_addr;
+ /** Next booting stage mode */
+ target_long next_mode;
+ /** Options for OpenSBI library */
+ target_long options;
+ /**
+ * Preferred boot HART id
+ *
+     * It is possible that the previous booting stage uses the same link
+     * address as the FW_DYNAMIC firmware. In this case, the relocation
+     * lottery mechanism can potentially overwrite the previous booting
+     * stage while other HARTs are still running in it, leading to a
+     * boot-time crash. To avoid this crash, the previous booting stage
+     * can specify the last HART that will jump to the FW_DYNAMIC
+     * firmware as the preferred boot HART.
+ *
+ * To avoid specifying a preferred boot HART, the previous booting
+     * stage can set it to -1UL, which will force the FW_DYNAMIC firmware
+ * to use the relocation lottery mechanism.
+ */
+ target_long boot_hart;
+};
+
+#endif
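
For illustration, a previous booting stage would populate the structure roughly as below before jumping to the FW_DYNAMIC firmware; kernel_entry is a placeholder:

    struct fw_dynamic_info dinfo = {
        .magic     = FW_DYNAMIC_INFO_MAGIC_VALUE,
        .version   = FW_DYNAMIC_INFO_VERSION,
        .next_addr = kernel_entry,
        .next_mode = FW_DYNAMIC_INFO_NEXT_MODE_S,
        .options   = 0,
        .boot_hart = -1UL,   /* let OpenSBI pick via the relocation lottery */
    };
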
diff --git a/include/hw/riscv/microchip_pfsoc.h b/include/hw/riscv/microchip_pfsoc.h
new file mode 100644
index 0000000000..daef086da6
--- /dev/null
+++ b/include/hw/riscv/microchip_pfsoc.h
@@ -0,0 +1,168 @@
+/*
+ * Microchip PolarFire SoC machine interface
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MICROCHIP_PFSOC_H
+#define HW_MICROCHIP_PFSOC_H
+
+#include "hw/boards.h"
+#include "hw/char/mchp_pfsoc_mmuart.h"
+#include "hw/cpu/cluster.h"
+#include "hw/dma/sifive_pdma.h"
+#include "hw/misc/mchp_pfsoc_dmc.h"
+#include "hw/misc/mchp_pfsoc_ioscb.h"
+#include "hw/misc/mchp_pfsoc_sysreg.h"
+#include "hw/net/cadence_gem.h"
+#include "hw/sd/cadence_sdhci.h"
+#include "hw/riscv/riscv_hart.h"
+
+typedef struct MicrochipPFSoCState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CPUClusterState e_cluster;
+ CPUClusterState u_cluster;
+ RISCVHartArrayState e_cpus;
+ RISCVHartArrayState u_cpus;
+ DeviceState *plic;
+ MchpPfSoCDdrSgmiiPhyState ddr_sgmii_phy;
+ MchpPfSoCDdrCfgState ddr_cfg;
+ MchpPfSoCIoscbState ioscb;
+ MchpPfSoCMMUartState *serial0;
+ MchpPfSoCMMUartState *serial1;
+ MchpPfSoCMMUartState *serial2;
+ MchpPfSoCMMUartState *serial3;
+ MchpPfSoCMMUartState *serial4;
+ MchpPfSoCSysregState sysreg;
+ SiFivePDMAState dma;
+ CadenceGEMState gem0;
+ CadenceGEMState gem1;
+ CadenceSDHCIState sdhci;
+} MicrochipPFSoCState;
+
+#define TYPE_MICROCHIP_PFSOC "microchip.pfsoc"
+#define MICROCHIP_PFSOC(obj) \
+ OBJECT_CHECK(MicrochipPFSoCState, (obj), TYPE_MICROCHIP_PFSOC)
+
+typedef struct MicrochipIcicleKitState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ /*< public >*/
+ MicrochipPFSoCState soc;
+} MicrochipIcicleKitState;
+
+#define TYPE_MICROCHIP_ICICLE_KIT_MACHINE \
+ MACHINE_TYPE_NAME("microchip-icicle-kit")
+#define MICROCHIP_ICICLE_KIT_MACHINE(obj) \
+ OBJECT_CHECK(MicrochipIcicleKitState, (obj), \
+ TYPE_MICROCHIP_ICICLE_KIT_MACHINE)
+
+enum {
+ MICROCHIP_PFSOC_RSVD0,
+ MICROCHIP_PFSOC_DEBUG,
+ MICROCHIP_PFSOC_E51_DTIM,
+ MICROCHIP_PFSOC_BUSERR_UNIT0,
+ MICROCHIP_PFSOC_BUSERR_UNIT1,
+ MICROCHIP_PFSOC_BUSERR_UNIT2,
+ MICROCHIP_PFSOC_BUSERR_UNIT3,
+ MICROCHIP_PFSOC_BUSERR_UNIT4,
+ MICROCHIP_PFSOC_CLINT,
+ MICROCHIP_PFSOC_L2CC,
+ MICROCHIP_PFSOC_DMA,
+ MICROCHIP_PFSOC_L2LIM,
+ MICROCHIP_PFSOC_PLIC,
+ MICROCHIP_PFSOC_MMUART0,
+ MICROCHIP_PFSOC_WDOG0,
+ MICROCHIP_PFSOC_SYSREG,
+ MICROCHIP_PFSOC_AXISW,
+ MICROCHIP_PFSOC_MPUCFG,
+ MICROCHIP_PFSOC_FMETER,
+ MICROCHIP_PFSOC_DDR_SGMII_PHY,
+ MICROCHIP_PFSOC_EMMC_SD,
+ MICROCHIP_PFSOC_DDR_CFG,
+ MICROCHIP_PFSOC_MMUART1,
+ MICROCHIP_PFSOC_MMUART2,
+ MICROCHIP_PFSOC_MMUART3,
+ MICROCHIP_PFSOC_MMUART4,
+ MICROCHIP_PFSOC_WDOG1,
+ MICROCHIP_PFSOC_WDOG2,
+ MICROCHIP_PFSOC_WDOG3,
+ MICROCHIP_PFSOC_WDOG4,
+ MICROCHIP_PFSOC_SPI0,
+ MICROCHIP_PFSOC_SPI1,
+ MICROCHIP_PFSOC_I2C0,
+ MICROCHIP_PFSOC_I2C1,
+ MICROCHIP_PFSOC_CAN0,
+ MICROCHIP_PFSOC_CAN1,
+ MICROCHIP_PFSOC_GEM0,
+ MICROCHIP_PFSOC_GEM1,
+ MICROCHIP_PFSOC_GPIO0,
+ MICROCHIP_PFSOC_GPIO1,
+ MICROCHIP_PFSOC_GPIO2,
+ MICROCHIP_PFSOC_RTC,
+ MICROCHIP_PFSOC_ENVM_CFG,
+ MICROCHIP_PFSOC_ENVM_DATA,
+ MICROCHIP_PFSOC_USB,
+ MICROCHIP_PFSOC_QSPI_XIP,
+ MICROCHIP_PFSOC_IOSCB,
+ MICROCHIP_PFSOC_FABRIC_FIC0,
+ MICROCHIP_PFSOC_FABRIC_FIC1,
+ MICROCHIP_PFSOC_FABRIC_FIC3,
+ MICROCHIP_PFSOC_DRAM_LO,
+ MICROCHIP_PFSOC_DRAM_LO_ALIAS,
+ MICROCHIP_PFSOC_DRAM_HI,
+ MICROCHIP_PFSOC_DRAM_HI_ALIAS
+};
+
+enum {
+ MICROCHIP_PFSOC_DMA_IRQ0 = 5,
+ MICROCHIP_PFSOC_DMA_IRQ1 = 6,
+ MICROCHIP_PFSOC_DMA_IRQ2 = 7,
+ MICROCHIP_PFSOC_DMA_IRQ3 = 8,
+ MICROCHIP_PFSOC_DMA_IRQ4 = 9,
+ MICROCHIP_PFSOC_DMA_IRQ5 = 10,
+ MICROCHIP_PFSOC_DMA_IRQ6 = 11,
+ MICROCHIP_PFSOC_DMA_IRQ7 = 12,
+ MICROCHIP_PFSOC_GEM0_IRQ = 64,
+ MICROCHIP_PFSOC_GEM1_IRQ = 70,
+ MICROCHIP_PFSOC_EMMC_SD_IRQ = 88,
+ MICROCHIP_PFSOC_MMUART0_IRQ = 90,
+ MICROCHIP_PFSOC_MMUART1_IRQ = 91,
+ MICROCHIP_PFSOC_MMUART2_IRQ = 92,
+ MICROCHIP_PFSOC_MMUART3_IRQ = 93,
+ MICROCHIP_PFSOC_MMUART4_IRQ = 94,
+ MICROCHIP_PFSOC_MAILBOX_IRQ = 96,
+};
+
+#define MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT 1
+#define MICROCHIP_PFSOC_COMPUTE_CPU_COUNT 4
+
+#define MICROCHIP_PFSOC_PLIC_NUM_SOURCES 187
+#define MICROCHIP_PFSOC_PLIC_NUM_PRIORITIES 7
+#define MICROCHIP_PFSOC_PLIC_PRIORITY_BASE 0x00
+#define MICROCHIP_PFSOC_PLIC_PENDING_BASE 0x1000
+#define MICROCHIP_PFSOC_PLIC_ENABLE_BASE 0x2000
+#define MICROCHIP_PFSOC_PLIC_ENABLE_STRIDE 0x80
+#define MICROCHIP_PFSOC_PLIC_CONTEXT_BASE 0x200000
+#define MICROCHIP_PFSOC_PLIC_CONTEXT_STRIDE 0x1000
+
+#endif /* HW_MICROCHIP_PFSOC_H */
diff --git a/include/hw/riscv/numa.h b/include/hw/riscv/numa.h
new file mode 100644
index 0000000000..8f5280211d
--- /dev/null
+++ b/include/hw/riscv/numa.h
@@ -0,0 +1,114 @@
+/*
+ * QEMU RISC-V NUMA Helper
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_NUMA_H
+#define RISCV_NUMA_H
+
+#include "hw/boards.h"
+#include "hw/sysbus.h"
+#include "sysemu/numa.h"
+
+/**
+ * riscv_socket_count:
+ * @ms: pointer to machine state
+ *
+ * Returns: number of sockets for a numa system and 1 for a non-numa system
+ */
+int riscv_socket_count(const MachineState *ms);
+
+/**
+ * riscv_socket_first_hartid:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: first hartid for a valid socket and -1 for an invalid socket
+ */
+int riscv_socket_first_hartid(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_last_hartid:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: last hartid for a valid socket and -1 for an invalid socket
+ */
+int riscv_socket_last_hartid(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_hart_count:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: number of harts for a valid socket and -1 for an invalid socket
+ */
+int riscv_socket_hart_count(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_mem_offset:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: offset of ram belonging to given socket
+ */
+uint64_t riscv_socket_mem_offset(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_mem_size:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: size of ram belonging to given socket
+ */
+uint64_t riscv_socket_mem_size(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_check_hartids:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: true if hartids belonging to the given socket are contiguous, else false
+ */
+bool riscv_socket_check_hartids(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_fdt_write_id:
+ * @ms: pointer to machine state
+ * @node_name: name of the FDT node to write the property to
+ * @socket_id: socket index
+ *
+ * Write NUMA node-id FDT property in MachineState->fdt
+ */
+void riscv_socket_fdt_write_id(const MachineState *ms, const char *node_name,
+ int socket_id);
+
+/**
+ * riscv_socket_fdt_write_distance_matrix:
+ * @ms: pointer to machine state
+ *
+ * Write NUMA distance matrix in MachineState->fdt
+ */
+void riscv_socket_fdt_write_distance_matrix(const MachineState *ms);
+
+CpuInstanceProperties
+riscv_numa_cpu_index_to_props(MachineState *ms, unsigned cpu_index);
+
+int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx);
+
+const CPUArchIdList *riscv_numa_possible_cpu_arch_ids(MachineState *ms);
+
+#endif /* RISCV_NUMA_H */
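
The helper API documented above lends itself to a per-socket enumeration loop in board code. The sketch below is illustrative only and is not part of this patch; the function name and the reporting calls are assumptions, while the riscv_socket_* calls are the ones declared above.

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/riscv/numa.h"

/* Hypothetical board-side walk over all sockets using the helpers above. */
static void example_enumerate_sockets(const MachineState *ms)
{
    int socket, n = riscv_socket_count(ms);    /* 1 on a non-numa system */

    for (socket = 0; socket < n; socket++) {
        if (!riscv_socket_check_hartids(ms, socket)) {
            error_report("socket %d: hartids are not contiguous", socket);
            continue;
        }
        info_report("socket %d: harts %d..%d, ram offset 0x%" PRIx64
                    " size 0x%" PRIx64, socket,
                    riscv_socket_first_hartid(ms, socket),
                    riscv_socket_last_hartid(ms, socket),
                    riscv_socket_mem_offset(ms, socket),
                    riscv_socket_mem_size(ms, socket));
    }
}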
diff --git a/include/hw/riscv/opentitan.h b/include/hw/riscv/opentitan.h
new file mode 100644
index 0000000000..609473d07b
--- /dev/null
+++ b/include/hw/riscv/opentitan.h
@@ -0,0 +1,122 @@
+/*
+ * QEMU RISC-V Board Compatible with OpenTitan FPGA platform
+ *
+ * Copyright (c) 2020 Western Digital
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_OPENTITAN_H
+#define HW_OPENTITAN_H
+
+#include "hw/riscv/riscv_hart.h"
+#include "hw/intc/sifive_plic.h"
+#include "hw/char/ibex_uart.h"
+#include "hw/timer/ibex_timer.h"
+#include "hw/ssi/ibex_spi_host.h"
+#include "hw/boards.h"
+#include "qom/object.h"
+
+#define TYPE_RISCV_IBEX_SOC "riscv.lowrisc.ibex.soc"
+OBJECT_DECLARE_SIMPLE_TYPE(LowRISCIbexSoCState, RISCV_IBEX_SOC)
+
+enum {
+ OPENTITAN_SPI_HOST0,
+ OPENTITAN_SPI_HOST1,
+ OPENTITAN_NUM_SPI_HOSTS,
+};
+
+struct LowRISCIbexSoCState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ RISCVHartArrayState cpus;
+ SiFivePLICState plic;
+ IbexUartState uart;
+ IbexTimerState timer;
+ IbexSPIHostState spi_host[OPENTITAN_NUM_SPI_HOSTS];
+
+ uint32_t resetvec;
+
+ MemoryRegion flash_mem;
+ MemoryRegion rom;
+ MemoryRegion flash_alias;
+};
+
+#define TYPE_OPENTITAN_MACHINE MACHINE_TYPE_NAME("opentitan")
+OBJECT_DECLARE_SIMPLE_TYPE(OpenTitanState, OPENTITAN_MACHINE)
+
+typedef struct OpenTitanState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ /*< public >*/
+ LowRISCIbexSoCState soc;
+} OpenTitanState;
+
+enum {
+ IBEX_DEV_ROM,
+ IBEX_DEV_RAM,
+ IBEX_DEV_FLASH,
+ IBEX_DEV_FLASH_VIRTUAL,
+ IBEX_DEV_UART,
+ IBEX_DEV_SPI_DEVICE,
+ IBEX_DEV_SPI_HOST0,
+ IBEX_DEV_SPI_HOST1,
+ IBEX_DEV_GPIO,
+ IBEX_DEV_I2C,
+ IBEX_DEV_PATTGEN,
+ IBEX_DEV_TIMER,
+ IBEX_DEV_SENSOR_CTRL,
+ IBEX_DEV_OTP_CTRL,
+ IBEX_DEV_LC_CTRL,
+ IBEX_DEV_PWRMGR,
+ IBEX_DEV_RSTMGR,
+ IBEX_DEV_CLKMGR,
+ IBEX_DEV_PINMUX,
+ IBEX_DEV_AON_TIMER,
+ IBEX_DEV_USBDEV,
+ IBEX_DEV_FLASH_CTRL,
+ IBEX_DEV_PLIC,
+ IBEX_DEV_AES,
+ IBEX_DEV_HMAC,
+ IBEX_DEV_KMAC,
+ IBEX_DEV_KEYMGR,
+ IBEX_DEV_CSRNG,
+ IBEX_DEV_ENTROPY,
+ IBEX_DEV_EDN0,
+ IBEX_DEV_EDN1,
+ IBEX_DEV_ALERT_HANDLER,
+ IBEX_DEV_SRAM_CTRL,
+ IBEX_DEV_OTBN,
+ IBEX_DEV_IBEX_CFG,
+};
+
+enum {
+ IBEX_UART0_TX_WATERMARK_IRQ = 1,
+ IBEX_UART0_RX_WATERMARK_IRQ = 2,
+ IBEX_UART0_TX_EMPTY_IRQ = 3,
+ IBEX_UART0_RX_OVERFLOW_IRQ = 4,
+ IBEX_UART0_RX_FRAME_ERR_IRQ = 5,
+ IBEX_UART0_RX_BREAK_ERR_IRQ = 6,
+ IBEX_UART0_RX_TIMEOUT_IRQ = 7,
+ IBEX_UART0_RX_PARITY_ERR_IRQ = 8,
+ IBEX_TIMER_TIMEREXPIRED0_0 = 124,
+ IBEX_SPI_HOST0_ERR_IRQ = 131,
+ IBEX_SPI_HOST0_SPI_EVENT_IRQ = 132,
+ IBEX_SPI_HOST1_ERR_IRQ = 133,
+ IBEX_SPI_HOST1_SPI_EVENT_IRQ = 134,
+};
+
+#endif
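
The IRQ numbers above are PLIC input indexes. A minimal wiring sketch, assuming the SoC realize code has already realized the UART and PLIC as sysbus devices and that the UART's TX watermark line is its sysbus IRQ output 0 (an assumption, not stated in this header):

#include "qemu/osdep.h"
#include "hw/riscv/opentitan.h"
#include "hw/sysbus.h"
#include "hw/irq.h"

/* Illustrative wiring only; the actual board code may differ. */
static void example_wire_uart_tx_watermark(LowRISCIbexSoCState *s)
{
    /* Sysbus IRQ output 0 of the UART (assumed) feeds PLIC source 1. */
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), 0,
                       qdev_get_gpio_in(DEVICE(&s->plic),
                                        IBEX_UART0_TX_WATERMARK_IRQ));
}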
diff --git a/include/hw/riscv/riscv_hart.h b/include/hw/riscv/riscv_hart.h
index 0671d88a44..912b4a2682 100644
--- a/include/hw/riscv/riscv_hart.h
+++ b/include/hw/riscv/riscv_hart.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2017 SiFive, Inc.
*
- * Holds the state of a heterogenous array of RISC-V harts
+ * Holds the state of a heterogeneous array of RISC-V harts
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -21,19 +21,24 @@
#ifndef HW_RISCV_HART_H
#define HW_RISCV_HART_H
+#include "hw/sysbus.h"
+#include "target/riscv/cpu.h"
+#include "qom/object.h"
+
#define TYPE_RISCV_HART_ARRAY "riscv.hart_array"
-#define RISCV_HART_ARRAY(obj) \
- OBJECT_CHECK(RISCVHartArrayState, (obj), TYPE_RISCV_HART_ARRAY)
+OBJECT_DECLARE_SIMPLE_TYPE(RISCVHartArrayState, RISCV_HART_ARRAY)
-typedef struct RISCVHartArrayState {
+struct RISCVHartArrayState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
uint32_t num_harts;
+ uint32_t hartid_base;
char *cpu_type;
+ uint64_t resetvec;
RISCVCPU *harts;
-} RISCVHartArrayState;
+};
#endif
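
With the header converted to OBJECT_DECLARE_SIMPLE_TYPE, an SoC embeds the hart array and configures it through QOM/qdev properties before realizing it. The sketch below is a hedged illustration: the property names ("cpu-type", "num-harts", "hartid-base") are assumptions that mirror the struct fields and are not defined in this header.

#include "qemu/osdep.h"
#include "hw/riscv/riscv_hart.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"

/* Sketch only: property names are assumed, not taken from this header. */
static void example_init_harts(Object *soc, RISCVHartArrayState *cpus,
                               const char *cpu_type, uint32_t num_harts,
                               uint32_t hartid_base)
{
    object_initialize_child(soc, "harts", cpus, TYPE_RISCV_HART_ARRAY);
    qdev_prop_set_string(DEVICE(cpus), "cpu-type", cpu_type);
    qdev_prop_set_uint32(DEVICE(cpus), "num-harts", num_harts);
    qdev_prop_set_uint32(DEVICE(cpus), "hartid-base", hartid_base);
    sysbus_realize(SYS_BUS_DEVICE(cpus), &error_fatal);
}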
diff --git a/include/hw/riscv/shakti_c.h b/include/hw/riscv/shakti_c.h
new file mode 100644
index 0000000000..539fe1156d
--- /dev/null
+++ b/include/hw/riscv/shakti_c.h
@@ -0,0 +1,75 @@
+/*
+ * Shakti C-class SoC emulation
+ *
+ * Copyright (c) 2021 Vijai Kumar K <vijai@behindbytes.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SHAKTI_C_H
+#define HW_SHAKTI_C_H
+
+#include "hw/riscv/riscv_hart.h"
+#include "hw/boards.h"
+#include "hw/char/shakti_uart.h"
+
+#define TYPE_RISCV_SHAKTI_SOC "riscv.shakti.cclass.soc"
+#define RISCV_SHAKTI_SOC(obj) \
+ OBJECT_CHECK(ShaktiCSoCState, (obj), TYPE_RISCV_SHAKTI_SOC)
+
+typedef struct ShaktiCSoCState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ RISCVHartArrayState cpus;
+ DeviceState *plic;
+ ShaktiUartState uart;
+ MemoryRegion rom;
+
+} ShaktiCSoCState;
+
+#define TYPE_RISCV_SHAKTI_MACHINE MACHINE_TYPE_NAME("shakti_c")
+#define RISCV_SHAKTI_MACHINE(obj) \
+ OBJECT_CHECK(ShaktiCMachineState, (obj), TYPE_RISCV_SHAKTI_MACHINE)
+typedef struct ShaktiCMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ /*< public >*/
+ ShaktiCSoCState soc;
+} ShaktiCMachineState;
+
+enum {
+ SHAKTI_C_ROM,
+ SHAKTI_C_RAM,
+ SHAKTI_C_UART,
+ SHAKTI_C_GPIO,
+ SHAKTI_C_PLIC,
+ SHAKTI_C_CLINT,
+ SHAKTI_C_I2C,
+};
+
+#define SHAKTI_C_PLIC_HART_CONFIG "MS"
+/* Including Interrupt ID 0 (no interrupt) */
+#define SHAKTI_C_PLIC_NUM_SOURCES 28
+/* Excluding Priority 0 */
+#define SHAKTI_C_PLIC_NUM_PRIORITIES 2
+#define SHAKTI_C_PLIC_PRIORITY_BASE 0x00
+#define SHAKTI_C_PLIC_PENDING_BASE 0x1000
+#define SHAKTI_C_PLIC_ENABLE_BASE 0x2000
+#define SHAKTI_C_PLIC_ENABLE_STRIDE 0x80
+#define SHAKTI_C_PLIC_CONTEXT_BASE 0x200000
+#define SHAKTI_C_PLIC_CONTEXT_STRIDE 0x1000
+
+#endif
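
A quick sanity check of the PLIC layout macros above, assuming the "MS" hart configuration string means one M-mode plus one S-mode context per hart and a single hart for illustration (both are assumptions, not stated in this header):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    unsigned harts = 1;                 /* assumed for illustration */
    unsigned contexts = 2 * harts;      /* "M" + "S" context per hart (assumed) */
    uint64_t span = 0x200000            /* SHAKTI_C_PLIC_CONTEXT_BASE */
                  + (uint64_t)contexts * 0x1000; /* SHAKTI_C_PLIC_CONTEXT_STRIDE */

    printf("PLIC context region ends at 0x%" PRIx64 " for %u contexts\n",
           span, contexts);
    return 0;
}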
diff --git a/include/hw/riscv/sifive_clint.h b/include/hw/riscv/sifive_clint.h
deleted file mode 100644
index e2865be1d1..0000000000
--- a/include/hw/riscv/sifive_clint.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * SiFive CLINT (Core Local Interruptor) interface
- *
- * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
- * Copyright (c) 2017 SiFive, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2 or later, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef HW_SIFIVE_CLINT_H
-#define HW_SIFIVE_CLINT_H
-
-#define TYPE_SIFIVE_CLINT "riscv.sifive.clint"
-
-#define SIFIVE_CLINT(obj) \
- OBJECT_CHECK(SiFiveCLINTState, (obj), TYPE_SIFIVE_CLINT)
-
-typedef struct SiFiveCLINTState {
- /*< private >*/
- SysBusDevice parent_obj;
-
- /*< public >*/
- MemoryRegion mmio;
- uint32_t num_harts;
- uint32_t sip_base;
- uint32_t timecmp_base;
- uint32_t time_base;
- uint32_t aperture_size;
-} SiFiveCLINTState;
-
-DeviceState *sifive_clint_create(hwaddr addr, hwaddr size, uint32_t num_harts,
- uint32_t sip_base, uint32_t timecmp_base, uint32_t time_base);
-
-enum {
- SIFIVE_SIP_BASE = 0x0,
- SIFIVE_TIMECMP_BASE = 0x4000,
- SIFIVE_TIME_BASE = 0xBFF8
-};
-
-enum {
- SIFIVE_CLINT_TIMEBASE_FREQ = 10000000
-};
-
-#endif
diff --git a/include/hw/riscv/sifive_prci.h b/include/hw/riscv/sifive_cpu.h
index b6f4c486cc..136799633a 100644
--- a/include/hw/riscv/sifive_prci.h
+++ b/include/hw/riscv/sifive_cpu.h
@@ -1,7 +1,8 @@
/*
- * QEMU SiFive PRCI (Power, Reset, Clock, Interrupt) interface
+ * SiFive CPU types
*
* Copyright (c) 2017 SiFive, Inc.
+ * Copyright (c) 2019 Bin Meng <bmeng.cn@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,22 +17,15 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HW_SIFIVE_PRCI_H
-#define HW_SIFIVE_PRCI_H
-
-#define TYPE_SIFIVE_PRCI "riscv.sifive.prci"
-
-#define SIFIVE_PRCI(obj) \
- OBJECT_CHECK(SiFivePRCIState, (obj), TYPE_SIFIVE_PRCI)
-
-typedef struct SiFivePRCIState {
- /*< private >*/
- SysBusDevice parent_obj;
-
- /*< public >*/
- MemoryRegion mmio;
-} SiFivePRCIState;
-
-DeviceState *sifive_prci_create(hwaddr addr);
+#ifndef HW_SIFIVE_CPU_H
+#define HW_SIFIVE_CPU_H
+#if defined(TARGET_RISCV32)
+#define SIFIVE_E_CPU TYPE_RISCV_CPU_SIFIVE_E31
+#define SIFIVE_U_CPU TYPE_RISCV_CPU_SIFIVE_U34
+#elif defined(TARGET_RISCV64)
+#define SIFIVE_E_CPU TYPE_RISCV_CPU_SIFIVE_E51
+#define SIFIVE_U_CPU TYPE_RISCV_CPU_SIFIVE_U54
#endif
+
+#endif /* HW_SIFIVE_CPU_H */
diff --git a/include/hw/riscv/sifive_e.h b/include/hw/riscv/sifive_e.h
index 7b6d8aed96..31180a680e 100644
--- a/include/hw/riscv/sifive_e.h
+++ b/include/hw/riscv/sifive_e.h
@@ -19,68 +19,84 @@
#ifndef HW_SIFIVE_E_H
#define HW_SIFIVE_E_H
+#include "hw/riscv/riscv_hart.h"
+#include "hw/riscv/sifive_cpu.h"
+#include "hw/gpio/sifive_gpio.h"
+#include "hw/misc/sifive_e_aon.h"
+#include "hw/boards.h"
+
#define TYPE_RISCV_E_SOC "riscv.sifive.e.soc"
#define RISCV_E_SOC(obj) \
OBJECT_CHECK(SiFiveESoCState, (obj), TYPE_RISCV_E_SOC)
typedef struct SiFiveESoCState {
/*< private >*/
- SysBusDevice parent_obj;
+ DeviceState parent_obj;
/*< public >*/
RISCVHartArrayState cpus;
DeviceState *plic;
+ SiFiveEAONState aon;
+ SIFIVEGPIOState gpio;
+ MemoryRegion xip_mem;
+ MemoryRegion mask_rom;
} SiFiveESoCState;
typedef struct SiFiveEState {
/*< private >*/
- SysBusDevice parent_obj;
+ MachineState parent_obj;
/*< public >*/
SiFiveESoCState soc;
+ bool revb;
} SiFiveEState;
+#define TYPE_RISCV_E_MACHINE MACHINE_TYPE_NAME("sifive_e")
+#define RISCV_E_MACHINE(obj) \
+ OBJECT_CHECK(SiFiveEState, (obj), TYPE_RISCV_E_MACHINE)
+
enum {
- SIFIVE_E_DEBUG,
- SIFIVE_E_MROM,
- SIFIVE_E_OTP,
- SIFIVE_E_CLINT,
- SIFIVE_E_PLIC,
- SIFIVE_E_AON,
- SIFIVE_E_PRCI,
- SIFIVE_E_OTP_CTRL,
- SIFIVE_E_GPIO0,
- SIFIVE_E_UART0,
- SIFIVE_E_QSPI0,
- SIFIVE_E_PWM0,
- SIFIVE_E_UART1,
- SIFIVE_E_QSPI1,
- SIFIVE_E_PWM1,
- SIFIVE_E_QSPI2,
- SIFIVE_E_PWM2,
- SIFIVE_E_XIP,
- SIFIVE_E_DTIM
+ SIFIVE_E_DEV_DEBUG,
+ SIFIVE_E_DEV_MROM,
+ SIFIVE_E_DEV_OTP,
+ SIFIVE_E_DEV_CLINT,
+ SIFIVE_E_DEV_PLIC,
+ SIFIVE_E_DEV_AON,
+ SIFIVE_E_DEV_PRCI,
+ SIFIVE_E_DEV_OTP_CTRL,
+ SIFIVE_E_DEV_GPIO0,
+ SIFIVE_E_DEV_UART0,
+ SIFIVE_E_DEV_QSPI0,
+ SIFIVE_E_DEV_PWM0,
+ SIFIVE_E_DEV_UART1,
+ SIFIVE_E_DEV_QSPI1,
+ SIFIVE_E_DEV_PWM1,
+ SIFIVE_E_DEV_QSPI2,
+ SIFIVE_E_DEV_PWM2,
+ SIFIVE_E_DEV_XIP,
+ SIFIVE_E_DEV_DTIM
};
enum {
- SIFIVE_E_UART0_IRQ = 3,
- SIFIVE_E_UART1_IRQ = 4
+ SIFIVE_E_AON_WDT_IRQ = 1,
+ SIFIVE_E_UART0_IRQ = 3,
+ SIFIVE_E_UART1_IRQ = 4,
+ SIFIVE_E_GPIO0_IRQ0 = 8
};
#define SIFIVE_E_PLIC_HART_CONFIG "M"
-#define SIFIVE_E_PLIC_NUM_SOURCES 127
+/*
+ * Freedom E310 G002 and G003 support 52 interrupt sources while
+ * Freedom E310 G000 supports 51 interrupt sources. We use the value
+ * of G002 and G003, so the total is 53 (including interrupt source 0).
+ */
+#define SIFIVE_E_PLIC_NUM_SOURCES 53
#define SIFIVE_E_PLIC_NUM_PRIORITIES 7
-#define SIFIVE_E_PLIC_PRIORITY_BASE 0x0
+#define SIFIVE_E_PLIC_PRIORITY_BASE 0x00
#define SIFIVE_E_PLIC_PENDING_BASE 0x1000
#define SIFIVE_E_PLIC_ENABLE_BASE 0x2000
#define SIFIVE_E_PLIC_ENABLE_STRIDE 0x80
#define SIFIVE_E_PLIC_CONTEXT_BASE 0x200000
#define SIFIVE_E_PLIC_CONTEXT_STRIDE 0x1000
-#if defined(TARGET_RISCV32)
-#define SIFIVE_E_CPU TYPE_RISCV_CPU_SIFIVE_E31
-#elif defined(TARGET_RISCV64)
-#define SIFIVE_E_CPU TYPE_RISCV_CPU_SIFIVE_E51
-#endif
-
#endif
diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h
index be13cc1304..0696f85942 100644
--- a/include/hw/riscv/sifive_u.h
+++ b/include/hw/riscv/sifive_u.h
@@ -19,7 +19,17 @@
#ifndef HW_SIFIVE_U_H
#define HW_SIFIVE_U_H
+#include "hw/boards.h"
+#include "hw/cpu/cluster.h"
+#include "hw/dma/sifive_pdma.h"
#include "hw/net/cadence_gem.h"
+#include "hw/riscv/riscv_hart.h"
+#include "hw/riscv/sifive_cpu.h"
+#include "hw/gpio/sifive_gpio.h"
+#include "hw/misc/sifive_u_otp.h"
+#include "hw/misc/sifive_u_prci.h"
+#include "hw/ssi/sifive_spi.h"
+#include "hw/timer/sifive_pwm.h"
#define TYPE_RISCV_U_SOC "riscv.sifive.u.soc"
#define RISCV_U_SOC(obj) \
@@ -27,60 +37,132 @@
typedef struct SiFiveUSoCState {
/*< private >*/
- SysBusDevice parent_obj;
+ DeviceState parent_obj;
/*< public >*/
- RISCVHartArrayState cpus;
+ CPUClusterState e_cluster;
+ CPUClusterState u_cluster;
+ RISCVHartArrayState e_cpus;
+ RISCVHartArrayState u_cpus;
DeviceState *plic;
+ SiFiveUPRCIState prci;
+ SIFIVEGPIOState gpio;
+ SiFiveUOTPState otp;
+ SiFivePDMAState dma;
+ SiFiveSPIState spi0;
+ SiFiveSPIState spi2;
CadenceGEMState gem;
+ SiFivePwmState pwm[2];
+
+ uint32_t serial;
+ char *cpu_type;
} SiFiveUSoCState;
+#define TYPE_RISCV_U_MACHINE MACHINE_TYPE_NAME("sifive_u")
+#define RISCV_U_MACHINE(obj) \
+ OBJECT_CHECK(SiFiveUState, (obj), TYPE_RISCV_U_MACHINE)
+
typedef struct SiFiveUState {
/*< private >*/
- SysBusDevice parent_obj;
+ MachineState parent_obj;
/*< public >*/
SiFiveUSoCState soc;
- void *fdt;
int fdt_size;
+
+ bool start_in_flash;
+ uint32_t msel;
+ uint32_t serial;
} SiFiveUState;
enum {
- SIFIVE_U_DEBUG,
- SIFIVE_U_MROM,
- SIFIVE_U_CLINT,
- SIFIVE_U_PLIC,
- SIFIVE_U_UART0,
- SIFIVE_U_UART1,
- SIFIVE_U_DRAM,
- SIFIVE_U_GEM
+ SIFIVE_U_DEV_DEBUG,
+ SIFIVE_U_DEV_MROM,
+ SIFIVE_U_DEV_CLINT,
+ SIFIVE_U_DEV_L2CC,
+ SIFIVE_U_DEV_PDMA,
+ SIFIVE_U_DEV_L2LIM,
+ SIFIVE_U_DEV_PLIC,
+ SIFIVE_U_DEV_PRCI,
+ SIFIVE_U_DEV_UART0,
+ SIFIVE_U_DEV_UART1,
+ SIFIVE_U_DEV_GPIO,
+ SIFIVE_U_DEV_QSPI0,
+ SIFIVE_U_DEV_QSPI2,
+ SIFIVE_U_DEV_OTP,
+ SIFIVE_U_DEV_DMC,
+ SIFIVE_U_DEV_FLASH0,
+ SIFIVE_U_DEV_DRAM,
+ SIFIVE_U_DEV_GEM,
+ SIFIVE_U_DEV_GEM_MGMT,
+ SIFIVE_U_DEV_PWM0,
+ SIFIVE_U_DEV_PWM1
};
enum {
- SIFIVE_U_UART0_IRQ = 3,
- SIFIVE_U_UART1_IRQ = 4,
- SIFIVE_U_GEM_IRQ = 0x35
+ SIFIVE_U_L2CC_IRQ0 = 1,
+ SIFIVE_U_L2CC_IRQ1 = 2,
+ SIFIVE_U_L2CC_IRQ2 = 3,
+ SIFIVE_U_UART0_IRQ = 4,
+ SIFIVE_U_UART1_IRQ = 5,
+ SIFIVE_U_QSPI2_IRQ = 6,
+ SIFIVE_U_GPIO_IRQ0 = 7,
+ SIFIVE_U_GPIO_IRQ1 = 8,
+ SIFIVE_U_GPIO_IRQ2 = 9,
+ SIFIVE_U_GPIO_IRQ3 = 10,
+ SIFIVE_U_GPIO_IRQ4 = 11,
+ SIFIVE_U_GPIO_IRQ5 = 12,
+ SIFIVE_U_GPIO_IRQ6 = 13,
+ SIFIVE_U_GPIO_IRQ7 = 14,
+ SIFIVE_U_GPIO_IRQ8 = 15,
+ SIFIVE_U_GPIO_IRQ9 = 16,
+ SIFIVE_U_GPIO_IRQ10 = 17,
+ SIFIVE_U_GPIO_IRQ11 = 18,
+ SIFIVE_U_GPIO_IRQ12 = 19,
+ SIFIVE_U_GPIO_IRQ13 = 20,
+ SIFIVE_U_GPIO_IRQ14 = 21,
+ SIFIVE_U_GPIO_IRQ15 = 22,
+ SIFIVE_U_PDMA_IRQ0 = 23,
+ SIFIVE_U_PDMA_IRQ1 = 24,
+ SIFIVE_U_PDMA_IRQ2 = 25,
+ SIFIVE_U_PDMA_IRQ3 = 26,
+ SIFIVE_U_PDMA_IRQ4 = 27,
+ SIFIVE_U_PDMA_IRQ5 = 28,
+ SIFIVE_U_PDMA_IRQ6 = 29,
+ SIFIVE_U_PDMA_IRQ7 = 30,
+ SIFIVE_U_PWM0_IRQ0 = 42,
+ SIFIVE_U_PWM0_IRQ1 = 43,
+ SIFIVE_U_PWM0_IRQ2 = 44,
+ SIFIVE_U_PWM0_IRQ3 = 45,
+ SIFIVE_U_PWM1_IRQ0 = 46,
+ SIFIVE_U_PWM1_IRQ1 = 47,
+ SIFIVE_U_PWM1_IRQ2 = 48,
+ SIFIVE_U_PWM1_IRQ3 = 49,
+ SIFIVE_U_QSPI0_IRQ = 51,
+ SIFIVE_U_GEM_IRQ = 53
};
enum {
- SIFIVE_U_CLOCK_FREQ = 1000000000,
- SIFIVE_U_GEM_CLOCK_FREQ = 125000000
+ SIFIVE_U_HFCLK_FREQ = 33333333,
+ SIFIVE_U_RTCCLK_FREQ = 1000000
};
-#define SIFIVE_U_PLIC_HART_CONFIG "MS"
-#define SIFIVE_U_PLIC_NUM_SOURCES 127
+enum {
+ MSEL_MEMMAP_QSPI0_FLASH = 1,
+ MSEL_L2LIM_QSPI0_FLASH = 6,
+ MSEL_L2LIM_QSPI2_SD = 11
+};
+
+#define SIFIVE_U_MANAGEMENT_CPU_COUNT 1
+#define SIFIVE_U_COMPUTE_CPU_COUNT 4
+
+#define SIFIVE_U_PLIC_NUM_SOURCES 54
#define SIFIVE_U_PLIC_NUM_PRIORITIES 7
-#define SIFIVE_U_PLIC_PRIORITY_BASE 0x0
+#define SIFIVE_U_PLIC_PRIORITY_BASE 0x00
#define SIFIVE_U_PLIC_PENDING_BASE 0x1000
#define SIFIVE_U_PLIC_ENABLE_BASE 0x2000
#define SIFIVE_U_PLIC_ENABLE_STRIDE 0x80
#define SIFIVE_U_PLIC_CONTEXT_BASE 0x200000
#define SIFIVE_U_PLIC_CONTEXT_STRIDE 0x1000
-#if defined(TARGET_RISCV32)
-#define SIFIVE_U_CPU TYPE_RISCV_CPU_SIFIVE_U34
-#elif defined(TARGET_RISCV64)
-#define SIFIVE_U_CPU TYPE_RISCV_CPU_SIFIVE_U54
-#endif
-
#endif
diff --git a/include/hw/riscv/spike.h b/include/hw/riscv/spike.h
index 641b70da67..0c2a223763 100644
--- a/include/hw/riscv/spike.h
+++ b/include/hw/riscv/spike.h
@@ -19,32 +19,31 @@
#ifndef HW_RISCV_SPIKE_H
#define HW_RISCV_SPIKE_H
-typedef struct {
+#include "hw/boards.h"
+#include "hw/riscv/riscv_hart.h"
+#include "hw/sysbus.h"
+
+#define SPIKE_CPUS_MAX 8
+#define SPIKE_SOCKETS_MAX 8
+
+#define TYPE_SPIKE_MACHINE MACHINE_TYPE_NAME("spike")
+typedef struct SpikeState SpikeState;
+DECLARE_INSTANCE_CHECKER(SpikeState, SPIKE_MACHINE,
+ TYPE_SPIKE_MACHINE)
+
+struct SpikeState {
/*< private >*/
- SysBusDevice parent_obj;
+ MachineState parent;
/*< public >*/
- RISCVHartArrayState soc;
- void *fdt;
- int fdt_size;
-} SpikeState;
+ RISCVHartArrayState soc[SPIKE_SOCKETS_MAX];
+};
enum {
SPIKE_MROM,
+ SPIKE_HTIF,
SPIKE_CLINT,
SPIKE_DRAM
};
-enum {
- SPIKE_CLOCK_FREQ = 1000000000
-};
-
-#if defined(TARGET_RISCV32)
-#define SPIKE_V1_09_1_CPU TYPE_RISCV_CPU_RV32GCSU_V1_09_1
-#define SPIKE_V1_10_0_CPU TYPE_RISCV_CPU_RV32GCSU_V1_10_0
-#elif defined(TARGET_RISCV64)
-#define SPIKE_V1_09_1_CPU TYPE_RISCV_CPU_RV64GCSU_V1_09_1
-#define SPIKE_V1_10_0_CPU TYPE_RISCV_CPU_RV64GCSU_V1_10_0
-#endif
-
#endif
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
index f12deaebd6..3db839160f 100644
--- a/include/hw/riscv/virt.h
+++ b/include/hw/riscv/virt.h
@@ -19,64 +19,138 @@
#ifndef HW_RISCV_VIRT_H
#define HW_RISCV_VIRT_H
-typedef struct {
+#include "hw/boards.h"
+#include "hw/riscv/riscv_hart.h"
+#include "hw/sysbus.h"
+#include "hw/block/flash.h"
+#include "hw/intc/riscv_imsic.h"
+
+#define VIRT_CPUS_MAX_BITS 9
+#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
+#define VIRT_SOCKETS_MAX_BITS 2
+#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS)
+
+#define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
+typedef struct RISCVVirtState RISCVVirtState;
+DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE,
+ TYPE_RISCV_VIRT_MACHINE)
+
+typedef enum RISCVVirtAIAType {
+ VIRT_AIA_TYPE_NONE = 0,
+ VIRT_AIA_TYPE_APLIC,
+ VIRT_AIA_TYPE_APLIC_IMSIC,
+} RISCVVirtAIAType;
+
+struct RISCVVirtState {
/*< private >*/
- SysBusDevice parent_obj;
+ MachineState parent;
/*< public >*/
- RISCVHartArrayState soc;
- DeviceState *plic;
- void *fdt;
+ Notifier machine_done;
+ DeviceState *platform_bus_dev;
+ RISCVHartArrayState soc[VIRT_SOCKETS_MAX];
+ DeviceState *irqchip[VIRT_SOCKETS_MAX];
+ PFlashCFI01 *flash[2];
+ FWCfgState *fw_cfg;
+
int fdt_size;
-} RISCVVirtState;
+ bool have_aclint;
+ RISCVVirtAIAType aia_type;
+ int aia_guests;
+ char *oem_id;
+ char *oem_table_id;
+ OnOffAuto acpi;
+ const MemMapEntry *memmap;
+ struct GPEXHost *gpex_host;
+};
enum {
VIRT_DEBUG,
VIRT_MROM,
VIRT_TEST,
+ VIRT_RTC,
VIRT_CLINT,
+ VIRT_ACLINT_SSWI,
VIRT_PLIC,
+ VIRT_APLIC_M,
+ VIRT_APLIC_S,
VIRT_UART0,
VIRT_VIRTIO,
+ VIRT_FW_CFG,
+ VIRT_IMSIC_M,
+ VIRT_IMSIC_S,
+ VIRT_FLASH,
VIRT_DRAM,
VIRT_PCIE_MMIO,
VIRT_PCIE_PIO,
+ VIRT_PLATFORM_BUS,
VIRT_PCIE_ECAM
};
enum {
UART0_IRQ = 10,
+ RTC_IRQ = 11,
VIRTIO_IRQ = 1, /* 1 to 8 */
VIRTIO_COUNT = 8,
PCIE_IRQ = 0x20, /* 32 to 35 */
- VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */
+ VIRT_PLATFORM_BUS_IRQ = 64, /* 64 to 95 */
};
-enum {
- VIRT_CLOCK_FREQ = 1000000000
-};
+#define VIRT_PLATFORM_BUS_NUM_IRQS 32
-#define VIRT_PLIC_HART_CONFIG "MS"
-#define VIRT_PLIC_NUM_SOURCES 127
-#define VIRT_PLIC_NUM_PRIORITIES 7
-#define VIRT_PLIC_PRIORITY_BASE 0x0
+#define VIRT_IRQCHIP_NUM_MSIS 255
+#define VIRT_IRQCHIP_NUM_SOURCES 96
+#define VIRT_IRQCHIP_NUM_PRIO_BITS 3
+#define VIRT_IRQCHIP_MAX_GUESTS_BITS 3
+#define VIRT_IRQCHIP_MAX_GUESTS ((1U << VIRT_IRQCHIP_MAX_GUESTS_BITS) - 1U)
+
+#define VIRT_PLIC_PRIORITY_BASE 0x00
#define VIRT_PLIC_PENDING_BASE 0x1000
#define VIRT_PLIC_ENABLE_BASE 0x2000
#define VIRT_PLIC_ENABLE_STRIDE 0x80
#define VIRT_PLIC_CONTEXT_BASE 0x200000
#define VIRT_PLIC_CONTEXT_STRIDE 0x1000
+#define VIRT_PLIC_SIZE(__num_context) \
+ (VIRT_PLIC_CONTEXT_BASE + (__num_context) * VIRT_PLIC_CONTEXT_STRIDE)
#define FDT_PCI_ADDR_CELLS 3
#define FDT_PCI_INT_CELLS 1
#define FDT_PLIC_ADDR_CELLS 0
#define FDT_PLIC_INT_CELLS 1
-#define FDT_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + 1 + \
- FDT_PLIC_ADDR_CELLS + FDT_PLIC_INT_CELLS)
+#define FDT_APLIC_INT_CELLS 2
+#define FDT_IMSIC_INT_CELLS 0
+#define FDT_MAX_INT_CELLS 2
+#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
+ 1 + FDT_MAX_INT_CELLS)
+#define FDT_PLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
+ 1 + FDT_PLIC_INT_CELLS)
+#define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
+ 1 + FDT_APLIC_INT_CELLS)
+
+bool virt_is_acpi_enabled(RISCVVirtState *s);
+void virt_acpi_setup(RISCVVirtState *vms);
+uint32_t imsic_num_bits(uint32_t count);
+
+/*
+ * The virt machine physical address space used by some of the devices,
+ * namely the ACLINT, PLIC, APLIC, and IMSIC, depends on the number of
+ * sockets, the number of CPUs, and the number of IMSIC guest files.
+ *
+ * The various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
+ * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization of
+ * the virt machine physical address space.
+ */
+
+#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
+#if VIRT_IMSIC_GROUP_MAX_SIZE < \
+ IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
+#error "Can't accommodate single IMSIC group in address space"
+#endif
-#if defined(TARGET_RISCV32)
-#define VIRT_CPU TYPE_RISCV_CPU_RV32GCSU_V1_10_0
-#elif defined(TARGET_RISCV64)
-#define VIRT_CPU TYPE_RISCV_CPU_RV64GCSU_V1_10_0
+#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
+ VIRT_IMSIC_GROUP_MAX_SIZE)
+#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
+#error "Can't accommodate all IMSIC groups in address space"
#endif
#endif
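
The limits above imply concrete numbers that are easy to mis-read from the bit widths; a small arithmetic sketch (illustration only, values derived from the macros shown above):

#include <stdio.h>

int main(void)
{
    printf("VIRT_CPUS_MAX           = %d\n", 1 << 9);          /* 512 harts */
    printf("VIRT_SOCKETS_MAX        = %d\n", 1 << 2);          /* 4 sockets */
    printf("VIRT_IRQCHIP_MAX_GUESTS = %u\n", (1u << 3) - 1u);  /* 7 guest files */
    /* VIRT_PLIC_SIZE(n) = context base + n * context stride */
    printf("VIRT_PLIC_SIZE(8)       = 0x%x\n", 0x200000 + 8 * 0x1000);
    return 0;
}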
diff --git a/include/hw/rtc/allwinner-rtc.h b/include/hw/rtc/allwinner-rtc.h
new file mode 100644
index 0000000000..bf415431cd
--- /dev/null
+++ b/include/hw/rtc/allwinner-rtc.h
@@ -0,0 +1,129 @@
+/*
+ * Allwinner Real Time Clock emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_RTC_H
+#define HW_MISC_ALLWINNER_RTC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * Constants
+ * @{
+ */
+
+/** Highest register address used by RTC device */
+#define AW_RTC_REGS_MAXADDR (0x200)
+
+/** Total number of known registers */
+#define AW_RTC_REGS_NUM (AW_RTC_REGS_MAXADDR / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * Object model types
+ * @{
+ */
+
+/** Generic Allwinner RTC device (abstract) */
+#define TYPE_AW_RTC "allwinner-rtc"
+
+/** Allwinner RTC sun4i family (A10, A12) */
+#define TYPE_AW_RTC_SUN4I TYPE_AW_RTC "-sun4i"
+
+/** Allwinner RTC sun6i family and newer (A31, H2+, H3, etc.) */
+#define TYPE_AW_RTC_SUN6I TYPE_AW_RTC "-sun6i"
+
+/** Allwinner RTC sun7i family (A20) */
+#define TYPE_AW_RTC_SUN7I TYPE_AW_RTC "-sun7i"
+
+/** @} */
+
+/**
+ * Object model macros
+ * @{
+ */
+
+OBJECT_DECLARE_TYPE(AwRtcState, AwRtcClass, AW_RTC)
+
+/** @} */
+
+/**
+ * Allwinner RTC per-object instance state.
+ */
+struct AwRtcState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /**
+ * Actual year represented by the device when year counter is zero
+ *
+ * Can be overridden by the user using the corresponding 'base-year'
+ * property. The base year used by the target OS driver can vary, for
+ * example the Linux driver for sun6i uses 1970 while NetBSD uses 2000.
+ */
+ int base_year;
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_RTC_REGS_NUM];
+
+};
+
+/**
+ * Allwinner RTC class-level struct.
+ *
+ * This struct is filled in by the device-specific code of each sunxi
+ * variant so that the generic code can use it to support all of the
+ * devices.
+ */
+struct AwRtcClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+
+ /** Defines device specific register map */
+ const uint8_t *regmap;
+
+ /** Size of the regmap in bytes */
+ size_t regmap_size;
+
+ /**
+ * Read device specific register
+ *
+ * @offset: register offset to read
+ * @return true if register read successful, false otherwise
+ */
+ bool (*read)(AwRtcState *s, uint32_t offset);
+
+ /**
+ * Write device specific register
+ *
+ * @offset: register offset to write
+ * @data: value to set in register
+ * @return true if register write successful, false otherwise
+ */
+ bool (*write)(AwRtcState *s, uint32_t offset, uint32_t data);
+
+};
+
+#endif /* HW_MISC_ALLWINNER_RTC_H */
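
The read/write hooks in AwRtcClass return true when the access was handled. A minimal sketch of a device-specific read hook, assuming a hypothetical variant that only bounds-checks the offset and leaves regs[] untouched:

#include "qemu/osdep.h"
#include "hw/rtc/allwinner-rtc.h"

/* Hypothetical variant hook following the class contract documented above. */
static bool example_rtc_read(AwRtcState *s, uint32_t offset)
{
    if (offset >= AW_RTC_REGS_MAXADDR) {
        return false;                       /* unknown register */
    }
    /* A real hook would refresh date/time registers here before the read. */
    (void)s->regs[offset / sizeof(uint32_t)];
    return true;
}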
diff --git a/include/hw/rtc/aspeed_rtc.h b/include/hw/rtc/aspeed_rtc.h
new file mode 100644
index 0000000000..596dfebb46
--- /dev/null
+++ b/include/hw/rtc/aspeed_rtc.h
@@ -0,0 +1,28 @@
+/*
+ * ASPEED Real Time Clock
+ * Joel Stanley <joel@jms.id.au>
+ *
+ * Copyright 2019 IBM Corp
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_RTC_ASPEED_RTC_H
+#define HW_RTC_ASPEED_RTC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+struct AspeedRtcState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t reg[0x18];
+ int64_t offset;
+
+};
+
+#define TYPE_ASPEED_RTC "aspeed.rtc"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedRtcState, ASPEED_RTC)
+
+#endif /* HW_RTC_ASPEED_RTC_H */
diff --git a/include/hw/rtc/goldfish_rtc.h b/include/hw/rtc/goldfish_rtc.h
new file mode 100644
index 0000000000..162be33863
--- /dev/null
+++ b/include/hw/rtc/goldfish_rtc.h
@@ -0,0 +1,49 @@
+/*
+ * Goldfish virtual platform RTC
+ *
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * For more details on Google Goldfish virtual platform refer:
+ * https://android.googlesource.com/platform/external/qemu/+/master/docs/GOLDFISH-VIRTUAL-HARDWARE.TXT
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RTC_GOLDFISH_RTC_H
+#define HW_RTC_GOLDFISH_RTC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_GOLDFISH_RTC "goldfish_rtc"
+OBJECT_DECLARE_SIMPLE_TYPE(GoldfishRTCState, GOLDFISH_RTC)
+
+struct GoldfishRTCState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ QEMUTimer *timer;
+ qemu_irq irq;
+
+ uint64_t tick_offset;
+ uint64_t tick_offset_vmstate;
+ uint64_t alarm_next;
+ uint32_t alarm_running;
+ uint32_t irq_pending;
+ uint32_t irq_enabled;
+ uint32_t time_high;
+
+ bool big_endian;
+};
+
+#endif
diff --git a/include/hw/rtc/m48t59.h b/include/hw/rtc/m48t59.h
new file mode 100644
index 0000000000..c14937476c
--- /dev/null
+++ b/include/hw/rtc/m48t59.h
@@ -0,0 +1,50 @@
+/*
+ * QEMU M48T59 and M48T08 NVRAM emulation
+ *
+ * Copyright (c) 2003-2005, 2007 Jocelyn Mayer
+ * Copyright (c) 2013 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_RTC_M48T59_H
+#define HW_RTC_M48T59_H
+
+#include "exec/hwaddr.h"
+#include "qom/object.h"
+
+#define TYPE_NVRAM "nvram"
+
+typedef struct NvramClass NvramClass;
+DECLARE_CLASS_CHECKERS(NvramClass, NVRAM,
+ TYPE_NVRAM)
+#define NVRAM(obj) \
+ INTERFACE_CHECK(Nvram, (obj), TYPE_NVRAM)
+
+typedef struct Nvram Nvram;
+
+struct NvramClass {
+ InterfaceClass parent;
+
+ uint32_t (*read)(Nvram *obj, uint32_t addr);
+ void (*write)(Nvram *obj, uint32_t addr, uint32_t val);
+ void (*toggle_lock)(Nvram *obj, int lock);
+};
+
+#endif /* HW_RTC_M48T59_H */
diff --git a/include/hw/rtc/mc146818rtc.h b/include/hw/rtc/mc146818rtc.h
new file mode 100644
index 0000000000..97cec0b3e8
--- /dev/null
+++ b/include/hw/rtc/mc146818rtc.h
@@ -0,0 +1,60 @@
+/*
+ * QEMU MC146818 RTC emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef HW_RTC_MC146818RTC_H
+#define HW_RTC_MC146818RTC_H
+
+#include "qapi/qapi-types-machine.h"
+#include "qemu/queue.h"
+#include "qemu/timer.h"
+#include "hw/isa/isa.h"
+#include "qom/object.h"
+
+#define TYPE_MC146818_RTC "mc146818rtc"
+OBJECT_DECLARE_SIMPLE_TYPE(MC146818RtcState, MC146818_RTC)
+
+struct MC146818RtcState {
+ ISADevice parent_obj;
+
+ MemoryRegion io;
+ MemoryRegion coalesced_io;
+ uint8_t cmos_data[128];
+ uint8_t cmos_index;
+ uint8_t isairq;
+ uint16_t io_base;
+ int32_t base_year;
+ uint64_t base_rtc;
+ uint64_t last_update;
+ int64_t offset;
+ qemu_irq irq;
+ int it_shift;
+ /* periodic timer */
+ QEMUTimer *periodic_timer;
+ int64_t next_periodic_time;
+ /* update-ended timer */
+ QEMUTimer *update_timer;
+ uint64_t next_alarm_time;
+ uint16_t irq_reinject_on_ack_count;
+ uint32_t irq_coalesced;
+ uint32_t period;
+ QEMUTimer *coalesced_timer;
+ Notifier clock_reset_notifier;
+ LostTickPolicy lost_tick_policy;
+ Notifier suspend_notifier;
+ QLIST_ENTRY(MC146818RtcState) link;
+};
+
+#define RTC_ISA_IRQ 8
+
+MC146818RtcState *mc146818_rtc_init(ISABus *bus, int base_year,
+ qemu_irq intercept_irq);
+void mc146818rtc_set_cmos_data(MC146818RtcState *s, int addr, int val);
+int mc146818rtc_get_cmos_data(MC146818RtcState *s, int addr);
+void qmp_rtc_reset_reinjection(Error **errp);
+
+#endif /* HW_RTC_MC146818RTC_H */
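
A minimal usage sketch of the declarations above from hypothetical board code; the base year, the NULL intercept IRQ, and the CMOS index are illustrative assumptions:

#include "qemu/osdep.h"
#include "hw/rtc/mc146818rtc.h"

static void example_wire_rtc(ISABus *isa_bus)
{
    /* NULL intercept_irq keeps the default ISA IRQ routing (assumed). */
    MC146818RtcState *rtc = mc146818_rtc_init(isa_bus, 2000, NULL);

    /* Poke an arbitrary CMOS byte through the accessor declared above. */
    mc146818rtc_set_cmos_data(rtc, 0x32, 0x20);
}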
diff --git a/include/hw/timer/mc146818rtc_regs.h b/include/hw/rtc/mc146818rtc_regs.h
index c62f17bf2d..12197e0553 100644
--- a/include/hw/timer/mc146818rtc_regs.h
+++ b/include/hw/rtc/mc146818rtc_regs.h
@@ -22,10 +22,11 @@
* THE SOFTWARE.
*/
-#ifndef MC146818RTC_REGS_H
-#define MC146818RTC_REGS_H
+#ifndef HW_RTC_MC146818RTC_REGS_H
+#define HW_RTC_MC146818RTC_REGS_H
-#define RTC_ISA_IRQ 8
+#include "qemu/timer.h"
+#include "qemu/host-utils.h"
#define RTC_SECONDS 0
#define RTC_SECONDS_ALARM 1
diff --git a/include/hw/rtc/pl031.h b/include/hw/rtc/pl031.h
new file mode 100644
index 0000000000..9fd4be1abb
--- /dev/null
+++ b/include/hw/rtc/pl031.h
@@ -0,0 +1,48 @@
+/*
+ * ARM AMBA PrimeCell PL031 RTC
+ *
+ * Copyright (c) 2007 CodeSourcery
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#ifndef HW_RTC_PL031_H
+#define HW_RTC_PL031_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+
+#define TYPE_PL031 "pl031"
+OBJECT_DECLARE_SIMPLE_TYPE(PL031State, PL031)
+
+struct PL031State {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ QEMUTimer *timer;
+ qemu_irq irq;
+
+ /*
+ * Needed to preserve the tick_count across migration, even if the
+ * absolute value of the rtc_clock is different on the source and
+ * destination.
+ */
+ uint32_t tick_offset_vmstate;
+ uint32_t tick_offset;
+ bool tick_offset_migrated;
+ bool migrate_tick_offset;
+
+ uint32_t mr;
+ uint32_t lr;
+ uint32_t cr;
+ uint32_t im;
+ uint32_t is;
+};
+
+#endif
diff --git a/include/hw/rtc/sun4v-rtc.h b/include/hw/rtc/sun4v-rtc.h
new file mode 100644
index 0000000000..26a9eb6196
--- /dev/null
+++ b/include/hw/rtc/sun4v-rtc.h
@@ -0,0 +1,19 @@
+/*
+ * QEMU sun4v Real Time Clock device
+ *
+ * The sun4v_rtc device (sun4v tod clock)
+ *
+ * Copyright (c) 2016 Artyom Tarasenko
+ *
+ * This code is licensed under the GNU GPL v2 or (at your option) any later
+ * version.
+ */
+
+#ifndef HW_RTC_SUN4V_RTC_H
+#define HW_RTC_SUN4V_RTC_H
+
+#include "exec/hwaddr.h"
+
+void sun4v_rtc_init(hwaddr addr);
+
+#endif
diff --git a/include/hw/timer/xlnx-zynqmp-rtc.h b/include/hw/rtc/xlnx-zynqmp-rtc.h
index 5ba4d8bc4a..f0c6a2d78a 100644
--- a/include/hw/timer/xlnx-zynqmp-rtc.h
+++ b/include/hw/rtc/xlnx-zynqmp-rtc.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2017 Xilinx Inc.
*
- * Written-by: Alistair Francis <alistair.francis@xilinx.com>
+ * Written-by: Alistair Francis
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -24,12 +24,16 @@
* THE SOFTWARE.
*/
+#ifndef HW_RTC_XLNX_ZYNQMP_RTC_H
+#define HW_RTC_XLNX_ZYNQMP_RTC_H
+
#include "hw/register.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_XLNX_ZYNQMP_RTC "xlnx-zynmp.rtc"
-#define XLNX_ZYNQMP_RTC(obj) \
- OBJECT_CHECK(XlnxZynqMPRTC, (obj), TYPE_XLNX_ZYNQMP_RTC)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPRTC, XLNX_ZYNQMP_RTC)
REG32(SET_TIME_WRITE, 0x0)
REG32(SET_TIME_READ, 0x4)
@@ -73,7 +77,7 @@ REG32(SAFETY_CHK, 0x50)
#define XLNX_ZYNQMP_RTC_R_MAX (R_SAFETY_CHK + 1)
-typedef struct XlnxZynqMPRTC {
+struct XlnxZynqMPRTC {
SysBusDevice parent_obj;
MemoryRegion iomem;
qemu_irq irq_rtc_int;
@@ -83,4 +87,6 @@ typedef struct XlnxZynqMPRTC {
uint32_t regs[XLNX_ZYNQMP_RTC_R_MAX];
RegisterInfo regs_info[XLNX_ZYNQMP_RTC_R_MAX];
-} XlnxZynqMPRTC;
+};
+
+#endif
diff --git a/include/hw/rx/rx62n.h b/include/hw/rx/rx62n.h
new file mode 100644
index 0000000000..766fe0e435
--- /dev/null
+++ b/include/hw/rx/rx62n.h
@@ -0,0 +1,77 @@
+/*
+ * RX62N MCU Object
+ *
+ * Datasheet: RX62N Group, RX621 Group User's Manual: Hardware
+ * (Rev.1.40 R01UH0033EJ0140)
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RX_RX62N_H
+#define HW_RX_RX62N_H
+
+#include "target/rx/cpu.h"
+#include "hw/intc/rx_icu.h"
+#include "hw/timer/renesas_tmr.h"
+#include "hw/timer/renesas_cmt.h"
+#include "hw/char/renesas_sci.h"
+#include "qom/object.h"
+
+#define TYPE_RX62N_MCU "rx62n-mcu"
+typedef struct RX62NState RX62NState;
+DECLARE_INSTANCE_CHECKER(RX62NState, RX62N_MCU,
+ TYPE_RX62N_MCU)
+
+#define TYPE_R5F562N7_MCU "r5f562n7-mcu"
+#define TYPE_R5F562N8_MCU "r5f562n8-mcu"
+
+#define EXT_CS_BASE 0x01000000
+#define VECTOR_TABLE_BASE 0xffffff80
+#define RX62N_CFLASH_BASE 0xfff80000
+
+#define RX62N_NR_TMR 2
+#define RX62N_NR_CMT 2
+#define RX62N_NR_SCI 6
+
+struct RX62NState {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ RXCPU cpu;
+ RXICUState icu;
+ RTMRState tmr[RX62N_NR_TMR];
+ RCMTState cmt[RX62N_NR_CMT];
+ RSCIState sci[RX62N_NR_SCI];
+
+ MemoryRegion *sysmem;
+ bool kernel;
+
+ MemoryRegion iram;
+ MemoryRegion iomem1;
+ MemoryRegion d_flash;
+ MemoryRegion iomem2;
+ MemoryRegion iomem3;
+ MemoryRegion c_flash;
+
+ /* Input Clock (XTAL) frequency */
+ uint32_t xtal_freq_hz;
+ /* Peripheral Module Clock frequency */
+ uint32_t pclk_freq_hz;
+};
+
+#endif
diff --git a/include/hw/s390x/3270-ccw.h b/include/hw/s390x/3270-ccw.h
index 9d1d18e2bd..1439882294 100644
--- a/include/hw/s390x/3270-ccw.h
+++ b/include/hw/s390x/3270-ccw.h
@@ -16,6 +16,7 @@
#include "hw/sysbus.h"
#include "hw/s390x/css.h"
#include "hw/s390x/ccw-device.h"
+#include "qom/object.h"
#define EMULATED_CCW_3270_CU_TYPE 0x3270
#define EMULATED_CCW_3270_CHPID_TYPE 0x1a
@@ -30,23 +31,18 @@
#define TC_EWRITEA 0x0d /* Erase write alternate */
#define TC_WRITESF 0x11 /* Write structured field */
-#define EMULATED_CCW_3270(obj) \
- OBJECT_CHECK(EmulatedCcw3270Device, (obj), TYPE_EMULATED_CCW_3270)
-#define EMULATED_CCW_3270_CLASS(klass) \
- OBJECT_CLASS_CHECK(EmulatedCcw3270Class, (klass), TYPE_EMULATED_CCW_3270)
-#define EMULATED_CCW_3270_GET_CLASS(obj) \
- OBJECT_GET_CLASS(EmulatedCcw3270Class, (obj), TYPE_EMULATED_CCW_3270)
+OBJECT_DECLARE_TYPE(EmulatedCcw3270Device, EmulatedCcw3270Class, EMULATED_CCW_3270)
-typedef struct EmulatedCcw3270Device {
+struct EmulatedCcw3270Device {
CcwDevice parent_obj;
-} EmulatedCcw3270Device;
+};
-typedef struct EmulatedCcw3270Class {
+struct EmulatedCcw3270Class {
CCWDeviceClass parent_class;
void (*init)(EmulatedCcw3270Device *, Error **);
int (*read_payload_3270)(EmulatedCcw3270Device *);
int (*write_payload_3270)(EmulatedCcw3270Device *, uint8_t);
-} EmulatedCcw3270Class;
+};
#endif
diff --git a/include/hw/s390x/ap-device.h b/include/hw/s390x/ap-device.h
index 765e9082a3..e502745de5 100644
--- a/include/hw/s390x/ap-device.h
+++ b/include/hw/s390x/ap-device.h
@@ -7,16 +7,21 @@
* your option) any later version. See the COPYING file in the top-level
* directory.
*/
+
#ifndef HW_S390X_AP_DEVICE_H
#define HW_S390X_AP_DEVICE_H
-#define AP_DEVICE_TYPE "ap-device"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
+
+#define TYPE_AP_DEVICE "ap-device"
-typedef struct APDevice {
+struct APDevice {
DeviceState parent_obj;
-} APDevice;
+};
+typedef struct APDevice APDevice;
-#define AP_DEVICE(obj) \
- OBJECT_CHECK(APDevice, (obj), AP_DEVICE_TYPE)
+DECLARE_INSTANCE_CHECKER(APDevice, AP_DEVICE,
+ TYPE_AP_DEVICE)
#endif /* HW_S390X_AP_DEVICE_H */
diff --git a/include/hw/s390x/cpu-topology.h b/include/hw/s390x/cpu-topology.h
new file mode 100644
index 0000000000..c064f427e9
--- /dev/null
+++ b/include/hw/s390x/cpu-topology.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CPU Topology
+ *
+ * Copyright IBM Corp. 2022, 2023
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+#ifndef HW_S390X_CPU_TOPOLOGY_H
+#define HW_S390X_CPU_TOPOLOGY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include "qemu/queue.h"
+#include "hw/boards.h"
+#include "qapi/qapi-types-machine-target.h"
+
+#define S390_TOPOLOGY_CPU_IFL 0x03
+
+typedef struct S390TopologyId {
+ uint8_t sentinel;
+ uint8_t drawer;
+ uint8_t book;
+ uint8_t socket;
+ uint8_t type;
+ uint8_t vertical:1;
+ uint8_t entitlement:2;
+ uint8_t dedicated;
+ uint8_t origin;
+} S390TopologyId;
+
+typedef struct S390TopologyEntry {
+ QTAILQ_ENTRY(S390TopologyEntry) next;
+ S390TopologyId id;
+ uint64_t mask;
+} S390TopologyEntry;
+
+typedef struct S390Topology {
+ uint8_t *cores_per_socket;
+ CpuS390Polarization polarization;
+} S390Topology;
+
+typedef QTAILQ_HEAD(, S390TopologyEntry) S390TopologyList;
+
+#ifdef CONFIG_KVM
+bool s390_has_topology(void);
+void s390_topology_setup_cpu(MachineState *ms, S390CPU *cpu, Error **errp);
+void s390_topology_reset(void);
+#else
+static inline bool s390_has_topology(void)
+{
+ return false;
+}
+static inline void s390_topology_setup_cpu(MachineState *ms,
+ S390CPU *cpu,
+ Error **errp) {}
+static inline void s390_topology_reset(void)
+{
+ /* Unreachable, CPU topology not implemented for TCG */
+ assert(false);
+}
+#endif
+
+extern S390Topology s390_topology;
+
+static inline int s390_std_socket(int n, CpuTopology *smp)
+{
+ return (n / smp->cores) % smp->sockets;
+}
+
+static inline int s390_std_book(int n, CpuTopology *smp)
+{
+ return (n / (smp->cores * smp->sockets)) % smp->books;
+}
+
+static inline int s390_std_drawer(int n, CpuTopology *smp)
+{
+ return (n / (smp->cores * smp->sockets * smp->books)) % smp->drawers;
+}
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif
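
The three inline helpers above are pure index arithmetic; a worked example for an assumed topology (drawers=2, books=2, sockets=3, cores=4) and core index 30, mirroring the formulas in the header:

#include <assert.h>

/* Same formula as s390_std_socket() above, written out for illustration. */
static int std_socket(int n, int cores, int sockets)
{
    return (n / cores) % sockets;
}

int main(void)
{
    int cores = 4, sockets = 3, books = 2, drawers = 2, n = 30;

    assert(std_socket(n, cores, sockets) == 1);               /* (30 / 4)  % 3 */
    assert((n / (cores * sockets)) % books == 0);             /* (30 / 12) % 2 */
    assert((n / (cores * sockets * books)) % drawers == 1);   /* (30 / 24) % 2 */
    return 0;
}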
diff --git a/include/hw/s390x/css-bridge.h b/include/hw/s390x/css-bridge.h
index 5a0203be5f..deb606d71f 100644
--- a/include/hw/s390x/css-bridge.h
+++ b/include/hw/s390x/css-bridge.h
@@ -12,27 +12,26 @@
#ifndef HW_S390X_CSS_BRIDGE_H
#define HW_S390X_CSS_BRIDGE_H
+
#include "qom/object.h"
-#include "hw/qdev-core.h"
+#include "hw/sysbus.h"
/* virtual css bridge */
-typedef struct VirtualCssBridge {
+struct VirtualCssBridge {
SysBusDevice sysbus_dev;
bool css_dev_path;
-} VirtualCssBridge;
+};
#define TYPE_VIRTUAL_CSS_BRIDGE "virtual-css-bridge"
-#define VIRTUAL_CSS_BRIDGE(obj) \
- OBJECT_CHECK(VirtualCssBridge, (obj), TYPE_VIRTUAL_CSS_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtualCssBridge, VIRTUAL_CSS_BRIDGE)
/* virtual css bus type */
-typedef struct VirtualCssBus {
+struct VirtualCssBus {
BusState parent_obj;
-} VirtualCssBus;
+};
#define TYPE_VIRTUAL_CSS_BUS "virtual-css-bus"
-#define VIRTUAL_CSS_BUS(obj) \
- OBJECT_CHECK(VirtualCssBus, (obj), TYPE_VIRTUAL_CSS_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtualCssBus, VIRTUAL_CSS_BUS)
VirtualCssBus *virtual_css_bus_init(void);
#endif
diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h
index aae19c4272..ba72ee3dd2 100644
--- a/include/hw/s390x/css.h
+++ b/include/hw/s390x/css.h
@@ -12,11 +12,11 @@
#ifndef CSS_H
#define CSS_H
-#include "cpu.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "sysemu/kvm.h"
+#include "target/s390x/cpu-qom.h"
/* Channel subsystem constants. */
#define MAX_DEVNO 65535
@@ -97,6 +97,7 @@ typedef struct CcwDataStream {
int (*op_handler)(struct CcwDataStream *cds, void *buff, int len,
CcwDataStreamOp op);
hwaddr cda;
+ bool do_skip;
} CcwDataStream;
/*
@@ -131,19 +132,22 @@ struct SubchDev {
bool ccw_fmt_1;
bool thinint_active;
uint8_t ccw_no_data_cnt;
- uint16_t migrated_schid; /* used for missmatch detection */
+ uint16_t migrated_schid; /* used for mismatch detection */
CcwDataStream cds;
/* transport-provided data: */
int (*ccw_cb) (SubchDev *, CCW1);
void (*disable_cb)(SubchDev *);
IOInstEnding (*do_subchannel_work) (SubchDev *);
+ void (*irb_cb)(SubchDev *, IRB *);
SenseId id;
void *driver_data;
+ ESW esw;
};
static inline void sch_gen_unit_exception(SubchDev *sch)
{
- sch->curr_status.scsw.ctrl &= ~SCSW_ACTL_START_PEND;
+ sch->curr_status.scsw.ctrl &= ~(SCSW_ACTL_DEVICE_ACTIVE |
+ SCSW_ACTL_SUBCH_ACTIVE);
sch->curr_status.scsw.ctrl |= SCSW_STCTL_PRIMARY |
SCSW_STCTL_SECONDARY |
SCSW_STCTL_ALERT |
@@ -200,9 +204,11 @@ int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id);
unsigned int css_find_free_chpid(uint8_t cssid);
uint16_t css_build_subchannel_id(SubchDev *sch);
void copy_scsw_to_guest(SCSW *dest, const SCSW *src);
+void copy_esw_to_guest(ESW *dest, const ESW *src);
void css_inject_io_interrupt(SubchDev *sch);
void css_reset(void);
void css_reset_sch(SubchDev *sch);
+void css_crw_add_to_queue(CRW crw);
void css_queue_crw(uint8_t rsc, uint8_t erc, int solicited,
int chain, uint16_t rsid);
void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
@@ -213,6 +219,12 @@ void css_clear_sei_pending(void);
IOInstEnding s390_ccw_cmd_request(SubchDev *sch);
IOInstEnding do_subchannel_work_virtual(SubchDev *sub);
IOInstEnding do_subchannel_work_passthrough(SubchDev *sub);
+void build_irb_passthrough(SubchDev *sch, IRB *irb);
+void build_irb_virtual(SubchDev *sch, IRB *irb);
+
+int s390_ccw_halt(SubchDev *sch);
+int s390_ccw_clear(SubchDev *sch);
+IOInstEnding s390_ccw_store(SubchDev *sch);
typedef enum {
CSS_IO_ADAPTER_VIRTIO = 0,
@@ -221,23 +233,17 @@ typedef enum {
} CssIoAdapterType;
void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc);
-int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode);
+int css_do_sic(S390CPU *cpu, uint8_t isc, uint16_t mode);
uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc);
void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
uint8_t flags, Error **errp);
-#ifndef CONFIG_KVM
-#define S390_ADAPTER_SUPPRESSIBLE 0x01
-#else
-#define S390_ADAPTER_SUPPRESSIBLE KVM_S390_ADAPTER_SUPPRESSIBLE
-#endif
-
#ifndef CONFIG_USER_ONLY
SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
uint16_t schid);
bool css_subch_visible(SubchDev *sch);
void css_conditional_io_interrupt(SubchDev *sch);
-int css_do_stsch(SubchDev *sch, SCHIB *schib);
+IOInstEnding css_do_stsch(SubchDev *sch, SCHIB *schib);
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid);
IOInstEnding css_do_msch(SubchDev *sch, const SCHIB *schib);
IOInstEnding css_do_xsch(SubchDev *sch);
diff --git a/include/hw/s390x/event-facility.h b/include/hw/s390x/event-facility.h
index 6cf71cec38..3ffd575d8f 100644
--- a/include/hw/s390x/event-facility.h
+++ b/include/hw/s390x/event-facility.h
@@ -15,9 +15,10 @@
#ifndef HW_S390_SCLP_EVENT_FACILITY_H
#define HW_S390_SCLP_EVENT_FACILITY_H
-#include "hw/qdev.h"
#include "qemu/thread.h"
+#include "hw/qdev-core.h"
#include "hw/s390x/sclp.h"
+#include "qom/object.h"
/* SCLP event types */
#define SCLP_EVENT_OPRTNS_COMMAND 0x01
@@ -41,12 +42,8 @@
#define SCLP_SELECTIVE_READ 0x01
#define TYPE_SCLP_EVENT "s390-sclp-event-type"
-#define SCLP_EVENT(obj) \
- OBJECT_CHECK(SCLPEvent, (obj), TYPE_SCLP_EVENT)
-#define SCLP_EVENT_CLASS(klass) \
- OBJECT_CLASS_CHECK(SCLPEventClass, (klass), TYPE_SCLP_EVENT)
-#define SCLP_EVENT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SCLPEventClass, (obj), TYPE_SCLP_EVENT)
+OBJECT_DECLARE_TYPE(SCLPEvent, SCLPEventClass,
+ SCLP_EVENT)
#define TYPE_SCLP_CPU_HOTPLUG "sclp-cpu-hotplug"
#define TYPE_SCLP_QUIESCE "sclpquiesce"
@@ -122,7 +119,7 @@ typedef struct MDBO {
typedef struct MDB {
MdbHeader header;
- MDBO mdbo[0];
+ MDBO mdbo[];
} QEMU_PACKED MDB;
typedef struct SclpMsg {
@@ -169,13 +166,13 @@ typedef struct ReadEventData {
};
} QEMU_PACKED ReadEventData;
-typedef struct SCLPEvent {
+struct SCLPEvent {
DeviceState qdev;
bool event_pending;
char *name;
-} SCLPEvent;
+};
-typedef struct SCLPEventClass {
+struct SCLPEventClass {
DeviceClass parent_class;
int (*init)(SCLPEvent *event);
@@ -192,23 +189,19 @@ typedef struct SCLPEventClass {
/* can we handle this event type? */
bool (*can_handle_event)(uint8_t type);
-} SCLPEventClass;
+};
#define TYPE_SCLP_EVENT_FACILITY "s390-sclp-event-facility"
-#define EVENT_FACILITY(obj) \
- OBJECT_CHECK(SCLPEventFacility, (obj), TYPE_SCLP_EVENT_FACILITY)
-#define EVENT_FACILITY_CLASS(klass) \
- OBJECT_CLASS_CHECK(SCLPEventFacilityClass, (klass), \
- TYPE_SCLP_EVENT_FACILITY)
-#define EVENT_FACILITY_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SCLPEventFacilityClass, (obj), \
- TYPE_SCLP_EVENT_FACILITY)
-
-typedef struct SCLPEventFacilityClass {
+typedef struct SCLPEventFacility SCLPEventFacility;
+typedef struct SCLPEventFacilityClass SCLPEventFacilityClass;
+DECLARE_OBJ_CHECKERS(SCLPEventFacility, SCLPEventFacilityClass,
+ EVENT_FACILITY, TYPE_SCLP_EVENT_FACILITY)
+
+struct SCLPEventFacilityClass {
SysBusDeviceClass parent_class;
void (*command_handler)(SCLPEventFacility *ef, SCCB *sccb, uint64_t code);
bool (*event_pending)(SCLPEventFacility *ef);
-} SCLPEventFacilityClass;
+};
BusState *sclp_get_event_facility_bus(void);
diff --git a/include/hw/s390x/ioinst.h b/include/hw/s390x/ioinst.h
index c6737a30d4..ea8d0f2444 100644
--- a/include/hw/s390x/ioinst.h
+++ b/include/hw/s390x/ioinst.h
@@ -107,7 +107,7 @@ QEMU_BUILD_BUG_MSG(sizeof(PMCW) != 28, "size of PMCW is wrong");
#define PMCW_FLAGS_MASK_MP 0x0004
#define PMCW_FLAGS_MASK_TF 0x0002
#define PMCW_FLAGS_MASK_DNV 0x0001
-#define PMCW_FLAGS_MASK_INVALID 0x0700
+#define PMCW_FLAGS_MASK_INVALID 0xc300
#define PMCW_CHARS_MASK_ST 0x00e00000
#define PMCW_CHARS_MASK_MBFC 0x00000004
@@ -123,10 +123,20 @@ typedef struct SCHIB {
uint8_t mda[4];
} QEMU_PACKED SCHIB;
+/* format-0 extended-status word */
+typedef struct ESW {
+ uint32_t word0; /* subchannel logout for format 0 */
+ uint32_t erw;
+ uint64_t word2; /* failing-storage address for format 0 */
+ uint32_t word4; /* secondary-CCW address for format 0 */
+} QEMU_PACKED ESW;
+
+#define ESW_ERW_SENSE 0x01000000
+
/* interruption response block */
typedef struct IRB {
SCSW scsw;
- uint32_t esw[5];
+ ESW esw;
uint32_t ecw[8];
uint32_t emw[8];
} IRB;
diff --git a/include/hw/s390x/s390-ccw.h b/include/hw/s390x/s390-ccw.h
index 7d15a1a5d4..2c807ee3a1 100644
--- a/include/hw/s390x/s390-ccw.h
+++ b/include/hw/s390x/s390-ccw.h
@@ -14,26 +14,29 @@
#define HW_S390_CCW_H
#include "hw/s390x/ccw-device.h"
+#include "qom/object.h"
#define TYPE_S390_CCW "s390-ccw"
-#define S390_CCW_DEVICE(obj) \
- OBJECT_CHECK(S390CCWDevice, (obj), TYPE_S390_CCW)
-#define S390_CCW_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390CCWDeviceClass, (klass), TYPE_S390_CCW)
-#define S390_CCW_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390CCWDeviceClass, (obj), TYPE_S390_CCW)
+typedef struct S390CCWDevice S390CCWDevice;
+typedef struct S390CCWDeviceClass S390CCWDeviceClass;
+DECLARE_OBJ_CHECKERS(S390CCWDevice, S390CCWDeviceClass,
+ S390_CCW_DEVICE, TYPE_S390_CCW)
-typedef struct S390CCWDevice {
+struct S390CCWDevice {
CcwDevice parent_obj;
CssDevId hostid;
char *mdevid;
-} S390CCWDevice;
+ int32_t bootindex;
+};
-typedef struct S390CCWDeviceClass {
+struct S390CCWDeviceClass {
CCWDeviceClass parent_class;
void (*realize)(S390CCWDevice *dev, char *sysfsdev, Error **errp);
- void (*unrealize)(S390CCWDevice *dev, Error **errp);
+ void (*unrealize)(S390CCWDevice *dev);
IOInstEnding (*handle_request) (SubchDev *sch);
-} S390CCWDeviceClass;
+ int (*handle_halt) (SubchDev *sch);
+ int (*handle_clear) (SubchDev *sch);
+ IOInstEnding (*handle_store) (SubchDev *sch);
+};
#endif
diff --git a/include/hw/s390x/s390-pci-bus.h b/include/hw/s390x/s390-pci-bus.h
new file mode 100644
index 0000000000..2c43ea123f
--- /dev/null
+++ b/include/hw/s390x/s390-pci-bus.h
@@ -0,0 +1,406 @@
+/*
+ * s390 PCI BUS definitions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_BUS_H
+#define HW_S390_PCI_BUS_H
+
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/css.h"
+#include "hw/s390x/s390-pci-clp.h"
+#include "qom/object.h"
+
+#define TYPE_S390_PCI_HOST_BRIDGE "s390-pcihost"
+#define TYPE_S390_PCI_BUS "s390-pcibus"
+#define TYPE_S390_PCI_DEVICE "zpci"
+#define TYPE_S390_PCI_IOMMU "s390-pci-iommu"
+#define TYPE_S390_IOMMU_MEMORY_REGION "s390-iommu-memory-region"
+#define FH_MASK_ENABLE 0x80000000
+#define FH_MASK_INSTANCE 0x7f000000
+#define FH_MASK_SHM 0x00ff0000
+#define FH_MASK_INDEX 0x0000ffff
+#define FH_SHM_VFIO 0x00010000
+#define FH_SHM_EMUL 0x00020000
+#define ZPCI_MAX_FID 0xffffffff
+#define ZPCI_MAX_UID 0xffff
+#define UID_UNDEFINED 0
+#define UID_CHECKING_ENABLED 0x01
+#define ZPCI_DTSM 0x40
+
+/* zPCI Function Types */
+#define ZPCI_PFT_ISM 5
+
+OBJECT_DECLARE_SIMPLE_TYPE(S390pciState, S390_PCI_HOST_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(S390PCIBus, S390_PCI_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(S390PCIBusDevice, S390_PCI_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(S390PCIIOMMU, S390_PCI_IOMMU)
+
+#define HP_EVENT_TO_CONFIGURED 0x0301
+#define HP_EVENT_RESERVED_TO_STANDBY 0x0302
+#define HP_EVENT_DECONFIGURE_REQUEST 0x0303
+#define HP_EVENT_CONFIGURED_TO_STBRES 0x0304
+#define HP_EVENT_STANDBY_TO_RESERVED 0x0308
+
+#define ERR_EVENT_INVALAS 0x1
+#define ERR_EVENT_OORANGE 0x2
+#define ERR_EVENT_INVALTF 0x3
+#define ERR_EVENT_TPROTE 0x4
+#define ERR_EVENT_APROTE 0x5
+#define ERR_EVENT_KEYE 0x6
+#define ERR_EVENT_INVALTE 0x7
+#define ERR_EVENT_INVALTL 0x8
+#define ERR_EVENT_TT 0x9
+#define ERR_EVENT_INVALMS 0xa
+#define ERR_EVENT_SERR 0xb
+#define ERR_EVENT_NOMSI 0x10
+#define ERR_EVENT_INVALBV 0x11
+#define ERR_EVENT_AIBV 0x12
+#define ERR_EVENT_AIRERR 0x13
+#define ERR_EVENT_FMBA 0x2a
+#define ERR_EVENT_FMBUP 0x2b
+#define ERR_EVENT_FMBPRO 0x2c
+#define ERR_EVENT_CCONF 0x30
+#define ERR_EVENT_SERVAC 0x3a
+#define ERR_EVENT_PERMERR 0x3b
+
+#define ERR_EVENT_Q_BIT 0x2
+#define ERR_EVENT_MVN_OFFSET 16
+
+#define ZPCI_MSI_VEC_BITS 11
+#define ZPCI_MSI_VEC_MASK 0x7ff
+
+#define ZPCI_MSI_ADDR 0xfe00000000000000ULL
+#define ZPCI_SDMA_ADDR 0x100000000ULL
+#define ZPCI_EDMA_ADDR 0x1ffffffffffffffULL
+
+#define PAGE_DEFAULT_ACC 0
+#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
+
+/* I/O Translation Anchor (IOTA) */
+enum ZpciIoatDtype {
+ ZPCI_IOTA_STO = 0,
+ ZPCI_IOTA_RTTO = 1,
+ ZPCI_IOTA_RSTO = 2,
+ ZPCI_IOTA_RFTO = 3,
+ ZPCI_IOTA_PFAA = 4,
+ ZPCI_IOTA_IOPFAA = 5,
+ ZPCI_IOTA_IOPTO = 7
+};
+
+#define ZPCI_IOTA_IOT_ENABLED 0x800ULL
+#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
+#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
+#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
+#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
+#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
+#define ZPCI_IOTA_FS_4K 0
+#define ZPCI_IOTA_FS_1M 1
+#define ZPCI_IOTA_FS_2G 2
+#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+
+#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
+#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
+#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
+#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
+#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY |\
+ ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
+
+/* I/O Region and segment tables */
+#define ZPCI_INDEX_MASK 0x7ffULL
+
+#define ZPCI_TABLE_TYPE_MASK 0xc
+#define ZPCI_TABLE_TYPE_RFX 0xc
+#define ZPCI_TABLE_TYPE_RSX 0x8
+#define ZPCI_TABLE_TYPE_RTX 0x4
+#define ZPCI_TABLE_TYPE_SX 0x0
+
+#define ZPCI_TABLE_LEN_RFX 0x3
+#define ZPCI_TABLE_LEN_RSX 0x3
+#define ZPCI_TABLE_LEN_RTX 0x3
+
+#define ZPCI_TABLE_OFFSET_MASK 0xc0
+#define ZPCI_TABLE_SIZE 0x4000
+#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
+#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
+#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+
+#define ZPCI_TABLE_BITS 11
+#define ZPCI_PT_BITS 8
+#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + TARGET_PAGE_BITS)
+#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
+
+#define ZPCI_RTE_FLAG_MASK 0x3fffULL
+#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
+#define ZPCI_STE_FLAG_MASK 0x7ffULL
+#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
+
+#define ZPCI_SFAA_MASK (~((1ULL << 20) - 1))
+
+/* I/O Page tables */
+#define ZPCI_PTE_VALID_MASK 0x400
+#define ZPCI_PTE_INVALID 0x400
+#define ZPCI_PTE_VALID 0x000
+#define ZPCI_PT_SIZE 0x800
+#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
+#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
+
+#define ZPCI_PTE_FLAG_MASK 0xfffULL
+#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
+
+/* Shared bits */
+#define ZPCI_TABLE_VALID 0x00
+#define ZPCI_TABLE_INVALID 0x20
+#define ZPCI_TABLE_PROTECTED 0x200
+#define ZPCI_TABLE_UNPROTECTED 0x000
+#define ZPCI_TABLE_FC 0x400
+
+#define ZPCI_TABLE_VALID_MASK 0x20
+#define ZPCI_TABLE_PROT_MASK 0x200
+
+#define ZPCI_ETT_RT 1
+#define ZPCI_ETT_ST 0
+#define ZPCI_ETT_PT -1
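
The shift and mask constants above decompose a guest IOVA into region-table, segment-table and page-table indices for the guest I/O table walk. A hedged, standalone sketch of that index math (assumptions: 4 KiB pages, i.e. TARGET_PAGE_BITS == 12, and the 8 page-table bits defined above):

    /* Sketch: index extraction for a guest IOVA, mirroring the constants above. */
    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define ZPCI_TABLE_BITS  11
    #define ZPCI_PT_BITS     8
    #define ZPCI_ST_SHIFT    (ZPCI_PT_BITS + TARGET_PAGE_BITS)
    #define ZPCI_RT_SHIFT    (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
    #define ZPCI_INDEX_MASK  0x7ffULL
    #define ZPCI_PT_MASK     0xffULL   /* ZPCI_PT_ENTRIES - 1 with 8 PT bits */

    int main(void)
    {
        uint64_t iova = 0x123456789000ULL;   /* arbitrary example address */

        unsigned rtx = (iova >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
        unsigned sx  = (iova >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
        unsigned px  = (iova >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;

        printf("rtx=%u sx=%u px=%u\n", rtx, sx, px);
        return 0;
    }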
+
+/* PCI Function States
+ *
+ * reserved: default; device has just been plugged or is in the process of
+ * being unplugged
+ * standby: device is present but not configured; transitions between this
+ * state and any configured state happen via sclp configure/deconfigure
+ *
+ * The following states make up the "configured" meta-state:
+ * disabled: device is configured but not enabled; transition between this
+ * state and enabled via clp enable/disable
+ * enabled: device is ready for use; transition to disabled via clp disable;
+ * may enter an error state
+ * blocked: ignore all DMA and interrupts; transition back to enabled or from
+ * error state via mpcifc
+ * error: an error occurred; transition back to enabled via mpcifc
+ * permanent error: an unrecoverable error occurred; transition to standby via
+ * sclp deconfigure
+ */
+typedef enum {
+ ZPCI_FS_RESERVED,
+ ZPCI_FS_STANDBY,
+ ZPCI_FS_DISABLED,
+ ZPCI_FS_ENABLED,
+ ZPCI_FS_BLOCKED,
+ ZPCI_FS_ERROR,
+ ZPCI_FS_PERMANENT_ERROR,
+} ZpciState;
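
The comment above implies a small transition check around clp enable/disable inside the "configured" meta-state. A sketch of that check (illustrative only; the real handlers live in the CLP/mpcifc emulation, and blocked/error recovery goes through mpcifc as described above):

    /* Sketch: clp enable/disable edges of the zPCI function state machine. */
    static int zpci_clp_set_enabled(ZpciState *state, bool enable)
    {
        if (enable) {
            if (*state != ZPCI_FS_DISABLED) {
                return -1;   /* not configured, or already enabled */
            }
            *state = ZPCI_FS_ENABLED;
        } else {
            if (*state != ZPCI_FS_ENABLED) {
                return -1;   /* blocked/error must first recover via mpcifc */
            }
            *state = ZPCI_FS_DISABLED;
        }
        return 0;
    }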
+
+typedef struct SeiContainer {
+ QTAILQ_ENTRY(SeiContainer) link;
+ uint32_t fid;
+ uint32_t fh;
+ uint8_t cc;
+ uint16_t pec;
+ uint64_t faddr;
+ uint32_t e;
+} SeiContainer;
+
+typedef struct PciCcdfErr {
+ uint32_t reserved1;
+ uint32_t fh;
+ uint32_t fid;
+ uint32_t e;
+ uint64_t faddr;
+ uint32_t reserved3;
+ uint16_t reserved4;
+ uint16_t pec;
+} QEMU_PACKED PciCcdfErr;
+
+typedef struct PciCcdfAvail {
+ uint32_t reserved1;
+ uint32_t fh;
+ uint32_t fid;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint16_t reserved6;
+ uint16_t pec;
+} QEMU_PACKED PciCcdfAvail;
+
+typedef struct ChscSeiNt2Res {
+ uint16_t length;
+ uint16_t code;
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t nt;
+ uint8_t flags;
+ uint8_t reserved3;
+ uint8_t reserved4;
+ uint8_t cc;
+ uint32_t reserved5[13];
+ uint8_t ccdf[4016];
+} QEMU_PACKED ChscSeiNt2Res;
+
+typedef struct S390MsixInfo {
+ uint8_t table_bar;
+ uint8_t pba_bar;
+ uint16_t entries;
+ uint32_t table_offset;
+ uint32_t pba_offset;
+} S390MsixInfo;
+
+typedef struct S390IOTLBEntry {
+ uint64_t iova;
+ uint64_t translated_addr;
+ uint64_t len;
+ uint64_t perm;
+} S390IOTLBEntry;
+
+typedef struct S390PCIDMACount {
+ int id;
+ int users;
+ uint32_t avail;
+ QTAILQ_ENTRY(S390PCIDMACount) link;
+} S390PCIDMACount;
+
+struct S390PCIIOMMU {
+ Object parent_obj;
+ S390PCIBusDevice *pbdev;
+ AddressSpace as;
+ MemoryRegion mr;
+ IOMMUMemoryRegion iommu_mr;
+ bool enabled;
+ uint64_t g_iota;
+ uint64_t pba;
+ uint64_t pal;
+ uint64_t max_dma_limit;
+ GHashTable *iotlb;
+ S390PCIDMACount *dma_limit;
+};
+
+typedef struct S390PCIIOMMUTable {
+ uint64_t key;
+ S390PCIIOMMU *iommu[PCI_SLOT_MAX];
+} S390PCIIOMMUTable;
+
+/* Function Measurement Block */
+#define DEFAULT_MUI 4000
+#define UPDATE_U_BIT 0x1ULL
+#define FMBK_MASK 0xfULL
+
+typedef struct ZpciFmbFmt0 {
+ uint64_t dma_rbytes;
+ uint64_t dma_wbytes;
+} ZpciFmbFmt0;
+
+#define ZPCI_FMB_CNT_LD 0
+#define ZPCI_FMB_CNT_ST 1
+#define ZPCI_FMB_CNT_STB 2
+#define ZPCI_FMB_CNT_RPCIT 3
+#define ZPCI_FMB_CNT_MAX 4
+
+#define ZPCI_FMB_FORMAT 0
+
+typedef struct ZpciFmb {
+ uint32_t format;
+ uint32_t sample;
+ uint64_t last_update;
+ uint64_t counter[ZPCI_FMB_CNT_MAX];
+ ZpciFmbFmt0 fmt0;
+} ZpciFmb;
+QEMU_BUILD_BUG_MSG(offsetof(ZpciFmb, fmt0) != 48, "padding in ZpciFmb");
+
+#define ZPCI_DEFAULT_FN_GRP 0xFF
+#define ZPCI_SIM_GRP_START 0xF0
+typedef struct S390PCIGroup {
+ ClpRspQueryPciGrp zpci_group;
+ int id;
+ int host_id;
+ QTAILQ_ENTRY(S390PCIGroup) link;
+} S390PCIGroup;
+S390PCIGroup *s390_group_create(int id, int host_id);
+S390PCIGroup *s390_group_find(int id);
+S390PCIGroup *s390_group_find_host_sim(int host_id);
+
+struct S390PCIBusDevice {
+ DeviceState qdev;
+ PCIDevice *pdev;
+ ZpciState state;
+ char *target;
+ uint16_t uid;
+ uint32_t idx;
+ uint32_t fh;
+ uint32_t fid;
+ bool fid_defined;
+ uint64_t fmb_addr;
+ ZpciFmb fmb;
+ QEMUTimer *fmb_timer;
+ uint8_t isc;
+ uint16_t noi;
+ uint16_t maxstbl;
+ uint8_t sum;
+ uint8_t pft;
+ S390PCIGroup *pci_group;
+ ClpRspQueryPci zpci_fn;
+ S390MsixInfo msix;
+ AdapterRoutes routes;
+ S390PCIIOMMU *iommu;
+ MemoryRegion msix_notify_mr;
+ IndAddr *summary_ind;
+ IndAddr *indicator;
+ Notifier shutdown_notifier;
+ bool pci_unplug_request_processed;
+ bool unplug_requested;
+ bool interp;
+ bool forwarding_assist;
+ bool aif;
+ QTAILQ_ENTRY(S390PCIBusDevice) link;
+};
+
+struct S390PCIBus {
+ BusState qbus;
+};
+
+struct S390pciState {
+ PCIHostState parent_obj;
+ uint32_t next_idx;
+ int bus_no;
+ S390PCIBus *bus;
+ GHashTable *iommu_table;
+ GHashTable *zpci_table;
+ QTAILQ_HEAD(, SeiContainer) pending_sei;
+ QTAILQ_HEAD(, S390PCIBusDevice) zpci_devs;
+ QTAILQ_HEAD(, S390PCIDMACount) zpci_dma_limit;
+ QTAILQ_HEAD(, S390PCIGroup) zpci_groups;
+ uint8_t next_sim_grp;
+};
+
+S390pciState *s390_get_phb(void);
+int pci_chsc_sei_nt2_get_event(void *res);
+int pci_chsc_sei_nt2_have_event(void);
+void s390_pci_sclp_configure(SCCB *sccb);
+void s390_pci_sclp_deconfigure(SCCB *sccb);
+void s390_pci_iommu_enable(S390PCIIOMMU *iommu);
+void s390_pci_iommu_disable(S390PCIIOMMU *iommu);
+void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
+ uint64_t faddr, uint32_t e);
+uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
+ S390IOTLBEntry *entry);
+S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx);
+S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh);
+S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid);
+S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
+ const char *target);
+S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
+ S390PCIBusDevice *pbdev);
+void s390_pci_ism_reset(void);
+
+#endif
diff --git a/include/hw/s390x/s390-pci-clp.h b/include/hw/s390x/s390-pci-clp.h
new file mode 100644
index 0000000000..03b7f9ba5f
--- /dev/null
+++ b/include/hw/s390x/s390-pci-clp.h
@@ -0,0 +1,216 @@
+/*
+ * s390 CLP instruction definitions
+ *
+ * Copyright 2019 IBM Corp.
+ * Author(s): Pierre Morel <pmorel@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_CLP_H
+#define HW_S390_PCI_CLP_H
+
+/* CLP common request & response block size */
+#define CLP_BLK_SIZE 4096
+#define PCI_BAR_COUNT 6
+#define PCI_MAX_FUNCTIONS 4096
+
+typedef struct ClpReqHdr {
+ uint16_t len;
+ uint16_t cmd;
+} QEMU_PACKED ClpReqHdr;
+
+typedef struct ClpRspHdr {
+ uint16_t len;
+ uint16_t rsp;
+} QEMU_PACKED ClpRspHdr;
+
+/* CLP Response Codes */
+#define CLP_RC_OK 0x0010 /* Command request completed successfully */
+#define CLP_RC_CMD 0x0020 /* Command code not recognized */
+#define CLP_RC_PERM 0x0030 /* Command not authorized */
+#define CLP_RC_FMT 0x0040 /* Invalid command request format */
+#define CLP_RC_LEN 0x0050 /* Invalid command request length */
+#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
+#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
+#define CLP_RC_NODATA 0x0080 /* No data available */
+#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
+
+/*
+ * Call Logical Processor - Command Codes
+ */
+#define CLP_LIST_PCI 0x0002
+#define CLP_QUERY_PCI_FN 0x0003
+#define CLP_QUERY_PCI_FNGRP 0x0004
+#define CLP_SET_PCI_FN 0x0005
+
+/* PCI function handle list entry */
+typedef struct ClpFhListEntry {
+ uint16_t device_id;
+ uint16_t vendor_id;
+#define CLP_FHLIST_MASK_CONFIG 0x80000000
+ uint32_t config;
+ uint32_t fid;
+ uint32_t fh;
+} QEMU_PACKED ClpFhListEntry;
+
+#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
+#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
+#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
+#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
+#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
+#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
+#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
+#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
+#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
+#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
+
+/* request or response block header length */
+#define LIST_PCI_HDR_LEN 32
+
+/* Number of function handles fitting in response block */
+#define CLP_FH_LIST_NR_ENTRIES \
+ ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
+ / sizeof(ClpFhListEntry))
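
The entry count above is sized so that one List PCI request/response pair fills a single CLP block. A standalone arithmetic check (the literal sizes mirror the packed structs in this header; nothing here is a QEMU interface):

    /* Sketch: verify the CLP_FH_LIST_NR_ENTRIES arithmetic by hand.
     * Sizes assume the QEMU_PACKED layouts in this header (no padding). */
    #include <assert.h>

    int main(void)
    {
        const unsigned blk   = 4096;               /* CLP_BLK_SIZE */
        const unsigned hdr   = 32;                 /* LIST_PCI_HDR_LEN */
        const unsigned entry = 2 + 2 + 4 + 4 + 4;  /* packed ClpFhListEntry */
        const unsigned nr    = (blk - 2 * hdr) / entry;

        assert(nr == 252);
        /* request header + response header + entries fill one block exactly */
        assert(2 * hdr + nr * entry == blk);
        return 0;
    }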
+
+#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
+#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+
+#define CLP_UTIL_STR_LEN 64
+#define CLP_PFIP_NR_SEGMENTS 4
+
+#define CLP_MASK_FMT 0xf0000000
+
+/* List PCI functions request */
+typedef struct ClpReqListPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint64_t reserved2;
+} QEMU_PACKED ClpReqListPci;
+
+/* List PCI functions response */
+typedef struct ClpRspListPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint32_t mdd;
+ uint16_t max_fn;
+ uint8_t flags;
+ uint8_t entry_size;
+ ClpFhListEntry fh_list[CLP_FH_LIST_NR_ENTRIES];
+} QEMU_PACKED ClpRspListPci;
+
+/* Query PCI function request */
+typedef struct ClpReqQueryPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved2;
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqQueryPci;
+
+/* Query PCI function response */
+typedef struct ClpRspQueryPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint16_t vfn; /* virtual fn number */
+#define CLP_RSP_QPCI_MASK_UTIL 0x01
+ uint8_t flags;
+ uint8_t pfgid;
+ uint32_t fid; /* pci function id */
+ uint8_t bar_size[PCI_BAR_COUNT];
+ uint16_t pchid;
+ uint32_t bar[PCI_BAR_COUNT];
+ uint8_t pfip[CLP_PFIP_NR_SEGMENTS];
+ uint16_t reserved2;
+ uint8_t fmbl;
+ uint8_t pft;
+ uint64_t sdma; /* start dma as */
+ uint64_t edma; /* end dma as */
+ uint32_t reserved3[11];
+ uint32_t uid;
+ uint8_t util_str[CLP_UTIL_STR_LEN]; /* utility string */
+} QEMU_PACKED ClpRspQueryPci;
+
+/* Query PCI function group request */
+typedef struct ClpReqQueryPciGrp {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint8_t reserved2[3];
+ uint8_t g;
+ uint32_t reserved3;
+ uint64_t reserved4;
+} QEMU_PACKED ClpReqQueryPciGrp;
+
+/* Query PCI function group response */
+typedef struct ClpRspQueryPciGrp {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+#define CLP_RSP_QPCIG_MASK_NOI 0xfff
+ uint16_t i;
+ uint8_t version;
+#define CLP_RSP_QPCIG_MASK_FRAME 0x2
+#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
+ uint8_t fr;
+ uint16_t maxstbl;
+ uint16_t mui;
+ uint8_t dtsm;
+ uint8_t reserved3[7];
+ uint64_t dasm; /* dma address space mask */
+ uint64_t msia; /* MSI address */
+ uint64_t reserved4;
+ uint64_t reserved5;
+} QEMU_PACKED ClpRspQueryPciGrp;
+
+/* Set PCI function request */
+typedef struct ClpReqSetPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint16_t reserved2;
+ uint8_t oc; /* operation controls */
+ uint8_t ndas; /* number of dma spaces */
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqSetPci;
+
+/* Set PCI function response */
+typedef struct ClpRspSetPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved3;
+ uint64_t reserved4;
+} QEMU_PACKED ClpRspSetPci;
+
+typedef struct ClpReqRspListPci {
+ ClpReqListPci request;
+ ClpRspListPci response;
+} QEMU_PACKED ClpReqRspListPci;
+
+typedef struct ClpReqRspSetPci {
+ ClpReqSetPci request;
+ ClpRspSetPci response;
+} QEMU_PACKED ClpReqRspSetPci;
+
+typedef struct ClpReqRspQueryPci {
+ ClpReqQueryPci request;
+ ClpRspQueryPci response;
+} QEMU_PACKED ClpReqRspQueryPci;
+
+typedef struct ClpReqRspQueryPciGrp {
+ ClpReqQueryPciGrp request;
+ ClpRspQueryPciGrp response;
+} QEMU_PACKED ClpReqRspQueryPciGrp;
+
+#endif
diff --git a/include/hw/s390x/s390-pci-inst.h b/include/hw/s390x/s390-pci-inst.h
new file mode 100644
index 0000000000..a55c448aad
--- /dev/null
+++ b/include/hw/s390x/s390-pci-inst.h
@@ -0,0 +1,113 @@
+/*
+ * s390 PCI instruction definitions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_INST_H
+#define HW_S390_PCI_INST_H
+
+#include "s390-pci-bus.h"
+#include "sysemu/dma.h"
+
+/* Load/Store status codes */
+#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
+#define ZPCI_PCI_ST_FUNC_IN_ERR 8
+#define ZPCI_PCI_ST_BLOCKED 12
+#define ZPCI_PCI_ST_INSUF_RES 16
+#define ZPCI_PCI_ST_INVAL_AS 20
+#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
+#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
+#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
+#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
+#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
+
+/* Load/Store return codes */
+#define ZPCI_PCI_LS_OK 0
+#define ZPCI_PCI_LS_ERR 1
+#define ZPCI_PCI_LS_BUSY 2
+#define ZPCI_PCI_LS_INVAL_HANDLE 3
+
+/* Modify PCI status codes */
+#define ZPCI_MOD_ST_RES_NOT_AVAIL 4
+#define ZPCI_MOD_ST_INSUF_RES 16
+#define ZPCI_MOD_ST_SEQUENCE 24
+#define ZPCI_MOD_ST_DMAAS_INVAL 28
+#define ZPCI_MOD_ST_FRAME_INVAL 32
+#define ZPCI_MOD_ST_ERROR_RECOVER 40
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT 2
+#define ZPCI_MOD_FC_DEREG_INT 3
+#define ZPCI_MOD_FC_REG_IOAT 4
+#define ZPCI_MOD_FC_DEREG_IOAT 5
+#define ZPCI_MOD_FC_REREG_IOAT 6
+#define ZPCI_MOD_FC_RESET_ERROR 7
+#define ZPCI_MOD_FC_RESET_BLOCK 9
+#define ZPCI_MOD_FC_SET_MEASURE 10
+
+/* Store PCI Function Controls status codes */
+#define ZPCI_STPCIFC_ST_PERM_ERROR 8
+#define ZPCI_STPCIFC_ST_INVAL_DMAAS 28
+#define ZPCI_STPCIFC_ST_ERROR_RECOVER 40
+
+/* Refresh PCI Translations status codes */
+#define ZPCI_RPCIT_ST_INSUFF_RES 16
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* Function Information Block */
+typedef struct ZpciFib {
+ uint8_t fmt; /* format */
+ uint8_t reserved1[7];
+ uint8_t fc; /* function controls */
+ uint8_t reserved2;
+ uint16_t reserved3;
+ uint32_t reserved4;
+ uint64_t pba; /* PCI base address */
+ uint64_t pal; /* PCI address limit */
+ uint64_t iota; /* I/O Translation Anchor */
+#define FIB_DATA_ISC(x) (((x) >> 28) & 0x7)
+#define FIB_DATA_NOI(x) (((x) >> 16) & 0xfff)
+#define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
+#define FIB_DATA_SUM(x) (((x) >> 7) & 0x1)
+#define FIB_DATA_AISBO(x) ((x) & 0x3f)
+ uint32_t data;
+ uint32_t reserved5;
+ uint64_t aibv; /* Adapter int bit vector address */
+ uint64_t aisb; /* Adapter int summary bit address */
+ uint64_t fmb_addr; /* Function measurement address and key */
+ uint32_t reserved6;
+ uint32_t gd;
+} QEMU_PACKED ZpciFib;
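
The FIB_DATA_* macros unpack the interrupt-registration parameters packed into fib.data. A standalone sketch with made-up values (the macro definitions are copied from above so the snippet compiles on its own):

    /* Sketch: unpack the interrupt parameters carried in fib.data. */
    #include <stdint.h>
    #include <stdio.h>

    #define FIB_DATA_ISC(x)   (((x) >> 28) & 0x7)
    #define FIB_DATA_NOI(x)   (((x) >> 16) & 0xfff)
    #define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
    #define FIB_DATA_SUM(x)   (((x) >> 7) & 0x1)
    #define FIB_DATA_AISBO(x) ((x) & 0x3f)

    int main(void)
    {
        uint32_t data = (3u << 28) | (32u << 16) | (0u << 8) | (1u << 7) | 5u;

        printf("isc=%u noi=%u aibvo=%u sum=%u aisbo=%u\n",
               (unsigned)FIB_DATA_ISC(data), (unsigned)FIB_DATA_NOI(data),
               (unsigned)FIB_DATA_AIBVO(data), (unsigned)FIB_DATA_SUM(data),
               (unsigned)FIB_DATA_AISBO(data));
        return 0;
    }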
+
+int pci_dereg_irqs(S390PCIBusDevice *pbdev);
+void pci_dereg_ioat(S390PCIIOMMU *iommu);
+int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra);
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
+ uint8_t ar, uintptr_t ra);
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra);
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra);
+void fmb_timer_free(S390PCIBusDevice *pbdev);
+
+#define ZPCI_IO_BAR_MIN 0
+#define ZPCI_IO_BAR_MAX 5
+#define ZPCI_CONFIG_BAR 15
+
+#endif
diff --git a/include/hw/s390x/s390-pci-kvm.h b/include/hw/s390x/s390-pci-kvm.h
new file mode 100644
index 0000000000..933814a402
--- /dev/null
+++ b/include/hw/s390x/s390-pci-kvm.h
@@ -0,0 +1,38 @@
+/*
+ * s390 PCI KVM interfaces
+ *
+ * Copyright 2022 IBM Corp.
+ * Author(s): Matthew Rosato <mjrosato@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_KVM_H
+#define HW_S390_PCI_KVM_H
+
+#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/s390-pci-inst.h"
+
+#ifdef CONFIG_KVM
+bool s390_pci_kvm_interp_allowed(void);
+int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib, bool assist);
+int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev);
+#else
+static inline bool s390_pci_kvm_interp_allowed(void)
+{
+ return false;
+}
+static inline int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib,
+ bool assist)
+{
+ return -EINVAL;
+}
+static inline int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/include/hw/s390x/s390-pci-vfio.h b/include/hw/s390x/s390-pci-vfio.h
new file mode 100644
index 0000000000..ae1b126ff7
--- /dev/null
+++ b/include/hw/s390x/s390-pci-vfio.h
@@ -0,0 +1,44 @@
+/*
+ * s390 vfio-pci interfaces
+ *
+ * Copyright 2020 IBM Corp.
+ * Author(s): Matthew Rosato <mjrosato@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_VFIO_H
+#define HW_S390_PCI_VFIO_H
+
+#include "hw/s390x/s390-pci-bus.h"
+#include CONFIG_DEVICES
+
+#ifdef CONFIG_VFIO
+bool s390_pci_update_dma_avail(int fd, unsigned int *avail);
+S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s,
+ S390PCIBusDevice *pbdev);
+void s390_pci_end_dma_count(S390pciState *s, S390PCIDMACount *cnt);
+bool s390_pci_get_host_fh(S390PCIBusDevice *pbdev, uint32_t *fh);
+void s390_pci_get_clp_info(S390PCIBusDevice *pbdev);
+#else
+static inline bool s390_pci_update_dma_avail(int fd, unsigned int *avail)
+{
+ return false;
+}
+static inline S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s,
+ S390PCIBusDevice *pbdev)
+{
+ return NULL;
+}
+static inline void s390_pci_end_dma_count(S390pciState *s,
+ S390PCIDMACount *cnt) { }
+static inline bool s390_pci_get_host_fh(S390PCIBusDevice *pbdev, uint32_t *fh)
+{
+ return false;
+}
+static inline void s390_pci_get_clp_info(S390PCIBusDevice *pbdev) { }
+#endif
+
+#endif
diff --git a/include/hw/s390x/s390-virtio-ccw.h b/include/hw/s390x/s390-virtio-ccw.h
index 8aa27199c9..c1d46e78af 100644
--- a/include/hw/s390x/s390-virtio-ccw.h
+++ b/include/hw/s390x/s390-virtio-ccw.h
@@ -12,26 +12,31 @@
#define HW_S390X_S390_VIRTIO_CCW_H
#include "hw/boards.h"
+#include "qom/object.h"
#define TYPE_S390_CCW_MACHINE "s390-ccw-machine"
-#define S390_CCW_MACHINE(obj) \
- OBJECT_CHECK(S390CcwMachineState, (obj), TYPE_S390_CCW_MACHINE)
+OBJECT_DECLARE_TYPE(S390CcwMachineState, S390CcwMachineClass, S390_CCW_MACHINE)
-#define S390_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390CcwMachineClass, (klass), TYPE_S390_CCW_MACHINE)
-typedef struct S390CcwMachineState {
+struct S390CcwMachineState {
/*< private >*/
MachineState parent_obj;
/*< public >*/
bool aes_key_wrap;
bool dea_key_wrap;
+ bool pv;
uint8_t loadparm[8];
-} S390CcwMachineState;
+};
-typedef struct S390CcwMachineClass {
+#define S390_PTF_REASON_NONE (0x00 << 8)
+#define S390_PTF_REASON_DONE (0x01 << 8)
+#define S390_PTF_REASON_BUSY (0x02 << 8)
+#define S390_TOPO_FC_MASK 0xffUL
+void s390_handle_ptf(S390CPU *cpu, uint8_t r1, uintptr_t ra);
+
+struct S390CcwMachineClass {
/*< private >*/
MachineClass parent_class;
@@ -40,7 +45,8 @@ typedef struct S390CcwMachineClass {
bool cpu_model_allowed;
bool css_migration_enabled;
bool hpage_1m_allowed;
-} S390CcwMachineClass;
+ int max_threads;
+};
/* runtime-instrumentation allowed by the machine */
bool ri_allowed(void);
diff --git a/include/hw/s390x/s390_flic.h b/include/hw/s390x/s390_flic.h
index 4687ecfe83..3907a13d07 100644
--- a/include/hw/s390x/s390_flic.h
+++ b/include/hw/s390x/s390_flic.h
@@ -17,6 +17,7 @@
#include "hw/s390x/adapter.h"
#include "hw/virtio/virtio.h"
#include "qemu/queue.h"
+#include "qom/object.h"
/*
* Reserve enough gsis to accommodate all virtio devices.
@@ -38,22 +39,18 @@ extern const VMStateDescription vmstate_adapter_routes;
VMSTATE_STRUCT(_f, _s, 1, vmstate_adapter_routes, AdapterRoutes)
#define TYPE_S390_FLIC_COMMON "s390-flic"
-#define S390_FLIC_COMMON(obj) \
- OBJECT_CHECK(S390FLICState, (obj), TYPE_S390_FLIC_COMMON)
+OBJECT_DECLARE_TYPE(S390FLICState, S390FLICStateClass,
+ S390_FLIC_COMMON)
-typedef struct S390FLICState {
+struct S390FLICState {
SysBusDevice parent_obj;
/* to limit AdapterRoutes.num_routes for compat */
uint32_t adapter_routes_max_batch;
bool ais_supported;
-} S390FLICState;
+};
-#define S390_FLIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390FLICStateClass, (klass), TYPE_S390_FLIC_COMMON)
-#define S390_FLIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390FLICStateClass, (obj), TYPE_S390_FLIC_COMMON)
-typedef struct S390FLICStateClass {
+struct S390FLICStateClass {
DeviceClass parent_class;
int (*register_io_adapter)(S390FLICState *fs, uint32_t id, uint8_t isc,
@@ -72,15 +69,15 @@ typedef struct S390FLICStateClass {
uint16_t subchannel_nr, uint32_t io_int_parm,
uint32_t io_int_word);
void (*inject_crw_mchk)(S390FLICState *fs);
-} S390FLICStateClass;
+};
#define TYPE_KVM_S390_FLIC "s390-flic-kvm"
-#define KVM_S390_FLIC(obj) \
- OBJECT_CHECK(KVMS390FLICState, (obj), TYPE_KVM_S390_FLIC)
+typedef struct KVMS390FLICState KVMS390FLICState;
+DECLARE_INSTANCE_CHECKER(KVMS390FLICState, KVM_S390_FLIC,
+ TYPE_KVM_S390_FLIC)
#define TYPE_QEMU_S390_FLIC "s390-flic-qemu"
-#define QEMU_S390_FLIC(obj) \
- OBJECT_CHECK(QEMUS390FLICState, (obj), TYPE_QEMU_S390_FLIC)
+OBJECT_DECLARE_SIMPLE_TYPE(QEMUS390FLICState, QEMU_S390_FLIC)
#define SIC_IRQ_MODE_ALL 0
#define SIC_IRQ_MODE_SINGLE 1
@@ -114,14 +111,14 @@ typedef struct QEMUS390FlicIO {
QLIST_ENTRY(QEMUS390FlicIO) next;
} QEMUS390FlicIO;
-typedef struct QEMUS390FLICState {
+struct QEMUS390FLICState {
S390FLICState parent_obj;
uint32_t pending;
uint32_t service_param;
uint8_t simm;
uint8_t nimm;
QLIST_HEAD(, QEMUS390FlicIO) io[8];
-} QEMUS390FLICState;
+};
uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic);
QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic,
@@ -137,6 +134,9 @@ void s390_flic_init(void);
S390FLICState *s390_get_flic(void);
QEMUS390FLICState *s390_get_qemu_flic(S390FLICState *fs);
S390FLICStateClass *s390_get_flic_class(S390FLICState *fs);
+void s390_crw_mchk(void);
+void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
+ uint32_t io_int_parm, uint32_t io_int_word);
bool ais_needed(void *opaque);
#endif /* HW_S390_FLIC_H */
diff --git a/include/hw/s390x/sclp.h b/include/hw/s390x/sclp.h
index f9db243484..b405a387b6 100644
--- a/include/hw/s390x/sclp.h
+++ b/include/hw/s390x/sclp.h
@@ -15,8 +15,8 @@
#define HW_S390_SCLP_H
#include "hw/sysbus.h"
-#include "hw/qdev.h"
#include "target/s390x/cpu-qom.h"
+#include "qom/object.h"
#define SCLP_CMD_CODE_MASK 0xffff00ff
@@ -38,10 +38,8 @@
#define MAX_STORAGE_INCREMENTS 1020
/* CPU hotplug SCLP codes */
-#define SCLP_HAS_CPU_INFO 0x0C00000000000000ULL
+#define SCLP_HAS_CPU_INFO 0x0800000000000000ULL
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
-#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
-#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
/* SCLP PCI codes */
#define SCLP_HAS_IOA_RECONFIG 0x0000000040000000ULL
@@ -87,7 +85,7 @@
* - we work on a private copy of the SCCB, since there are several length
* fields, that would cause a security nightmare if we allow the guest to
* alter the structure while we parse it. We cannot use ldl_p and friends
- * either without doing pointer arithmetics
+ * either without doing pointer arithmetic
* So we have to double check that all users of sclp data structures use the
* right endianness wrappers.
*/
@@ -111,11 +109,14 @@ typedef struct CPUEntry {
uint8_t reserved1;
} QEMU_PACKED CPUEntry;
+#define SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET 128
+#define SCLP_READ_SCP_INFO_MNEST 4
typedef struct ReadInfo {
SCCBHeader h;
uint16_t rnmax;
uint8_t rnsize;
- uint8_t _reserved1[16 - 11]; /* 11-15 */
+ uint8_t _reserved1[15 - 11]; /* 11-14 */
+ uint8_t stsi_parm; /* 15-15 */
uint16_t entries_cpu; /* 16-17 */
uint16_t offset_cpu; /* 18-19 */
uint8_t _reserved2[24 - 20]; /* 20-23 */
@@ -133,7 +134,15 @@ typedef struct ReadInfo {
uint16_t highest_cpu;
uint8_t _reserved5[124 - 122]; /* 122-123 */
uint32_t hmfai;
- struct CPUEntry entries[0];
+ uint8_t _reserved7[134 - 128]; /* 128-133 */
+ uint8_t fac134;
+ uint8_t _reserved8[144 - 135]; /* 135-143 */
+ struct CPUEntry entries[];
+ /*
+ * When the Extended-Length SCCB (ELS) feature is enabled, the
+ * entries field starts at the offset given by the offset_cpu
+ * field; otherwise it starts at offset 128.
+ */
} QEMU_PACKED ReadInfo;
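
Following the comment above, a consumer picks between the fixed offset and offset_cpu depending on whether the Extended-Length SCCB feature is in use. A hedged sketch (the els_enabled parameter and helper name are illustrative, not part of the SCLP code; be16_to_cpu/bool come from QEMU's bswap/osdep headers, since SCCB fields are big-endian as noted earlier in this header):

    /* Sketch: locate the CPU entries in a Read SCP Info SCCB. */
    static unsigned read_info_cpu_offset(const ReadInfo *ri, bool els_enabled)
    {
        if (els_enabled) {
            return be16_to_cpu(ri->offset_cpu);     /* ELS: honor offset_cpu */
        }
        return SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET; /* otherwise fixed at 128 */
    }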
typedef struct ReadCpuInfo {
@@ -143,7 +152,7 @@ typedef struct ReadCpuInfo {
uint16_t nr_standby; /* 12-13 */
uint16_t offset_standby; /* 14-15 */
uint8_t reserved0[24-16]; /* 16-23 */
- struct CPUEntry entries[0];
+ struct CPUEntry entries[];
} QEMU_PACKED ReadCpuInfo;
typedef struct ReadStorageElementInfo {
@@ -152,7 +161,7 @@ typedef struct ReadStorageElementInfo {
uint16_t assigned;
uint16_t standby;
uint8_t _reserved0[16 - 14]; /* 14-15 */
- uint32_t entries[0];
+ uint32_t entries[];
} QEMU_PACKED ReadStorageElementInfo;
typedef struct AttachStorageElement {
@@ -160,7 +169,7 @@ typedef struct AttachStorageElement {
uint8_t _reserved0[10 - 8]; /* 8-9 */
uint16_t assigned;
uint8_t _reserved1[16 - 12]; /* 12-15 */
- uint32_t entries[0];
+ uint32_t entries[];
} QEMU_PACKED AttachStorageElement;
typedef struct AssignStorage {
@@ -178,26 +187,25 @@ typedef struct IoaCfgSccb {
typedef struct SCCB {
SCCBHeader h;
- char data[SCCB_DATA_LEN];
+ char data[];
} QEMU_PACKED SCCB;
#define TYPE_SCLP "sclp"
-#define SCLP(obj) OBJECT_CHECK(SCLPDevice, (obj), TYPE_SCLP)
-#define SCLP_CLASS(oc) OBJECT_CLASS_CHECK(SCLPDeviceClass, (oc), TYPE_SCLP)
-#define SCLP_GET_CLASS(obj) OBJECT_GET_CLASS(SCLPDeviceClass, (obj), TYPE_SCLP)
+OBJECT_DECLARE_TYPE(SCLPDevice, SCLPDeviceClass,
+ SCLP)
-typedef struct SCLPEventFacility SCLPEventFacility;
+struct SCLPEventFacility;
-typedef struct SCLPDevice {
+struct SCLPDevice {
/* private */
DeviceState parent_obj;
- SCLPEventFacility *event_facility;
+ struct SCLPEventFacility *event_facility;
int increment_size;
/* public */
-} SCLPDevice;
+};
-typedef struct SCLPDeviceClass {
+struct SCLPDeviceClass {
/* private */
DeviceClass parent_class;
void (*read_SCP_info)(SCLPDevice *sclp, SCCB *sccb);
@@ -206,7 +214,7 @@ typedef struct SCLPDeviceClass {
/* public */
void (*execute)(SCLPDevice *sclp, SCCB *sccb, uint32_t code);
void (*service_interrupt)(SCLPDevice *sclp, uint32_t sccb);
-} SCLPDeviceClass;
+};
static inline int sccb_data_len(SCCB *sccb)
{
@@ -217,6 +225,7 @@ static inline int sccb_data_len(SCCB *sccb)
void s390_sclp_init(void);
void sclp_service_interrupt(uint32_t sccb);
void raise_irq_cpu_hotplug(void);
-int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
+int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code);
+int sclp_service_call_protected(S390CPU *cpu, uint64_t sccb, uint32_t code);
#endif
diff --git a/include/hw/s390x/storage-attributes.h b/include/hw/s390x/storage-attributes.h
index d6403a0a7e..8921a04d51 100644
--- a/include/hw/s390x/storage-attributes.h
+++ b/include/hw/s390x/storage-attributes.h
@@ -12,28 +12,24 @@
#ifndef S390_STORAGE_ATTRIBUTES_H
#define S390_STORAGE_ATTRIBUTES_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
#include "monitor/monitor.h"
+#include "qom/object.h"
#define TYPE_S390_STATTRIB "s390-storage_attributes"
#define TYPE_QEMU_S390_STATTRIB "s390-storage_attributes-qemu"
#define TYPE_KVM_S390_STATTRIB "s390-storage_attributes-kvm"
-#define S390_STATTRIB(obj) \
- OBJECT_CHECK(S390StAttribState, (obj), TYPE_S390_STATTRIB)
+OBJECT_DECLARE_TYPE(S390StAttribState, S390StAttribClass, S390_STATTRIB)
-typedef struct S390StAttribState {
+struct S390StAttribState {
DeviceState parent_obj;
uint64_t migration_cur_gfn;
bool migration_enabled;
-} S390StAttribState;
+};
-#define S390_STATTRIB_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390StAttribClass, (klass), TYPE_S390_STATTRIB)
-#define S390_STATTRIB_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390StAttribClass, (obj), TYPE_S390_STATTRIB)
-typedef struct S390StAttribClass {
+struct S390StAttribClass {
DeviceClass parent_class;
/* Return value: < 0 on error, or new count */
int (*get_stattr)(S390StAttribState *sa, uint64_t *start_gfn,
@@ -43,26 +39,28 @@ typedef struct S390StAttribClass {
int (*set_stattr)(S390StAttribState *sa, uint64_t start_gfn,
uint32_t count, uint8_t *values);
void (*synchronize)(S390StAttribState *sa);
- int (*set_migrationmode)(S390StAttribState *sa, bool value);
+ int (*set_migrationmode)(S390StAttribState *sa, bool value, Error **errp);
int (*get_active)(S390StAttribState *sa);
long long (*get_dirtycount)(S390StAttribState *sa);
-} S390StAttribClass;
+};
-#define QEMU_S390_STATTRIB(obj) \
- OBJECT_CHECK(QEMUS390StAttribState, (obj), TYPE_QEMU_S390_STATTRIB)
+typedef struct QEMUS390StAttribState QEMUS390StAttribState;
+DECLARE_INSTANCE_CHECKER(QEMUS390StAttribState, QEMU_S390_STATTRIB,
+ TYPE_QEMU_S390_STATTRIB)
-typedef struct QEMUS390StAttribState {
+struct QEMUS390StAttribState {
S390StAttribState parent_obj;
-} QEMUS390StAttribState;
+};
-#define KVM_S390_STATTRIB(obj) \
- OBJECT_CHECK(KVMS390StAttribState, (obj), TYPE_KVM_S390_STATTRIB)
+typedef struct KVMS390StAttribState KVMS390StAttribState;
+DECLARE_INSTANCE_CHECKER(KVMS390StAttribState, KVM_S390_STATTRIB,
+ TYPE_KVM_S390_STATTRIB)
-typedef struct KVMS390StAttribState {
+struct KVMS390StAttribState {
S390StAttribState parent_obj;
uint64_t still_dirty;
uint8_t *incoming_buffer;
-} KVMS390StAttribState;
+};
void s390_stattrib_init(void);
diff --git a/include/hw/s390x/storage-keys.h b/include/hw/s390x/storage-keys.h
index 62df48ec06..aa2ec2aae5 100644
--- a/include/hw/s390x/storage-keys.h
+++ b/include/hw/s390x/storage-keys.h
@@ -12,43 +12,103 @@
#ifndef S390_STORAGE_KEYS_H
#define S390_STORAGE_KEYS_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
#include "monitor/monitor.h"
+#include "qom/object.h"
#define TYPE_S390_SKEYS "s390-skeys"
-#define S390_SKEYS(obj) \
- OBJECT_CHECK(S390SKeysState, (obj), TYPE_S390_SKEYS)
+OBJECT_DECLARE_TYPE(S390SKeysState, S390SKeysClass, S390_SKEYS)
-typedef struct S390SKeysState {
+struct S390SKeysState {
DeviceState parent_obj;
bool migration_enabled;
-} S390SKeysState;
+};
-#define S390_SKEYS_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390SKeysClass, (klass), TYPE_S390_SKEYS)
-#define S390_SKEYS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390SKeysClass, (obj), TYPE_S390_SKEYS)
-typedef struct S390SKeysClass {
+struct S390SKeysClass {
DeviceClass parent_class;
- int (*skeys_enabled)(S390SKeysState *ks);
+
+ /**
+ * @skeys_are_enabled:
+ *
+ * Check whether storage keys are enabled. If not enabled, they were not
+ * enabled lazily either by the guest via a storage key instruction or
+ * by the host during migration.
+ *
+ * If disabled, everything not explicitly triggered by the guest,
+ * such as outgoing migration or dirty/change tracking, should not touch
+ * storage keys and should not lazily enable them.
+ *
+ * @ks: the #S390SKeysState
+ *
+ * Returns false if not enabled and true if enabled.
+ */
+ bool (*skeys_are_enabled)(S390SKeysState *ks);
+
+ /**
+ * @enable_skeys:
+ *
+ * Lazily enable storage keys. If this function is not implemented,
+ * setting a storage key will lazily enable storage keys implicitly
+ * instead. TCG guests have to make sure to flush the TLB of all CPUs
+ * if storage keys were not enabled before this call.
+ *
+ * @ks: the #S390SKeysState
+ *
+ * Returns false if not enabled before this call, and true if already
+ * enabled.
+ */
+ bool (*enable_skeys)(S390SKeysState *ks);
+
+ /**
+ * @get_skeys:
+ *
+ * Get storage keys for the given GFN range. This call will fail if
+ * storage keys have not been lazily enabled yet.
+ *
+ * Callers have to validate that a GFN is valid before this call.
+ *
+ * @ks: the #S390SKeysState
+ * @start_gfn: the start GFN to get storage keys for
+ * @count: the number of storage keys to get
+ * @keys: the byte array where storage keys will be stored to
+ *
+ * Returns 0 on success, returns an error if getting a storage key failed.
+ */
int (*get_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count,
uint8_t *keys);
+ /**
+ * @set_skeys:
+ *
+ * Set storage keys for the given GFN range. This call will fail if
+ * storage keys have not been lazily enabled yet and implicit
+ * enablement is not supported.
+ *
+ * Callers have to validate that a GFN is valid before this call.
+ *
+ * @ks: the #S390SKeysState
+ * @start_gfn: the start GFN to set storage keys for
+ * @count: the number of storage keys to set
+ * @keys: the byte array where storage keys will be read from
+ *
+ * Returns 0 on success, returns an error if setting a storage key failed.
+ */
int (*set_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count,
uint8_t *keys);
-} S390SKeysClass;
+};
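
A hedged usage sketch of the hooks documented above, going through the class pointer as QOM callers normally do (the helper itself is illustrative; GFN validation by the caller is assumed, and S390_SKEYS_GET_CLASS is generated by the OBJECT_DECLARE_TYPE above):

    /* Sketch: read one storage key through the class hooks. */
    static int read_one_skey(S390SKeysState *ss, uint64_t gfn, uint8_t *key)
    {
        S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);

        if (!skeyclass->skeys_are_enabled(ss)) {
            *key = 0;                 /* keys were never touched by the guest */
            return 0;
        }
        return skeyclass->get_skeys(ss, gfn, 1, key);
    }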
#define TYPE_KVM_S390_SKEYS "s390-skeys-kvm"
#define TYPE_QEMU_S390_SKEYS "s390-skeys-qemu"
-#define QEMU_S390_SKEYS(obj) \
- OBJECT_CHECK(QEMUS390SKeysState, (obj), TYPE_QEMU_S390_SKEYS)
+typedef struct QEMUS390SKeysState QEMUS390SKeysState;
+DECLARE_INSTANCE_CHECKER(QEMUS390SKeysState, QEMU_S390_SKEYS,
+ TYPE_QEMU_S390_SKEYS)
-typedef struct QEMUS390SKeysState {
+struct QEMUS390SKeysState {
S390SKeysState parent_obj;
uint8_t *keydata;
uint32_t key_count;
-} QEMUS390SKeysState;
+};
void s390_skeys_init(void);
diff --git a/include/hw/s390x/tod.h b/include/hw/s390x/tod.h
index 47ef9de869..0935e85089 100644
--- a/include/hw/s390x/tod.h
+++ b/include/hw/s390x/tod.h
@@ -11,7 +11,9 @@
#ifndef HW_S390_TOD_H
#define HW_S390_TOD_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "tcg/s390-tod.h"
+#include "qom/object.h"
typedef struct S390TOD {
uint8_t high;
@@ -19,15 +21,11 @@ typedef struct S390TOD {
} S390TOD;
#define TYPE_S390_TOD "s390-tod"
-#define S390_TOD(obj) OBJECT_CHECK(S390TODState, (obj), TYPE_S390_TOD)
-#define S390_TOD_CLASS(oc) OBJECT_CLASS_CHECK(S390TODClass, (oc), \
- TYPE_S390_TOD)
-#define S390_TOD_GET_CLASS(obj) OBJECT_GET_CLASS(S390TODClass, (obj), \
- TYPE_S390_TOD)
+OBJECT_DECLARE_TYPE(S390TODState, S390TODClass, S390_TOD)
#define TYPE_KVM_S390_TOD TYPE_S390_TOD "-kvm"
#define TYPE_QEMU_S390_TOD TYPE_S390_TOD "-qemu"
-typedef struct S390TODState {
+struct S390TODState {
/* private */
DeviceState parent_obj;
@@ -38,9 +36,9 @@ typedef struct S390TODState {
S390TOD base;
/* Used by KVM to remember if the TOD is stopped and base is valid. */
bool stopped;
-} S390TODState;
+};
-typedef struct S390TODClass {
+struct S390TODClass {
/* private */
DeviceClass parent_class;
void (*parent_realize)(DeviceState *dev, Error **errp);
@@ -48,22 +46,7 @@ typedef struct S390TODClass {
/* public */
void (*get)(const S390TODState *td, S390TOD *tod, Error **errp);
void (*set)(S390TODState *td, const S390TOD *tod, Error **errp);
-} S390TODClass;
-
-/* The value of the TOD clock for 1.1.1970. */
-#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
-
-/* Converts ns to s390's clock format */
-static inline uint64_t time2tod(uint64_t ns)
-{
- return (ns << 9) / 125 + (((ns & 0xff80000000000000ull) / 125) << 9);
-}
-
-/* Converts s390's clock format to ns */
-static inline uint64_t tod2time(uint64_t t)
-{
- return ((t >> 9) * 125) + (((t & 0x1ff) * 125) >> 9);
-}
+};
void s390_init_tod(void);
S390TODState *s390_get_todstate(void);
diff --git a/include/hw/s390x/vfio-ccw.h b/include/hw/s390x/vfio-ccw.h
new file mode 100644
index 0000000000..4209d27657
--- /dev/null
+++ b/include/hw/s390x/vfio-ccw.h
@@ -0,0 +1,25 @@
+/*
+ * vfio based subchannel assignment support
+ *
+ * Copyright 2017, 2019 IBM Corp.
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Pierre Morel <pmorel@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_VFIO_CCW_H
+#define HW_VFIO_CCW_H
+
+#include "hw/vfio/vfio-common.h"
+#include "hw/s390x/s390-ccw.h"
+#include "hw/s390x/ccw-device.h"
+#include "qom/object.h"
+
+#define TYPE_VFIO_CCW "vfio-ccw"
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOCCWDevice, VFIO_CCW)
+
+#endif
diff --git a/include/hw/scsi/emulation.h b/include/hw/scsi/emulation.h
index 09fba1ff39..9521704326 100644
--- a/include/hw/scsi/emulation.h
+++ b/include/hw/scsi/emulation.h
@@ -1,5 +1,5 @@
#ifndef HW_SCSI_EMULATION_H
-#define HW_SCSI_EMULATION_H 1
+#define HW_SCSI_EMULATION_H
typedef struct SCSIBlockLimits {
bool wsnz;
diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h
index adab63d1c9..533d856aa3 100644
--- a/include/hw/scsi/esp.h
+++ b/include/hw/scsi/esp.h
@@ -3,42 +3,45 @@
#include "hw/scsi/scsi.h"
#include "hw/sysbus.h"
+#include "qemu/fifo8.h"
+#include "qom/object.h"
/* esp.c */
#define ESP_MAX_DEVS 7
typedef void (*ESPDMAMemoryReadWriteFunc)(void *opaque, uint8_t *buf, int len);
#define ESP_REGS 16
-#define TI_BUFSZ 16
-#define ESP_CMDBUF_SZ 32
+#define ESP_FIFO_SZ 16
+#define ESP_CMDFIFO_SZ 32
typedef struct ESPState ESPState;
+#define TYPE_ESP "esp"
+OBJECT_DECLARE_SIMPLE_TYPE(ESPState, ESP)
+
struct ESPState {
+ DeviceState parent_obj;
+
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
qemu_irq irq;
+ qemu_irq drq_irq;
+ bool drq_state;
uint8_t chip_id;
bool tchi_written;
int32_t ti_size;
- uint32_t ti_rptr, ti_wptr;
uint32_t status;
- uint32_t deferred_status;
- bool deferred_complete;
uint32_t dma;
- uint8_t ti_buf[TI_BUFSZ];
+ Fifo8 fifo;
SCSIBus bus;
SCSIDevice *current_dev;
SCSIRequest *current_req;
- uint8_t cmdbuf[ESP_CMDBUF_SZ];
- uint32_t cmdlen;
+ Fifo8 cmdfifo;
+ uint8_t cmdfifo_cdb_offset;
+ uint8_t lun;
uint32_t do_cmd;
- /* The amount of data left in the current DMA transfer. */
- uint32_t dma_left;
- /* The size of the current DMA transfer. Zero if no transfer is in
- progress. */
- uint32_t dma_counter;
+ bool data_ready;
int dma_enabled;
uint32_t async_len;
@@ -48,20 +51,34 @@ struct ESPState {
ESPDMAMemoryReadWriteFunc dma_memory_write;
void *dma_opaque;
void (*dma_cb)(ESPState *s);
+
+ uint8_t mig_version_id;
+
+ /* Legacy fields for vmstate_esp version < 5 */
+ uint32_t mig_dma_left;
+ uint32_t mig_deferred_status;
+ bool mig_deferred_complete;
+ uint32_t mig_ti_rptr, mig_ti_wptr;
+ uint8_t mig_ti_buf[ESP_FIFO_SZ];
+ uint8_t mig_cmdbuf[ESP_CMDFIFO_SZ];
+ uint32_t mig_cmdlen;
+
+ uint8_t mig_ti_cmd;
};
-#define TYPE_ESP "esp"
-#define ESP_STATE(obj) OBJECT_CHECK(SysBusESPState, (obj), TYPE_ESP)
+#define TYPE_SYSBUS_ESP "sysbus-esp"
+OBJECT_DECLARE_SIMPLE_TYPE(SysBusESPState, SYSBUS_ESP)
-typedef struct {
+struct SysBusESPState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
+ MemoryRegion pdma;
uint32_t it_shift;
ESPState esp;
-} SysBusESPState;
+};
#define ESP_TCLO 0x0
#define ESP_TCMID 0x1
@@ -126,6 +143,7 @@ typedef struct {
#define INTR_RST 0x80
#define SEQ_0 0x0
+#define SEQ_MO 0x1
#define SEQ_CD 0x4
#define CFG1_RESREPT 0x40
@@ -135,11 +153,12 @@ typedef struct {
void esp_dma_enable(ESPState *s, int irq, int level);
void esp_request_cancelled(SCSIRequest *req);
-void esp_command_complete(SCSIRequest *req, uint32_t status, size_t resid);
+void esp_command_complete(SCSIRequest *req, size_t resid);
void esp_transfer_data(SCSIRequest *req, uint32_t len);
void esp_hard_reset(ESPState *s);
uint64_t esp_reg_read(ESPState *s, uint32_t saddr);
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val);
extern const VMStateDescription vmstate_esp;
+int esp_pre_save(void *opaque);
#endif
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index acef25faa4..c3d5e17e38 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -1,13 +1,14 @@
#ifndef QEMU_HW_SCSI_H
#define QEMU_HW_SCSI_H
-#include "hw/qdev.h"
+#include "block/aio.h"
#include "hw/block/block.h"
-#include "sysemu/sysemu.h"
+#include "hw/qdev-core.h"
#include "scsi/utils.h"
#include "qemu/notify.h"
+#include "qom/object.h"
-#define MAX_SCSI_DEVS 255
+#define MAX_SCSI_DEVS 255
typedef struct SCSIBus SCSIBus;
typedef struct SCSIBusInfo SCSIBusInfo;
@@ -17,6 +18,7 @@ typedef struct SCSIReqOps SCSIReqOps;
#define SCSI_SENSE_BUF_SIZE_OLD 96
#define SCSI_SENSE_BUF_SIZE 252
+#define DEFAULT_IO_TIMEOUT 30
struct SCSIRequest {
SCSIBus *bus;
@@ -25,9 +27,10 @@ struct SCSIRequest {
uint32_t refcount;
uint32_t tag;
uint32_t lun;
- uint32_t status;
+ int16_t status;
+ int16_t host_status;
void *hba_private;
- size_t resid;
+ uint64_t residual;
SCSICommand cmd;
NotifierList cancel_notifiers;
@@ -49,35 +52,36 @@ struct SCSIRequest {
};
#define TYPE_SCSI_DEVICE "scsi-device"
-#define SCSI_DEVICE(obj) \
- OBJECT_CHECK(SCSIDevice, (obj), TYPE_SCSI_DEVICE)
-#define SCSI_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SCSIDeviceClass, (klass), TYPE_SCSI_DEVICE)
-#define SCSI_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SCSIDeviceClass, (obj), TYPE_SCSI_DEVICE)
-
-typedef struct SCSIDeviceClass {
+OBJECT_DECLARE_TYPE(SCSIDevice, SCSIDeviceClass, SCSI_DEVICE)
+
+struct SCSIDeviceClass {
DeviceClass parent_class;
void (*realize)(SCSIDevice *dev, Error **errp);
+ void (*unrealize)(SCSIDevice *dev);
int (*parse_cdb)(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
- void *hba_private);
+ size_t buf_len, void *hba_private);
SCSIRequest *(*alloc_req)(SCSIDevice *s, uint32_t tag, uint32_t lun,
uint8_t *buf, void *hba_private);
void (*unit_attention_reported)(SCSIDevice *s);
-} SCSIDeviceClass;
+};
struct SCSIDevice
{
DeviceState qdev;
VMChangeStateEntry *vmsentry;
- QEMUBH *bh;
uint32_t id;
BlockConf conf;
SCSISense unit_attention;
bool sense_is_ua;
uint8_t sense[SCSI_SENSE_BUF_SIZE];
uint32_t sense_len;
+
+ /*
+ * The requests list is only accessed from the AioContext that executes
+ * requests or from the main loop when IOThread processing is stopped.
+ */
QTAILQ_HEAD(, SCSIRequest) requests;
+
uint32_t channel;
uint32_t lun;
int blocksize;
@@ -87,7 +91,9 @@ struct SCSIDevice
uint64_t port_wwn;
int scsi_version;
int default_scsi_version;
+ uint32_t io_timeout;
bool needs_vpd_bl_emulation;
+ bool hba_supports_iothread;
};
extern const VMStateDescription vmstate_scsi_device;
@@ -107,6 +113,7 @@ int cdrom_read_toc_raw(int nb_sectors, uint8_t *buf, int msf, int session_num);
/* scsi-bus.c */
struct SCSIReqOps {
size_t size;
+ void (*init_req)(SCSIRequest *req);
void (*free_req)(SCSIRequest *req);
int32_t (*send_command)(SCSIRequest *req, uint8_t *buf);
void (*read_data)(SCSIRequest *req);
@@ -121,9 +128,10 @@ struct SCSIBusInfo {
int tcq;
int max_channel, max_target, max_lun;
int (*parse_cdb)(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
- void *hba_private);
+ size_t buf_len, void *hba_private);
void (*transfer_data)(SCSIRequest *req, uint32_t arg);
- void (*complete)(SCSIRequest *req, uint32_t arg, size_t resid);
+ void (*fail)(SCSIRequest *req);
+ void (*complete)(SCSIRequest *req, size_t residual);
void (*cancel)(SCSIRequest *req);
void (*change)(SCSIBus *bus, SCSIDevice *dev, SCSISense sense);
QEMUSGList *(*get_sg_list)(SCSIRequest *req);
@@ -131,10 +139,20 @@ struct SCSIBusInfo {
void (*save_request)(QEMUFile *f, SCSIRequest *req);
void *(*load_request)(QEMUFile *f, SCSIRequest *req);
void (*free_request)(SCSIBus *bus, void *priv);
+
+ /*
+ * Temporarily stop submitting new requests between drained_begin() and
+ * drained_end(). Called from the main loop thread with the BQL held.
+ *
+ * Implement these callbacks if request processing is triggered by a file
+ * descriptor like an EventNotifier. Otherwise set them to NULL.
+ */
+ void (*drained_begin)(SCSIBus *bus);
+ void (*drained_end)(SCSIBus *bus);
};
#define TYPE_SCSI_BUS "SCSI"
-#define SCSI_BUS(obj) OBJECT_CHECK(SCSIBus, (obj), TYPE_SCSI_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(SCSIBus, SCSI_BUS)
struct SCSIBus {
BusState qbus;
@@ -142,10 +160,38 @@ struct SCSIBus {
SCSISense unit_attention;
const SCSIBusInfo *info;
+
+ int drain_count; /* protected by BQL */
};
-void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
- const SCSIBusInfo *info, const char *bus_name);
+/**
+ * scsi_bus_init_named: Initialize a SCSI bus with the specified name
+ * @bus: SCSIBus object to initialize
+ * @bus_size: size of @bus object
+ * @host: Device which owns the bus (generally the SCSI controller)
+ * @info: structure defining callbacks etc for the controller
+ * @bus_name: Name to use for this bus
+ *
+ * This in-place initializes @bus as a new SCSI bus with a name
+ * provided by the caller. It is the caller's responsibility to make
+ * sure that the name does not clash with the name of any other bus in the
+ * system. Unless you need the new bus to have a specific name, you
+ * should use scsi_bus_init() instead.
+ */
+void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
+ const SCSIBusInfo *info, const char *bus_name);
+
+/**
+ * scsi_bus_init: Initialize a SCSI bus
+ *
+ * This in-place initializes @bus as a new SCSI bus and gives it
+ * an automatically generated unique name.
+ */
+static inline void scsi_bus_init(SCSIBus *bus, size_t bus_size,
+ DeviceState *host, const SCSIBusInfo *info)
+{
+ scsi_bus_init_named(bus, bus_size, host, info, NULL);
+}
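
A caller-side sketch of scsi_bus_init() from a controller's realize method; MyHBAState, MY_HBA and my_hba_scsi_info are hypothetical placeholders for the controller's own types:

    /* Sketch: initialize the SCSI bus during controller realize(). */
    static void my_hba_realize(DeviceState *dev, Error **errp)
    {
        MyHBAState *s = MY_HBA(dev);

        scsi_bus_init(&s->bus, sizeof(s->bus), dev, &my_hba_scsi_info);
        /* Optionally attach legacy command-line drives to the new bus. */
        scsi_bus_legacy_handle_cmdline(&s->bus);
    }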
static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d)
{
@@ -153,36 +199,37 @@ static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d)
}
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
- int unit, bool removable, int bootindex,
- bool share_rw,
- BlockdevOnError rerror,
- BlockdevOnError werror,
+ int unit, bool removable, BlockConf *conf,
const char *serial, Error **errp);
+void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense);
void scsi_bus_legacy_handle_cmdline(SCSIBus *bus);
-void scsi_legacy_handle_cmdline(void);
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
uint32_t tag, uint32_t lun, void *hba_private);
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
- uint8_t *buf, void *hba_private);
+ uint8_t *buf, size_t buf_len, void *hba_private);
int32_t scsi_req_enqueue(SCSIRequest *req);
SCSIRequest *scsi_req_ref(SCSIRequest *req);
void scsi_req_unref(SCSIRequest *req);
int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
- void *hba_private);
-int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf);
+ size_t buf_len, void *hba_private);
+int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
+ size_t buf_len);
void scsi_req_build_sense(SCSIRequest *req, SCSISense sense);
void scsi_req_print(SCSIRequest *req);
void scsi_req_continue(SCSIRequest *req);
void scsi_req_data(SCSIRequest *req, int len);
void scsi_req_complete(SCSIRequest *req, int status);
+void scsi_req_complete_failed(SCSIRequest *req, int host_status);
uint8_t *scsi_req_get_buf(SCSIRequest *req);
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len);
void scsi_req_cancel_complete(SCSIRequest *req);
void scsi_req_cancel(SCSIRequest *req);
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier);
void scsi_req_retry(SCSIRequest *req);
+void scsi_device_drained_begin(SCSIDevice *sdev);
+void scsi_device_drained_end(SCSIDevice *sdev);
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense);
void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense);
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense);
@@ -190,10 +237,17 @@ void scsi_device_unit_attention_reported(SCSIDevice *dev);
void scsi_generic_read_device_inquiry(SCSIDevice *dev);
int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed);
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
- uint8_t *buf, uint8_t buf_size);
+ uint8_t *buf, uint8_t buf_size, uint32_t timeout);
SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int target, int lun);
+SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int target, int lun);
/* scsi-generic.c. */
extern const SCSIReqOps scsi_generic_req_ops;
+/* scsi-disk.c */
+#define SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR 0
+#define SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD 1
+#define SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE 2
+#define SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED 3
+
#endif
diff --git a/include/hw/sd/allwinner-sdhost.h b/include/hw/sd/allwinner-sdhost.h
new file mode 100644
index 0000000000..1b951177dd
--- /dev/null
+++ b/include/hw/sd/allwinner-sdhost.h
@@ -0,0 +1,146 @@
+/*
+ * Allwinner (sun4i and above) SD Host Controller emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank <nieklinnenbank@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SD_ALLWINNER_SDHOST_H
+#define HW_SD_ALLWINNER_SDHOST_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/sd/sd.h"
+
+/**
+ * Object model types
+ * @{
+ */
+
+/** Generic Allwinner SD Host Controller (abstract) */
+#define TYPE_AW_SDHOST "allwinner-sdhost"
+
+/** Allwinner sun4i family (A10, A12) */
+#define TYPE_AW_SDHOST_SUN4I TYPE_AW_SDHOST "-sun4i"
+
+/** Allwinner sun5i family and newer (A13, H2+, H3, etc) */
+#define TYPE_AW_SDHOST_SUN5I TYPE_AW_SDHOST "-sun5i"
+
+/** Allwinner sun50i-a64 */
+#define TYPE_AW_SDHOST_SUN50I_A64 TYPE_AW_SDHOST "-sun50i-a64"
+
+/** Allwinner sun50i-a64 emmc */
+#define TYPE_AW_SDHOST_SUN50I_A64_EMMC TYPE_AW_SDHOST "-sun50i-a64-emmc"
+
+/** @} */
+
+/**
+ * Object model macros
+ * @{
+ */
+
+OBJECT_DECLARE_TYPE(AwSdHostState, AwSdHostClass, AW_SDHOST)
+
+/** @} */
+
+/**
+ * Allwinner SD Host Controller object instance state.
+ */
+struct AwSdHostState {
+ /*< private >*/
+ SysBusDevice busdev;
+ /*< public >*/
+
+ /** Secure Digital (SD) bus, which connects to SD card (if present) */
+ SDBus sdbus;
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Interrupt output signal to notify CPU */
+ qemu_irq irq;
+
+ /** Memory region where DMA transfers are done */
+ MemoryRegion *dma_mr;
+
+ /** Address space used internally for DMA transfers */
+ AddressSpace dma_as;
+
+ /** Number of bytes left in current DMA transfer */
+ uint32_t transfer_cnt;
+
+ /**
+ * @name Hardware Registers
+ * @{
+ */
+
+ uint32_t global_ctl; /**< Global Control */
+ uint32_t clock_ctl; /**< Clock Control */
+ uint32_t timeout; /**< Timeout */
+ uint32_t bus_width; /**< Bus Width */
+ uint32_t block_size; /**< Block Size */
+ uint32_t byte_count; /**< Byte Count */
+
+ uint32_t command; /**< Command */
+ uint32_t command_arg; /**< Command Argument */
+ uint32_t response[4]; /**< Command Response */
+
+ uint32_t irq_mask; /**< Interrupt Mask */
+ uint32_t irq_status; /**< Raw Interrupt Status */
+ uint32_t status; /**< Status */
+
+ uint32_t fifo_wlevel; /**< FIFO Water Level */
+ uint32_t fifo_func_sel; /**< FIFO Function Select */
+ uint32_t debug_enable; /**< Debug Enable */
+ uint32_t auto12_arg; /**< Auto Command 12 Argument */
+ uint32_t newtiming_set; /**< SD New Timing Set */
+ uint32_t newtiming_debug; /**< SD New Timing Debug */
+ uint32_t hardware_rst; /**< Hardware Reset */
+ uint32_t dmac; /**< Internal DMA Controller Control */
+ uint32_t desc_base; /**< Descriptor List Base Address */
+ uint32_t dmac_status; /**< Internal DMA Controller Status */
+ uint32_t dmac_irq; /**< Internal DMA Controller IRQ Enable */
+ uint32_t card_threshold; /**< Card Threshold Control */
+ uint32_t startbit_detect; /**< eMMC DDR Start Bit Detection Control */
+ uint32_t response_crc; /**< Response CRC */
+ uint32_t data_crc[8]; /**< Data CRC */
+ uint32_t sample_delay; /**< Sample delay control */
+ uint32_t status_crc; /**< Status CRC */
+
+ /** @} */
+
+};
+
+/**
+ * Allwinner SD Host Controller class-level struct.
+ *
+ * This struct is filled in by the SoC-specific (sunxi) variant code
+ * so that the generic controller code can use it to support
+ * all device variants.
+ */
+struct AwSdHostClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+
+ /** Maximum buffer size in bytes per DMA descriptor */
+ size_t max_desc_size;
+ /** Controller belongs to the first (sun4i) generation of this IP */
+ bool is_sun4i;
+
+ /** does the IP block support autocalibration? */
+ bool can_calibrate;
+};
+
+#endif /* HW_SD_ALLWINNER_SDHOST_H */
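
The class fields above (max_desc_size, is_sun4i, can_calibrate) are meant to be filled in by the SoC-specific variant code. As a rough illustration of how a variant could populate them, here is a hypothetical class_init sketch; the function name and the values are assumptions for illustration, not taken from a real Allwinner variant:

/* Hypothetical variant class_init; all values are illustrative placeholders. */
static void aw_sdhost_example_class_init(ObjectClass *klass, void *data)
{
    AwSdHostClass *sc = AW_SDHOST_CLASS(klass);

    sc->max_desc_size = 64 * 1024; /* assumed per-descriptor buffer limit */
    sc->is_sun4i = false;          /* later-generation register layout */
    sc->can_calibrate = true;      /* IP block exposes sample-delay calibration */
}
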
diff --git a/include/hw/sd/aspeed_sdhci.h b/include/hw/sd/aspeed_sdhci.h
new file mode 100644
index 0000000000..057bc5f3d1
--- /dev/null
+++ b/include/hw/sd/aspeed_sdhci.h
@@ -0,0 +1,35 @@
+/*
+ * Aspeed SD Host Controller
+ * Eddie James <eajames@linux.ibm.com>
+ *
+ * Copyright (C) 2019 IBM Corp
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ASPEED_SDHCI_H
+#define ASPEED_SDHCI_H
+
+#include "hw/sd/sdhci.h"
+#include "qom/object.h"
+
+#define TYPE_ASPEED_SDHCI "aspeed.sdhci"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedSDHCIState, ASPEED_SDHCI)
+
+#define ASPEED_SDHCI_CAPABILITIES 0x01E80080
+#define ASPEED_SDHCI_NUM_SLOTS 2
+#define ASPEED_SDHCI_NUM_REGS (ASPEED_SDHCI_REG_SIZE / sizeof(uint32_t))
+#define ASPEED_SDHCI_REG_SIZE 0x100
+
+struct AspeedSDHCIState {
+ SysBusDevice parent;
+
+ SDHCIState slots[ASPEED_SDHCI_NUM_SLOTS];
+ uint8_t num_slots;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t regs[ASPEED_SDHCI_NUM_REGS];
+};
+
+#endif /* ASPEED_SDHCI_H */
diff --git a/include/hw/sd/bcm2835_sdhost.h b/include/hw/sd/bcm2835_sdhost.h
index 7520dd6507..f6bca5c397 100644
--- a/include/hw/sd/bcm2835_sdhost.h
+++ b/include/hw/sd/bcm2835_sdhost.h
@@ -16,14 +16,14 @@
#include "hw/sysbus.h"
#include "hw/sd/sd.h"
+#include "qom/object.h"
#define TYPE_BCM2835_SDHOST "bcm2835-sdhost"
-#define BCM2835_SDHOST(obj) \
- OBJECT_CHECK(BCM2835SDHostState, (obj), TYPE_BCM2835_SDHOST)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SDHostState, BCM2835_SDHOST)
#define BCM2835_SDHOST_FIFO_LEN 16
-typedef struct {
+struct BCM2835SDHostState {
SysBusDevice busdev;
SDBus sdbus;
MemoryRegion iomem;
@@ -43,6 +43,6 @@ typedef struct {
uint32_t datacnt;
qemu_irq irq;
-} BCM2835SDHostState;
+};
#endif
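
This hunk is representative of the QOM declaration pattern used throughout the series: the hand-written typedef and OBJECT_CHECK() cast macro are folded into a single OBJECT_DECLARE_SIMPLE_TYPE() line. As a rough sketch (not the exact macro expansion in qom/object.h), the declaration provides the equivalent of:

/* Approximate effect of OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SDHostState,
 * BCM2835_SDHOST); the real macro generates a type-safe inline checker
 * rather than a plain #define. */
typedef struct BCM2835SDHostState BCM2835SDHostState;

#define BCM2835_SDHOST(obj) \
    OBJECT_CHECK(BCM2835SDHostState, (obj), TYPE_BCM2835_SDHOST)
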
diff --git a/include/hw/sd/cadence_sdhci.h b/include/hw/sd/cadence_sdhci.h
new file mode 100644
index 0000000000..cd8288b7d8
--- /dev/null
+++ b/include/hw/sd/cadence_sdhci.h
@@ -0,0 +1,47 @@
+/*
+ * Cadence SDHCI emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CADENCE_SDHCI_H
+#define CADENCE_SDHCI_H
+
+#include "hw/sd/sdhci.h"
+
+#define CADENCE_SDHCI_REG_SIZE 0x100
+#define CADENCE_SDHCI_NUM_REGS (CADENCE_SDHCI_REG_SIZE / sizeof(uint32_t))
+
+typedef struct CadenceSDHCIState {
+ SysBusDevice parent;
+
+ MemoryRegion container;
+ MemoryRegion iomem;
+ BusState *bus;
+
+ uint32_t regs[CADENCE_SDHCI_NUM_REGS];
+
+ SDHCIState sdhci;
+} CadenceSDHCIState;
+
+#define TYPE_CADENCE_SDHCI "cadence.sdhci"
+#define CADENCE_SDHCI(obj) OBJECT_CHECK(CadenceSDHCIState, (obj), \
+ TYPE_CADENCE_SDHCI)
+
+#endif /* CADENCE_SDHCI_H */
diff --git a/include/hw/sd/npcm7xx_sdhci.h b/include/hw/sd/npcm7xx_sdhci.h
new file mode 100644
index 0000000000..ad8002f766
--- /dev/null
+++ b/include/hw/sd/npcm7xx_sdhci.h
@@ -0,0 +1,65 @@
+/*
+ * NPCM7xx SD-3.0 / eMMC-4.51 Host Controller
+ *
+ * Copyright (c) 2021 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM7XX_SDHCI_H
+#define NPCM7XX_SDHCI_H
+
+#include "hw/sd/sdhci.h"
+#include "qom/object.h"
+
+#define TYPE_NPCM7XX_SDHCI "npcm7xx.sdhci"
+#define NPCM7XX_PRSTVALS_SIZE 6
+#define NPCM7XX_PRSTVALS 0x60
+#define NPCM7XX_PRSTVALS_0 0x0
+#define NPCM7XX_PRSTVALS_1 0x2
+#define NPCM7XX_PRSTVALS_2 0x4
+#define NPCM7XX_PRSTVALS_3 0x6
+#define NPCM7XX_PRSTVALS_4 0x8
+#define NPCM7XX_PRSTVALS_5 0xA
+#define NPCM7XX_BOOTTOCTRL 0x10
+#define NPCM7XX_SDHCI_REGSIZE 0x20
+
+#define NPCM7XX_PRSNTS_RESET 0x04A00000
+#define NPCM7XX_BLKGAP_RESET 0x80
+#define NPCM7XX_CAPAB_RESET 0x0100200161EE0399
+#define NPCM7XX_MAXCURR_RESET 0x0000000000000005
+#define NPCM7XX_HCVER_RESET 0x1002
+
+#define NPCM7XX_PRSTVALS_0_RESET 0x0040
+#define NPCM7XX_PRSTVALS_1_RESET 0x0001
+#define NPCM7XX_PRSTVALS_3_RESET 0x0001
+
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxSDHCIState, NPCM7XX_SDHCI)
+
+typedef struct NPCM7xxRegs {
+ /* Preset Values Register Field, read-only */
+ uint16_t prstvals[NPCM7XX_PRSTVALS_SIZE];
+ /* Boot Timeout Control Register, read-write */
+ uint32_t boottoctrl;
+} NPCM7xxRegisters;
+
+struct NPCM7xxSDHCIState {
+ SysBusDevice parent;
+
+ MemoryRegion container;
+ MemoryRegion iomem;
+ BusState *bus;
+ NPCM7xxRegisters regs;
+
+ SDHCIState sdhci;
+};
+
+#endif /* NPCM7XX_SDHCI_H */
diff --git a/include/hw/sd/sd.h b/include/hw/sd/sd.h
index b865aafc33..2c8748fb9b 100644
--- a/include/hw/sd/sd.h
+++ b/include/hw/sd/sd.h
@@ -30,29 +30,30 @@
#ifndef HW_SD_H
#define HW_SD_H
-#include "hw/qdev.h"
-
-#define OUT_OF_RANGE (1 << 31)
-#define ADDRESS_ERROR (1 << 30)
-#define BLOCK_LEN_ERROR (1 << 29)
-#define ERASE_SEQ_ERROR (1 << 28)
-#define ERASE_PARAM (1 << 27)
-#define WP_VIOLATION (1 << 26)
-#define CARD_IS_LOCKED (1 << 25)
-#define LOCK_UNLOCK_FAILED (1 << 24)
-#define COM_CRC_ERROR (1 << 23)
-#define ILLEGAL_COMMAND (1 << 22)
-#define CARD_ECC_FAILED (1 << 21)
-#define CC_ERROR (1 << 20)
-#define SD_ERROR (1 << 19)
-#define CID_CSD_OVERWRITE (1 << 16)
-#define WP_ERASE_SKIP (1 << 15)
-#define CARD_ECC_DISABLED (1 << 14)
-#define ERASE_RESET (1 << 13)
-#define CURRENT_STATE (7 << 9)
-#define READY_FOR_DATA (1 << 8)
-#define APP_CMD (1 << 5)
-#define AKE_SEQ_ERROR (1 << 3)
+#include "hw/qdev-core.h"
+#include "qom/object.h"
+
+#define OUT_OF_RANGE (1 << 31)
+#define ADDRESS_ERROR (1 << 30)
+#define BLOCK_LEN_ERROR (1 << 29)
+#define ERASE_SEQ_ERROR (1 << 28)
+#define ERASE_PARAM (1 << 27)
+#define WP_VIOLATION (1 << 26)
+#define CARD_IS_LOCKED (1 << 25)
+#define LOCK_UNLOCK_FAILED (1 << 24)
+#define COM_CRC_ERROR (1 << 23)
+#define ILLEGAL_COMMAND (1 << 22)
+#define CARD_ECC_FAILED (1 << 21)
+#define CC_ERROR (1 << 20)
+#define SD_ERROR (1 << 19)
+#define CID_CSD_OVERWRITE (1 << 16)
+#define WP_ERASE_SKIP (1 << 15)
+#define CARD_ECC_DISABLED (1 << 14)
+#define ERASE_RESET (1 << 13)
+#define CURRENT_STATE (7 << 9)
+#define READY_FOR_DATA (1 << 8)
+#define APP_CMD (1 << 5)
+#define AKE_SEQ_ERROR (1 << 3)
enum SDPhySpecificationVersion {
SD_PHY_SPECv1_10_VERS = 1,
@@ -76,10 +77,10 @@ typedef enum {
typedef enum {
sd_none = -1,
- sd_bc = 0, /* broadcast -- no response */
- sd_bcr, /* broadcast with response */
- sd_ac, /* addressed -- no data transfer */
- sd_adtc, /* addressed with data transfer */
+ sd_bc = 0, /* broadcast -- no response */
+ sd_bcr, /* broadcast with response */
+ sd_ac, /* addressed -- no data transfer */
+ sd_adtc, /* addressed with data transfer */
} sd_cmd_type_t;
typedef struct {
@@ -88,24 +89,37 @@ typedef struct {
uint8_t crc;
} SDRequest;
-typedef struct SDState SDState;
-typedef struct SDBus SDBus;
#define TYPE_SD_CARD "sd-card"
-#define SD_CARD(obj) OBJECT_CHECK(SDState, (obj), TYPE_SD_CARD)
-#define SD_CARD_CLASS(klass) \
- OBJECT_CLASS_CHECK(SDCardClass, (klass), TYPE_SD_CARD)
-#define SD_CARD_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SDCardClass, (obj), TYPE_SD_CARD)
+OBJECT_DECLARE_TYPE(SDState, SDCardClass, SD_CARD)
-typedef struct {
+#define TYPE_SD_CARD_SPI "sd-card-spi"
+DECLARE_INSTANCE_CHECKER(SDState, SD_CARD_SPI, TYPE_SD_CARD_SPI)
+
+struct SDCardClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
int (*do_command)(SDState *sd, SDRequest *req, uint8_t *response);
- void (*write_data)(SDState *sd, uint8_t value);
- uint8_t (*read_data)(SDState *sd);
+ /**
+ * Write a byte to an SD card.
+ * @sd: card
+ * @value: byte to write
+ *
+ * Write a byte on the data lines of an SD card.
+ */
+ void (*write_byte)(SDState *sd, uint8_t value);
+ /**
+ * Read a byte from an SD card.
+ * @sd: card
+ *
+ * Read a byte from the data lines of an SD card.
+ *
+ * Return: byte value read
+ */
+ uint8_t (*read_byte)(SDState *sd);
+ bool (*receive_ready)(SDState *sd);
bool (*data_ready)(SDState *sd);
void (*set_voltage)(SDState *sd, uint16_t millivolts);
uint8_t (*get_dat_lines)(SDState *sd);
@@ -113,18 +127,19 @@ typedef struct {
void (*enable)(SDState *sd, bool enable);
bool (*get_inserted)(SDState *sd);
bool (*get_readonly)(SDState *sd);
-} SDCardClass;
+
+ const struct SDProto *proto;
+};
#define TYPE_SD_BUS "sd-bus"
-#define SD_BUS(obj) OBJECT_CHECK(SDBus, (obj), TYPE_SD_BUS)
-#define SD_BUS_CLASS(klass) OBJECT_CLASS_CHECK(SDBusClass, (klass), TYPE_SD_BUS)
-#define SD_BUS_GET_CLASS(obj) OBJECT_GET_CLASS(SDBusClass, (obj), TYPE_SD_BUS)
+OBJECT_DECLARE_TYPE(SDBus, SDBusClass,
+ SD_BUS)
struct SDBus {
BusState qbus;
};
-typedef struct {
+struct SDBusClass {
/*< private >*/
BusClass parent_class;
/*< public >*/
@@ -134,24 +149,7 @@ typedef struct {
*/
void (*set_inserted)(DeviceState *dev, bool inserted);
void (*set_readonly)(DeviceState *dev, bool readonly);
-} SDBusClass;
-
-/* Legacy functions to be used only by non-qdevified callers */
-SDState *sd_init(BlockBackend *bs, bool is_spi);
-int sd_do_command(SDState *sd, SDRequest *req,
- uint8_t *response);
-void sd_write_data(SDState *sd, uint8_t value);
-uint8_t sd_read_data(SDState *sd);
-void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert);
-bool sd_data_ready(SDState *sd);
-/* sd_enable should not be used -- it is only used on the nseries boards,
- * where it is part of a broken implementation of the MMC card slot switch
- * (there should be two card slots which are multiplexed to a single MMC
- * controller, but instead we model it with one card and controller and
- * disable the card when the second slot is selected, so it looks like the
- * second slot is always empty).
- */
-void sd_enable(SDState *sd, bool enable);
+};
/* Functions to be used by qdevified callers (working via
* an SDBus rather than directly with SDState)
@@ -160,8 +158,42 @@ void sdbus_set_voltage(SDBus *sdbus, uint16_t millivolts);
uint8_t sdbus_get_dat_lines(SDBus *sdbus);
bool sdbus_get_cmd_line(SDBus *sdbus);
int sdbus_do_command(SDBus *sd, SDRequest *req, uint8_t *response);
-void sdbus_write_data(SDBus *sd, uint8_t value);
-uint8_t sdbus_read_data(SDBus *sd);
+/**
+ * Write a byte to an SD bus.
+ * @sd: bus
+ * @value: byte to write
+ *
+ * Write a byte on the data lines of an SD bus.
+ */
+void sdbus_write_byte(SDBus *sd, uint8_t value);
+/**
+ * Read a byte from an SD bus.
+ * @sd: bus
+ *
+ * Read a byte from the data lines of an SD bus.
+ *
+ * Return: byte value read
+ */
+uint8_t sdbus_read_byte(SDBus *sd);
+/**
+ * Write data to an SD bus.
+ * @sdbus: bus
+ * @buf: data to write
+ * @length: number of bytes to write
+ *
+ * Write multiple bytes of data on the data lines of an SD bus.
+ */
+void sdbus_write_data(SDBus *sdbus, const void *buf, size_t length);
+/**
+ * Read data from an SD bus.
+ * @sdbus: bus
+ * @buf: buffer to read data into
+ * @length: number of bytes to read
+ *
+ * Read multiple bytes of data from the data lines of an SD bus.
+ */
+void sdbus_read_data(SDBus *sdbus, void *buf, size_t length);
+bool sdbus_receive_ready(SDBus *sd);
bool sdbus_data_ready(SDBus *sd);
bool sdbus_get_inserted(SDBus *sd);
bool sdbus_get_readonly(SDBus *sd);
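
The byte- and block-oriented accessors above give controller models two ways to move data over an SD bus. A minimal controller-side sketch, with an invented helper name and under the assumption that command validation happens elsewhere, could look like:

/* Hypothetical helper in a controller model: issue a command and, if the
 * card reports data ready, drain one block with the multi-byte accessor. */
static void example_sd_read_block(SDBus *sdbus, SDRequest *req,
                                  uint8_t *buf, size_t len)
{
    uint8_t response[16];

    if (sdbus_do_command(sdbus, req, response) > 0 && sdbus_data_ready(sdbus)) {
        sdbus_read_data(sdbus, buf, len);   /* or sdbus_read_byte() per byte */
    }
}
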
diff --git a/include/hw/sd/sdcard_legacy.h b/include/hw/sd/sdcard_legacy.h
new file mode 100644
index 0000000000..0dc3889555
--- /dev/null
+++ b/include/hw/sd/sdcard_legacy.h
@@ -0,0 +1,50 @@
+/*
+ * SD Memory Card emulation (deprecated legacy API)
+ *
+ * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef HW_SDCARD_LEGACY_H
+#define HW_SDCARD_LEGACY_H
+
+#include "hw/sd/sd.h"
+
+/* Legacy functions to be used only by non-qdevified callers */
+SDState *sd_init(BlockBackend *blk, bool is_spi);
+int sd_do_command(SDState *card, SDRequest *request, uint8_t *response);
+void sd_write_byte(SDState *card, uint8_t value);
+uint8_t sd_read_byte(SDState *card);
+void sd_set_cb(SDState *card, qemu_irq readonly, qemu_irq insert);
+
+/* sd_enable should not be used -- it is only used on the nseries boards,
+ * where it is part of a broken implementation of the MMC card slot switch
+ * (there should be two card slots which are multiplexed to a single MMC
+ * controller, but instead we model it with one card and controller and
+ * disable the card when the second slot is selected, so it looks like the
+ * second slot is always empty).
+ */
+void sd_enable(SDState *card, bool enable);
+
+#endif /* HW_SDCARD_LEGACY_H */
diff --git a/include/hw/sd/sdhci.h b/include/hw/sd/sdhci.h
index f321767c56..6cd2822f1d 100644
--- a/include/hw/sd/sdhci.h
+++ b/include/hw/sd/sdhci.h
@@ -25,13 +25,13 @@
#ifndef SDHCI_H
#define SDHCI_H
-#include "qemu-common.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/sysbus.h"
#include "hw/sd/sd.h"
+#include "qom/object.h"
/* SD/MMC host controller state */
-typedef struct SDHCIState {
+struct SDHCIState {
/*< private >*/
union {
PCIDevice pcidev;
@@ -75,6 +75,7 @@ typedef struct SDHCIState {
uint16_t acmd12errsts; /* Auto CMD12 error status register */
uint16_t hostctl2; /* Host Control 2 */
uint64_t admasysaddr; /* ADMA System Address Register */
+ uint16_t vendor_spec; /* Vendor specific register */
/* Read-only registers */
uint64_t capareg; /* Capabilities Register */
@@ -95,9 +96,15 @@ typedef struct SDHCIState {
/* Configurable properties */
bool pending_insert_quirk; /* Quirk for Raspberry Pi card insert int */
uint32_t quirks;
+ uint8_t endianness;
uint8_t sd_spec_version;
uint8_t uhs_mode;
-} SDHCIState;
+ uint8_t vendor; /* For vendor specific functionality */
+};
+typedef struct SDHCIState SDHCIState;
+
+#define SDHCI_VENDOR_NONE 0
+#define SDHCI_VENDOR_IMX 1
/*
* Controller does not provide transfer-complete interrupt when not
@@ -109,12 +116,15 @@ typedef struct SDHCIState {
#define SDHCI_QUIRK_NO_BUSY_IRQ BIT(14)
#define TYPE_PCI_SDHCI "sdhci-pci"
-#define PCI_SDHCI(obj) OBJECT_CHECK(SDHCIState, (obj), TYPE_PCI_SDHCI)
+DECLARE_INSTANCE_CHECKER(SDHCIState, PCI_SDHCI,
+ TYPE_PCI_SDHCI)
#define TYPE_SYSBUS_SDHCI "generic-sdhci"
-#define SYSBUS_SDHCI(obj) \
- OBJECT_CHECK(SDHCIState, (obj), TYPE_SYSBUS_SDHCI)
+DECLARE_INSTANCE_CHECKER(SDHCIState, SYSBUS_SDHCI,
+ TYPE_SYSBUS_SDHCI)
#define TYPE_IMX_USDHC "imx-usdhc"
+#define TYPE_S3C_SDHCI "s3c-sdhci"
+
#endif /* SDHCI_H */
diff --git a/include/hw/sensor/emc141x_regs.h b/include/hw/sensor/emc141x_regs.h
new file mode 100644
index 0000000000..e509a43d55
--- /dev/null
+++ b/include/hw/sensor/emc141x_regs.h
@@ -0,0 +1,37 @@
+/*
+ * SMSC EMC141X temperature sensor.
+ *
+ * Browse the data sheet:
+ *
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/20005274A.pdf
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef EMC141X_REGS_H
+#define EMC141X_REGS_H
+
+#define EMC1413_DEVICE_ID 0x21
+#define EMC1414_DEVICE_ID 0x25
+#define MANUFACTURER_ID 0x5d
+#define REVISION 0x04
+
+/* the EMC141X registers */
+#define EMC141X_TEMP_HIGH0 0x00
+#define EMC141X_TEMP_HIGH1 0x01
+#define EMC141X_TEMP_HIGH2 0x23
+#define EMC141X_TEMP_HIGH3 0x2a
+#define EMC141X_TEMP_MAX_HIGH0 0x05
+#define EMC141X_TEMP_MIN_HIGH0 0x06
+#define EMC141X_TEMP_MAX_HIGH1 0x07
+#define EMC141X_TEMP_MIN_HIGH1 0x08
+#define EMC141X_TEMP_MAX_HIGH2 0x15
+#define EMC141X_TEMP_MIN_HIGH2 0x16
+#define EMC141X_TEMP_MAX_HIGH3 0x2c
+#define EMC141X_TEMP_MIN_HIGH3 0x2d
+#define EMC141X_DEVICE_ID 0xfd
+#define EMC141X_MANUFACTURER_ID 0xfe
+#define EMC141X_REVISION 0xff
+
+#endif
diff --git a/include/hw/sensor/isl_pmbus_vr.h b/include/hw/sensor/isl_pmbus_vr.h
new file mode 100644
index 0000000000..aa2c2767df
--- /dev/null
+++ b/include/hw/sensor/isl_pmbus_vr.h
@@ -0,0 +1,57 @@
+/*
+ * PMBus device for Renesas Digital Multiphase Voltage Regulators
+ *
+ * Copyright 2022 Google LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_ISL_PMBUS_VR_H
+#define HW_MISC_ISL_PMBUS_VR_H
+
+#include "hw/i2c/pmbus_device.h"
+#include "qom/object.h"
+
+#define TYPE_ISL69259 "isl69259"
+#define TYPE_ISL69260 "isl69260"
+#define TYPE_RAA228000 "raa228000"
+#define TYPE_RAA229004 "raa229004"
+#define ISL_MAX_IC_DEVICE_ID_LEN 16
+
+struct ISLState {
+ PMBusDevice parent;
+
+ uint8_t ic_device_id[ISL_MAX_IC_DEVICE_ID_LEN];
+ uint8_t ic_device_id_len;
+};
+
+OBJECT_DECLARE_SIMPLE_TYPE(ISLState, ISL69260)
+
+#define ISL_CAPABILITY_DEFAULT 0x40
+#define ISL_OPERATION_DEFAULT 0x80
+#define ISL_ON_OFF_CONFIG_DEFAULT 0x16
+#define ISL_VOUT_MODE_DEFAULT 0x40
+#define ISL_VOUT_COMMAND_DEFAULT 0x0384
+#define ISL_VOUT_MAX_DEFAULT 0x08FC
+#define ISL_VOUT_MARGIN_HIGH_DEFAULT 0x0640
+#define ISL_VOUT_MARGIN_LOW_DEFAULT 0xFA
+#define ISL_VOUT_TRANSITION_RATE_DEFAULT 0x64
+#define ISL_VOUT_OV_FAULT_LIMIT_DEFAULT 0x076C
+#define ISL_OT_FAULT_LIMIT_DEFAULT 0x7D
+#define ISL_OT_WARN_LIMIT_DEFAULT 0x07D0
+#define ISL_VIN_OV_WARN_LIMIT_DEFAULT 0x36B0
+#define ISL_VIN_UV_WARN_LIMIT_DEFAULT 0x1F40
+#define ISL_IIN_OC_FAULT_LIMIT_DEFAULT 0x32
+#define ISL_TON_DELAY_DEFAULT 0x14
+#define ISL_TON_RISE_DEFAULT 0x01F4
+#define ISL_TOFF_FALL_DEFAULT 0x01F4
+#define ISL_REVISION_DEFAULT 0x33
+#define ISL_READ_VOUT_DEFAULT 1000
+#define ISL_READ_IOUT_DEFAULT 40
+#define ISL_READ_POUT_DEFAULT 4
+#define ISL_READ_TEMP_DEFAULT 25
+#define ISL_READ_VIN_DEFAULT 1100
+#define ISL_READ_IIN_DEFAULT 40
+#define ISL_READ_PIN_DEFAULT 4
+
+#endif
diff --git a/include/hw/sensor/tmp105.h b/include/hw/sensor/tmp105.h
new file mode 100644
index 0000000000..244e2989fe
--- /dev/null
+++ b/include/hw/sensor/tmp105.h
@@ -0,0 +1,55 @@
+/*
+ * Texas Instruments TMP105 Temperature Sensor
+ *
+ * Browse the data sheet:
+ *
+ * http://www.ti.com/lit/gpn/tmp105
+ *
+ * Copyright (C) 2012 Alex Horn <alex.horn@cs.ox.ac.uk>
+ * Copyright (C) 2008-2012 Andrzej Zaborowski <balrogg@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_TMP105_H
+#define QEMU_TMP105_H
+
+#include "hw/i2c/i2c.h"
+#include "hw/sensor/tmp105_regs.h"
+#include "qom/object.h"
+
+#define TYPE_TMP105 "tmp105"
+OBJECT_DECLARE_SIMPLE_TYPE(TMP105State, TMP105)
+
+/**
+ * TMP105State:
+ * @config: Bits 5 and 6 (value 32 and 64) determine the precision of the
+ * temperature. See Table 8 in the data sheet.
+ *
+ * @see_also: http://www.ti.com/lit/gpn/tmp105
+ */
+struct TMP105State {
+ /*< private >*/
+ I2CSlave i2c;
+ /*< public >*/
+
+ uint8_t len;
+ uint8_t buf[2];
+ qemu_irq pin;
+
+ uint8_t pointer;
+ uint8_t config;
+ int16_t temperature;
+ int16_t limit[2];
+ int faults;
+ uint8_t alarm;
+ /*
+ * The TMP105 initially looks for a temperature rising above T_high;
+ * once this is detected, the condition it looks for next is the
+ * temperature falling below T_low. This flag is false when initially
+ * looking for T_high, true when looking for T_low.
+ */
+ bool detect_falling;
+};
+
+#endif
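
For reference, the @temperature field models the register format described in the data sheet linked above: the reading is left-justified in 16 bits, and in 12-bit mode one LSB of the right-aligned value equals 0.0625 degrees C. A small, data-sheet-based conversion sketch (not code from this model) is:

/* Convert a raw TMP105 temperature register value to milli-degrees Celsius,
 * assuming 12-bit resolution (config bits 5 and 6 both set). */
static int32_t tmp105_reg_to_mcelsius(int16_t reg)
{
    /* left-justified reading; 1 LSB == 0.0625 C == 62.5 mC */
    return ((int32_t)reg >> 4) * 625 / 10;
}
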
diff --git a/include/hw/misc/tmp105_regs.h b/include/hw/sensor/tmp105_regs.h
index ef015ee5cf..ef015ee5cf 100644
--- a/include/hw/misc/tmp105_regs.h
+++ b/include/hw/sensor/tmp105_regs.h
diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h
index 767a2df7e2..ec716cdd45 100644
--- a/include/hw/sh4/sh.h
+++ b/include/hw/sh4/sh.h
@@ -1,6 +1,31 @@
-#ifndef QEMU_SH_H
-#define QEMU_SH_H
-/* Definitions for SH board emulation. */
+/*
+ * Definitions for SH board emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef QEMU_HW_SH_H
+#define QEMU_HW_SH_H
#include "hw/sh4/sh_intc.h"
#include "target/sh4/cpu-qom.h"
@@ -10,9 +35,8 @@
/* sh7750.c */
struct SH7750State;
-struct MemoryRegion;
-struct SH7750State *sh7750_init(SuperHCPU *cpu, struct MemoryRegion *sysmem);
+struct SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem);
typedef struct {
/* The callback will be triggered if any of the designated lines change */
@@ -20,34 +44,18 @@ typedef struct {
uint16_t portbmask_trigger;
/* Return 0 if no action was taken */
int (*port_change_cb) (uint16_t porta, uint16_t portb,
- uint16_t * periph_pdtra,
- uint16_t * periph_portdira,
- uint16_t * periph_pdtrb,
- uint16_t * periph_portdirb);
+ uint16_t *periph_pdtra,
+ uint16_t *periph_portdira,
+ uint16_t *periph_pdtrb,
+ uint16_t *periph_portdirb);
} sh7750_io_device;
int sh7750_register_io_device(struct SH7750State *s,
- sh7750_io_device * device);
-/* sh_timer.c */
-#define TMU012_FEAT_TOCR (1 << 0)
-#define TMU012_FEAT_3CHAN (1 << 1)
-#define TMU012_FEAT_EXTCLK (1 << 2)
-void tmu012_init(struct MemoryRegion *sysmem, hwaddr base,
- int feat, uint32_t freq,
- qemu_irq ch0_irq, qemu_irq ch1_irq,
- qemu_irq ch2_irq0, qemu_irq ch2_irq1);
-
+ sh7750_io_device *device);
/* sh_serial.c */
+#define TYPE_SH_SERIAL "sh-serial"
#define SH_SERIAL_FEAT_SCIF (1 << 0)
-void sh_serial_init(MemoryRegion *sysmem,
- hwaddr base, int feat,
- uint32_t freq, Chardev *chr,
- qemu_irq eri_source,
- qemu_irq rxi_source,
- qemu_irq txi_source,
- qemu_irq tei_source,
- qemu_irq bri_source);
/* sh7750.c */
qemu_irq sh7750_irl(struct SH7750State *s);
diff --git a/include/hw/sh4/sh_intc.h b/include/hw/sh4/sh_intc.h
index adfedb2efc..f62d5c5e13 100644
--- a/include/hw/sh4/sh_intc.h
+++ b/include/hw/sh4/sh_intc.h
@@ -1,8 +1,7 @@
#ifndef SH_INTC_H
#define SH_INTC_H
-#include "qemu-common.h"
-#include "hw/irq.h"
+#include "exec/memory.h"
typedef unsigned char intc_enum;
@@ -59,7 +58,7 @@ struct intc_desc {
};
int sh_intc_get_pending_vector(struct intc_desc *desc, int imask);
-struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id);
+
void sh_intc_toggle_source(struct intc_source *source,
int enable_adj, int assert_adj);
diff --git a/include/hw/i386/ich9.h b/include/hw/southbridge/ich9.h
index 673d13d28f..fd01649d04 100644
--- a/include/hw/i386/ich9.h
+++ b/include/hw/southbridge/ich9.h
@@ -1,34 +1,24 @@
-#ifndef HW_ICH9_H
-#define HW_ICH9_H
+#ifndef HW_SOUTHBRIDGE_ICH9_H
+#define HW_SOUTHBRIDGE_ICH9_H
-#include "hw/hw.h"
-#include "hw/isa/isa.h"
-#include "hw/sysbus.h"
-#include "hw/i386/pc.h"
#include "hw/isa/apm.h"
-#include "hw/i386/ioapic.h"
-#include "hw/pci/pci.h"
-#include "hw/pci/pcie_host.h"
-#include "hw/pci/pci_bridge.h"
-#include "hw/acpi/acpi.h"
#include "hw/acpi/ich9.h"
-#include "hw/pci/pci_bus.h"
-
-void ich9_lpc_set_irq(void *opaque, int irq_num, int level);
-int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx);
-PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin);
-void ich9_lpc_pm_init(PCIDevice *pci_lpc, bool smm_enabled);
-I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base);
+#include "hw/intc/ioapic.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "exec/memory.h"
+#include "qemu/notify.h"
+#include "qom/object.h"
void ich9_generate_smi(void);
#define ICH9_CC_SIZE (16 * 1024) /* 16KB. Chipset configuration registers */
#define TYPE_ICH9_LPC_DEVICE "ICH9-LPC"
-#define ICH9_LPC_DEVICE(obj) \
- OBJECT_CHECK(ICH9LPCState, (obj), TYPE_ICH9_LPC_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(ICH9LPCState, ICH9_LPC_DEVICE)
-typedef struct ICH9LPCState {
+struct ICH9LPCState {
/* ICH9 LPC PCI to ISA bridge */
PCIDevice d;
@@ -41,6 +31,7 @@ typedef struct ICH9LPCState {
*/
uint8_t irr[PCI_SLOT_MAX][PCI_NUM_PINS];
+ MC146818RtcState rtc;
APMState apm;
ICH9LPCPMRegs pm;
uint32_t sci_level; /* track sci level */
@@ -73,17 +64,13 @@ typedef struct ICH9LPCState {
* triggers feature lockdown */
uint64_t smi_negotiated_features; /* guest-invisible, host endian */
- /* isa bus */
- ISABus *isa_bus;
MemoryRegion rcrb_mem; /* root complex register block */
Notifier machine_ready;
- qemu_irq gsi[GSI_NUM_PINS];
-} ICH9LPCState;
+ qemu_irq gsi[IOAPIC_NUM_PINS];
+};
-Object *ich9_lpc_find(void);
-
-#define Q35_MASK(bit, ms_bit, ls_bit) \
+#define ICH9_MASK(bit, ms_bit, ls_bit) \
((uint##bit##_t)(((1ULL << ((ms_bit) + 1)) - 1) & ~((1ULL << ls_bit) - 1)))
/* ICH9: Chipset Configuration Registers */
@@ -145,12 +132,13 @@ Object *ich9_lpc_find(void);
#define ICH9_LPC_NB_PIRQS 8 /* PCI A-H */
#define ICH9_LPC_PMBASE 0x40
-#define ICH9_LPC_PMBASE_BASE_ADDRESS_MASK Q35_MASK(32, 15, 7)
+#define ICH9_LPC_PMBASE_BASE_ADDRESS_MASK ICH9_MASK(32, 15, 7)
#define ICH9_LPC_PMBASE_RTE 0x1
#define ICH9_LPC_PMBASE_DEFAULT 0x1
+
#define ICH9_LPC_ACPI_CTRL 0x44
#define ICH9_LPC_ACPI_CTRL_ACPI_EN 0x80
-#define ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK Q35_MASK(8, 2, 0)
+#define ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK ICH9_MASK(8, 2, 0)
#define ICH9_LPC_ACPI_CTRL_9 0x0
#define ICH9_LPC_ACPI_CTRL_10 0x1
#define ICH9_LPC_ACPI_CTRL_11 0x2
@@ -169,7 +157,7 @@ Object *ich9_lpc_find(void);
#define ICH9_LPC_PIRQH_ROUT 0x6b
#define ICH9_LPC_PIRQ_ROUT_IRQEN 0x80
-#define ICH9_LPC_PIRQ_ROUT_MASK Q35_MASK(8, 3, 0)
+#define ICH9_LPC_PIRQ_ROUT_MASK ICH9_MASK(8, 3, 0)
#define ICH9_LPC_PIRQ_ROUT_DEFAULT 0x80
#define ICH9_LPC_GEN_PMCON_1 0xa0
@@ -179,7 +167,7 @@ Object *ich9_lpc_find(void);
#define ICH9_LPC_GEN_PMCON_LOCK 0xa6
#define ICH9_LPC_RCBA 0xf0
-#define ICH9_LPC_RCBA_BA_MASK Q35_MASK(32, 31, 14)
+#define ICH9_LPC_RCBA_BA_MASK ICH9_MASK(32, 31, 14)
#define ICH9_LPC_RCBA_EN 0x1
#define ICH9_LPC_RCBA_DEFAULT 0x0
@@ -219,7 +207,7 @@ Object *ich9_lpc_find(void);
/* D31:F3 SMBus controller */
-#define TYPE_ICH9_SMB_DEVICE "ICH9 SMB"
+#define TYPE_ICH9_SMB_DEVICE "ICH9-SMB"
#define ICH9_A2_SMB_REVISION 0x02
#define ICH9_SMB_PI 0x00
@@ -249,7 +237,11 @@ Object *ich9_lpc_find(void);
#define ICH9_SMB_HST_D1 0x06
#define ICH9_SMB_HOST_BLOCK_DB 0x07
+#define ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP "x-smi-negotiated-features"
+
/* bit positions used in fw_cfg SMI feature negotiation */
#define ICH9_LPC_SMI_F_BROADCAST_BIT 0
+#define ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT 1
+#define ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT 2
-#endif /* HW_ICH9_H */
+#endif /* HW_SOUTHBRIDGE_ICH9_H */
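
The Q35_MASK to ICH9_MASK rename does not change behaviour: the macro builds a mask with bits ms_bit..ls_bit (inclusive) set, cast to an unsigned integer of the requested width. A worked example (computed by hand, not taken from the source):

/*
 * ICH9_MASK(32, 15, 7)
 *   == (uint32_t)(((1ULL << 16) - 1) & ~((1ULL << 7) - 1))
 *   == 0x0000FFFF & ~0x0000007F
 *   == 0x0000FF80                    -- bits 15..7, the PMBASE address field
 *
 * Likewise ICH9_MASK(8, 2, 0) == 0x07, the SCI IRQ select field.
 */
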
diff --git a/include/hw/southbridge/piix.h b/include/hw/southbridge/piix.h
new file mode 100644
index 0000000000..86709ba2e4
--- /dev/null
+++ b/include/hw/southbridge/piix.h
@@ -0,0 +1,84 @@
+/*
+ * QEMU PIIX South Bridge Emulation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ * Copyright (c) 2018 Hervé Poussineau
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HW_SOUTHBRIDGE_PIIX_H
+#define HW_SOUTHBRIDGE_PIIX_H
+
+#include "hw/pci/pci_device.h"
+#include "hw/acpi/piix4.h"
+#include "hw/ide/pci.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "hw/usb/hcd-uhci.h"
+
+/* PIRQRC[A:D]: PIRQx Route Control Registers */
+#define PIIX_PIRQCA 0x60
+#define PIIX_PIRQCB 0x61
+#define PIIX_PIRQCC 0x62
+#define PIIX_PIRQCD 0x63
+
+/*
+ * Reset Control Register: PCI-accessible ISA-Compatible Register at address
+ * 0xcf9, provided by the PCI/ISA bridge (PIIX3 PCI function 0, 8086:7000).
+ */
+#define PIIX_RCR_IOPORT 0xcf9
+
+#define PIIX_NUM_PIRQS 4ULL /* PIRQ[A-D] */
+
+struct PIIXState {
+ PCIDevice dev;
+
+ /*
+ * Bitmap to track PIC levels.
+ * The PIC level is the logical OR of all the PCI IRQs mapped to it,
+ * so one PIC level is tracked by PIIX_NUM_PIRQS bits.
+ *
+ * Each PIRQ is mapped to a PIC pin; the state is tracked in
+ * PIIX_NUM_PIRQS * ISA_NUM_IRQS = 64 bits, indexed by
+ * pic_irq * PIIX_NUM_PIRQS + pirq.
+ */
+#if ISA_NUM_IRQS * PIIX_NUM_PIRQS > 64
+#error "unable to encode pic state in 64bit in pic_levels."
+#endif
+ uint64_t pic_levels;
+
+ qemu_irq cpu_intr;
+ qemu_irq isa_irqs_in[ISA_NUM_IRQS];
+
+ /* This member isn't used. Just for save/load compatibility */
+ int32_t pci_irq_levels_vmstate[PIIX_NUM_PIRQS];
+
+ MC146818RtcState rtc;
+ PCIIDEState ide;
+ UHCIState uhci;
+ PIIX4PMState pm;
+
+ uint32_t smb_io_base;
+
+ /* Reset Control Register contents */
+ uint8_t rcr;
+
+ /* IO memory region for Reset Control Register (PIIX_RCR_IOPORT) */
+ MemoryRegion rcr_mem;
+
+ bool has_acpi;
+ bool has_pic;
+ bool has_pit;
+ bool has_usb;
+ bool smm_enabled;
+};
+
+#define TYPE_PIIX_PCI_DEVICE "pci-piix"
+OBJECT_DECLARE_SIMPLE_TYPE(PIIXState, PIIX_PCI_DEVICE)
+
+#define TYPE_PIIX3_DEVICE "PIIX3"
+#define TYPE_PIIX4_PCI_DEVICE "piix4-isa"
+
+#endif
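
To make the pic_levels encoding in the struct comment concrete, here is a hedged sketch of the bit bookkeeping; the helper name is invented for illustration and the real logic lives in the PIIX model, not in this header:

/*
 * Bit index for (PIC input pic_irq, PIRQ line pirq), e.g. PIRQB (pirq == 1)
 * routed to PIC IRQ 11 uses bit 11 * PIIX_NUM_PIRQS + 1 == 45.
 */
static inline void piix_example_set_level(PIIXState *d, int pic_irq, int pirq,
                                          bool level)
{
    uint64_t mask = 1ULL << (pic_irq * PIIX_NUM_PIRQS + pirq);

    if (level) {
        d->pic_levels |= mask;  /* PIC pin is high if any routed PIRQ is high */
    } else {
        d->pic_levels &= ~mask;
    }
}
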
diff --git a/include/hw/sparc/grlib.h b/include/hw/sparc/grlib.h
deleted file mode 100644
index 61a345c269..0000000000
--- a/include/hw/sparc/grlib.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * QEMU GRLIB Components
- *
- * Copyright (c) 2010-2011 AdaCore
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef GRLIB_H
-#define GRLIB_H
-
-#include "hw/qdev.h"
-#include "hw/sysbus.h"
-
-/* Emulation of GrLib device is base on the GRLIB IP Core User's Manual:
- * http://www.gaisler.com/products/grlib/grip.pdf
- */
-
-/* IRQMP */
-
-typedef void (*set_pil_in_fn) (void *opaque, uint32_t pil_in);
-
-void grlib_irqmp_set_irq(void *opaque, int irq, int level);
-
-void grlib_irqmp_ack(DeviceState *dev, int intno);
-
-static inline
-DeviceState *grlib_irqmp_create(hwaddr base,
- CPUSPARCState *env,
- qemu_irq **cpu_irqs,
- uint32_t nr_irqs,
- set_pil_in_fn set_pil_in)
-{
- DeviceState *dev;
-
- assert(cpu_irqs != NULL);
-
- dev = qdev_create(NULL, "grlib,irqmp");
- qdev_prop_set_ptr(dev, "set_pil_in", set_pil_in);
- qdev_prop_set_ptr(dev, "set_pil_in_opaque", env);
-
- qdev_init_nofail(dev);
-
- env->irq_manager = dev;
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- *cpu_irqs = qemu_allocate_irqs(grlib_irqmp_set_irq,
- dev,
- nr_irqs);
-
- return dev;
-}
-
-/* GPTimer */
-
-static inline
-DeviceState *grlib_gptimer_create(hwaddr base,
- uint32_t nr_timers,
- uint32_t freq,
- qemu_irq *cpu_irqs,
- int base_irq)
-{
- DeviceState *dev;
- int i;
-
- dev = qdev_create(NULL, "grlib,gptimer");
- qdev_prop_set_uint32(dev, "nr-timers", nr_timers);
- qdev_prop_set_uint32(dev, "frequency", freq);
- qdev_prop_set_uint32(dev, "irq-line", base_irq);
-
- qdev_init_nofail(dev);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- for (i = 0; i < nr_timers; i++) {
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, cpu_irqs[base_irq + i]);
- }
-
- return dev;
-}
-
-/* APB UART */
-
-static inline
-DeviceState *grlib_apbuart_create(hwaddr base,
- Chardev *serial,
- qemu_irq irq)
-{
- DeviceState *dev;
-
- dev = qdev_create(NULL, "grlib,apbuart");
- qdev_prop_set_chr(dev, "chrdev", serial);
-
- qdev_init_nofail(dev);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
-
- return dev;
-}
-
-#endif /* GRLIB_H */
diff --git a/include/hw/sparc/sparc32_dma.h b/include/hw/sparc/sparc32_dma.h
index ab42c5421b..cde8ec02cb 100644
--- a/include/hw/sparc/sparc32_dma.h
+++ b/include/hw/sparc/sparc32_dma.h
@@ -4,14 +4,13 @@
#include "hw/sysbus.h"
#include "hw/scsi/esp.h"
#include "hw/net/lance.h"
+#include "qom/object.h"
#define DMA_REGS 4
#define TYPE_SPARC32_DMA_DEVICE "sparc32-dma-device"
-#define SPARC32_DMA_DEVICE(obj) OBJECT_CHECK(DMADeviceState, (obj), \
- TYPE_SPARC32_DMA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(DMADeviceState, SPARC32_DMA_DEVICE)
-typedef struct DMADeviceState DMADeviceState;
struct DMADeviceState {
SysBusDevice parent_obj;
@@ -24,37 +23,34 @@ struct DMADeviceState {
};
#define TYPE_SPARC32_ESPDMA_DEVICE "sparc32-espdma"
-#define SPARC32_ESPDMA_DEVICE(obj) OBJECT_CHECK(ESPDMADeviceState, (obj), \
- TYPE_SPARC32_ESPDMA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(ESPDMADeviceState, SPARC32_ESPDMA_DEVICE)
-typedef struct ESPDMADeviceState {
+struct ESPDMADeviceState {
DMADeviceState parent_obj;
- SysBusESPState *esp;
-} ESPDMADeviceState;
+ SysBusESPState esp;
+};
#define TYPE_SPARC32_LEDMA_DEVICE "sparc32-ledma"
-#define SPARC32_LEDMA_DEVICE(obj) OBJECT_CHECK(LEDMADeviceState, (obj), \
- TYPE_SPARC32_LEDMA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(LEDMADeviceState, SPARC32_LEDMA_DEVICE)
-typedef struct LEDMADeviceState {
+struct LEDMADeviceState {
DMADeviceState parent_obj;
- SysBusPCNetState *lance;
-} LEDMADeviceState;
+ SysBusPCNetState lance;
+};
#define TYPE_SPARC32_DMA "sparc32-dma"
-#define SPARC32_DMA(obj) OBJECT_CHECK(SPARC32DMAState, (obj), \
- TYPE_SPARC32_DMA)
+OBJECT_DECLARE_SIMPLE_TYPE(SPARC32DMAState, SPARC32_DMA)
-typedef struct SPARC32DMAState {
+struct SPARC32DMAState {
SysBusDevice parent_obj;
MemoryRegion dmamem;
MemoryRegion ledma_alias;
- ESPDMADeviceState *espdma;
- LEDMADeviceState *ledma;
-} SPARC32DMAState;
+ ESPDMADeviceState espdma;
+ LEDMADeviceState ledma;
+};
/* sparc32_dma.c */
void ledma_memory_read(void *opaque, hwaddr addr,
diff --git a/include/hw/sparc/sparc64.h b/include/hw/sparc/sparc64.h
index 5af4344459..4ced36fb5a 100644
--- a/include/hw/sparc/sparc64.h
+++ b/include/hw/sparc/sparc64.h
@@ -1,6 +1,12 @@
+#ifndef HW_SPARC_SPARC64_H
+#define HW_SPARC_SPARC64_H
+
+#include "target/sparc/cpu-qom.h"
#define IVEC_MAX 0x40
SPARCCPU *sparc64_cpu_devinit(const char *cpu_type, uint64_t prom_addr);
void sparc64_cpu_set_ivec_irq(void *opaque, int irq, int level);
+
+#endif
diff --git a/include/hw/sparc/sun4m_iommu.h b/include/hw/sparc/sun4m_iommu.h
index 938937eb04..4e2ab34cde 100644
--- a/include/hw/sparc/sun4m_iommu.h
+++ b/include/hw/sparc/sun4m_iommu.h
@@ -25,12 +25,12 @@
#ifndef SUN4M_IOMMU_H
#define SUN4M_IOMMU_H
-#include "qemu-common.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define IOMMU_NREGS (4 * 4096 / 4)
-typedef struct IOMMUState {
+struct IOMMUState {
SysBusDevice parent_obj;
AddressSpace iommu_as;
@@ -41,10 +41,12 @@ typedef struct IOMMUState {
hwaddr iostart;
qemu_irq irq;
uint32_t version;
-} IOMMUState;
+};
+typedef struct IOMMUState IOMMUState;
#define TYPE_SUN4M_IOMMU "sun4m-iommu"
-#define SUN4M_IOMMU(obj) OBJECT_CHECK(IOMMUState, (obj), TYPE_SUN4M_IOMMU)
+DECLARE_INSTANCE_CHECKER(IOMMUState, SUN4M_IOMMU,
+ TYPE_SUN4M_IOMMU)
#define TYPE_SUN4M_IOMMU_MEMORY_REGION "sun4m-iommu-memory-region"
diff --git a/include/hw/sparc/sun4u_iommu.h b/include/hw/sparc/sun4u_iommu.h
index a760172e8e..f94566a72c 100644
--- a/include/hw/sparc/sun4u_iommu.h
+++ b/include/hw/sparc/sun4u_iommu.h
@@ -27,12 +27,12 @@
#ifndef SUN4U_IOMMU_H
#define SUN4U_IOMMU_H
-#include "qemu-common.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define IOMMU_NREGS 3
-typedef struct IOMMUState {
+struct IOMMUState {
SysBusDevice parent_obj;
AddressSpace iommu_as;
@@ -40,10 +40,12 @@ typedef struct IOMMUState {
MemoryRegion iomem;
uint64_t regs[IOMMU_NREGS];
-} IOMMUState;
+};
+typedef struct IOMMUState IOMMUState;
#define TYPE_SUN4U_IOMMU "sun4u-iommu"
-#define SUN4U_IOMMU(obj) OBJECT_CHECK(IOMMUState, (obj), TYPE_SUN4U_IOMMU)
+DECLARE_INSTANCE_CHECKER(IOMMUState, SUN4U_IOMMU,
+ TYPE_SUN4U_IOMMU)
#define TYPE_SUN4U_IOMMU_MEMORY_REGION "sun4u-iommu-memory-region"
diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h
index 1f557313fa..8e1dda556b 100644
--- a/include/hw/ssi/aspeed_smc.h
+++ b/include/hw/ssi/aspeed_smc.h
@@ -26,65 +26,41 @@
#define ASPEED_SMC_H
#include "hw/ssi/ssi.h"
-
-typedef struct AspeedSegments {
- hwaddr addr;
- uint32_t size;
-} AspeedSegments;
+#include "hw/sysbus.h"
+#include "qom/object.h"
struct AspeedSMCState;
-typedef struct AspeedSMCController {
- const char *name;
- uint8_t r_conf;
- uint8_t r_ce_ctrl;
- uint8_t r_ctrl0;
- uint8_t r_timings;
- uint8_t conf_enable_w0;
- uint8_t max_slaves;
- const AspeedSegments *segments;
- hwaddr flash_window_base;
- uint32_t flash_window_size;
- bool has_dma;
- uint32_t nregs;
-} AspeedSMCController;
+struct AspeedSMCClass;
-typedef struct AspeedSMCFlash {
- struct AspeedSMCState *controller;
+#define TYPE_ASPEED_SMC_FLASH "aspeed.smc.flash"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedSMCFlash, ASPEED_SMC_FLASH)
+struct AspeedSMCFlash {
+ SysBusDevice parent_obj;
- uint8_t id;
- uint32_t size;
+ struct AspeedSMCState *controller;
+ struct AspeedSMCClass *asc;
+ uint8_t cs;
MemoryRegion mmio;
- DeviceState *flash;
-} AspeedSMCFlash;
+};
#define TYPE_ASPEED_SMC "aspeed.smc"
-#define ASPEED_SMC(obj) OBJECT_CHECK(AspeedSMCState, (obj), TYPE_ASPEED_SMC)
-#define ASPEED_SMC_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedSMCClass, (klass), TYPE_ASPEED_SMC)
-#define ASPEED_SMC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedSMCClass, (obj), TYPE_ASPEED_SMC)
-
-typedef struct AspeedSMCClass {
- SysBusDevice parent_obj;
- const AspeedSMCController *ctrl;
-} AspeedSMCClass;
+OBJECT_DECLARE_TYPE(AspeedSMCState, AspeedSMCClass, ASPEED_SMC)
#define ASPEED_SMC_R_MAX (0x100 / 4)
+#define ASPEED_SMC_CS_MAX 5
-typedef struct AspeedSMCState {
+struct AspeedSMCState {
SysBusDevice parent_obj;
- const AspeedSMCController *ctrl;
-
MemoryRegion mmio;
+ MemoryRegion mmio_flash_container;
MemoryRegion mmio_flash;
qemu_irq irq;
- int irqline;
- uint32_t num_cs;
qemu_irq *cs_lines;
+ bool inject_failure;
SSIBus *spi;
@@ -97,7 +73,46 @@ typedef struct AspeedSMCState {
uint8_t r_timings;
uint8_t conf_enable_w0;
- AspeedSMCFlash *flashes;
-} AspeedSMCState;
+ AddressSpace flash_as;
+ MemoryRegion *dram_mr;
+ AddressSpace dram_as;
+
+ AspeedSMCFlash flashes[ASPEED_SMC_CS_MAX];
+
+ uint8_t snoop_index;
+ uint8_t snoop_dummies;
+};
+
+typedef struct AspeedSegments {
+ hwaddr addr;
+ uint32_t size;
+} AspeedSegments;
+
+struct AspeedSMCClass {
+ SysBusDeviceClass parent_obj;
+
+ uint8_t r_conf;
+ uint8_t r_ce_ctrl;
+ uint8_t r_ctrl0;
+ uint8_t r_timings;
+ uint8_t nregs_timings;
+ uint8_t conf_enable_w0;
+ uint8_t cs_num_max;
+ const uint32_t *resets;
+ const AspeedSegments *segments;
+ uint32_t segment_addr_mask;
+ hwaddr flash_window_base;
+ uint32_t flash_window_size;
+ uint32_t features;
+ hwaddr dma_flash_mask;
+ hwaddr dma_dram_mask;
+ uint32_t nregs;
+ uint32_t (*segment_to_reg)(const AspeedSMCState *s,
+ const AspeedSegments *seg);
+ void (*reg_to_segment)(const AspeedSMCState *s, uint32_t reg,
+ AspeedSegments *seg);
+ void (*dma_ctrl)(AspeedSMCState *s, uint32_t value);
+ int (*addr_width)(const AspeedSMCState *s);
+};
#endif /* ASPEED_SMC_H */
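
The segment_to_reg()/reg_to_segment() hooks let each SoC define how a flash window (AspeedSegments) is packed into its Segment Address Register. The encoding below is purely hypothetical, chosen only to show the shape of such a hook, and does not correspond to any real Aspeed register layout:

/* Purely illustrative encoding: start/end in 8 MiB units, not a real SoC format. */
static uint32_t example_segment_to_reg(const AspeedSMCState *s,
                                       const AspeedSegments *seg)
{
    uint32_t start = seg->addr >> 23;
    uint32_t end = (seg->addr + seg->size) >> 23;

    return (end << 8) | start;
}
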
diff --git a/include/hw/ssi/bcm2835_spi.h b/include/hw/ssi/bcm2835_spi.h
new file mode 100644
index 0000000000..d3f8cec111
--- /dev/null
+++ b/include/hw/ssi/bcm2835_spi.h
@@ -0,0 +1,81 @@
+/*
+ * BCM2835 SPI Master Controller
+ *
+ * Copyright (c) 2024 Rayhan Faizel <rayhan.faizel@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/ssi/ssi.h"
+#include "qom/object.h"
+#include "qemu/fifo8.h"
+
+#define TYPE_BCM2835_SPI "bcm2835-spi"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SPIState, BCM2835_SPI)
+
+/*
+ * The BCM2835 documentation gives the FIFO capacity as 16; this means
+ * 16 32-bit words, i.e. effectively 64 bytes, when operating
+ * in non-DMA mode.
+ */
+#define FIFO_SIZE 64
+#define FIFO_SIZE_3_4 48
+
+#define RO_MASK 0x1f0000
+
+#define BCM2835_SPI_CS 0x00
+#define BCM2835_SPI_FIFO 0x04
+#define BCM2835_SPI_CLK 0x08
+#define BCM2835_SPI_DLEN 0x0c
+#define BCM2835_SPI_LTOH 0x10
+#define BCM2835_SPI_DC 0x14
+
+#define BCM2835_SPI_CS_RXF BIT(20)
+#define BCM2835_SPI_CS_RXR BIT(19)
+#define BCM2835_SPI_CS_TXD BIT(18)
+#define BCM2835_SPI_CS_RXD BIT(17)
+#define BCM2835_SPI_CS_DONE BIT(16)
+#define BCM2835_SPI_CS_LEN BIT(13)
+#define BCM2835_SPI_CS_REN BIT(12)
+#define BCM2835_SPI_CS_INTR BIT(10)
+#define BCM2835_SPI_CS_INTD BIT(9)
+#define BCM2835_SPI_CS_DMAEN BIT(8)
+#define BCM2835_SPI_CS_TA BIT(7)
+#define BCM2835_SPI_CLEAR_RX BIT(5)
+#define BCM2835_SPI_CLEAR_TX BIT(4)
+
+struct BCM2835SPIState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ SSIBus *bus;
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t cs;
+ uint32_t clk;
+ uint32_t dlen;
+ uint32_t ltoh;
+ uint32_t dc;
+
+ Fifo8 tx_fifo;
+ Fifo8 rx_fifo;
+};
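
The FIFO_SIZE_3_4 threshold pairs with the CS status bits defined above. As an illustrative (not authoritative) sketch of how the thresholds could drive the RXR/RXF flags using the Fifo8 helpers:

/* Illustrative status update based on the thresholds above; not necessarily
 * how the device model implements it. */
static void bcm2835_spi_example_update_rx_flags(BCM2835SPIState *s)
{
    if (fifo8_num_used(&s->rx_fifo) >= FIFO_SIZE_3_4) {
        s->cs |= BCM2835_SPI_CS_RXR;    /* RX FIFO needs reading (3/4 full) */
    }
    if (fifo8_is_full(&s->rx_fifo)) {
        s->cs |= BCM2835_SPI_CS_RXF;    /* RX FIFO is full */
    }
}
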
diff --git a/include/hw/ssi/ibex_spi_host.h b/include/hw/ssi/ibex_spi_host.h
new file mode 100644
index 0000000000..5bd5557b9a
--- /dev/null
+++ b/include/hw/ssi/ibex_spi_host.h
@@ -0,0 +1,92 @@
+
+/*
+ * QEMU model of the Ibex SPI Controller
+ * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
+ *
+ * Copyright (C) 2022 Western Digital
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef IBEX_SPI_HOST_H
+#define IBEX_SPI_HOST_H
+
+#include "hw/sysbus.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/fifo8.h"
+#include "qom/object.h"
+#include "qemu/timer.h"
+
+#define TYPE_IBEX_SPI_HOST "ibex-spi"
+#define IBEX_SPI_HOST(obj) \
+ OBJECT_CHECK(IbexSPIHostState, (obj), TYPE_IBEX_SPI_HOST)
+
+/* SPI Registers */
+#define IBEX_SPI_HOST_INTR_STATE (0x00 / 4) /* rw1c */
+#define IBEX_SPI_HOST_INTR_ENABLE (0x04 / 4) /* rw */
+#define IBEX_SPI_HOST_INTR_TEST (0x08 / 4) /* wo */
+#define IBEX_SPI_HOST_ALERT_TEST (0x0c / 4) /* wo */
+#define IBEX_SPI_HOST_CONTROL (0x10 / 4) /* rw */
+#define IBEX_SPI_HOST_STATUS (0x14 / 4) /* ro */
+#define IBEX_SPI_HOST_CONFIGOPTS (0x18 / 4) /* rw */
+#define IBEX_SPI_HOST_CSID (0x1c / 4) /* rw */
+#define IBEX_SPI_HOST_COMMAND (0x20 / 4) /* wo */
+/* RX/TX Modelled by FIFO */
+#define IBEX_SPI_HOST_RXDATA (0x24 / 4)
+#define IBEX_SPI_HOST_TXDATA (0x28 / 4)
+
+#define IBEX_SPI_HOST_ERROR_ENABLE (0x2c / 4) /* rw */
+#define IBEX_SPI_HOST_ERROR_STATUS (0x30 / 4) /* rw1c */
+#define IBEX_SPI_HOST_EVENT_ENABLE (0x34 / 4) /* rw */
+
+/* FIFO Len in Bytes */
+#define IBEX_SPI_HOST_TXFIFO_LEN 288
+#define IBEX_SPI_HOST_RXFIFO_LEN 256
+
+/* Max Register (Based on addr) */
+#define IBEX_SPI_HOST_MAX_REGS (IBEX_SPI_HOST_EVENT_ENABLE + 1)
+
+/* MISC */
+#define TX_INTERRUPT_TRIGGER_DELAY_NS 100
+#define BIDIRECTIONAL_TRANSFER 3
+
+typedef struct {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+ uint32_t regs[IBEX_SPI_HOST_MAX_REGS];
+ /* Multi-reg that sets config opts per CS */
+ uint32_t *config_opts;
+ Fifo8 rx_fifo;
+ Fifo8 tx_fifo;
+ QEMUTimer *fifo_trigger_handle;
+
+ qemu_irq event;
+ qemu_irq host_err;
+ uint32_t num_cs;
+ qemu_irq *cs_lines;
+ SSIBus *ssi;
+
+ /* Used to track the init status, for replicating TXDATA ghost writes */
+ bool init_status;
+} IbexSPIHostState;
+
+#endif
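
Note that the register macros above are byte offsets divided by 4, so they index directly into regs[]. A minimal sketch of an MMIO read path under that convention (function name invented for illustration):

/* 'addr' is a byte offset into the register window; regs[] is word-indexed. */
static uint64_t ibex_spi_example_read(void *opaque, hwaddr addr, unsigned size)
{
    IbexSPIHostState *s = opaque;

    return s->regs[addr / 4];   /* e.g. addr 0x14 -> IBEX_SPI_HOST_STATUS */
}
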
diff --git a/include/hw/ssi/imx_spi.h b/include/hw/ssi/imx_spi.h
index 7103953581..eeaf49bbac 100644
--- a/include/hw/ssi/imx_spi.h
+++ b/include/hw/ssi/imx_spi.h
@@ -14,6 +14,7 @@
#include "hw/ssi/ssi.h"
#include "qemu/bitops.h"
#include "qemu/fifo32.h"
+#include "qom/object.h"
#define ECSPI_FIFO_SIZE 64
@@ -76,10 +77,13 @@
#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
+/* number of chip selects supported */
+#define ECSPI_NUM_CS 4
+
#define TYPE_IMX_SPI "imx.spi"
-#define IMX_SPI(obj) OBJECT_CHECK(IMXSPIState, (obj), TYPE_IMX_SPI)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXSPIState, IMX_SPI)
-typedef struct IMXSPIState {
+struct IMXSPIState {
/* <private> */
SysBusDevice parent_obj;
@@ -88,7 +92,7 @@ typedef struct IMXSPIState {
qemu_irq irq;
- qemu_irq cs_lines[4];
+ qemu_irq cs_lines[ECSPI_NUM_CS];
SSIBus *bus;
@@ -98,6 +102,6 @@ typedef struct IMXSPIState {
Fifo32 tx_fifo;
int16_t burst_length;
-} IMXSPIState;
+};
#endif /* IMX_SPI_H */
diff --git a/include/hw/ssi/mss-spi.h b/include/hw/ssi/mss-spi.h
index f0cf3243e0..ce6279c431 100644
--- a/include/hw/ssi/mss-spi.h
+++ b/include/hw/ssi/mss-spi.h
@@ -28,13 +28,14 @@
#include "hw/sysbus.h"
#include "hw/ssi/ssi.h"
#include "qemu/fifo32.h"
+#include "qom/object.h"
#define TYPE_MSS_SPI "mss-spi"
-#define MSS_SPI(obj) OBJECT_CHECK(MSSSpiState, (obj), TYPE_MSS_SPI)
+OBJECT_DECLARE_SIMPLE_TYPE(MSSSpiState, MSS_SPI)
#define R_SPI_MAX 16
-typedef struct MSSSpiState {
+struct MSSSpiState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -53,6 +54,6 @@ typedef struct MSSSpiState {
bool enabled;
uint32_t regs[R_SPI_MAX];
-} MSSSpiState;
+};
#endif /* HW_MSS_SPI_H */
diff --git a/include/hw/ssi/npcm7xx_fiu.h b/include/hw/ssi/npcm7xx_fiu.h
new file mode 100644
index 0000000000..a3a1704289
--- /dev/null
+++ b/include/hw/ssi/npcm7xx_fiu.h
@@ -0,0 +1,73 @@
+/*
+ * Nuvoton NPCM7xx Flash Interface Unit (FIU)
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_FIU_H
+#define NPCM7XX_FIU_H
+
+#include "hw/ssi/ssi.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_FIU_NR_REGS (0x7c / sizeof(uint32_t))
+
+typedef struct NPCM7xxFIUState NPCM7xxFIUState;
+
+/**
+ * struct NPCM7xxFIUFlash - Per-chipselect flash controller state.
+ * @direct_access: Memory region for direct flash access.
+ * @fiu: Pointer to flash controller shared state.
+ */
+typedef struct NPCM7xxFIUFlash {
+ MemoryRegion direct_access;
+ NPCM7xxFIUState *fiu;
+} NPCM7xxFIUFlash;
+
+/**
+ * NPCM7xxFIUState - Device state for one Flash Interface Unit.
+ * @parent: System bus device.
+ * @mmio: Memory region for register access.
+ * @cs_count: Number of flash chips that may be connected to this module.
+ * @active_cs: Currently active chip select, or -1 if no chip is selected.
+ * @cs_lines: GPIO lines that may be wired to flash chips.
+ * @flash: Array of @cs_count per-flash-chip state objects.
+ * @spi: The SPI bus mastered by this controller.
+ * @regs: Register contents.
+ *
+ * Each FIU has a shared bank of registers, and controls up to four chip
+ * selects. Each chip select has a dedicated memory region which may be used to
+ * read and write the flash connected to that chip select as if it were memory.
+ */
+struct NPCM7xxFIUState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+
+ int32_t cs_count;
+ int32_t active_cs;
+ qemu_irq *cs_lines;
+ NPCM7xxFIUFlash *flash;
+
+ SSIBus *spi;
+
+ uint32_t regs[NPCM7XX_FIU_NR_REGS];
+};
+
+#define TYPE_NPCM7XX_FIU "npcm7xx-fiu"
+#define NPCM7XX_FIU(obj) OBJECT_CHECK(NPCM7xxFIUState, (obj), TYPE_NPCM7XX_FIU)
+
+#endif /* NPCM7XX_FIU_H */
diff --git a/include/hw/ssi/npcm_pspi.h b/include/hw/ssi/npcm_pspi.h
new file mode 100644
index 0000000000..37cc784d96
--- /dev/null
+++ b/include/hw/ssi/npcm_pspi.h
@@ -0,0 +1,53 @@
+/*
+ * Nuvoton Peripheral SPI Module
+ *
+ * Copyright 2023 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM_PSPI_H
+#define NPCM_PSPI_H
+
+#include "hw/ssi/ssi.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM_PSPI_NR_REGS 3
+
+/**
+ * NPCMPSPIState - Device state for one Peripheral SPI Module.
+ * @parent: System bus device.
+ * @mmio: Memory region for register access.
+ * @spi: The SPI bus mastered by this controller.
+ * @regs: Register contents.
+ * @irq: The interrupt request queue for this module.
+ *
+ * Each PSPI exposes a small bank of registers and masters a single SPI
+ * bus; the module raises @irq to signal interrupts to the core.
+ */
+typedef struct NPCMPSPIState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+
+ SSIBus *spi;
+ uint16_t regs[NPCM_PSPI_NR_REGS];
+ qemu_irq irq;
+} NPCMPSPIState;
+
+#define TYPE_NPCM_PSPI "npcm-pspi"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCMPSPIState, NPCM_PSPI)
+
+#endif /* NPCM_PSPI_H */
diff --git a/include/hw/ssi/pl022.h b/include/hw/ssi/pl022.h
index a080519366..25d58db5f3 100644
--- a/include/hw/ssi/pl022.h
+++ b/include/hw/ssi/pl022.h
@@ -9,9 +9,10 @@
* (at your option) any later version.
*/
-/* This is a model of the Arm PrimeCell PL022 synchronous serial port.
+/*
+ * This is a model of the Arm PrimeCell PL022 synchronous serial port.
* The PL022 TRM is:
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0194h/DDI0194H_ssp_pl022_trm.pdf
+ * https://developer.arm.com/documentation/ddi0194/latest
*
* QEMU interface:
* + sysbus IRQ: SSPINTR combined interrupt line
@@ -22,11 +23,12 @@
#define HW_SSI_PL022_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_PL022 "pl022"
-#define PL022(obj) OBJECT_CHECK(PL022State, (obj), TYPE_PL022)
+OBJECT_DECLARE_SIMPLE_TYPE(PL022State, PL022)
-typedef struct PL022State {
+struct PL022State {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -46,6 +48,6 @@ typedef struct PL022State {
uint16_t rx_fifo[8];
qemu_irq irq;
SSIBus *ssi;
-} PL022State;
+};
#endif
diff --git a/include/hw/ssi/sifive_spi.h b/include/hw/ssi/sifive_spi.h
new file mode 100644
index 0000000000..d0c40cdb11
--- /dev/null
+++ b/include/hw/ssi/sifive_spi.h
@@ -0,0 +1,50 @@
+/*
+ * QEMU model of the SiFive SPI Controller
+ *
+ * Copyright (c) 2021 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_SPI_H
+#define HW_SIFIVE_SPI_H
+
+#include "qemu/fifo8.h"
+#include "hw/sysbus.h"
+
+#define SIFIVE_SPI_REG_NUM (0x78 / 4)
+
+#define TYPE_SIFIVE_SPI "sifive.spi"
+#define SIFIVE_SPI(obj) OBJECT_CHECK(SiFiveSPIState, (obj), TYPE_SIFIVE_SPI)
+
+typedef struct SiFiveSPIState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+
+ uint32_t num_cs;
+ qemu_irq *cs_lines;
+
+ SSIBus *spi;
+
+ Fifo8 tx_fifo;
+ Fifo8 rx_fifo;
+
+ uint32_t regs[SIFIVE_SPI_REG_NUM];
+} SiFiveSPIState;
+
+#endif /* HW_SIFIVE_SPI_H */
diff --git a/include/hw/ssi/ssi.h b/include/hw/ssi/ssi.h
index 6a0c3c3cdb..3cdcbd5390 100644
--- a/include/hw/ssi/ssi.h
+++ b/include/hw/ssi/ssi.h
@@ -1,29 +1,26 @@
/* QEMU Synchronous Serial Interface support. */
-/* In principle SSI is a point-point interface. As such the qemu
- implementation has a single slave device on a "bus".
- However it is fairly common for boards to have multiple slaves
- connected to a single master, and select devices with an external
- chip select. This is implemented in qemu by having an explicit mux device.
- It is assumed that master and slave are both using the same transfer width.
- */
+/*
+ * In principle SSI is a point-to-point interface. As such the qemu
+ * implementation has a single peripheral on a "bus".
+ * However it is fairly common for boards to have multiple peripherals
+ * connected to a single master, and select devices with an external
+ * chip select. This is implemented in qemu by having an explicit mux device.
+ * It is assumed that master and peripheral are both using the same transfer
+ * width.
+ */
#ifndef QEMU_SSI_H
#define QEMU_SSI_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct SSISlave SSISlave;
-typedef struct SSISlaveClass SSISlaveClass;
typedef enum SSICSMode SSICSMode;
-#define TYPE_SSI_SLAVE "ssi-slave"
-#define SSI_SLAVE(obj) \
- OBJECT_CHECK(SSISlave, (obj), TYPE_SSI_SLAVE)
-#define SSI_SLAVE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SSISlaveClass, (klass), TYPE_SSI_SLAVE)
-#define SSI_SLAVE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SSISlaveClass, (obj), TYPE_SSI_SLAVE)
+#define TYPE_SSI_PERIPHERAL "ssi-peripheral"
+OBJECT_DECLARE_TYPE(SSIPeripheral, SSIPeripheralClass,
+ SSI_PERIPHERAL)
#define SSI_GPIO_CS "ssi-gpio-cs"
@@ -33,21 +30,21 @@ enum SSICSMode {
SSI_CS_HIGH,
};
-/* Slave devices. */
-struct SSISlaveClass {
+/* Peripherals. */
+struct SSIPeripheralClass {
DeviceClass parent_class;
- void (*realize)(SSISlave *dev, Error **errp);
+ void (*realize)(SSIPeripheral *dev, Error **errp);
/* if you have standard or no CS behaviour, just override transfer.
* This is called when the device cs is active (true by default).
*/
- uint32_t (*transfer)(SSISlave *dev, uint32_t val);
+ uint32_t (*transfer)(SSIPeripheral *dev, uint32_t val);
/* called when the CS line changes. Optional, devices only need to implement
* this if they have side effects associated with the cs line (beyond
* tristating the txrx lines).
*/
- int (*set_cs)(SSISlave *dev, bool select);
+ int (*set_cs)(SSIPeripheral *dev, bool select);
/* define whether or not CS exists and is active low/high */
SSICSMode cs_polarity;
@@ -56,41 +53,65 @@ struct SSISlaveClass {
* cs_polarity are unused if this is overwritten. Transfer_raw will
* always be called for the device for every txrx access to the parent bus
*/
- uint32_t (*transfer_raw)(SSISlave *dev, uint32_t val);
+ uint32_t (*transfer_raw)(SSIPeripheral *dev, uint32_t val);
};
-struct SSISlave {
+struct SSIPeripheral {
DeviceState parent_obj;
+ /* cache the class */
+ SSIPeripheralClass *spc;
+
/* Chip select state */
bool cs;
-};
-#define FROM_SSI_SLAVE(type, dev) DO_UPCAST(type, ssidev, dev)
+ /* Chip select index */
+ uint8_t cs_index;
+};
-extern const VMStateDescription vmstate_ssi_slave;
+extern const VMStateDescription vmstate_ssi_peripheral;
-#define VMSTATE_SSI_SLAVE(_field, _state) { \
+#define VMSTATE_SSI_PERIPHERAL(_field, _state) { \
.name = (stringify(_field)), \
- .size = sizeof(SSISlave), \
- .vmsd = &vmstate_ssi_slave, \
+ .size = sizeof(SSIPeripheral), \
+ .vmsd = &vmstate_ssi_peripheral, \
.flags = VMS_STRUCT, \
- .offset = vmstate_offset_value(_state, _field, SSISlave), \
+ .offset = vmstate_offset_value(_state, _field, SSIPeripheral), \
}
-DeviceState *ssi_create_slave(SSIBus *bus, const char *name);
-DeviceState *ssi_create_slave_no_init(SSIBus *bus, const char *name);
+DeviceState *ssi_create_peripheral(SSIBus *bus, const char *name);
+/**
+ * ssi_realize_and_unref: realize and unref an SSI peripheral
+ * @dev: SSI peripheral to realize
+ * @bus: SSI bus to put it on
+ * @errp: error pointer
+ *
+ * Call 'realize' on @dev, put it on the specified @bus, and drop the
+ * reference to it. Errors are reported via @errp and by returning
+ * false.
+ *
+ * This function is useful if you have created @dev via qdev_new()
+ * (which takes a reference to the device it returns to you), so that
+ * you can set properties on it before realizing it. If you don't need
+ * to set properties then ssi_create_peripheral() is probably better (as it
+ * does the create, init and realize in one step).
+ *
+ * If you are embedding the SSI peripheral into another QOM device and
+ * initialized it via some variant on object_initialize_child() then
+ * do not use this function, because that family of functions arrange
+ * for the only reference to the child device to be held by the parent
+ * via the child<> property, and so the reference-count-drop done here
+ * would be incorrect. (Instead you would want ssi_realize(), which
+ * doesn't currently exist but would be trivial to create if we had
+ * any code that wanted it.)
+ */
+bool ssi_realize_and_unref(DeviceState *dev, SSIBus *bus, Error **errp);
/* Master interface. */
SSIBus *ssi_create_bus(DeviceState *parent, const char *name);
uint32_t ssi_transfer(SSIBus *bus, uint32_t val);
-/* Automatically connect all children nodes a spi controller as slaves */
-void ssi_auto_connect_slaves(DeviceState *parent, qemu_irq *cs_lines,
- SSIBus *bus);
-
-/* max111x.c */
-void max111x_set_input(DeviceState *dev, int line, uint8_t value);
+DeviceState *ssi_get_cs(SSIBus *bus, uint8_t cs_index);
#endif
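(As a usage illustration of the two creation paths described in the ssi_realize_and_unref() comment above — not taken from the patch — board code might look like the sketch below; the device type name and its property are invented.)

/* Illustrative only: "some-ssi-device" and its "spec" property are invented. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/ssi/ssi.h"
#include "hw/qdev-properties.h"

static void example_attach_peripherals(SSIBus *bus)
{
    DeviceState *dev;

    /* Simple case: create, init and realize in one step. */
    ssi_create_peripheral(bus, "some-ssi-device");

    /* Property-setting case: qdev_new() first, then ssi_realize_and_unref(). */
    dev = qdev_new("some-ssi-device");
    qdev_prop_set_uint8(dev, "spec", 1);
    ssi_realize_and_unref(dev, bus, &error_fatal);
}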
diff --git a/include/hw/ssi/stm32f2xx_spi.h b/include/hw/ssi/stm32f2xx_spi.h
index 1cd73e4cd4..3683b4ad32 100644
--- a/include/hw/ssi/stm32f2xx_spi.h
+++ b/include/hw/ssi/stm32f2xx_spi.h
@@ -26,8 +26,8 @@
#define HW_STM32F2XX_SPI_H
#include "hw/sysbus.h"
-#include "hw/hw.h"
#include "hw/ssi/ssi.h"
+#include "qom/object.h"
#define STM_SPI_CR1 0x00
#define STM_SPI_CR2 0x04
@@ -45,10 +45,9 @@
#define STM_SPI_SR_RXNE 1
#define TYPE_STM32F2XX_SPI "stm32f2xx-spi"
-#define STM32F2XX_SPI(obj) \
- OBJECT_CHECK(STM32F2XXSPIState, (obj), TYPE_STM32F2XX_SPI)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXSPIState, STM32F2XX_SPI)
-typedef struct {
+struct STM32F2XXSPIState {
/* <private> */
SysBusDevice parent_obj;
@@ -67,6 +66,6 @@ typedef struct {
qemu_irq irq;
SSIBus *ssi;
-} STM32F2XXSPIState;
+};
#endif /* HW_STM32F2XX_SPI_H */
diff --git a/include/hw/ssi/xilinx_spips.h b/include/hw/ssi/xilinx_spips.h
index a0a0ae7584..7a754bf67a 100644
--- a/include/hw/ssi/xilinx_spips.h
+++ b/include/hw/ssi/xilinx_spips.h
@@ -28,11 +28,15 @@
#include "hw/ssi/ssi.h"
#include "qemu/fifo32.h"
#include "hw/stream.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
typedef struct XilinxSPIPS XilinxSPIPS;
+/* For SPIPS, QSPIPS. */
#define XLNX_SPIPS_R_MAX (0x100 / 4)
-#define XLNX_ZYNQMP_SPIPS_R_MAX (0x830 / 4)
+/* For ZYNQMP_QSPIPS. */
+#define XLNX_ZYNQMP_SPIPS_R_MAX (0x200 / 4)
/* Bite off 4k chunks at a time */
#define LQSPI_CACHE_SIZE 1024
@@ -84,24 +88,25 @@ struct XilinxSPIPS {
bool man_start_com;
};
-typedef struct {
+struct XilinxQSPIPS {
XilinxSPIPS parent_obj;
uint8_t lqspi_buf[LQSPI_CACHE_SIZE];
hwaddr lqspi_cached_addr;
Error *migration_blocker;
bool mmio_execution_enabled;
-} XilinxQSPIPS;
+};
+typedef struct XilinxQSPIPS XilinxQSPIPS;
-typedef struct {
+struct XlnxZynqMPQSPIPS {
XilinxQSPIPS parent_obj;
- StreamSlave *dma;
+ StreamSink *dma;
int gqspi_irqline;
uint32_t regs[XLNX_ZYNQMP_SPIPS_R_MAX];
- /* GQSPI has seperate tx/rx fifos */
+ /* GQSPI has separate tx/rx fifos */
Fifo8 rx_fifo_g;
Fifo8 tx_fifo_g;
Fifo32 fifo_g;
@@ -116,32 +121,26 @@ typedef struct {
bool man_start_com_g;
uint32_t dma_burst_size;
uint8_t dma_buf[QSPI_DMA_MAX_BURST_SIZE];
-} XlnxZynqMPQSPIPS;
+};
-typedef struct XilinxSPIPSClass {
+struct XilinxSPIPSClass {
SysBusDeviceClass parent_class;
const MemoryRegionOps *reg_ops;
+ uint64_t reg_size;
uint32_t rx_fifo_size;
uint32_t tx_fifo_size;
-} XilinxSPIPSClass;
+};
#define TYPE_XILINX_SPIPS "xlnx.ps7-spi"
#define TYPE_XILINX_QSPIPS "xlnx.ps7-qspi"
#define TYPE_XLNX_ZYNQMP_QSPIPS "xlnx.usmp-gqspi"
-#define XILINX_SPIPS(obj) \
- OBJECT_CHECK(XilinxSPIPS, (obj), TYPE_XILINX_SPIPS)
-#define XILINX_SPIPS_CLASS(klass) \
- OBJECT_CLASS_CHECK(XilinxSPIPSClass, (klass), TYPE_XILINX_SPIPS)
-#define XILINX_SPIPS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XilinxSPIPSClass, (obj), TYPE_XILINX_SPIPS)
+OBJECT_DECLARE_TYPE(XilinxSPIPS, XilinxSPIPSClass, XILINX_SPIPS)
-#define XILINX_QSPIPS(obj) \
- OBJECT_CHECK(XilinxQSPIPS, (obj), TYPE_XILINX_QSPIPS)
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxQSPIPS, XILINX_QSPIPS)
-#define XLNX_ZYNQMP_QSPIPS(obj) \
- OBJECT_CHECK(XlnxZynqMPQSPIPS, (obj), TYPE_XLNX_ZYNQMP_QSPIPS)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPQSPIPS, XLNX_ZYNQMP_QSPIPS)
#endif /* XILINX_SPIPS_H */
diff --git a/include/hw/ssi/xlnx-versal-ospi.h b/include/hw/ssi/xlnx-versal-ospi.h
new file mode 100644
index 0000000000..4ac975aa2f
--- /dev/null
+++ b/include/hw/ssi/xlnx-versal-ospi.h
@@ -0,0 +1,111 @@
+/*
+ * Header file for the Xilinx Versal's OSPI controller
+ *
+ * Copyright (C) 2021 Xilinx Inc
+ * Written by Francisco Iglesias <francisco.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This is a model of Xilinx Versal's Octal SPI flash memory controller
+ * documented in Versal's Technical Reference manual [1] and the Versal ACAP
+ * Register reference [2].
+ *
+ * References:
+ *
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/OSPI-Module
+ *
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion for the device's registers
+ * + sysbus MMIO region 1: MemoryRegion for flash memory linear address space
+ * (data transfer).
+ * + sysbus IRQ 0: Device interrupt.
+ * + Named GPIO input "ospi-mux-sel": 0: enables indirect access mode
+ * and 1: enables direct access mode.
+ * + Property "dac-with-indac": Allow both direct accesses and indirect
+ * accesses simultaneously.
+ * + Property "indac-write-disabled": Disable indirect access writes.
+ */
+
+#ifndef XLNX_VERSAL_OSPI_H
+#define XLNX_VERSAL_OSPI_H
+
+#include "hw/register.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/fifo8.h"
+#include "hw/dma/xlnx_csu_dma.h"
+
+#define TYPE_XILINX_VERSAL_OSPI "xlnx.versal-ospi"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalOspi, XILINX_VERSAL_OSPI)
+
+#define XILINX_VERSAL_OSPI_R_MAX (0xfc / 4 + 1)
+
+/*
+ * Indirect operations
+ */
+typedef struct IndOp {
+ uint32_t flash_addr;
+ uint32_t num_bytes;
+ uint32_t done_bytes;
+ bool completed;
+} IndOp;
+
+struct XlnxVersalOspi {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ MemoryRegion iomem_dac;
+
+ uint8_t num_cs;
+ qemu_irq *cs_lines;
+
+ SSIBus *spi;
+
+ Fifo8 rx_fifo;
+ Fifo8 tx_fifo;
+
+ Fifo8 rx_sram;
+ Fifo8 tx_sram;
+
+ qemu_irq irq;
+
+ XlnxCSUDMA *dma_src;
+ bool ind_write_disabled;
+ bool dac_with_indac;
+ bool dac_enable;
+ bool src_dma_inprog;
+
+ IndOp rd_ind_op[2];
+ IndOp wr_ind_op[2];
+
+ uint32_t regs[XILINX_VERSAL_OSPI_R_MAX];
+ RegisterInfo regs_info[XILINX_VERSAL_OSPI_R_MAX];
+
+ /* Maximum inferred membank size is 512 bytes */
+ uint8_t stig_membank[512];
+};
+
+#endif /* XLNX_VERSAL_OSPI_H */
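(A hedged wiring sketch for the QEMU interface described above; the addresses are invented, and the controller's CSU DMA link, which a real board must also set up, is omitted for brevity.)

/* Illustrative only: addresses are invented; DMA link setup is omitted. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/ssi/xlnx-versal-ospi.h"
#include "hw/qdev-properties.h"

static void example_wire_ospi(qemu_irq irq)
{
    DeviceState *dev = qdev_new(TYPE_XILINX_VERSAL_OSPI);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    /* Optional behaviour tweak exposed as a property (see comment above). */
    qdev_prop_set_bit(dev, "dac-with-indac", true);

    /* A real board must also wire the CSU DMA source before realizing. */
    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_mmio_map(sbd, 0, 0xf1010000); /* register frame (invented) */
    sysbus_mmio_map(sbd, 1, 0xc0000000); /* linear flash window (invented) */
    sysbus_connect_irq(sbd, 0, irq);

    /* Drive "ospi-mux-sel": 0 selects indirect, 1 selects direct access. */
    qemu_set_irq(qdev_get_gpio_in_named(dev, "ospi-mux-sel", 0), 1);
}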
diff --git a/include/hw/stream.h b/include/hw/stream.h
index 15774f07ab..f166facb09 100644
--- a/include/hw/stream.h
+++ b/include/hw/stream.h
@@ -1,54 +1,52 @@
#ifndef STREAM_H
#define STREAM_H
-#include "qemu-common.h"
#include "qom/object.h"
-/* stream slave. Used until qdev provides a generic way. */
-#define TYPE_STREAM_SLAVE "stream-slave"
+#define TYPE_STREAM_SINK "stream-sink"
-#define STREAM_SLAVE_CLASS(klass) \
- OBJECT_CLASS_CHECK(StreamSlaveClass, (klass), TYPE_STREAM_SLAVE)
-#define STREAM_SLAVE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(StreamSlaveClass, (obj), TYPE_STREAM_SLAVE)
-#define STREAM_SLAVE(obj) \
- INTERFACE_CHECK(StreamSlave, (obj), TYPE_STREAM_SLAVE)
+typedef struct StreamSinkClass StreamSinkClass;
+DECLARE_CLASS_CHECKERS(StreamSinkClass, STREAM_SINK,
+ TYPE_STREAM_SINK)
+#define STREAM_SINK(obj) \
+ INTERFACE_CHECK(StreamSink, (obj), TYPE_STREAM_SINK)
-typedef struct StreamSlave StreamSlave;
+typedef struct StreamSink StreamSink;
typedef void (*StreamCanPushNotifyFn)(void *opaque);
-typedef struct StreamSlaveClass {
+struct StreamSinkClass {
InterfaceClass parent;
/**
- * can push - determine if a stream slave is capable of accepting at least
+ * can push - determine if a stream sink is capable of accepting at least
* one byte of data. Returns false if cannot accept. If not implemented, the
- * slave is assumed to always be capable of receiving.
- * @notify: Optional callback that the slave will call when the slave is
+ * sink is assumed to always be capable of receiving.
+ * @notify: Optional callback that the sink will call when the sink is
* capable of receiving again. Only called if false is returned.
* @notify_opaque: opaque data to pass to notify call.
*/
- bool (*can_push)(StreamSlave *obj, StreamCanPushNotifyFn notify,
+ bool (*can_push)(StreamSink *obj, StreamCanPushNotifyFn notify,
void *notify_opaque);
/**
- * push - push data to a Stream slave. The number of bytes pushed is
- * returned. If the slave short returns, the master must wait before trying
- * again, the slave may continue to just return 0 waiting for the vm time to
+ * push - push data to a Stream sink. The number of bytes pushed is
+ * returned. If the sink short returns, the master must wait before trying
+ * again, the sink may continue to just return 0 waiting for the vm time to
* advance. The can_push() function can be used to trap the point in time
- * where the slave is ready to receive again, otherwise polling on a QEMU
+ * where the sink is ready to receive again, otherwise polling on a QEMU
* timer will work.
- * @obj: Stream slave to push to
+ * @obj: Stream sink to push to
* @buf: Data to write
* @len: Maximum number of bytes to write
+ * @eop: End of packet flag
*/
- size_t (*push)(StreamSlave *obj, unsigned char *buf, size_t len);
-} StreamSlaveClass;
+ size_t (*push)(StreamSink *obj, unsigned char *buf, size_t len, bool eop);
+};
size_t
-stream_push(StreamSlave *sink, uint8_t *buf, size_t len);
+stream_push(StreamSink *sink, uint8_t *buf, size_t len, bool eop);
bool
-stream_can_push(StreamSlave *sink, StreamCanPushNotifyFn notify,
+stream_can_push(StreamSink *sink, StreamCanPushNotifyFn notify,
void *notify_opaque);
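(To make the can_push()/push() contract above concrete, here is a hedged producer-side sketch; the MySource structure and its buffering policy are invented.)

/* Illustrative producer only; MySource and its buffering are invented. */
#include "qemu/osdep.h"
#include "hw/stream.h"

typedef struct MySource {
    StreamSink *target;   /* the sink we push into, e.g. a DMA engine */
    uint8_t buf[256];
    size_t len;
} MySource;

static void my_source_notify(void *opaque);

static void my_source_try_push(MySource *s)
{
    size_t done;

    /* Ask first; a full sink will call my_source_notify() when ready again. */
    if (!stream_can_push(s->target, my_source_notify, s)) {
        return;
    }
    /* Push one packet; 'true' marks end-of-packet per the new API. */
    done = stream_push(s->target, s->buf, s->len, true);
    /* A short return means the sink stalled; keep the remainder for a retry. */
    memmove(s->buf, s->buf + done, s->len - done);
    s->len -= done;
}

static void my_source_notify(void *opaque)
{
    my_source_try_push(opaque);
}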
diff --git a/include/hw/sysbus.h b/include/hw/sysbus.h
index 1aedcf05c9..3cb29a480e 100644
--- a/include/hw/sysbus.h
+++ b/include/hw/sysbus.h
@@ -3,31 +3,24 @@
/* Devices attached directly to the main system bus. */
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
#include "exec/memory.h"
+#include "qom/object.h"
#define QDEV_MAX_MMIO 32
#define QDEV_MAX_PIO 32
#define TYPE_SYSTEM_BUS "System"
-#define SYSTEM_BUS(obj) OBJECT_CHECK(BusState, (obj), TYPE_SYSTEM_BUS)
+DECLARE_INSTANCE_CHECKER(BusState, SYSTEM_BUS,
+ TYPE_SYSTEM_BUS)
-typedef struct SysBusDevice SysBusDevice;
#define TYPE_SYS_BUS_DEVICE "sys-bus-device"
-#define SYS_BUS_DEVICE(obj) \
- OBJECT_CHECK(SysBusDevice, (obj), TYPE_SYS_BUS_DEVICE)
-#define SYS_BUS_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SysBusDeviceClass, (klass), TYPE_SYS_BUS_DEVICE)
-#define SYS_BUS_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SysBusDeviceClass, (obj), TYPE_SYS_BUS_DEVICE)
+OBJECT_DECLARE_TYPE(SysBusDevice, SysBusDeviceClass,
+ SYS_BUS_DEVICE)
/**
* SysBusDeviceClass:
- * @init: Callback function invoked when the #DeviceState.realized property
- * is changed to %true. Deprecated, new types inheriting directly from
- * TYPE_SYS_BUS_DEVICE should use #DeviceClass.realize instead, new leaf
- * types should consult their respective parent type.
*
* SysBusDeviceClass is not overriding #DeviceClass.realize, so derived
* classes overriding it are not required to invoke its implementation.
@@ -35,7 +28,7 @@ typedef struct SysBusDevice SysBusDevice;
#define SYSBUS_DEVICE_GPIO_IRQ "sysbus-irq"
-typedef struct SysBusDeviceClass {
+struct SysBusDeviceClass {
/*< private >*/
DeviceClass parent_class;
@@ -56,7 +49,7 @@ typedef struct SysBusDeviceClass {
*/
char *(*explicit_ofw_unit_address)(const SysBusDevice *dev);
void (*connect_irq_notifier)(SysBusDevice *dev, qemu_irq irq);
-} SysBusDeviceClass;
+};
struct SysBusDevice {
/*< private >*/
@@ -89,26 +82,10 @@ qemu_irq sysbus_get_connected_irq(SysBusDevice *dev, int n);
void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr);
void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr,
int priority);
-void sysbus_add_io(SysBusDevice *dev, hwaddr addr,
- MemoryRegion *mem);
-MemoryRegion *sysbus_address_space(SysBusDevice *dev);
+void sysbus_mmio_unmap(SysBusDevice *dev, int n);
-/**
- * sysbus_init_child_obj:
- * @parent: The parent object
- * @childname: Used as name of the "child<>" property in the parent
- * @child: A pointer to the memory to be used for the object.
- * @childsize: The maximum size available at @child for the object.
- * @childtype: The name of the type of the object to instantiate.
- *
- * This function will initialize an object and attach it to the main system
- * bus. The memory for the object should have already been allocated. The
- * object will then be added as child to the given parent. The returned object
- * has a reference count of 1 (for the "child<...>" property from the parent),
- * so the object will be finalized automatically when the parent gets removed.
- */
-void sysbus_init_child_obj(Object *parent, const char *childname, void *child,
- size_t childsize, const char *childtype);
+bool sysbus_realize(SysBusDevice *dev, Error **errp);
+bool sysbus_realize_and_unref(SysBusDevice *dev, Error **errp);
/* Call func for every dynamically created sysbus device in the system */
void foreach_dynamic_sysbus_device(FindSysbusDeviceFunc *func, void *opaque);
@@ -116,8 +93,7 @@ void foreach_dynamic_sysbus_device(FindSysbusDeviceFunc *func, void *opaque);
/* Legacy helper function for creating devices. */
DeviceState *sysbus_create_varargs(const char *name,
hwaddr addr, ...);
-DeviceState *sysbus_try_create_varargs(const char *name,
- hwaddr addr, ...);
+
static inline DeviceState *sysbus_create_simple(const char *name,
hwaddr addr,
qemu_irq irq)
@@ -125,11 +101,4 @@ static inline DeviceState *sysbus_create_simple(const char *name,
return sysbus_create_varargs(name, addr, irq, NULL);
}
-static inline DeviceState *sysbus_try_create_simple(const char *name,
- hwaddr addr,
- qemu_irq irq)
-{
- return sysbus_try_create_varargs(name, addr, irq, NULL);
-}
-
#endif /* HW_SYSBUS_H */
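(Since this patch drops sysbus_init_child_obj(), the sketch below shows the replacement pattern suggested by the new declarations, i.e. object_initialize_child() in instance_init plus sysbus_realize() in realize; the container and child types are invented.)

/* Illustrative only: MySoCState, MyChildState and the address are invented. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"

#define TYPE_MY_CHILD "my-child-device"     /* invented type name */

typedef struct MyChildState {
    SysBusDevice parent_obj;                /* stands in for a real device */
} MyChildState;

typedef struct MySoCState {
    SysBusDevice parent_obj;
    MyChildState child;
} MySoCState;

static void my_soc_init(Object *obj)
{
    MySoCState *s = (MySoCState *)obj;

    /* The parent holds the only reference via the child<> property. */
    object_initialize_child(obj, "child", &s->child, TYPE_MY_CHILD);
}

static void my_soc_realize(DeviceState *dev, Error **errp)
{
    MySoCState *s = (MySoCState *)dev;

    /* Use sysbus_realize(), not _and_unref(), for embedded children. */
    if (!sysbus_realize(SYS_BUS_DEVICE(&s->child), errp)) {
        return;
    }
    sysbus_mmio_map(SYS_BUS_DEVICE(&s->child), 0, 0x40000000); /* invented */
}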
diff --git a/include/hw/timer/a9gtimer.h b/include/hw/timer/a9gtimer.h
index 81c4388784..6ae9122e4b 100644
--- a/include/hw/timer/a9gtimer.h
+++ b/include/hw/timer/a9gtimer.h
@@ -24,11 +24,12 @@
#define A9GTIMER_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define A9_GTIMER_MAX_CPUS 4
#define TYPE_A9_GTIMER "arm.cortex-a9-global-timer"
-#define A9_GTIMER(obj) OBJECT_CHECK(A9GTimerState, (obj), TYPE_A9_GTIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(A9GTimerState, A9_GTIMER)
#define R_COUNTER_LO 0x00
#define R_COUNTER_HI 0x04
@@ -55,7 +56,6 @@
#define R_AUTO_INCREMENT 0x18
typedef struct A9GTimerPerCPU A9GTimerPerCPU;
-typedef struct A9GTimerState A9GTimerState;
struct A9GTimerPerCPU {
A9GTimerState *parent;
diff --git a/include/hw/timer/allwinner-a10-pit.h b/include/hw/timer/allwinner-a10-pit.h
index c0cc3e2169..8435758ad6 100644
--- a/include/hw/timer/allwinner-a10-pit.h
+++ b/include/hw/timer/allwinner-a10-pit.h
@@ -2,9 +2,11 @@
#define ALLWINNER_A10_PIT_H
#include "hw/ptimer.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_AW_A10_PIT "allwinner-A10-timer"
-#define AW_A10_PIT(obj) OBJECT_CHECK(AwA10PITState, (obj), TYPE_AW_A10_PIT)
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10PITState, AW_A10_PIT)
#define AW_A10_PIT_TIMER_NR 6
#define AW_A10_PIT_TIMER_IRQ 0x1
@@ -35,7 +37,6 @@
#define AW_A10_PIT_DEFAULT_CLOCK 0x4
-typedef struct AwA10PITState AwA10PITState;
typedef struct AwA10TimerContext {
AwA10PITState *container;
diff --git a/include/hw/timer/arm_mptimer.h b/include/hw/timer/arm_mptimer.h
index c46d8d2309..65a96e2a0d 100644
--- a/include/hw/timer/arm_mptimer.h
+++ b/include/hw/timer/arm_mptimer.h
@@ -22,6 +22,7 @@
#define HW_TIMER_ARM_MPTIMER_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define ARM_MPTIMER_MAX_CPUS 4
@@ -35,10 +36,9 @@ typedef struct {
} TimerBlock;
#define TYPE_ARM_MPTIMER "arm_mptimer"
-#define ARM_MPTIMER(obj) \
- OBJECT_CHECK(ARMMPTimerState, (obj), TYPE_ARM_MPTIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(ARMMPTimerState, ARM_MPTIMER)
-typedef struct {
+struct ARMMPTimerState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -46,6 +46,6 @@ typedef struct {
uint32_t num_cpu;
TimerBlock timerblock[ARM_MPTIMER_MAX_CPUS];
MemoryRegion iomem;
-} ARMMPTimerState;
+};
#endif
diff --git a/include/hw/timer/armv7m_systick.h b/include/hw/timer/armv7m_systick.h
index cca04defd8..ee09b13881 100644
--- a/include/hw/timer/armv7m_systick.h
+++ b/include/hw/timer/armv7m_systick.h
@@ -13,12 +13,26 @@
#define HW_TIMER_ARMV7M_SYSTICK_H
#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "hw/ptimer.h"
+#include "hw/clock.h"
#define TYPE_SYSTICK "armv7m_systick"
-#define SYSTICK(obj) OBJECT_CHECK(SysTickState, (obj), TYPE_SYSTICK)
+OBJECT_DECLARE_SIMPLE_TYPE(SysTickState, SYSTICK)
-typedef struct SysTickState {
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0 is the register interface (covering
+ * the registers which are mapped at address 0xE000E010)
+ * + sysbus IRQ 0 is the interrupt line to the NVIC
+ * + Clock input "refclk" is the external reference clock
+ * (used when SYST_CSR.CLKSOURCE == 0)
+ * + Clock input "cpuclk" is the main CPU clock
+ * (used when SYST_CSR.CLKSOURCE == 1)
+ */
+
+struct SysTickState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -26,9 +40,11 @@ typedef struct SysTickState {
uint32_t control;
uint32_t reload;
int64_t tick;
- QEMUTimer *timer;
+ ptimer_state *ptimer;
MemoryRegion iomem;
qemu_irq irq;
-} SysTickState;
+ Clock *refclk;
+ Clock *cpuclk;
+};
#endif
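(A hedged sketch of wiring the two clock inputs named in the comment above; the clock sources and the NVIC input come from the surrounding SoC model, and mapping the register frame at 0xE000E010 is normally done by the armv7m container.)

/* Illustrative only; cpuclk/refclk are provided by the surrounding model. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/timer/armv7m_systick.h"
#include "hw/qdev-clock.h"

static void example_wire_systick(SysTickState *systick, Clock *cpuclk,
                                 Clock *refclk, qemu_irq nvic_in)
{
    DeviceState *dev = DEVICE(systick);

    /* Connect the clock inputs before realizing the device. */
    qdev_connect_clock_in(dev, "cpuclk", cpuclk);
    qdev_connect_clock_in(dev, "refclk", refclk);

    sysbus_realize(SYS_BUS_DEVICE(systick), &error_abort);
    sysbus_connect_irq(SYS_BUS_DEVICE(systick), 0, nvic_in);
}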
diff --git a/include/hw/timer/aspeed_timer.h b/include/hw/timer/aspeed_timer.h
index 1fb949e167..07dc6b6f2c 100644
--- a/include/hw/timer/aspeed_timer.h
+++ b/include/hw/timer/aspeed_timer.h
@@ -24,10 +24,15 @@
#include "qemu/timer.h"
#include "hw/misc/aspeed_scu.h"
+#include "qom/object.h"
-#define ASPEED_TIMER(obj) \
- OBJECT_CHECK(AspeedTimerCtrlState, (obj), TYPE_ASPEED_TIMER);
#define TYPE_ASPEED_TIMER "aspeed.timer"
+OBJECT_DECLARE_TYPE(AspeedTimerCtrlState, AspeedTimerClass, ASPEED_TIMER)
+#define TYPE_ASPEED_2400_TIMER TYPE_ASPEED_TIMER "-ast2400"
+#define TYPE_ASPEED_2500_TIMER TYPE_ASPEED_TIMER "-ast2500"
+#define TYPE_ASPEED_2600_TIMER TYPE_ASPEED_TIMER "-ast2600"
+#define TYPE_ASPEED_1030_TIMER TYPE_ASPEED_TIMER "-ast1030"
+
#define ASPEED_TIMER_NR_TIMERS 8
typedef struct AspeedTimer {
@@ -46,7 +51,7 @@ typedef struct AspeedTimer {
uint64_t start;
} AspeedTimer;
-typedef struct AspeedTimerCtrlState {
+struct AspeedTimerCtrlState {
/*< private >*/
SysBusDevice parent;
@@ -55,9 +60,19 @@ typedef struct AspeedTimerCtrlState {
uint32_t ctrl;
uint32_t ctrl2;
+ uint32_t ctrl3;
+ uint32_t irq_sts;
AspeedTimer timers[ASPEED_TIMER_NR_TIMERS];
AspeedSCUState *scu;
-} AspeedTimerCtrlState;
+};
+
+
+struct AspeedTimerClass {
+ SysBusDeviceClass parent_class;
+
+ uint64_t (*read)(AspeedTimerCtrlState *s, hwaddr offset);
+ void (*write)(AspeedTimerCtrlState *s, hwaddr offset, uint64_t value);
+};
#endif /* ASPEED_TIMER_H */
diff --git a/include/hw/timer/avr_timer16.h b/include/hw/timer/avr_timer16.h
new file mode 100644
index 0000000000..a1a032a24d
--- /dev/null
+++ b/include/hw/timer/avr_timer16.h
@@ -0,0 +1,93 @@
+/*
+ * AVR 16-bit timer
+ *
+ * Copyright (c) 2018 University of Kent
+ * Author: Ed Robbins
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+/*
+ * Driver for 16 bit timers on 8 bit AVR devices.
+ * Note:
+ * On ATmega640/V-1280/V-1281/V-2560/V-2561/V timers 1, 3, 4 and 5 are 16 bit
+ */
+
+#ifndef HW_TIMER_AVR_TIMER16_H
+#define HW_TIMER_AVR_TIMER16_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+
+enum NextInterrupt {
+ OVERFLOW,
+ COMPA,
+ COMPB,
+ COMPC,
+ CAPT
+};
+
+#define TYPE_AVR_TIMER16 "avr-timer16"
+OBJECT_DECLARE_SIMPLE_TYPE(AVRTimer16State, AVR_TIMER16)
+
+struct AVRTimer16State {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+ MemoryRegion imsk_iomem;
+ MemoryRegion ifr_iomem;
+ QEMUTimer *timer;
+ qemu_irq capt_irq;
+ qemu_irq compa_irq;
+ qemu_irq compb_irq;
+ qemu_irq compc_irq;
+ qemu_irq ovf_irq;
+
+ bool enabled;
+
+ /* registers */
+ uint8_t cra;
+ uint8_t crb;
+ uint8_t crc;
+ uint8_t cntl;
+ uint8_t cnth;
+ uint8_t icrl;
+ uint8_t icrh;
+ uint8_t ocral;
+ uint8_t ocrah;
+ uint8_t ocrbl;
+ uint8_t ocrbh;
+ uint8_t ocrcl;
+ uint8_t ocrch;
+ /*
+ * Reads and writes to CNT and ICR utilise a bizarre temporary
+ * register, which we emulate
+ */
+ uint8_t rtmp;
+ uint8_t imsk;
+ uint8_t ifr;
+
+ uint8_t id;
+ uint64_t cpu_freq_hz;
+ uint64_t freq_hz;
+ uint64_t period_ns;
+ uint64_t reset_time_ns;
+ enum NextInterrupt next_interrupt;
+};
+
+#endif /* HW_TIMER_AVR_TIMER16_H */
diff --git a/include/hw/timer/bcm2835_systmr.h b/include/hw/timer/bcm2835_systmr.h
new file mode 100644
index 0000000000..a8f605beeb
--- /dev/null
+++ b/include/hw/timer/bcm2835_systmr.h
@@ -0,0 +1,42 @@
+/*
+ * BCM2835 SYS timer emulation
+ *
+ * Copyright (c) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef BCM2835_SYSTMR_H
+#define BCM2835_SYSTMR_H
+
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_SYSTIMER "bcm2835-sys-timer"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SystemTimerState, BCM2835_SYSTIMER)
+
+#define BCM2835_SYSTIMER_COUNT 4
+
+typedef struct {
+ unsigned id;
+ QEMUTimer timer;
+ qemu_irq irq;
+ BCM2835SystemTimerState *state;
+} BCM2835SystemTimerCompare;
+
+struct BCM2835SystemTimerState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ struct {
+ uint32_t ctrl_status;
+ uint32_t compare[BCM2835_SYSTIMER_COUNT];
+ } reg;
+ BCM2835SystemTimerCompare tmr[BCM2835_SYSTIMER_COUNT];
+};
+
+#endif
diff --git a/include/hw/timer/cadence_ttc.h b/include/hw/timer/cadence_ttc.h
new file mode 100644
index 0000000000..e1251383f2
--- /dev/null
+++ b/include/hw/timer/cadence_ttc.h
@@ -0,0 +1,54 @@
+/*
+ * Xilinx Zynq cadence TTC model
+ *
+ * Copyright (c) 2011 Xilinx Inc.
+ * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com)
+ * Copyright (c) 2012 PetaLogix Pty Ltd.
+ * Written By Haibing Ma
+ * M. Habib
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HW_TIMER_CADENCE_TTC_H
+#define HW_TIMER_CADENCE_TTC_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+
+typedef struct {
+ QEMUTimer *timer;
+ int freq;
+
+ uint32_t reg_clock;
+ uint32_t reg_count;
+ uint32_t reg_value;
+ uint16_t reg_interval;
+ uint16_t reg_match[3];
+ uint32_t reg_intr;
+ uint32_t reg_intr_en;
+ uint32_t reg_event_ctrl;
+ uint32_t reg_event;
+
+ uint64_t cpu_time;
+ unsigned int cpu_time_valid;
+
+ qemu_irq irq;
+} CadenceTimerState;
+
+#define TYPE_CADENCE_TTC "cadence_ttc"
+OBJECT_DECLARE_SIMPLE_TYPE(CadenceTTCState, CADENCE_TTC)
+
+struct CadenceTTCState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ CadenceTimerState timer[3];
+};
+
+#endif
diff --git a/include/hw/timer/cmsdk-apb-dualtimer.h b/include/hw/timer/cmsdk-apb-dualtimer.h
index 9843a9dbb1..f3ec86c00b 100644
--- a/include/hw/timer/cmsdk-apb-dualtimer.h
+++ b/include/hw/timer/cmsdk-apb-dualtimer.h
@@ -16,7 +16,7 @@
* https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit
*
* QEMU interface:
- * + QOM property "pclk-frq": frequency at which the timer is clocked
+ * + Clock input "TIMCLK": clock (for both timers)
* + sysbus MMIO region 0: the register bank
* + sysbus IRQ 0: combined timer interrupt TIMINTC
 * + sysbus IRQ 1: timer block 1 interrupt TIMINT1
@@ -28,12 +28,12 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_DUALTIMER "cmsdk-apb-dualtimer"
-#define CMSDK_APB_DUALTIMER(obj) OBJECT_CHECK(CMSDKAPBDualTimer, (obj), \
- TYPE_CMSDK_APB_DUALTIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBDualTimer, CMSDK_APB_DUALTIMER)
-typedef struct CMSDKAPBDualTimer CMSDKAPBDualTimer;
/* One of the two identical timer modules in the dual-timer module */
typedef struct CMSDKAPBDualTimerModule {
@@ -62,7 +62,7 @@ struct CMSDKAPBDualTimer {
/*< public >*/
MemoryRegion iomem;
qemu_irq timerintc;
- uint32_t pclk_frq;
+ Clock *timclk;
CMSDKAPBDualTimerModule timermod[CMSDK_APB_DUALTIMER_NUM_MODULES];
uint32_t timeritcr;
diff --git a/include/hw/timer/cmsdk-apb-timer.h b/include/hw/timer/cmsdk-apb-timer.h
index f21686d26b..2dd615d1be 100644
--- a/include/hw/timer/cmsdk-apb-timer.h
+++ b/include/hw/timer/cmsdk-apb-timer.h
@@ -14,46 +14,32 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_TIMER "cmsdk-apb-timer"
-#define CMSDK_APB_TIMER(obj) OBJECT_CHECK(CMSDKAPBTIMER, (obj), \
- TYPE_CMSDK_APB_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBTimer, CMSDK_APB_TIMER)
-typedef struct {
+/*
+ * QEMU interface:
+ * + Clock input "pclk": clock for the timer
+ * + sysbus MMIO region 0: the register bank
+ * + sysbus IRQ 0: timer interrupt TIMERINT
+ */
+struct CMSDKAPBTimer {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
qemu_irq timerint;
- uint32_t pclk_frq;
struct ptimer_state *timer;
+ Clock *pclk;
uint32_t ctrl;
uint32_t value;
uint32_t reload;
uint32_t intstatus;
-} CMSDKAPBTIMER;
-
-/**
- * cmsdk_apb_timer_create - convenience function to create TYPE_CMSDK_APB_TIMER
- * @addr: location in system memory to map registers
- * @pclk_frq: frequency in Hz of the PCLK clock (used for calculating baud rate)
- */
-static inline DeviceState *cmsdk_apb_timer_create(hwaddr addr,
- qemu_irq timerint,
- uint32_t pclk_frq)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_create(NULL, TYPE_CMSDK_APB_TIMER);
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_uint32(dev, "pclk-frq", pclk_frq);
- qdev_init_nofail(dev);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, timerint);
- return dev;
-}
+};
#endif
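(The patch removes the cmsdk_apb_timer_create() helper and the pclk-frq property; below is a hedged sketch of equivalent board code under the new Clock-input API, assuming the board supplies the clock source.)

/* Illustrative replacement for the removed helper; pclk comes from the board. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/timer/cmsdk-apb-timer.h"
#include "hw/qdev-clock.h"

static DeviceState *example_create_cmsdk_timer(hwaddr addr, qemu_irq timerint,
                                               Clock *pclk)
{
    DeviceState *dev = qdev_new(TYPE_CMSDK_APB_TIMER);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    /* The frequency now comes from the "pclk" clock input, not a property. */
    qdev_connect_clock_in(dev, "pclk", pclk);
    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_mmio_map(sbd, 0, addr);
    sysbus_connect_irq(sbd, 0, timerint);
    return dev;
}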
diff --git a/include/hw/timer/digic-timer.h b/include/hw/timer/digic-timer.h
index d9e67fe291..da82fb4663 100644
--- a/include/hw/timer/digic-timer.h
+++ b/include/hw/timer/digic-timer.h
@@ -20,9 +20,10 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "qom/object.h"
#define TYPE_DIGIC_TIMER "digic-timer"
-#define DIGIC_TIMER(obj) OBJECT_CHECK(DigicTimerState, (obj), TYPE_DIGIC_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(DigicTimerState, DIGIC_TIMER)
#define DIGIC_TIMER_CONTROL 0x00
#define DIGIC_TIMER_CONTROL_RST 0x80000000
@@ -30,7 +31,7 @@
#define DIGIC_TIMER_RELVALUE 0x08
#define DIGIC_TIMER_VALUE 0x0c
-typedef struct DigicTimerState {
+struct DigicTimerState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -40,6 +41,6 @@ typedef struct DigicTimerState {
uint32_t control;
uint32_t relvalue;
-} DigicTimerState;
+};
#endif /* HW_TIMER_DIGIC_TIMER_H */
diff --git a/include/hw/timer/grlib_gptimer.h b/include/hw/timer/grlib_gptimer.h
new file mode 100644
index 0000000000..e56f1b8bf3
--- /dev/null
+++ b/include/hw/timer/grlib_gptimer.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU GRLIB GPTimer
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2024 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef GRLIB_GPTIMER_H
+#define GRLIB_GPTIMER_H
+
+#define TYPE_GRLIB_GPTIMER "grlib-gptimer"
+
+#endif
diff --git a/include/hw/timer/hpet.h b/include/hw/timer/hpet.h
index f04c4d3238..d17a8d4319 100644
--- a/include/hw/timer/hpet.h
+++ b/include/hw/timer/hpet.h
@@ -78,6 +78,8 @@ extern struct hpet_fw_config hpet_cfg;
#define TYPE_HPET "hpet"
+#define HPET_INTCAP "hpet-intcap"
+
static inline bool hpet_find(void)
{
return object_resolve_path_type("", TYPE_HPET, NULL);
diff --git a/include/hw/timer/i8254.h b/include/hw/timer/i8254.h
index 5b12eb918e..8402caad30 100644
--- a/include/hw/timer/i8254.h
+++ b/include/hw/timer/i8254.h
@@ -25,9 +25,10 @@
#ifndef HW_I8254_H
#define HW_I8254_H
-#include "hw/hw.h"
-#include "hw/qdev.h"
+#include "hw/qdev-properties.h"
#include "hw/isa/isa.h"
+#include "qapi/error.h"
+#include "qom/object.h"
#define PIT_FREQ 1193182
@@ -39,12 +40,7 @@ typedef struct PITChannelInfo {
} PITChannelInfo;
#define TYPE_PIT_COMMON "pit-common"
-#define PIT_COMMON(obj) \
- OBJECT_CHECK(PITCommonState, (obj), TYPE_PIT_COMMON)
-#define PIT_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(PITCommonClass, (klass), TYPE_PIT_COMMON)
-#define PIT_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PITCommonClass, (obj), TYPE_PIT_COMMON)
+OBJECT_DECLARE_TYPE(PITCommonState, PITCommonClass, PIT_COMMON)
#define TYPE_I8254 "isa-pit"
#define TYPE_KVM_I8254 "kvm-pit"
@@ -55,12 +51,13 @@ static inline ISADevice *i8254_pit_init(ISABus *bus, int base, int isa_irq,
DeviceState *dev;
ISADevice *d;
- d = isa_create(bus, TYPE_I8254);
+ d = isa_new(TYPE_I8254);
dev = DEVICE(d);
qdev_prop_set_uint32(dev, "iobase", base);
- qdev_init_nofail(dev);
+ isa_realize_and_unref(d, bus, &error_fatal);
qdev_connect_gpio_out(dev, 0,
- isa_irq >= 0 ? isa_get_irq(d, isa_irq) : alt_irq);
+ isa_irq >= 0 ? isa_bus_get_irq(bus, isa_irq)
+ : alt_irq);
return d;
}
@@ -70,10 +67,10 @@ static inline ISADevice *kvm_pit_init(ISABus *bus, int base)
DeviceState *dev;
ISADevice *d;
- d = isa_create(bus, TYPE_KVM_I8254);
+ d = isa_new(TYPE_KVM_I8254);
dev = DEVICE(d);
qdev_prop_set_uint32(dev, "iobase", base);
- qdev_init_nofail(dev);
+ isa_realize_and_unref(d, bus, &error_fatal);
return d;
}
diff --git a/include/hw/timer/i8254_internal.h b/include/hw/timer/i8254_internal.h
index c37a438f82..1761deb4cf 100644
--- a/include/hw/timer/i8254_internal.h
+++ b/include/hw/timer/i8254_internal.h
@@ -25,8 +25,8 @@
#ifndef QEMU_I8254_INTERNAL_H
#define QEMU_I8254_INTERNAL_H
-#include "hw/hw.h"
#include "hw/isa/isa.h"
+#include "hw/timer/i8254.h"
#include "qemu/timer.h"
typedef struct PITChannelState {
@@ -50,22 +50,22 @@ typedef struct PITChannelState {
uint32_t irq_disabled;
} PITChannelState;
-typedef struct PITCommonState {
+struct PITCommonState {
ISADevice dev;
MemoryRegion ioports;
uint32_t iobase;
PITChannelState channels[3];
-} PITCommonState;
+};
-typedef struct PITCommonClass {
- ISADeviceClass parent_class;
+struct PITCommonClass {
+ DeviceClass parent_class;
void (*set_channel_gate)(PITCommonState *s, PITChannelState *sc, int val);
void (*get_channel_info)(PITCommonState *s, PITChannelState *sc,
PITChannelInfo *info);
void (*pre_save)(PITCommonState *s);
void (*post_load)(PITCommonState *s);
-} PITCommonClass;
+};
int pit_get_out(PITChannelState *s, int64_t current_time);
int64_t pit_get_next_transition_time(PITChannelState *s, int64_t current_time);
diff --git a/include/hw/timer/ibex_timer.h b/include/hw/timer/ibex_timer.h
new file mode 100644
index 0000000000..41f5c82a92
--- /dev/null
+++ b/include/hw/timer/ibex_timer.h
@@ -0,0 +1,55 @@
+/*
+ * QEMU lowRISC Ibex Timer device
+ *
+ * Copyright (c) 2021 Western Digital
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_IBEX_TIMER_H
+#define HW_IBEX_TIMER_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_IBEX_TIMER "ibex-timer"
+OBJECT_DECLARE_SIMPLE_TYPE(IbexTimerState, IBEX_TIMER)
+
+struct IbexTimerState {
+ /* <private> */
+ SysBusDevice parent_obj;
+ uint64_t mtimecmp;
+ QEMUTimer *mtimer; /* Internal timer for M-mode interrupt */
+
+ /* <public> */
+ MemoryRegion mmio;
+
+ uint32_t timer_ctrl;
+ uint32_t timer_cfg0;
+ uint32_t timer_compare_lower0;
+ uint32_t timer_compare_upper0;
+ uint32_t timer_intr_enable;
+ uint32_t timer_intr_state;
+
+ uint32_t timebase_freq;
+
+ qemu_irq irq;
+
+ qemu_irq m_timer_irq;
+};
+#endif /* HW_IBEX_TIMER_H */
diff --git a/include/hw/timer/imx_epit.h b/include/hw/timer/imx_epit.h
index 0730ac35e6..79aff0cec2 100644
--- a/include/hw/timer/imx_epit.h
+++ b/include/hw/timer/imx_epit.h
@@ -32,6 +32,7 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
/*
* EPIT: Enhanced periodic interrupt timer
@@ -42,7 +43,7 @@
#define CR_OCIEN (1 << 2)
#define CR_RLD (1 << 3)
#define CR_PRESCALE_SHIFT (4)
-#define CR_PRESCALE_MASK (0xfff)
+#define CR_PRESCALE_BITS (12)
#define CR_SWR (1 << 16)
#define CR_IOVW (1 << 17)
#define CR_DBGEN (1 << 18)
@@ -50,14 +51,16 @@
#define CR_DOZEN (1 << 20)
#define CR_STOPEN (1 << 21)
#define CR_CLKSRC_SHIFT (24)
-#define CR_CLKSRC_MASK (0x3 << CR_CLKSRC_SHIFT)
+#define CR_CLKSRC_BITS (2)
+
+#define SR_OCIF (1 << 0)
#define EPIT_TIMER_MAX 0XFFFFFFFFUL
#define TYPE_IMX_EPIT "imx.epit"
-#define IMX_EPIT(obj) OBJECT_CHECK(IMXEPITState, (obj), TYPE_IMX_EPIT)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXEPITState, IMX_EPIT)
-typedef struct IMXEPITState{
+struct IMXEPITState {
/*< private >*/
SysBusDevice parent_obj;
@@ -71,10 +74,8 @@ typedef struct IMXEPITState{
uint32_t sr;
uint32_t lr;
uint32_t cmp;
- uint32_t cnt;
- uint32_t freq;
qemu_irq irq;
-} IMXEPITState;
+};
#endif /* IMX_EPIT_H */
diff --git a/include/hw/timer/imx_gpt.h b/include/hw/timer/imx_gpt.h
index 20ccb327c4..5a1230da35 100644
--- a/include/hw/timer/imx_gpt.h
+++ b/include/hw/timer/imx_gpt.h
@@ -32,6 +32,7 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
/*
* GPT : General purpose timer
@@ -77,13 +78,16 @@
#define TYPE_IMX25_GPT "imx25.gpt"
#define TYPE_IMX31_GPT "imx31.gpt"
#define TYPE_IMX6_GPT "imx6.gpt"
+#define TYPE_IMX6UL_GPT "imx6ul.gpt"
#define TYPE_IMX7_GPT "imx7.gpt"
#define TYPE_IMX_GPT TYPE_IMX25_GPT
-#define IMX_GPT(obj) OBJECT_CHECK(IMXGPTState, (obj), TYPE_IMX_GPT)
+typedef struct IMXGPTState IMXGPTState;
+DECLARE_INSTANCE_CHECKER(IMXGPTState, IMX_GPT,
+ TYPE_IMX_GPT)
-typedef struct IMXGPTState{
+struct IMXGPTState {
/*< private >*/
SysBusDevice parent_obj;
@@ -111,6 +115,6 @@ typedef struct IMXGPTState{
qemu_irq irq;
const IMXClk *clocks;
-} IMXGPTState;
+};
#endif /* IMX_GPT_H */
diff --git a/include/hw/timer/m48t59.h b/include/hw/timer/m48t59.h
deleted file mode 100644
index 6f8db04fce..0000000000
--- a/include/hw/timer/m48t59.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef HW_M48T59_H
-#define HW_M48T59_H
-
-#include "qemu-common.h"
-#include "qom/object.h"
-
-#define TYPE_NVRAM "nvram"
-
-#define NVRAM_CLASS(klass) \
- OBJECT_CLASS_CHECK(NvramClass, (klass), TYPE_NVRAM)
-#define NVRAM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(NvramClass, (obj), TYPE_NVRAM)
-#define NVRAM(obj) \
- INTERFACE_CHECK(Nvram, (obj), TYPE_NVRAM)
-
-typedef struct Nvram Nvram;
-
-typedef struct NvramClass {
- InterfaceClass parent;
-
- uint32_t (*read)(Nvram *obj, uint32_t addr);
- void (*write)(Nvram *obj, uint32_t addr, uint32_t val);
- void (*toggle_lock)(Nvram *obj, int lock);
-} NvramClass;
-
-Nvram *m48t59_init_isa(ISABus *bus, uint32_t io_base, uint16_t size,
- int base_year, int type);
-Nvram *m48t59_init(qemu_irq IRQ, hwaddr mem_base,
- uint32_t io_base, uint16_t size, int base_year,
- int type);
-
-#endif /* HW_M48T59_H */
diff --git a/include/hw/timer/mc146818rtc.h b/include/hw/timer/mc146818rtc.h
deleted file mode 100644
index fe6ed63f71..0000000000
--- a/include/hw/timer/mc146818rtc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef MC146818RTC_H
-#define MC146818RTC_H
-
-#include "hw/isa/isa.h"
-#include "hw/timer/mc146818rtc_regs.h"
-
-#define TYPE_MC146818_RTC "mc146818rtc"
-
-ISADevice *mc146818_rtc_init(ISABus *bus, int base_year,
- qemu_irq intercept_irq);
-void rtc_set_memory(ISADevice *dev, int addr, int val);
-int rtc_get_memory(ISADevice *dev, int addr);
-
-#endif /* MC146818RTC_H */
diff --git a/include/hw/timer/mss-timer.h b/include/hw/timer/mss-timer.h
index d15d1732f8..da38512904 100644
--- a/include/hw/timer/mss-timer.h
+++ b/include/hw/timer/mss-timer.h
@@ -27,10 +27,10 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "qom/object.h"
#define TYPE_MSS_TIMER "mss-timer"
-#define MSS_TIMER(obj) OBJECT_CHECK(MSSTimerState, \
- (obj), TYPE_MSS_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(MSSTimerState, MSS_TIMER)
/*
* There are two 32-bit down counting timers.
@@ -46,19 +46,18 @@
#define R_TIM1_MAX 6
struct Msf2Timer {
- QEMUBH *bh;
ptimer_state *ptimer;
uint32_t regs[R_TIM1_MAX];
qemu_irq irq;
};
-typedef struct MSSTimerState {
+struct MSSTimerState {
SysBusDevice parent_obj;
MemoryRegion mmio;
uint32_t freq_hz;
struct Msf2Timer timers[NUM_TIMERS];
-} MSSTimerState;
+};
#endif /* HW_MSS_TIMER_H */
diff --git a/include/hw/timer/npcm7xx_timer.h b/include/hw/timer/npcm7xx_timer.h
new file mode 100644
index 0000000000..d45c051b56
--- /dev/null
+++ b/include/hw/timer/npcm7xx_timer.h
@@ -0,0 +1,113 @@
+/*
+ * Nuvoton NPCM7xx Timer Controller
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_TIMER_H
+#define NPCM7XX_TIMER_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+
+/* Each Timer Module (TIM) instance holds five 25 MHz timers. */
+#define NPCM7XX_TIMERS_PER_CTRL (5)
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_TIMER_NR_REGS (0x54 / sizeof(uint32_t))
+
+/* The basic watchdog timer period is 2^14 clock cycles. */
+#define NPCM7XX_WATCHDOG_BASETIME_SHIFT 14
+
+#define NPCM7XX_WATCHDOG_RESET_GPIO_OUT "npcm7xx-clk-watchdog-reset-gpio-out"
+
+typedef struct NPCM7xxTimerCtrlState NPCM7xxTimerCtrlState;
+
+/**
+ * struct NPCM7xxBaseTimer - Basic functionality that both regular timer and
+ * watchdog timer use.
+ * @qtimer: QEMU timer that notifies us on expiration.
+ * @expires_ns: Absolute virtual expiration time.
+ * @remaining_ns: Remaining time until expiration if timer is paused.
+ */
+typedef struct NPCM7xxBaseTimer {
+ QEMUTimer qtimer;
+ int64_t expires_ns;
+ int64_t remaining_ns;
+} NPCM7xxBaseTimer;
+
+/**
+ * struct NPCM7xxTimer - Individual timer state.
+ * @ctrl: The timer module that owns this timer.
+ * @irq: GIC interrupt line to fire on expiration (if enabled).
+ * @base_timer: The basic timer functionality for this timer.
+ * @tcsr: The Timer Control and Status Register.
+ * @ticr: The Timer Initial Count Register.
+ */
+typedef struct NPCM7xxTimer {
+ NPCM7xxTimerCtrlState *ctrl;
+
+ qemu_irq irq;
+ NPCM7xxBaseTimer base_timer;
+
+ uint32_t tcsr;
+ uint32_t ticr;
+} NPCM7xxTimer;
+
+/**
+ * struct NPCM7xxWatchdogTimer - The watchdog timer state.
+ * @ctrl: The timer module that owns this timer.
+ * @irq: GIC interrupt line to fire on expiration (if enabled).
+ * @reset_signal: The GPIO used to send a reset signal.
+ * @base_timer: The basic timer functionality for this timer.
+ * @wtcr: The Watchdog Timer Control Register.
+ */
+typedef struct NPCM7xxWatchdogTimer {
+ NPCM7xxTimerCtrlState *ctrl;
+
+ qemu_irq irq;
+ qemu_irq reset_signal;
+ NPCM7xxBaseTimer base_timer;
+
+ uint32_t wtcr;
+} NPCM7xxWatchdogTimer;
+
+/**
+ * struct NPCM7xxTimerCtrlState - Timer Module device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @index: The index of this timer module.
+ * @tisr: The Timer Interrupt Status Register.
+ * @timer: The five individual timers managed by this module.
+ * @watchdog_timer: The watchdog timer managed by this module.
+ */
+struct NPCM7xxTimerCtrlState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint32_t tisr;
+
+ Clock *clock;
+ NPCM7xxTimer timer[NPCM7XX_TIMERS_PER_CTRL];
+ NPCM7xxWatchdogTimer watchdog_timer;
+};
+
+#define TYPE_NPCM7XX_TIMER "npcm7xx-timer"
+#define NPCM7XX_TIMER(obj) \
+ OBJECT_CHECK(NPCM7xxTimerCtrlState, (obj), TYPE_NPCM7XX_TIMER)
+
+#endif /* NPCM7XX_TIMER_H */
diff --git a/include/hw/timer/nrf51_timer.h b/include/hw/timer/nrf51_timer.h
index 85cad2300d..76827c11dc 100644
--- a/include/hw/timer/nrf51_timer.h
+++ b/include/hw/timer/nrf51_timer.h
@@ -15,8 +15,9 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define TYPE_NRF51_TIMER "nrf51_soc.timer"
-#define NRF51_TIMER(obj) OBJECT_CHECK(NRF51TimerState, (obj), TYPE_NRF51_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51TimerState, NRF51_TIMER)
#define NRF51_TIMER_REG_COUNT 4
@@ -53,12 +54,13 @@
#define NRF51_TIMER_REG_CC0 0x540
#define NRF51_TIMER_REG_CC3 0x54C
-typedef struct NRF51TimerState {
+struct NRF51TimerState {
SysBusDevice parent_obj;
MemoryRegion iomem;
qemu_irq irq;
+ uint8_t id;
QEMUTimer timer;
int64_t timer_start_ns;
int64_t update_counter_ns;
@@ -74,7 +76,7 @@ typedef struct NRF51TimerState {
uint32_t bitmode;
uint32_t prescaler;
-} NRF51TimerState;
+};
#endif
diff --git a/include/hw/timer/renesas_cmt.h b/include/hw/timer/renesas_cmt.h
new file mode 100644
index 0000000000..1c0b65c1d5
--- /dev/null
+++ b/include/hw/timer/renesas_cmt.h
@@ -0,0 +1,43 @@
+/*
+ * Renesas Compare-match timer Object
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_TIMER_RENESAS_CMT_H
+#define HW_TIMER_RENESAS_CMT_H
+
+#include "qemu/timer.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_RENESAS_CMT "renesas-cmt"
+typedef struct RCMTState RCMTState;
+DECLARE_INSTANCE_CHECKER(RCMTState, RCMT,
+ TYPE_RENESAS_CMT)
+
+enum {
+ CMT_CH = 2,
+ CMT_NR_IRQ = 1 * CMT_CH
+};
+
+struct RCMTState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ uint64_t input_freq;
+ MemoryRegion memory;
+
+ uint16_t cmstr;
+ uint16_t cmcr[CMT_CH];
+ uint16_t cmcnt[CMT_CH];
+ uint16_t cmcor[CMT_CH];
+ int64_t tick[CMT_CH];
+ qemu_irq cmi[CMT_CH];
+ QEMUTimer timer[CMT_CH];
+};
+
+#endif
diff --git a/include/hw/timer/renesas_tmr.h b/include/hw/timer/renesas_tmr.h
new file mode 100644
index 0000000000..caf7eec0dc
--- /dev/null
+++ b/include/hw/timer/renesas_tmr.h
@@ -0,0 +1,58 @@
+/*
+ * Renesas 8bit timer Object
+ *
+ * Copyright (c) 2018 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_TIMER_RENESAS_TMR_H
+#define HW_TIMER_RENESAS_TMR_H
+
+#include "qemu/timer.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_RENESAS_TMR "renesas-tmr"
+typedef struct RTMRState RTMRState;
+DECLARE_INSTANCE_CHECKER(RTMRState, RTMR,
+ TYPE_RENESAS_TMR)
+
+enum timer_event {
+ cmia = 0,
+ cmib = 1,
+ ovi = 2,
+ none = 3,
+ TMR_NR_EVENTS = 4
+};
+
+enum {
+ TMR_CH = 2,
+ TMR_NR_IRQ = 3 * TMR_CH
+};
+
+struct RTMRState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ uint64_t input_freq;
+ MemoryRegion memory;
+
+ int64_t tick;
+ uint8_t tcnt[TMR_CH];
+ uint8_t tcora[TMR_CH];
+ uint8_t tcorb[TMR_CH];
+ uint8_t tcr[TMR_CH];
+ uint8_t tccr[TMR_CH];
+ uint8_t tcor[TMR_CH];
+ uint8_t tcsr[TMR_CH];
+ int64_t div_round[TMR_CH];
+ uint8_t next[TMR_CH];
+ qemu_irq cmia[TMR_CH];
+ qemu_irq cmib[TMR_CH];
+ qemu_irq ovi[TMR_CH];
+ QEMUTimer timer[TMR_CH];
+};
+
+#endif
diff --git a/include/hw/timer/sifive_pwm.h b/include/hw/timer/sifive_pwm.h
new file mode 100644
index 0000000000..6a8cf7b29e
--- /dev/null
+++ b/include/hw/timer/sifive_pwm.h
@@ -0,0 +1,62 @@
+/*
+ * SiFive PWM
+ *
+ * Copyright (c) 2020 Western Digital
+ *
+ * Author: Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SIFIVE_PWM_H
+#define HW_SIFIVE_PWM_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+
+#define TYPE_SIFIVE_PWM "sifive-pwm"
+
+#define SIFIVE_PWM(obj) \
+ OBJECT_CHECK(SiFivePwmState, (obj), TYPE_SIFIVE_PWM)
+
+#define SIFIVE_PWM_CHANS 4
+#define SIFIVE_PWM_IRQS SIFIVE_PWM_CHANS
+
+typedef struct SiFivePwmState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+ QEMUTimer timer[SIFIVE_PWM_CHANS];
+ /*
+ * if en bit(s) set, is the number of ticks when pwmcount was 0
+ * if en bit(s) not set, is the number of ticks in pwmcount
+ */
+ uint64_t tick_offset;
+ uint64_t freq_hz;
+
+ uint32_t pwmcfg;
+ uint32_t pwmcmp[SIFIVE_PWM_CHANS];
+
+ qemu_irq irqs[SIFIVE_PWM_IRQS];
+} SiFivePwmState;
+
+#endif /* HW_SIFIVE_PWM_H */
diff --git a/include/hw/timer/sse-counter.h b/include/hw/timer/sse-counter.h
new file mode 100644
index 0000000000..b433e58d37
--- /dev/null
+++ b/include/hw/timer/sse-counter.h
@@ -0,0 +1,105 @@
+/*
+ * Arm SSE Subsystem System Counter
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System counter" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * QEMU interface:
+ * + Clock input "CLK": clock
+ * + sysbus MMIO region 0: the control register frame
+ * + sysbus MMIO region 1: the status register frame
+ *
+ * Consumers of the system counter's timestamp, such as the SSE
+ * System Timer device, can also use the APIs sse_counter_for_timestamp(),
+ * sse_counter_tick_to_time() and sse_counter_register_consumer() to
+ * interact with an instance of the System Counter. Generally the
+ * consumer device should have a QOM link property which the board
+ * code can set to the appropriate instance of the system counter.
+ */
+
+#ifndef SSE_COUNTER_H
+#define SSE_COUNTER_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "qemu/notify.h"
+
+#define TYPE_SSE_COUNTER "sse-counter"
+OBJECT_DECLARE_SIMPLE_TYPE(SSECounter, SSE_COUNTER)
+
+struct SSECounter {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion control_mr;
+ MemoryRegion status_mr;
+ Clock *clk;
+ NotifierList notifier_list;
+
+ uint32_t cntcr;
+ uint32_t cntscr0;
+
+ /*
+ * These are used for handling clock frequency changes: they are a
+ * tuple of (QEMU_CLOCK_VIRTUAL timestamp, CNTCV at that time),
+ * taken when the clock frequency changes. sse_cntcv() needs them
+ * to calculate the current CNTCV.
+ */
+ uint64_t ns_then;
+ uint64_t ticks_then;
+};
+
+/*
+ * These functions are the interface by which a consumer of
+ * the system timestamp (such as the SSE system timer device)
+ * can communicate with the SSECounter.
+ */
+
+/**
+ * sse_counter_for_timestamp:
+ * @counter: SSECounter
+ * @ns: timestamp of QEMU_CLOCK_VIRTUAL in nanoseconds
+ *
+ * Returns the value of the timestamp counter at the specified
+ * point in time (assuming that no changes to scale factor, enable, etc
+ * happen in the meantime).
+ */
+uint64_t sse_counter_for_timestamp(SSECounter *counter, uint64_t ns);
+
+/**
+ * sse_counter_tick_to_time:
+ * @counter: SSECounter
+ * @tick: tick value
+ *
+ * Returns the time (a QEMU_CLOCK_VIRTUAL timestamp in nanoseconds)
+ * when the timestamp counter will reach the specified tick count.
+ * If the counter is not currently running, returns UINT64_MAX.
+ */
+uint64_t sse_counter_tick_to_time(SSECounter *counter, uint64_t tick);
+
+/**
+ * sse_counter_register_consumer:
+ * @counter: SSECounter
+ * @notifier: Notifier which is notified on counter changes
+ *
+ * Registers @notifier with the SSECounter. When the counter's
+ * configuration changes in a way that might invalidate information
+ * previously returned via sse_counter_for_timestamp() or
+ * sse_counter_tick_to_time(), the notifier will be called.
+ * Devices which consume the timestamp counter can use this as
+ * a cue to recalculate timer events.
+ */
+void sse_counter_register_consumer(SSECounter *counter, Notifier *notifier);
+
+#endif
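
To illustrate the consumer API documented above, a hypothetical timer-like device
(MyTimerState and the my_timer_* names are illustrative, not part of this header)
would register a notifier and re-derive its deadline whenever the counter
configuration changes, roughly as follows:

    typedef struct MyTimerState {
        SSECounter *counter;       /* set by board code via a QOM link property */
        QEMUTimer timer;
        Notifier counter_notifier;
        uint64_t compare_tick;     /* tick value the timer should fire at */
    } MyTimerState;

    static void my_timer_rearm(MyTimerState *s)
    {
        uint64_t ns = sse_counter_tick_to_time(s->counter, s->compare_tick);

        if (ns != UINT64_MAX) {
            timer_mod_ns(&s->timer, ns);   /* counter running: schedule event */
        } else {
            timer_del(&s->timer);          /* counter stopped: nothing will fire */
        }
    }

    static void my_timer_counter_changed(Notifier *n, void *data)
    {
        MyTimerState *s = container_of(n, MyTimerState, counter_notifier);

        my_timer_rearm(s);                 /* cached conversions may be stale */
    }

    static void my_timer_init(MyTimerState *s)
    {
        s->counter_notifier.notify = my_timer_counter_changed;
        sse_counter_register_consumer(s->counter, &s->counter_notifier);
    }
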
diff --git a/include/hw/timer/sse-timer.h b/include/hw/timer/sse-timer.h
new file mode 100644
index 0000000000..265ad32400
--- /dev/null
+++ b/include/hw/timer/sse-timer.h
@@ -0,0 +1,54 @@
+/*
+ * Arm SSE Subsystem System Timer
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System timer" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * QEMU interface:
+ * + QOM property "counter": link property to be set to the
+ * TYPE_SSE_COUNTER timestamp counter device this timer runs off
+ * + sysbus MMIO region 0: the register bank
+ * + sysbus IRQ 0: timer interrupt
+ */
+
+#ifndef SSE_TIMER_H
+#define SSE_TIMER_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+#include "hw/timer/sse-counter.h"
+
+#define TYPE_SSE_TIMER "sse-timer"
+OBJECT_DECLARE_SIMPLE_TYPE(SSETimer, SSE_TIMER)
+
+struct SSETimer {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq irq;
+ SSECounter *counter;
+ QEMUTimer timer;
+ Notifier counter_notifier;
+
+ uint32_t cntfrq;
+ uint32_t cntp_ctl;
+ uint64_t cntp_cval;
+ uint64_t cntp_aival;
+ uint32_t cntp_aival_ctl;
+ uint32_t cntp_aival_reload;
+};
+
+#endif
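
The "counter" link property named in the interface comment is wired up by board
code before realize. A minimal sketch follows; the creation style and error
handling here are assumptions, not taken from this patch.

    SSECounter *counter = SSE_COUNTER(object_new(TYPE_SSE_COUNTER));
    SSETimer *timer = SSE_TIMER(object_new(TYPE_SSE_TIMER));

    object_property_set_link(OBJECT(timer), "counter", OBJECT(counter),
                             &error_abort);
    sysbus_realize(SYS_BUS_DEVICE(counter), &error_abort);
    sysbus_realize(SYS_BUS_DEVICE(timer), &error_abort);
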
diff --git a/include/hw/timer/stellaris-gptm.h b/include/hw/timer/stellaris-gptm.h
new file mode 100644
index 0000000000..fde1fc6f0c
--- /dev/null
+++ b/include/hw/timer/stellaris-gptm.h
@@ -0,0 +1,51 @@
+/*
+ * Luminary Micro Stellaris General Purpose Timer Module
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifndef HW_TIMER_STELLARIS_GPTM_H
+#define HW_TIMER_STELLARIS_GPTM_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "hw/clock.h"
+
+#define TYPE_STELLARIS_GPTM "stellaris-gptm"
+OBJECT_DECLARE_SIMPLE_TYPE(gptm_state, STELLARIS_GPTM)
+
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0: register bank
+ * + sysbus IRQ 0: timer interrupt
+ * + unnamed GPIO output 0: trigger output for the ADC
+ * + Clock input "clk": the 32-bit countdown timer runs at this speed
+ */
+struct gptm_state {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ uint32_t config;
+ uint32_t mode[2];
+ uint32_t control;
+ uint32_t state;
+ uint32_t mask;
+ uint32_t load[2];
+ uint32_t match[2];
+ uint32_t prescale[2];
+ uint32_t match_prescale[2];
+ uint32_t rtc;
+ int64_t tick[2];
+ struct gptm_state *opaque[2];
+ QEMUTimer *timer[2];
+ /* The timers have an alternate output used to trigger the ADC. */
+ qemu_irq trigger;
+ qemu_irq irq;
+ Clock *clk;
+};
+
+#endif
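
A hedged sketch of the board-level wiring implied by the interface comment above;
the clock source, base address and IRQ line (sysclk, timer_base, timer_irq) are
placeholders rather than values taken from this patch.

    DeviceState *dev = qdev_new(TYPE_STELLARIS_GPTM);

    qdev_connect_clock_in(dev, "clk", sysclk);              /* Clock input "clk" */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_abort);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, timer_base);    /* MMIO region 0 */
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, timer_irq);  /* IRQ 0 */
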
diff --git a/include/hw/timer/stm32f2xx_timer.h b/include/hw/timer/stm32f2xx_timer.h
index e6a83237a5..90f40f1746 100644
--- a/include/hw/timer/stm32f2xx_timer.h
+++ b/include/hw/timer/stm32f2xx_timer.h
@@ -27,7 +27,7 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
-#include "sysemu/sysemu.h"
+#include "qom/object.h"
#define TIM_CR1 0x00
#define TIM_CR2 0x04
@@ -62,10 +62,11 @@
#define TIM_DIER_UIE 1
#define TYPE_STM32F2XX_TIMER "stm32f2xx-timer"
-#define STM32F2XXTIMER(obj) OBJECT_CHECK(STM32F2XXTimerState, \
- (obj), TYPE_STM32F2XX_TIMER)
+typedef struct STM32F2XXTimerState STM32F2XXTimerState;
+DECLARE_INSTANCE_CHECKER(STM32F2XXTimerState, STM32F2XXTIMER,
+ TYPE_STM32F2XX_TIMER)
-typedef struct STM32F2XXTimerState {
+struct STM32F2XXTimerState {
/* <private> */
SysBusDevice parent_obj;
@@ -96,6 +97,6 @@ typedef struct STM32F2XXTimerState {
uint32_t tim_dcr;
uint32_t tim_dmar;
uint32_t tim_or;
-} STM32F2XXTimerState;
+};
#endif /* HW_STM32F2XX_TIMER_H */
diff --git a/include/hw/timer/sun4v-rtc.h b/include/hw/timer/sun4v-rtc.h
deleted file mode 100644
index 407278f918..0000000000
--- a/include/hw/timer/sun4v-rtc.h
+++ /dev/null
@@ -1 +0,0 @@
-void sun4v_rtc_init(hwaddr addr);
diff --git a/include/hw/timer/tmu012.h b/include/hw/timer/tmu012.h
new file mode 100644
index 0000000000..808ed8de1d
--- /dev/null
+++ b/include/hw/timer/tmu012.h
@@ -0,0 +1,23 @@
+/*
+ * SuperH Timer
+ *
+ * Copyright (c) 2007 Magnus Damm
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifndef HW_TIMER_TMU012_H
+#define HW_TIMER_TMU012_H
+
+#include "exec/hwaddr.h"
+
+#define TMU012_FEAT_TOCR (1 << 0)
+#define TMU012_FEAT_3CHAN (1 << 1)
+#define TMU012_FEAT_EXTCLK (1 << 2)
+
+void tmu012_init(MemoryRegion *sysmem, hwaddr base,
+ int feat, uint32_t freq,
+ qemu_irq ch0_irq, qemu_irq ch1_irq,
+ qemu_irq ch2_irq0, qemu_irq ch2_irq1);
+
+#endif
diff --git a/include/hw/tricore/tc27x_soc.h b/include/hw/tricore/tc27x_soc.h
new file mode 100644
index 0000000000..dd3a7485c8
--- /dev/null
+++ b/include/hw/tricore/tc27x_soc.h
@@ -0,0 +1,129 @@
+/*
+ * Infineon tc27x SoC System emulation.
+ *
+ * Copyright (c) 2020 Andreas Konopik <andreas.konopik@efs-auto.de>
+ * Copyright (c) 2020 David Brenken <david.brenken@efs-auto.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TC27X_SOC_H
+#define TC27X_SOC_H
+
+#include "hw/sysbus.h"
+#include "target/tricore/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_TC27X_SOC ("tc27x-soc")
+OBJECT_DECLARE_TYPE(TC27XSoCState, TC27XSoCClass, TC27X_SOC)
+
+typedef struct TC27XSoCCPUMemState {
+
+ MemoryRegion dspr;
+ MemoryRegion pspr;
+
+ MemoryRegion dcache;
+ MemoryRegion dtag;
+ MemoryRegion pcache;
+ MemoryRegion ptag;
+
+} TC27XSoCCPUMemState;
+
+typedef struct TC27XSoCFlashMemState {
+
+ MemoryRegion pflash0_c;
+ MemoryRegion pflash1_c;
+ MemoryRegion pflash0_u;
+ MemoryRegion pflash1_u;
+ MemoryRegion dflash0;
+ MemoryRegion dflash1;
+ MemoryRegion olda_c;
+ MemoryRegion olda_u;
+ MemoryRegion brom_c;
+ MemoryRegion brom_u;
+ MemoryRegion lmuram_c;
+ MemoryRegion lmuram_u;
+ MemoryRegion emem_c;
+ MemoryRegion emem_u;
+
+} TC27XSoCFlashMemState;
+
+typedef struct TC27XSoCState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ TriCoreCPU cpu;
+
+ MemoryRegion dsprX;
+ MemoryRegion psprX;
+
+ TC27XSoCCPUMemState cpu0mem;
+ TC27XSoCCPUMemState cpu1mem;
+ TC27XSoCCPUMemState cpu2mem;
+
+ TC27XSoCFlashMemState flashmem;
+
+} TC27XSoCState;
+
+typedef struct MemmapEntry {
+ hwaddr base;
+ hwaddr size;
+} MemmapEntry;
+
+typedef struct TC27XSoCClass {
+ DeviceClass parent_class;
+
+ const char *name;
+ const char *cpu_type;
+ const MemmapEntry *memmap;
+ uint32_t num_cpus;
+} TC27XSoCClass;
+
+enum {
+ TC27XD_DSPR2,
+ TC27XD_DCACHE2,
+ TC27XD_DTAG2,
+ TC27XD_PSPR2,
+ TC27XD_PCACHE2,
+ TC27XD_PTAG2,
+ TC27XD_DSPR1,
+ TC27XD_DCACHE1,
+ TC27XD_DTAG1,
+ TC27XD_PSPR1,
+ TC27XD_PCACHE1,
+ TC27XD_PTAG1,
+ TC27XD_DSPR0,
+ TC27XD_PSPR0,
+ TC27XD_PCACHE0,
+ TC27XD_PTAG0,
+ TC27XD_PFLASH0_C,
+ TC27XD_PFLASH1_C,
+ TC27XD_OLDA_C,
+ TC27XD_BROM_C,
+ TC27XD_LMURAM_C,
+ TC27XD_EMEM_C,
+ TC27XD_PFLASH0_U,
+ TC27XD_PFLASH1_U,
+ TC27XD_DFLASH0,
+ TC27XD_DFLASH1,
+ TC27XD_OLDA_U,
+ TC27XD_BROM_U,
+ TC27XD_LMURAM_U,
+ TC27XD_EMEM_U,
+ TC27XD_PSPRX,
+ TC27XD_DSPRX,
+};
+
+#endif
diff --git a/include/hw/tricore/triboard.h b/include/hw/tricore/triboard.h
new file mode 100644
index 0000000000..4fdd2d7d97
--- /dev/null
+++ b/include/hw/tricore/triboard.h
@@ -0,0 +1,48 @@
+/*
+ * Infineon TriBoard System emulation.
+ *
+ * Copyright (c) 2020 Andreas Konopik <andreas.konopik@efs-auto.de>
+ * Copyright (c) 2020 David Brenken <david.brenken@efs-auto.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qapi/error.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+#include "qom/object.h"
+
+#include "hw/tricore/tc27x_soc.h"
+
+#define TYPE_TRIBOARD_MACHINE MACHINE_TYPE_NAME("triboard")
+typedef struct TriBoardMachineState TriBoardMachineState;
+typedef struct TriBoardMachineClass TriBoardMachineClass;
+DECLARE_OBJ_CHECKERS(TriBoardMachineState, TriBoardMachineClass,
+ TRIBOARD_MACHINE, TYPE_TRIBOARD_MACHINE)
+
+
+struct TriBoardMachineState {
+ MachineState parent;
+
+ TC27XSoCState tc27x_soc;
+};
+
+struct TriBoardMachineClass {
+ MachineClass parent_obj;
+
+ const char *name;
+ const char *desc;
+ const char *soc_name;
+};
diff --git a/include/hw/tricore/tricore.h b/include/hw/tricore/tricore.h
index 89ef922c67..c19ed3f013 100644
--- a/include/hw/tricore/tricore.h
+++ b/include/hw/tricore/tricore.h
@@ -2,7 +2,6 @@
#define HW_TRICORE_H
#include "exec/memory.h"
-#include "hw/irq.h"
struct tricore_boot_info {
uint64_t ram_size;
diff --git a/include/exec/tb-context.h b/include/hw/tricore/tricore_testdevice.h
index feb585e0a7..2c57b62f22 100644
--- a/include/exec/tb-context.h
+++ b/include/hw/tricore/tricore_testdevice.h
@@ -1,7 +1,5 @@
/*
- * Internal structs that QEMU exports to TCG
- *
- * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2018-2021 Bastian Koppelmann Paderborn University
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -17,26 +15,19 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef QEMU_TB_CONTEXT_H
-#define QEMU_TB_CONTEXT_H
-
-#include "qemu/thread.h"
-#include "qemu/qht.h"
-
-#define CODE_GEN_HTABLE_BITS 15
-#define CODE_GEN_HTABLE_SIZE (1 << CODE_GEN_HTABLE_BITS)
-
-typedef struct TranslationBlock TranslationBlock;
-typedef struct TBContext TBContext;
+#ifndef HW_TRICORE_TESTDEVICE_H
+#define HW_TRICORE_TESTDEVICE_H
-struct TBContext {
+#include "hw/sysbus.h"
- struct qht htable;
+#define TYPE_TRICORE_TESTDEVICE "tricore_testdevice"
+#define TRICORE_TESTDEVICE(obj) \
+ OBJECT_CHECK(TriCoreTestDeviceState, (obj), TYPE_TRICORE_TESTDEVICE)
- /* statistics */
- unsigned tb_flush_count;
-};
+typedef struct {
+ SysBusDevice parent_obj;
-extern TBContext tb_ctx;
+ MemoryRegion iomem;
+} TriCoreTestDeviceState;
#endif
diff --git a/include/hw/unicore32/puv3.h b/include/hw/unicore32/puv3.h
deleted file mode 100644
index f587a1f622..0000000000
--- a/include/hw/unicore32/puv3.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Misc PKUnity SoC declarations
- *
- * Copyright (C) 2010-2012 Guan Xuetao
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or any later version.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef QEMU_HW_PUV3_H
-#define QEMU_HW_PUV3_H
-
-#define PUV3_REGS_OFFSET (0x1000) /* 4K is reasonable */
-
-/* Hardware interrupts */
-#define PUV3_IRQS_NR (32)
-
-#define PUV3_IRQS_GPIOLOW0 (0)
-#define PUV3_IRQS_GPIOLOW1 (1)
-#define PUV3_IRQS_GPIOLOW2 (2)
-#define PUV3_IRQS_GPIOLOW3 (3)
-#define PUV3_IRQS_GPIOLOW4 (4)
-#define PUV3_IRQS_GPIOLOW5 (5)
-#define PUV3_IRQS_GPIOLOW6 (6)
-#define PUV3_IRQS_GPIOLOW7 (7)
-#define PUV3_IRQS_GPIOHIGH (8)
-#define PUV3_IRQS_PS2_KBD (22)
-#define PUV3_IRQS_PS2_AUX (23)
-#define PUV3_IRQS_OST0 (26)
-
-/* All puv3_*.c use DPRINTF for debug. */
-#ifdef DEBUG_PUV3
-#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__)
-#else
-#define DPRINTF(fmt, ...) do {} while (0)
-#endif
-
-#endif /* QEMU_HW_PUV3_H */
diff --git a/include/hw/usb.h b/include/hw/usb.h
index c21f41c8a9..d46d96779a 100644
--- a/include/hw/usb.h
+++ b/include/hw/usb.h
@@ -25,9 +25,12 @@
* THE SOFTWARE.
*/
-#include "hw/qdev.h"
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
#include "qemu/iov.h"
#include "qemu/queue.h"
+#include "qom/object.h"
+#include "qapi/error.h"
/* Constants related to the USB / PCI interaction */
#define USB_SBRN 0x60 /* Serial Bus Release Number Register */
@@ -64,42 +67,42 @@
//#define USB_STATE_POWERED 2
#define USB_STATE_DEFAULT 3
//#define USB_STATE_ADDRESS 4
-//#define USB_STATE_CONFIGURED 5
+//#define USB_STATE_CONFIGURED 5
#define USB_STATE_SUSPENDED 6
-#define USB_CLASS_AUDIO 1
-#define USB_CLASS_COMM 2
-#define USB_CLASS_HID 3
-#define USB_CLASS_PHYSICAL 5
-#define USB_CLASS_STILL_IMAGE 6
-#define USB_CLASS_PRINTER 7
-#define USB_CLASS_MASS_STORAGE 8
-#define USB_CLASS_HUB 9
-#define USB_CLASS_CDC_DATA 0x0a
-#define USB_CLASS_CSCID 0x0b
-#define USB_CLASS_CONTENT_SEC 0x0d
-#define USB_CLASS_APP_SPEC 0xfe
-#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_CLASS_AUDIO 1
+#define USB_CLASS_COMM 2
+#define USB_CLASS_HID 3
+#define USB_CLASS_PHYSICAL 5
+#define USB_CLASS_STILL_IMAGE 6
+#define USB_CLASS_PRINTER 7
+#define USB_CLASS_MASS_STORAGE 8
+#define USB_CLASS_HUB 9
+#define USB_CLASS_CDC_DATA 0x0a
+#define USB_CLASS_CSCID 0x0b
+#define USB_CLASS_CONTENT_SEC 0x0d
+#define USB_CLASS_APP_SPEC 0xfe
+#define USB_CLASS_VENDOR_SPEC 0xff
#define USB_SUBCLASS_UNDEFINED 0
#define USB_SUBCLASS_AUDIO_CONTROL 1
#define USB_SUBCLASS_AUDIO_STREAMING 2
#define USB_SUBCLASS_AUDIO_MIDISTREAMING 3
-#define USB_DIR_OUT 0
-#define USB_DIR_IN 0x80
+#define USB_DIR_OUT 0
+#define USB_DIR_IN 0x80
-#define USB_TYPE_MASK (0x03 << 5)
-#define USB_TYPE_STANDARD (0x00 << 5)
-#define USB_TYPE_CLASS (0x01 << 5)
-#define USB_TYPE_VENDOR (0x02 << 5)
-#define USB_TYPE_RESERVED (0x03 << 5)
+#define USB_TYPE_MASK (0x03 << 5)
+#define USB_TYPE_STANDARD (0x00 << 5)
+#define USB_TYPE_CLASS (0x01 << 5)
+#define USB_TYPE_VENDOR (0x02 << 5)
+#define USB_TYPE_RESERVED (0x03 << 5)
-#define USB_RECIP_MASK 0x1f
-#define USB_RECIP_DEVICE 0x00
-#define USB_RECIP_INTERFACE 0x01
-#define USB_RECIP_ENDPOINT 0x02
-#define USB_RECIP_OTHER 0x03
+#define USB_RECIP_MASK 0x1f
+#define USB_RECIP_DEVICE 0x00
+#define USB_RECIP_INTERFACE 0x01
+#define USB_RECIP_ENDPOINT 0x02
+#define USB_RECIP_OTHER 0x03
#define DeviceRequest ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
#define DeviceOutRequest ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
@@ -124,28 +127,28 @@
#define EndpointOutRequest \
((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
-#define USB_REQ_GET_STATUS 0x00
-#define USB_REQ_CLEAR_FEATURE 0x01
-#define USB_REQ_SET_FEATURE 0x03
-#define USB_REQ_SET_ADDRESS 0x05
-#define USB_REQ_GET_DESCRIPTOR 0x06
-#define USB_REQ_SET_DESCRIPTOR 0x07
-#define USB_REQ_GET_CONFIGURATION 0x08
-#define USB_REQ_SET_CONFIGURATION 0x09
-#define USB_REQ_GET_INTERFACE 0x0A
-#define USB_REQ_SET_INTERFACE 0x0B
-#define USB_REQ_SYNCH_FRAME 0x0C
+#define USB_REQ_GET_STATUS 0x00
+#define USB_REQ_CLEAR_FEATURE 0x01
+#define USB_REQ_SET_FEATURE 0x03
+#define USB_REQ_SET_ADDRESS 0x05
+#define USB_REQ_GET_DESCRIPTOR 0x06
+#define USB_REQ_SET_DESCRIPTOR 0x07
+#define USB_REQ_GET_CONFIGURATION 0x08
+#define USB_REQ_SET_CONFIGURATION 0x09
+#define USB_REQ_GET_INTERFACE 0x0A
+#define USB_REQ_SET_INTERFACE 0x0B
+#define USB_REQ_SYNCH_FRAME 0x0C
#define USB_REQ_SET_SEL 0x30
#define USB_REQ_SET_ISOCH_DELAY 0x31
-#define USB_DEVICE_SELF_POWERED 0
-#define USB_DEVICE_REMOTE_WAKEUP 1
+#define USB_DEVICE_SELF_POWERED 0
+#define USB_DEVICE_REMOTE_WAKEUP 1
-#define USB_DT_DEVICE 0x01
-#define USB_DT_CONFIG 0x02
-#define USB_DT_STRING 0x03
-#define USB_DT_INTERFACE 0x04
-#define USB_DT_ENDPOINT 0x05
+#define USB_DT_DEVICE 0x01
+#define USB_DT_CONFIG 0x02
+#define USB_DT_STRING 0x03
+#define USB_DT_INTERFACE 0x04
+#define USB_DT_ENDPOINT 0x05
#define USB_DT_DEVICE_QUALIFIER 0x06
#define USB_DT_OTHER_SPEED_CONFIG 0x07
#define USB_DT_DEBUG 0x0A
@@ -165,15 +168,14 @@
#define USB_CFG_ATT_WAKEUP (1 << 5)
#define USB_CFG_ATT_BATTERY (1 << 4)
-#define USB_ENDPOINT_XFER_CONTROL 0
-#define USB_ENDPOINT_XFER_ISOC 1
-#define USB_ENDPOINT_XFER_BULK 2
-#define USB_ENDPOINT_XFER_INT 3
+#define USB_ENDPOINT_XFER_CONTROL 0
+#define USB_ENDPOINT_XFER_ISOC 1
+#define USB_ENDPOINT_XFER_BULK 2
+#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_XFER_INVALID 255
#define USB_INTERFACE_INVALID 255
-typedef struct USBBus USBBus;
typedef struct USBBusOps USBBusOps;
typedef struct USBPort USBPort;
typedef struct USBDevice USBDevice;
@@ -215,10 +217,10 @@ struct USBEndpoint {
};
enum USBDeviceFlags {
- USB_DEV_FLAG_FULL_PATH,
USB_DEV_FLAG_IS_HOST,
USB_DEV_FLAG_MSOS_DESC_ENABLE,
USB_DEV_FLAG_MSOS_DESC_IN_USE,
+ USB_DEV_FLAG_IS_SCSI_STORAGE,
};
/* definition of a USB device */
@@ -230,6 +232,9 @@ struct USBDevice {
void *opaque;
uint32_t flags;
+ char *pcap_filename;
+ FILE *pcap;
+
/* Actual connected speed */
int speed;
/* Supported speeds, not in info because it may be variable (hostdevs) */
@@ -263,17 +268,12 @@ struct USBDevice {
};
#define TYPE_USB_DEVICE "usb-device"
-#define USB_DEVICE(obj) \
- OBJECT_CHECK(USBDevice, (obj), TYPE_USB_DEVICE)
-#define USB_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(USBDeviceClass, (klass), TYPE_USB_DEVICE)
-#define USB_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(USBDeviceClass, (obj), TYPE_USB_DEVICE)
+OBJECT_DECLARE_TYPE(USBDevice, USBDeviceClass, USB_DEVICE)
typedef void (*USBDeviceRealize)(USBDevice *dev, Error **errp);
-typedef void (*USBDeviceUnrealize)(USBDevice *dev, Error **errp);
+typedef void (*USBDeviceUnrealize)(USBDevice *dev);
-typedef struct USBDeviceClass {
+struct USBDeviceClass {
DeviceClass parent_class;
USBDeviceRealize realize;
@@ -345,7 +345,7 @@ typedef struct USBDeviceClass {
const char *product_desc;
const USBDesc *usb_desc;
bool attached_settable;
-} USBDeviceClass;
+};
typedef struct USBPortOps {
void (*attach)(USBPort *port);
@@ -467,46 +467,15 @@ void usb_generic_async_ctrl_complete(USBDevice *s, USBPacket *p);
/* usb-linux.c */
void hmp_info_usbhost(Monitor *mon, const QDict *qdict);
-bool usb_host_dev_is_scsi_storage(USBDevice *usbdev);
/* usb ports of the VM */
#define VM_USB_HUB_SIZE 8
-/* hw/usb/hdc-musb.c */
-
-enum musb_irq_source_e {
- musb_irq_suspend = 0,
- musb_irq_resume,
- musb_irq_rst_babble,
- musb_irq_sof,
- musb_irq_connect,
- musb_irq_disconnect,
- musb_irq_vbus_request,
- musb_irq_vbus_error,
- musb_irq_rx,
- musb_irq_tx,
- musb_set_vbus,
- musb_set_session,
- /* Add new interrupts here */
- musb_irq_max, /* total number of interrupts defined */
-};
-
-typedef struct MUSBState MUSBState;
-
-extern CPUReadMemoryFunc * const musb_read[];
-extern CPUWriteMemoryFunc * const musb_write[];
-
-MUSBState *musb_init(DeviceState *parent_device, int gpio_base);
-void musb_reset(MUSBState *s);
-uint32_t musb_core_intr_get(MUSBState *s);
-void musb_core_intr_clear(MUSBState *s, uint32_t mask);
-void musb_set_size(MUSBState *s, int epnum, int size, int is_tx);
-
/* usb-bus.c */
#define TYPE_USB_BUS "usb-bus"
-#define USB_BUS(obj) OBJECT_CHECK(USBBus, (obj), TYPE_USB_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(USBBus, USB_BUS)
struct USBBus {
BusState qbus;
@@ -529,12 +498,8 @@ struct USBBusOps {
void usb_bus_new(USBBus *bus, size_t bus_size,
USBBusOps *ops, DeviceState *host);
void usb_bus_release(USBBus *bus);
-USBBus *usb_bus_find(int busnr);
void usb_legacy_register(const char *typename, const char *usbdevice_name,
- USBDevice *(*usbdevice_init)(USBBus *bus,
- const char *params));
-USBDevice *usb_create(USBBus *bus, const char *name);
-USBDevice *usb_create_simple(USBBus *bus, const char *name);
+ USBDevice *(*usbdevice_init)(void));
USBDevice *usbdevice_create(const char *cmdline);
void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index,
USBPortOps *ops, int speedmask);
@@ -593,15 +558,48 @@ const char *usb_device_get_product_desc(USBDevice *dev);
const USBDesc *usb_device_get_usb_desc(USBDevice *dev);
+static inline bool usb_device_is_scsi_storage(USBDevice *dev)
+{
+ return dev->flags & (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
+}
+
/* quirks.c */
/* In bulk endpoints are streaming data sources (iow behave like isoc eps) */
-#define USB_QUIRK_BUFFER_BULK_IN 0x01
+#define USB_QUIRK_BUFFER_BULK_IN 0x01
/* Bulk pkts in FTDI format, need special handling when combining packets */
-#define USB_QUIRK_IS_FTDI 0x02
+#define USB_QUIRK_IS_FTDI 0x02
int usb_get_quirks(uint16_t vendor_id, uint16_t product_id,
uint8_t interface_class, uint8_t interface_subclass,
uint8_t interface_protocol);
+/* pcap.c */
+void usb_pcap_init(FILE *fp);
+void usb_pcap_ctrl(USBPacket *p, bool setup);
+void usb_pcap_data(USBPacket *p, bool setup);
+
+static inline USBDevice *usb_new(const char *name)
+{
+ return USB_DEVICE(qdev_new(name));
+}
+
+static inline USBDevice *usb_try_new(const char *name)
+{
+ return USB_DEVICE(qdev_try_new(name));
+}
+
+static inline bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp)
+{
+ return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
+}
+
+static inline USBDevice *usb_create_simple(USBBus *bus, const char *name)
+{
+ USBDevice *dev = usb_new(name);
+
+ usb_realize_and_unref(dev, bus, &error_abort);
+ return dev;
+}
+
#endif
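
The new usb_new()/usb_realize_and_unref() inline helpers replace the removed
usb_create() path. A caller-side sketch; the "usb-kbd" type name and the error
handling shape are illustrative, not part of this header.

    static USBDevice *attach_usb_keyboard(USBBus *bus, Error **errp)
    {
        USBDevice *dev = usb_new("usb-kbd");

        if (!usb_realize_and_unref(dev, bus, errp)) {
            return NULL;    /* realize failed */
        }
        return dev;
    }
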
diff --git a/include/hw/usb/chipidea.h b/include/hw/usb/chipidea.h
index 1ec2e9dbda..fe4113ee01 100644
--- a/include/hw/usb/chipidea.h
+++ b/include/hw/usb/chipidea.h
@@ -2,15 +2,16 @@
#define CHIPIDEA_H
#include "hw/usb/hcd-ehci.h"
+#include "qom/object.h"
-typedef struct ChipideaState {
+struct ChipideaState {
/*< private >*/
EHCISysBusState parent_obj;
MemoryRegion iomem[3];
-} ChipideaState;
+};
#define TYPE_CHIPIDEA "usb-chipidea"
-#define CHIPIDEA(obj) OBJECT_CHECK(ChipideaState, (obj), TYPE_CHIPIDEA)
+OBJECT_DECLARE_SIMPLE_TYPE(ChipideaState, CHIPIDEA)
#endif /* CHIPIDEA_H */
diff --git a/include/hw/usb/dwc2-regs.h b/include/hw/usb/dwc2-regs.h
new file mode 100644
index 0000000000..0bf3f2aa17
--- /dev/null
+++ b/include/hw/usb/dwc2-regs.h
@@ -0,0 +1,899 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Imported from the Linux kernel file drivers/usb/dwc2/hw.h, commit
+ * a89bae709b3492b478480a2c9734e7e9393b279c ("usb: dwc2: Move
+ * UTMI_PHY_DATA defines closer")
+ *
+ * hw.h - DesignWare HS OTG Controller hardware definitions
+ *
+ * Copyright 2004-2013 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DWC2_REGS_H
+#define DWC2_REGS_H
+
+#define HSOTG_REG(x) (x)
+
+#define GOTGCTL HSOTG_REG(0x000)
+#define GOTGCTL_CHIRPEN BIT(27)
+#define GOTGCTL_MULT_VALID_BC_MASK (0x1f << 22)
+#define GOTGCTL_MULT_VALID_BC_SHIFT 22
+#define GOTGCTL_OTGVER BIT(20)
+#define GOTGCTL_BSESVLD BIT(19)
+#define GOTGCTL_ASESVLD BIT(18)
+#define GOTGCTL_DBNC_SHORT BIT(17)
+#define GOTGCTL_CONID_B BIT(16)
+#define GOTGCTL_DBNCE_FLTR_BYPASS BIT(15)
+#define GOTGCTL_DEVHNPEN BIT(11)
+#define GOTGCTL_HSTSETHNPEN BIT(10)
+#define GOTGCTL_HNPREQ BIT(9)
+#define GOTGCTL_HSTNEGSCS BIT(8)
+#define GOTGCTL_SESREQ BIT(1)
+#define GOTGCTL_SESREQSCS BIT(0)
+
+#define GOTGINT HSOTG_REG(0x004)
+#define GOTGINT_DBNCE_DONE BIT(19)
+#define GOTGINT_A_DEV_TOUT_CHG BIT(18)
+#define GOTGINT_HST_NEG_DET BIT(17)
+#define GOTGINT_HST_NEG_SUC_STS_CHNG BIT(9)
+#define GOTGINT_SES_REQ_SUC_STS_CHNG BIT(8)
+#define GOTGINT_SES_END_DET BIT(2)
+
+#define GAHBCFG HSOTG_REG(0x008)
+#define GAHBCFG_AHB_SINGLE BIT(23)
+#define GAHBCFG_NOTI_ALL_DMA_WRIT BIT(22)
+#define GAHBCFG_REM_MEM_SUPP BIT(21)
+#define GAHBCFG_P_TXF_EMP_LVL BIT(8)
+#define GAHBCFG_NP_TXF_EMP_LVL BIT(7)
+#define GAHBCFG_DMA_EN BIT(5)
+#define GAHBCFG_HBSTLEN_MASK (0xf << 1)
+#define GAHBCFG_HBSTLEN_SHIFT 1
+#define GAHBCFG_HBSTLEN_SINGLE 0
+#define GAHBCFG_HBSTLEN_INCR 1
+#define GAHBCFG_HBSTLEN_INCR4 3
+#define GAHBCFG_HBSTLEN_INCR8 5
+#define GAHBCFG_HBSTLEN_INCR16 7
+#define GAHBCFG_GLBL_INTR_EN BIT(0)
+#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \
+ GAHBCFG_NP_TXF_EMP_LVL | \
+ GAHBCFG_DMA_EN | \
+ GAHBCFG_GLBL_INTR_EN)
+
+#define GUSBCFG HSOTG_REG(0x00C)
+#define GUSBCFG_FORCEDEVMODE BIT(30)
+#define GUSBCFG_FORCEHOSTMODE BIT(29)
+#define GUSBCFG_TXENDDELAY BIT(28)
+#define GUSBCFG_ICTRAFFICPULLREMOVE BIT(27)
+#define GUSBCFG_ICUSBCAP BIT(26)
+#define GUSBCFG_ULPI_INT_PROT_DIS BIT(25)
+#define GUSBCFG_INDICATORPASSTHROUGH BIT(24)
+#define GUSBCFG_INDICATORCOMPLEMENT BIT(23)
+#define GUSBCFG_TERMSELDLPULSE BIT(22)
+#define GUSBCFG_ULPI_INT_VBUS_IND BIT(21)
+#define GUSBCFG_ULPI_EXT_VBUS_DRV BIT(20)
+#define GUSBCFG_ULPI_CLK_SUSP_M BIT(19)
+#define GUSBCFG_ULPI_AUTO_RES BIT(18)
+#define GUSBCFG_ULPI_FS_LS BIT(17)
+#define GUSBCFG_OTG_UTMI_FS_SEL BIT(16)
+#define GUSBCFG_PHY_LP_CLK_SEL BIT(15)
+#define GUSBCFG_USBTRDTIM_MASK (0xf << 10)
+#define GUSBCFG_USBTRDTIM_SHIFT 10
+#define GUSBCFG_HNPCAP BIT(9)
+#define GUSBCFG_SRPCAP BIT(8)
+#define GUSBCFG_DDRSEL BIT(7)
+#define GUSBCFG_PHYSEL BIT(6)
+#define GUSBCFG_FSINTF BIT(5)
+#define GUSBCFG_ULPI_UTMI_SEL BIT(4)
+#define GUSBCFG_PHYIF16 BIT(3)
+#define GUSBCFG_PHYIF8 (0 << 3)
+#define GUSBCFG_TOUTCAL_MASK (0x7 << 0)
+#define GUSBCFG_TOUTCAL_SHIFT 0
+#define GUSBCFG_TOUTCAL_LIMIT 0x7
+#define GUSBCFG_TOUTCAL(_x) ((_x) << 0)
+
+#define GRSTCTL HSOTG_REG(0x010)
+#define GRSTCTL_AHBIDLE BIT(31)
+#define GRSTCTL_DMAREQ BIT(30)
+#define GRSTCTL_TXFNUM_MASK (0x1f << 6)
+#define GRSTCTL_TXFNUM_SHIFT 6
+#define GRSTCTL_TXFNUM_LIMIT 0x1f
+#define GRSTCTL_TXFNUM(_x) ((_x) << 6)
+#define GRSTCTL_TXFFLSH BIT(5)
+#define GRSTCTL_RXFFLSH BIT(4)
+#define GRSTCTL_IN_TKNQ_FLSH BIT(3)
+#define GRSTCTL_FRMCNTRRST BIT(2)
+#define GRSTCTL_HSFTRST BIT(1)
+#define GRSTCTL_CSFTRST BIT(0)
+
+#define GINTSTS HSOTG_REG(0x014)
+#define GINTMSK HSOTG_REG(0x018)
+#define GINTSTS_WKUPINT BIT(31)
+#define GINTSTS_SESSREQINT BIT(30)
+#define GINTSTS_DISCONNINT BIT(29)
+#define GINTSTS_CONIDSTSCHNG BIT(28)
+#define GINTSTS_LPMTRANRCVD BIT(27)
+#define GINTSTS_PTXFEMP BIT(26)
+#define GINTSTS_HCHINT BIT(25)
+#define GINTSTS_PRTINT BIT(24)
+#define GINTSTS_RESETDET BIT(23)
+#define GINTSTS_FET_SUSP BIT(22)
+#define GINTSTS_INCOMPL_IP BIT(21)
+#define GINTSTS_INCOMPL_SOOUT BIT(21)
+#define GINTSTS_INCOMPL_SOIN BIT(20)
+#define GINTSTS_OEPINT BIT(19)
+#define GINTSTS_IEPINT BIT(18)
+#define GINTSTS_EPMIS BIT(17)
+#define GINTSTS_RESTOREDONE BIT(16)
+#define GINTSTS_EOPF BIT(15)
+#define GINTSTS_ISOUTDROP BIT(14)
+#define GINTSTS_ENUMDONE BIT(13)
+#define GINTSTS_USBRST BIT(12)
+#define GINTSTS_USBSUSP BIT(11)
+#define GINTSTS_ERLYSUSP BIT(10)
+#define GINTSTS_I2CINT BIT(9)
+#define GINTSTS_ULPI_CK_INT BIT(8)
+#define GINTSTS_GOUTNAKEFF BIT(7)
+#define GINTSTS_GINNAKEFF BIT(6)
+#define GINTSTS_NPTXFEMP BIT(5)
+#define GINTSTS_RXFLVL BIT(4)
+#define GINTSTS_SOF BIT(3)
+#define GINTSTS_OTGINT BIT(2)
+#define GINTSTS_MODEMIS BIT(1)
+#define GINTSTS_CURMODE_HOST BIT(0)
+
+#define GRXSTSR HSOTG_REG(0x01C)
+#define GRXSTSP HSOTG_REG(0x020)
+#define GRXSTS_FN_MASK (0x7f << 25)
+#define GRXSTS_FN_SHIFT 25
+#define GRXSTS_PKTSTS_MASK (0xf << 17)
+#define GRXSTS_PKTSTS_SHIFT 17
+#define GRXSTS_PKTSTS_GLOBALOUTNAK 1
+#define GRXSTS_PKTSTS_OUTRX 2
+#define GRXSTS_PKTSTS_HCHIN 2
+#define GRXSTS_PKTSTS_OUTDONE 3
+#define GRXSTS_PKTSTS_HCHIN_XFER_COMP 3
+#define GRXSTS_PKTSTS_SETUPDONE 4
+#define GRXSTS_PKTSTS_DATATOGGLEERR 5
+#define GRXSTS_PKTSTS_SETUPRX 6
+#define GRXSTS_PKTSTS_HCHHALTED 7
+#define GRXSTS_HCHNUM_MASK (0xf << 0)
+#define GRXSTS_HCHNUM_SHIFT 0
+#define GRXSTS_DPID_MASK (0x3 << 15)
+#define GRXSTS_DPID_SHIFT 15
+#define GRXSTS_BYTECNT_MASK (0x7ff << 4)
+#define GRXSTS_BYTECNT_SHIFT 4
+#define GRXSTS_EPNUM_MASK (0xf << 0)
+#define GRXSTS_EPNUM_SHIFT 0
+
+#define GRXFSIZ HSOTG_REG(0x024)
+#define GRXFSIZ_DEPTH_MASK (0xffff << 0)
+#define GRXFSIZ_DEPTH_SHIFT 0
+
+#define GNPTXFSIZ HSOTG_REG(0x028)
+/* Use FIFOSIZE_* constants to access this register */
+
+#define GNPTXSTS HSOTG_REG(0x02C)
+#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24)
+#define GNPTXSTS_NP_TXQ_TOP_SHIFT 24
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_MASK (0xff << 16)
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_SHIFT 16
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(_v) (((_v) >> 16) & 0xff)
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_MASK (0xffff << 0)
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_SHIFT 0
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff)
+
+#define GI2CCTL HSOTG_REG(0x0030)
+#define GI2CCTL_BSYDNE BIT(31)
+#define GI2CCTL_RW BIT(30)
+#define GI2CCTL_I2CDATSE0 BIT(28)
+#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26)
+#define GI2CCTL_I2CDEVADDR_SHIFT 26
+#define GI2CCTL_I2CSUSPCTL BIT(25)
+#define GI2CCTL_ACK BIT(24)
+#define GI2CCTL_I2CEN BIT(23)
+#define GI2CCTL_ADDR_MASK (0x7f << 16)
+#define GI2CCTL_ADDR_SHIFT 16
+#define GI2CCTL_REGADDR_MASK (0xff << 8)
+#define GI2CCTL_REGADDR_SHIFT 8
+#define GI2CCTL_RWDATA_MASK (0xff << 0)
+#define GI2CCTL_RWDATA_SHIFT 0
+
+#define GPVNDCTL HSOTG_REG(0x0034)
+#define GGPIO HSOTG_REG(0x0038)
+#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16)
+
+#define GUID HSOTG_REG(0x003c)
+#define GSNPSID HSOTG_REG(0x0040)
+#define GHWCFG1 HSOTG_REG(0x0044)
+#define GSNPSID_ID_MASK GENMASK(31, 16)
+
+#define GHWCFG2 HSOTG_REG(0x0048)
+#define GHWCFG2_OTG_ENABLE_IC_USB BIT(31)
+#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26)
+#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26
+#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24)
+#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24
+#define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22)
+#define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22
+#define GHWCFG2_MULTI_PROC_INT BIT(20)
+#define GHWCFG2_DYNAMIC_FIFO BIT(19)
+#define GHWCFG2_PERIO_EP_SUPPORTED BIT(18)
+#define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14)
+#define GHWCFG2_NUM_HOST_CHAN_SHIFT 14
+#define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10)
+#define GHWCFG2_NUM_DEV_EP_SHIFT 10
+#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8)
+#define GHWCFG2_FS_PHY_TYPE_SHIFT 8
+#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1
+#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2
+#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3
+#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6)
+#define GHWCFG2_HS_PHY_TYPE_SHIFT 6
+#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_HS_PHY_TYPE_UTMI 1
+#define GHWCFG2_HS_PHY_TYPE_ULPI 2
+#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+#define GHWCFG2_POINT2POINT BIT(5)
+#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3)
+#define GHWCFG2_ARCHITECTURE_SHIFT 3
+#define GHWCFG2_SLAVE_ONLY_ARCH 0
+#define GHWCFG2_EXT_DMA_ARCH 1
+#define GHWCFG2_INT_DMA_ARCH 2
+#define GHWCFG2_OP_MODE_MASK (0x7 << 0)
+#define GHWCFG2_OP_MODE_SHIFT 0
+#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0
+#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1
+#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+#define GHWCFG2_OP_MODE_UNDEFINED 7
+
+#define GHWCFG3 HSOTG_REG(0x004c)
+#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16)
+#define GHWCFG3_DFIFO_DEPTH_SHIFT 16
+#define GHWCFG3_OTG_LPM_EN BIT(15)
+#define GHWCFG3_BC_SUPPORT BIT(14)
+#define GHWCFG3_OTG_ENABLE_HSIC BIT(13)
+#define GHWCFG3_ADP_SUPP BIT(12)
+#define GHWCFG3_SYNCH_RESET_TYPE BIT(11)
+#define GHWCFG3_OPTIONAL_FEATURES BIT(10)
+#define GHWCFG3_VENDOR_CTRL_IF BIT(9)
+#define GHWCFG3_I2C BIT(8)
+#define GHWCFG3_OTG_FUNC BIT(7)
+#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4)
+#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4
+#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0)
+#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0
+
+#define GHWCFG4 HSOTG_REG(0x0050)
+#define GHWCFG4_DESC_DMA_DYN BIT(31)
+#define GHWCFG4_DESC_DMA BIT(30)
+#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26)
+#define GHWCFG4_NUM_IN_EPS_SHIFT 26
+#define GHWCFG4_DED_FIFO_EN BIT(25)
+#define GHWCFG4_DED_FIFO_SHIFT 25
+#define GHWCFG4_SESSION_END_FILT_EN BIT(24)
+#define GHWCFG4_B_VALID_FILT_EN BIT(23)
+#define GHWCFG4_A_VALID_FILT_EN BIT(22)
+#define GHWCFG4_VBUS_VALID_FILT_EN BIT(21)
+#define GHWCFG4_IDDIG_FILT_EN BIT(20)
+#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16)
+#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
+#define GHWCFG4_ACG_SUPPORTED BIT(12)
+#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
+#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
+#define GHWCFG4_XHIBER BIT(7)
+#define GHWCFG4_HIBER BIT(6)
+#define GHWCFG4_MIN_AHB_FREQ BIT(5)
+#define GHWCFG4_POWER_OPTIMIZ BIT(4)
+#define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0)
+#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0
+
+#define GLPMCFG HSOTG_REG(0x0054)
+#define GLPMCFG_INVSELHSIC BIT(31)
+#define GLPMCFG_HSICCON BIT(30)
+#define GLPMCFG_RSTRSLPSTS BIT(29)
+#define GLPMCFG_ENBESL BIT(28)
+#define GLPMCFG_LPM_RETRYCNT_STS_MASK (0x7 << 25)
+#define GLPMCFG_LPM_RETRYCNT_STS_SHIFT 25
+#define GLPMCFG_SNDLPM BIT(24)
+#define GLPMCFG_RETRY_CNT_MASK (0x7 << 21)
+#define GLPMCFG_RETRY_CNT_SHIFT 21
+#define GLPMCFG_LPM_REJECT_CTRL_CONTROL BIT(21)
+#define GLPMCFG_LPM_ACCEPT_CTRL_ISOC BIT(22)
+#define GLPMCFG_LPM_CHNL_INDX_MASK (0xf << 17)
+#define GLPMCFG_LPM_CHNL_INDX_SHIFT 17
+#define GLPMCFG_L1RESUMEOK BIT(16)
+#define GLPMCFG_SLPSTS BIT(15)
+#define GLPMCFG_COREL1RES_MASK (0x3 << 13)
+#define GLPMCFG_COREL1RES_SHIFT 13
+#define GLPMCFG_HIRD_THRES_MASK (0x1f << 8)
+#define GLPMCFG_HIRD_THRES_SHIFT 8
+#define GLPMCFG_HIRD_THRES_EN (0x10 << 8)
+#define GLPMCFG_ENBLSLPM BIT(7)
+#define GLPMCFG_BREMOTEWAKE BIT(6)
+#define GLPMCFG_HIRD_MASK (0xf << 2)
+#define GLPMCFG_HIRD_SHIFT 2
+#define GLPMCFG_APPL1RES BIT(1)
+#define GLPMCFG_LPMCAP BIT(0)
+
+#define GPWRDN HSOTG_REG(0x0058)
+#define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24)
+#define GPWRDN_MULT_VAL_ID_BC_SHIFT 24
+#define GPWRDN_ADP_INT BIT(23)
+#define GPWRDN_BSESSVLD BIT(22)
+#define GPWRDN_IDSTS BIT(21)
+#define GPWRDN_LINESTATE_MASK (0x3 << 19)
+#define GPWRDN_LINESTATE_SHIFT 19
+#define GPWRDN_STS_CHGINT_MSK BIT(18)
+#define GPWRDN_STS_CHGINT BIT(17)
+#define GPWRDN_SRP_DET_MSK BIT(16)
+#define GPWRDN_SRP_DET BIT(15)
+#define GPWRDN_CONNECT_DET_MSK BIT(14)
+#define GPWRDN_CONNECT_DET BIT(13)
+#define GPWRDN_DISCONN_DET_MSK BIT(12)
+#define GPWRDN_DISCONN_DET BIT(11)
+#define GPWRDN_RST_DET_MSK BIT(10)
+#define GPWRDN_RST_DET BIT(9)
+#define GPWRDN_LNSTSCHG_MSK BIT(8)
+#define GPWRDN_LNSTSCHG BIT(7)
+#define GPWRDN_DIS_VBUS BIT(6)
+#define GPWRDN_PWRDNSWTCH BIT(5)
+#define GPWRDN_PWRDNRSTN BIT(4)
+#define GPWRDN_PWRDNCLMP BIT(3)
+#define GPWRDN_RESTORE BIT(2)
+#define GPWRDN_PMUACTV BIT(1)
+#define GPWRDN_PMUINTSEL BIT(0)
+
+#define GDFIFOCFG HSOTG_REG(0x005c)
+#define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16)
+#define GDFIFOCFG_EPINFOBASE_SHIFT 16
+#define GDFIFOCFG_GDFIFOCFG_MASK (0xffff << 0)
+#define GDFIFOCFG_GDFIFOCFG_SHIFT 0
+
+#define ADPCTL HSOTG_REG(0x0060)
+#define ADPCTL_AR_MASK (0x3 << 27)
+#define ADPCTL_AR_SHIFT 27
+#define ADPCTL_ADP_TMOUT_INT_MSK BIT(26)
+#define ADPCTL_ADP_SNS_INT_MSK BIT(25)
+#define ADPCTL_ADP_PRB_INT_MSK BIT(24)
+#define ADPCTL_ADP_TMOUT_INT BIT(23)
+#define ADPCTL_ADP_SNS_INT BIT(22)
+#define ADPCTL_ADP_PRB_INT BIT(21)
+#define ADPCTL_ADPENA BIT(20)
+#define ADPCTL_ADPRES BIT(19)
+#define ADPCTL_ENASNS BIT(18)
+#define ADPCTL_ENAPRB BIT(17)
+#define ADPCTL_RTIM_MASK (0x7ff << 6)
+#define ADPCTL_RTIM_SHIFT 6
+#define ADPCTL_PRB_PER_MASK (0x3 << 4)
+#define ADPCTL_PRB_PER_SHIFT 4
+#define ADPCTL_PRB_DELTA_MASK (0x3 << 2)
+#define ADPCTL_PRB_DELTA_SHIFT 2
+#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0)
+#define ADPCTL_PRB_DSCHRG_SHIFT 0
+
+#define GREFCLK HSOTG_REG(0x0064)
+#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15)
+#define GREFCLK_REFCLKPER_SHIFT 15
+#define GREFCLK_REF_CLK_MODE BIT(14)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT 0
+
+#define GINTMSK2 HSOTG_REG(0x0068)
+#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0)
+
+#define GINTSTS2 HSOTG_REG(0x006c)
+#define GINTSTS2_WKUP_ALERT_INT BIT(0)
+
+#define HPTXFSIZ HSOTG_REG(0x100)
+/* Use FIFOSIZE_* constants to access this register */
+
+#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4))
+/* Use FIFOSIZE_* constants to access this register */
+
+/* These apply to the GNPTXFSIZ, HPTXFSIZ and DPTXFSIZN registers */
+#define FIFOSIZE_DEPTH_MASK (0xffff << 16)
+#define FIFOSIZE_DEPTH_SHIFT 16
+#define FIFOSIZE_STARTADDR_MASK (0xffff << 0)
+#define FIFOSIZE_STARTADDR_SHIFT 0
+#define FIFOSIZE_DEPTH_GET(_x) (((_x) >> 16) & 0xffff)
+
+/* Device mode registers */
+
+#define DCFG HSOTG_REG(0x800)
+#define DCFG_DESCDMA_EN BIT(23)
+#define DCFG_EPMISCNT_MASK (0x1f << 18)
+#define DCFG_EPMISCNT_SHIFT 18
+#define DCFG_EPMISCNT_LIMIT 0x1f
+#define DCFG_EPMISCNT(_x) ((_x) << 18)
+#define DCFG_IPG_ISOC_SUPPORDED BIT(17)
+#define DCFG_PERFRINT_MASK (0x3 << 11)
+#define DCFG_PERFRINT_SHIFT 11
+#define DCFG_PERFRINT_LIMIT 0x3
+#define DCFG_PERFRINT(_x) ((_x) << 11)
+#define DCFG_DEVADDR_MASK (0x7f << 4)
+#define DCFG_DEVADDR_SHIFT 4
+#define DCFG_DEVADDR_LIMIT 0x7f
+#define DCFG_DEVADDR(_x) ((_x) << 4)
+#define DCFG_NZ_STS_OUT_HSHK BIT(2)
+#define DCFG_DEVSPD_MASK (0x3 << 0)
+#define DCFG_DEVSPD_SHIFT 0
+#define DCFG_DEVSPD_HS 0
+#define DCFG_DEVSPD_FS 1
+#define DCFG_DEVSPD_LS 2
+#define DCFG_DEVSPD_FS48 3
+
+#define DCTL HSOTG_REG(0x804)
+#define DCTL_SERVICE_INTERVAL_SUPPORTED BIT(19)
+#define DCTL_PWRONPRGDONE BIT(11)
+#define DCTL_CGOUTNAK BIT(10)
+#define DCTL_SGOUTNAK BIT(9)
+#define DCTL_CGNPINNAK BIT(8)
+#define DCTL_SGNPINNAK BIT(7)
+#define DCTL_TSTCTL_MASK (0x7 << 4)
+#define DCTL_TSTCTL_SHIFT 4
+#define DCTL_GOUTNAKSTS BIT(3)
+#define DCTL_GNPINNAKSTS BIT(2)
+#define DCTL_SFTDISCON BIT(1)
+#define DCTL_RMTWKUPSIG BIT(0)
+
+#define DSTS HSOTG_REG(0x808)
+#define DSTS_SOFFN_MASK (0x3fff << 8)
+#define DSTS_SOFFN_SHIFT 8
+#define DSTS_SOFFN_LIMIT 0x3fff
+#define DSTS_SOFFN(_x) ((_x) << 8)
+#define DSTS_ERRATICERR BIT(3)
+#define DSTS_ENUMSPD_MASK (0x3 << 1)
+#define DSTS_ENUMSPD_SHIFT 1
+#define DSTS_ENUMSPD_HS 0
+#define DSTS_ENUMSPD_FS 1
+#define DSTS_ENUMSPD_LS 2
+#define DSTS_ENUMSPD_FS48 3
+#define DSTS_SUSPSTS BIT(0)
+
+#define DIEPMSK HSOTG_REG(0x810)
+#define DIEPMSK_NAKMSK BIT(13)
+#define DIEPMSK_BNAININTRMSK BIT(9)
+#define DIEPMSK_TXFIFOUNDRNMSK BIT(8)
+#define DIEPMSK_TXFIFOEMPTY BIT(7)
+#define DIEPMSK_INEPNAKEFFMSK BIT(6)
+#define DIEPMSK_INTKNEPMISMSK BIT(5)
+#define DIEPMSK_INTKNTXFEMPMSK BIT(4)
+#define DIEPMSK_TIMEOUTMSK BIT(3)
+#define DIEPMSK_AHBERRMSK BIT(2)
+#define DIEPMSK_EPDISBLDMSK BIT(1)
+#define DIEPMSK_XFERCOMPLMSK BIT(0)
+
+#define DOEPMSK HSOTG_REG(0x814)
+#define DOEPMSK_BNAMSK BIT(9)
+#define DOEPMSK_BACK2BACKSETUP BIT(6)
+#define DOEPMSK_STSPHSERCVDMSK BIT(5)
+#define DOEPMSK_OUTTKNEPDISMSK BIT(4)
+#define DOEPMSK_SETUPMSK BIT(3)
+#define DOEPMSK_AHBERRMSK BIT(2)
+#define DOEPMSK_EPDISBLDMSK BIT(1)
+#define DOEPMSK_XFERCOMPLMSK BIT(0)
+
+#define DAINT HSOTG_REG(0x818)
+#define DAINTMSK HSOTG_REG(0x81C)
+#define DAINT_OUTEP_SHIFT 16
+#define DAINT_OUTEP(_x) (1 << ((_x) + 16))
+#define DAINT_INEP(_x) (1 << (_x))
+
+#define DTKNQR1 HSOTG_REG(0x820)
+#define DTKNQR2 HSOTG_REG(0x824)
+#define DTKNQR3 HSOTG_REG(0x830)
+#define DTKNQR4 HSOTG_REG(0x834)
+#define DIEPEMPMSK HSOTG_REG(0x834)
+
+#define DVBUSDIS HSOTG_REG(0x828)
+#define DVBUSPULSE HSOTG_REG(0x82C)
+
+#define DIEPCTL0 HSOTG_REG(0x900)
+#define DIEPCTL(_a) HSOTG_REG(0x900 + ((_a) * 0x20))
+
+#define DOEPCTL0 HSOTG_REG(0xB00)
+#define DOEPCTL(_a) HSOTG_REG(0xB00 + ((_a) * 0x20))
+
+/* EP0 specialness:
+ * bits[29..28] - reserved (no SetD0PID, SetD1PID)
+ * bits[25..22] - should always be zero, this isn't a periodic endpoint
+ * bits[10..0] - MPS setting different for EP0
+ */
+#define D0EPCTL_MPS_MASK (0x3 << 0)
+#define D0EPCTL_MPS_SHIFT 0
+#define D0EPCTL_MPS_64 0
+#define D0EPCTL_MPS_32 1
+#define D0EPCTL_MPS_16 2
+#define D0EPCTL_MPS_8 3
+
+#define DXEPCTL_EPENA BIT(31)
+#define DXEPCTL_EPDIS BIT(30)
+#define DXEPCTL_SETD1PID BIT(29)
+#define DXEPCTL_SETODDFR BIT(29)
+#define DXEPCTL_SETD0PID BIT(28)
+#define DXEPCTL_SETEVENFR BIT(28)
+#define DXEPCTL_SNAK BIT(27)
+#define DXEPCTL_CNAK BIT(26)
+#define DXEPCTL_TXFNUM_MASK (0xf << 22)
+#define DXEPCTL_TXFNUM_SHIFT 22
+#define DXEPCTL_TXFNUM_LIMIT 0xf
+#define DXEPCTL_TXFNUM(_x) ((_x) << 22)
+#define DXEPCTL_STALL BIT(21)
+#define DXEPCTL_SNP BIT(20)
+#define DXEPCTL_EPTYPE_MASK (0x3 << 18)
+#define DXEPCTL_EPTYPE_CONTROL (0x0 << 18)
+#define DXEPCTL_EPTYPE_ISO (0x1 << 18)
+#define DXEPCTL_EPTYPE_BULK (0x2 << 18)
+#define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18)
+
+#define DXEPCTL_NAKSTS BIT(17)
+#define DXEPCTL_DPID BIT(16)
+#define DXEPCTL_EOFRNUM BIT(16)
+#define DXEPCTL_USBACTEP BIT(15)
+#define DXEPCTL_NEXTEP_MASK (0xf << 11)
+#define DXEPCTL_NEXTEP_SHIFT 11
+#define DXEPCTL_NEXTEP_LIMIT 0xf
+#define DXEPCTL_NEXTEP(_x) ((_x) << 11)
+#define DXEPCTL_MPS_MASK (0x7ff << 0)
+#define DXEPCTL_MPS_SHIFT 0
+#define DXEPCTL_MPS_LIMIT 0x7ff
+#define DXEPCTL_MPS(_x) ((_x) << 0)
+
+#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
+#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
+#define DXEPINT_SETUP_RCVD BIT(15)
+#define DXEPINT_NYETINTRPT BIT(14)
+#define DXEPINT_NAKINTRPT BIT(13)
+#define DXEPINT_BBLEERRINTRPT BIT(12)
+#define DXEPINT_PKTDRPSTS BIT(11)
+#define DXEPINT_BNAINTR BIT(9)
+#define DXEPINT_TXFIFOUNDRN BIT(8)
+#define DXEPINT_OUTPKTERR BIT(8)
+#define DXEPINT_TXFEMP BIT(7)
+#define DXEPINT_INEPNAKEFF BIT(6)
+#define DXEPINT_BACK2BACKSETUP BIT(6)
+#define DXEPINT_INTKNEPMIS BIT(5)
+#define DXEPINT_STSPHSERCVD BIT(5)
+#define DXEPINT_INTKNTXFEMP BIT(4)
+#define DXEPINT_OUTTKNEPDIS BIT(4)
+#define DXEPINT_TIMEOUT BIT(3)
+#define DXEPINT_SETUP BIT(3)
+#define DXEPINT_AHBERR BIT(2)
+#define DXEPINT_EPDISBLD BIT(1)
+#define DXEPINT_XFERCOMPL BIT(0)
+
+#define DIEPTSIZ0 HSOTG_REG(0x910)
+#define DIEPTSIZ0_PKTCNT_MASK (0x3 << 19)
+#define DIEPTSIZ0_PKTCNT_SHIFT 19
+#define DIEPTSIZ0_PKTCNT_LIMIT 0x3
+#define DIEPTSIZ0_PKTCNT(_x) ((_x) << 19)
+#define DIEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
+#define DIEPTSIZ0_XFERSIZE_SHIFT 0
+#define DIEPTSIZ0_XFERSIZE_LIMIT 0x7f
+#define DIEPTSIZ0_XFERSIZE(_x) ((_x) << 0)
+
+#define DOEPTSIZ0 HSOTG_REG(0xB10)
+#define DOEPTSIZ0_SUPCNT_MASK (0x3 << 29)
+#define DOEPTSIZ0_SUPCNT_SHIFT 29
+#define DOEPTSIZ0_SUPCNT_LIMIT 0x3
+#define DOEPTSIZ0_SUPCNT(_x) ((_x) << 29)
+#define DOEPTSIZ0_PKTCNT BIT(19)
+#define DOEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
+#define DOEPTSIZ0_XFERSIZE_SHIFT 0
+
+#define DIEPTSIZ(_a) HSOTG_REG(0x910 + ((_a) * 0x20))
+#define DOEPTSIZ(_a) HSOTG_REG(0xB10 + ((_a) * 0x20))
+#define DXEPTSIZ_MC_MASK (0x3 << 29)
+#define DXEPTSIZ_MC_SHIFT 29
+#define DXEPTSIZ_MC_LIMIT 0x3
+#define DXEPTSIZ_MC(_x) ((_x) << 29)
+#define DXEPTSIZ_PKTCNT_MASK (0x3ff << 19)
+#define DXEPTSIZ_PKTCNT_SHIFT 19
+#define DXEPTSIZ_PKTCNT_LIMIT 0x3ff
+#define DXEPTSIZ_PKTCNT_GET(_v) (((_v) >> 19) & 0x3ff)
+#define DXEPTSIZ_PKTCNT(_x) ((_x) << 19)
+#define DXEPTSIZ_XFERSIZE_MASK (0x7ffff << 0)
+#define DXEPTSIZ_XFERSIZE_SHIFT 0
+#define DXEPTSIZ_XFERSIZE_LIMIT 0x7ffff
+#define DXEPTSIZ_XFERSIZE_GET(_v) (((_v) >> 0) & 0x7ffff)
+#define DXEPTSIZ_XFERSIZE(_x) ((_x) << 0)
+
+#define DIEPDMA(_a) HSOTG_REG(0x914 + ((_a) * 0x20))
+#define DOEPDMA(_a) HSOTG_REG(0xB14 + ((_a) * 0x20))
+
+#define DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20))
+
+#define PCGCTL HSOTG_REG(0x0e00)
+#define PCGCTL_IF_DEV_MODE BIT(31)
+#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29)
+#define PCGCTL_P2HD_PRT_SPD_SHIFT 29
+#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27)
+#define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27
+#define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20)
+#define PCGCTL_MAC_DEV_ADDR_SHIFT 20
+#define PCGCTL_MAX_TERMSEL BIT(19)
+#define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17)
+#define PCGCTL_MAX_XCVRSELECT_SHIFT 17
+#define PCGCTL_PORT_POWER BIT(16)
+#define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14)
+#define PCGCTL_PRT_CLK_SEL_SHIFT 14
+#define PCGCTL_ESS_REG_RESTORED BIT(13)
+#define PCGCTL_EXTND_HIBER_SWITCH BIT(12)
+#define PCGCTL_EXTND_HIBER_PWRCLMP BIT(11)
+#define PCGCTL_ENBL_EXTND_HIBER BIT(10)
+#define PCGCTL_RESTOREMODE BIT(9)
+#define PCGCTL_RESETAFTSUSP BIT(8)
+#define PCGCTL_DEEP_SLEEP BIT(7)
+#define PCGCTL_PHY_IN_SLEEP BIT(6)
+#define PCGCTL_ENBL_SLEEP_GATING BIT(5)
+#define PCGCTL_RSTPDWNMODULE BIT(3)
+#define PCGCTL_PWRCLMP BIT(2)
+#define PCGCTL_GATEHCLK BIT(1)
+#define PCGCTL_STOPPCLK BIT(0)
+
+#define PCGCCTL1 HSOTG_REG(0xe04)
+#define PCGCCTL1_TIMER (0x3 << 1)
+#define PCGCCTL1_GATEEN BIT(0)
+
+#define EPFIFO(_a) HSOTG_REG(0x1000 + ((_a) * 0x1000))
+
+/* Host Mode Registers */
+
+#define HCFG HSOTG_REG(0x0400)
+#define HCFG_MODECHTIMEN BIT(31)
+#define HCFG_PERSCHEDENA BIT(26)
+#define HCFG_FRLISTEN_MASK (0x3 << 24)
+#define HCFG_FRLISTEN_SHIFT 24
+#define HCFG_FRLISTEN_8 (0 << 24)
+#define FRLISTEN_8_SIZE 8
+#define HCFG_FRLISTEN_16 BIT(24)
+#define FRLISTEN_16_SIZE 16
+#define HCFG_FRLISTEN_32 (2 << 24)
+#define FRLISTEN_32_SIZE 32
+#define HCFG_FRLISTEN_64 (3 << 24)
+#define FRLISTEN_64_SIZE 64
+#define HCFG_DESCDMA BIT(23)
+#define HCFG_RESVALID_MASK (0xff << 8)
+#define HCFG_RESVALID_SHIFT 8
+#define HCFG_ENA32KHZ BIT(7)
+#define HCFG_FSLSSUPP BIT(2)
+#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0)
+#define HCFG_FSLSPCLKSEL_SHIFT 0
+#define HCFG_FSLSPCLKSEL_30_60_MHZ 0
+#define HCFG_FSLSPCLKSEL_48_MHZ 1
+#define HCFG_FSLSPCLKSEL_6_MHZ 2
+
+#define HFIR HSOTG_REG(0x0404)
+#define HFIR_FRINT_MASK (0xffff << 0)
+#define HFIR_FRINT_SHIFT 0
+#define HFIR_RLDCTRL BIT(16)
+
+#define HFNUM HSOTG_REG(0x0408)
+#define HFNUM_FRREM_MASK (0xffff << 16)
+#define HFNUM_FRREM_SHIFT 16
+#define HFNUM_FRNUM_MASK (0xffff << 0)
+#define HFNUM_FRNUM_SHIFT 0
+#define HFNUM_MAX_FRNUM 0x3fff
+
+#define HPTXSTS HSOTG_REG(0x0410)
+#define TXSTS_QTOP_ODD BIT(31)
+#define TXSTS_QTOP_CHNEP_MASK (0xf << 27)
+#define TXSTS_QTOP_CHNEP_SHIFT 27
+#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
+#define TXSTS_QTOP_TOKEN_SHIFT 25
+#define TXSTS_QTOP_TERMINATE BIT(24)
+#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
+#define TXSTS_QSPCAVAIL_SHIFT 16
+#define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
+#define TXSTS_FSPCAVAIL_SHIFT 0
+
+#define HAINT HSOTG_REG(0x0414)
+#define HAINTMSK HSOTG_REG(0x0418)
+#define HFLBADDR HSOTG_REG(0x041c)
+
+#define HPRT0 HSOTG_REG(0x0440)
+#define HPRT0_SPD_MASK (0x3 << 17)
+#define HPRT0_SPD_SHIFT 17
+#define HPRT0_SPD_HIGH_SPEED 0
+#define HPRT0_SPD_FULL_SPEED 1
+#define HPRT0_SPD_LOW_SPEED 2
+#define HPRT0_TSTCTL_MASK (0xf << 13)
+#define HPRT0_TSTCTL_SHIFT 13
+#define HPRT0_PWR BIT(12)
+#define HPRT0_LNSTS_MASK (0x3 << 10)
+#define HPRT0_LNSTS_SHIFT 10
+#define HPRT0_RST BIT(8)
+#define HPRT0_SUSP BIT(7)
+#define HPRT0_RES BIT(6)
+#define HPRT0_OVRCURRCHG BIT(5)
+#define HPRT0_OVRCURRACT BIT(4)
+#define HPRT0_ENACHG BIT(3)
+#define HPRT0_ENA BIT(2)
+#define HPRT0_CONNDET BIT(1)
+#define HPRT0_CONNSTS BIT(0)
+
+#define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch))
+#define HCCHAR_CHENA BIT(31)
+#define HCCHAR_CHDIS BIT(30)
+#define HCCHAR_ODDFRM BIT(29)
+#define HCCHAR_DEVADDR_MASK (0x7f << 22)
+#define HCCHAR_DEVADDR_SHIFT 22
+#define HCCHAR_MULTICNT_MASK (0x3 << 20)
+#define HCCHAR_MULTICNT_SHIFT 20
+#define HCCHAR_EPTYPE_MASK (0x3 << 18)
+#define HCCHAR_EPTYPE_SHIFT 18
+#define HCCHAR_LSPDDEV BIT(17)
+#define HCCHAR_EPDIR BIT(15)
+#define HCCHAR_EPNUM_MASK (0xf << 11)
+#define HCCHAR_EPNUM_SHIFT 11
+#define HCCHAR_MPS_MASK (0x7ff << 0)
+#define HCCHAR_MPS_SHIFT 0
+
+#define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch))
+#define HCSPLT_SPLTENA BIT(31)
+#define HCSPLT_COMPSPLT BIT(16)
+#define HCSPLT_XACTPOS_MASK (0x3 << 14)
+#define HCSPLT_XACTPOS_SHIFT 14
+#define HCSPLT_XACTPOS_MID 0
+#define HCSPLT_XACTPOS_END 1
+#define HCSPLT_XACTPOS_BEGIN 2
+#define HCSPLT_XACTPOS_ALL 3
+#define HCSPLT_HUBADDR_MASK (0x7f << 7)
+#define HCSPLT_HUBADDR_SHIFT 7
+#define HCSPLT_PRTADDR_MASK (0x7f << 0)
+#define HCSPLT_PRTADDR_SHIFT 0
+
+#define HCINT(_ch) HSOTG_REG(0x0508 + 0x20 * (_ch))
+#define HCINTMSK(_ch) HSOTG_REG(0x050c + 0x20 * (_ch))
+#define HCINTMSK_RESERVED14_31 (0x3ffff << 14)
+#define HCINTMSK_FRM_LIST_ROLL BIT(13)
+#define HCINTMSK_XCS_XACT BIT(12)
+#define HCINTMSK_BNA BIT(11)
+#define HCINTMSK_DATATGLERR BIT(10)
+#define HCINTMSK_FRMOVRUN BIT(9)
+#define HCINTMSK_BBLERR BIT(8)
+#define HCINTMSK_XACTERR BIT(7)
+#define HCINTMSK_NYET BIT(6)
+#define HCINTMSK_ACK BIT(5)
+#define HCINTMSK_NAK BIT(4)
+#define HCINTMSK_STALL BIT(3)
+#define HCINTMSK_AHBERR BIT(2)
+#define HCINTMSK_CHHLTD BIT(1)
+#define HCINTMSK_XFERCOMPL BIT(0)
+
+#define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch))
+#define TSIZ_DOPNG BIT(31)
+#define TSIZ_SC_MC_PID_MASK (0x3 << 29)
+#define TSIZ_SC_MC_PID_SHIFT 29
+#define TSIZ_SC_MC_PID_DATA0 0
+#define TSIZ_SC_MC_PID_DATA2 1
+#define TSIZ_SC_MC_PID_DATA1 2
+#define TSIZ_SC_MC_PID_MDATA 3
+#define TSIZ_SC_MC_PID_SETUP 3
+#define TSIZ_PKTCNT_MASK (0x3ff << 19)
+#define TSIZ_PKTCNT_SHIFT 19
+#define TSIZ_NTD_MASK (0xff << 8)
+#define TSIZ_NTD_SHIFT 8
+#define TSIZ_SCHINFO_MASK (0xff << 0)
+#define TSIZ_SCHINFO_SHIFT 0
+#define TSIZ_XFERSIZE_MASK (0x7ffff << 0)
+#define TSIZ_XFERSIZE_SHIFT 0
+
+#define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch))
+
+#define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch))
+
+#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch))
+
+/**
+ * struct dwc2_dma_desc - DMA descriptor structure,
+ * used for both host and gadget modes
+ *
+ * @status: DMA descriptor status quadlet
+ * @buf: DMA descriptor data buffer pointer
+ *
+ * DMA Descriptor structure contains two quadlets:
+ * Status quadlet and Data buffer pointer.
+ */
+struct dwc2_dma_desc {
+ uint32_t status;
+ uint32_t buf;
+} __packed;
+
+/* Host Mode DMA descriptor status quadlet */
+
+#define HOST_DMA_A BIT(31)
+#define HOST_DMA_STS_MASK (0x3 << 28)
+#define HOST_DMA_STS_SHIFT 28
+#define HOST_DMA_STS_PKTERR BIT(28)
+#define HOST_DMA_EOL BIT(26)
+#define HOST_DMA_IOC BIT(25)
+#define HOST_DMA_SUP BIT(24)
+#define HOST_DMA_ALT_QTD BIT(23)
+#define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17)
+#define HOST_DMA_QTD_OFFSET_SHIFT 17
+#define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0)
+#define HOST_DMA_ISOC_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_MASK (0x1ffff << 0)
+#define HOST_DMA_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_LIMIT 131071
+
+/* Device Mode DMA descriptor status quadlet */
+
+#define DEV_DMA_BUFF_STS_MASK (0x3 << 30)
+#define DEV_DMA_BUFF_STS_SHIFT 30
+#define DEV_DMA_BUFF_STS_HREADY 0
+#define DEV_DMA_BUFF_STS_DMABUSY 1
+#define DEV_DMA_BUFF_STS_DMADONE 2
+#define DEV_DMA_BUFF_STS_HBUSY 3
+#define DEV_DMA_STS_MASK (0x3 << 28)
+#define DEV_DMA_STS_SHIFT 28
+#define DEV_DMA_STS_SUCC 0
+#define DEV_DMA_STS_BUFF_FLUSH 1
+#define DEV_DMA_STS_BUFF_ERR 3
+#define DEV_DMA_L BIT(27)
+#define DEV_DMA_SHORT BIT(26)
+#define DEV_DMA_IOC BIT(25)
+#define DEV_DMA_SR BIT(24)
+#define DEV_DMA_MTRF BIT(23)
+#define DEV_DMA_ISOC_PID_MASK (0x3 << 23)
+#define DEV_DMA_ISOC_PID_SHIFT 23
+#define DEV_DMA_ISOC_PID_DATA0 0
+#define DEV_DMA_ISOC_PID_DATA2 1
+#define DEV_DMA_ISOC_PID_DATA1 2
+#define DEV_DMA_ISOC_PID_MDATA 3
+#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12)
+#define DEV_DMA_ISOC_FRNUM_SHIFT 12
+#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0)
+#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff
+#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0)
+#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff
+#define DEV_DMA_ISOC_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_MASK (0xffff << 0)
+#define DEV_DMA_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_LIMIT 0xffff
+
+#define MAX_DMA_DESC_NUM_GENERIC 64
+#define MAX_DMA_DESC_NUM_HS_ISOC 256
+
+#endif /* DWC2_REGS_H */
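
To make the host-mode descriptor layout concrete, here is a hedged sketch of
filling one in with the HOST_DMA_* fields defined above; the helper name and the
particular flag choices are illustrative assumptions.

    static void dwc2_fill_host_desc(struct dwc2_dma_desc *desc,
                                    uint32_t buf_addr, uint32_t nbytes,
                                    bool last)
    {
        uint32_t status = HOST_DMA_A;                /* descriptor is active */

        status |= nbytes & HOST_DMA_NBYTES_MASK;     /* transfer byte count */
        if (last) {
            status |= HOST_DMA_EOL | HOST_DMA_IOC;   /* end of list, IRQ on completion */
        }

        desc->status = status;
        desc->buf = buf_addr;
    }
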
diff --git a/include/hw/usb/hcd-dwc3.h b/include/hw/usb/hcd-dwc3.h
new file mode 100644
index 0000000000..f752a27e94
--- /dev/null
+++ b/include/hw/usb/hcd-dwc3.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU model of the USB DWC3 host controller emulation.
+ *
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Written by Vikram Garhwal<fnu.vikram@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef HCD_DWC3_H
+#define HCD_DWC3_H
+
+#include "hw/register.h"
+#include "hw/usb/hcd-xhci.h"
+#include "hw/usb/hcd-xhci-sysbus.h"
+
+#define TYPE_USB_DWC3 "usb_dwc3"
+
+#define USB_DWC3(obj) \
+ OBJECT_CHECK(USBDWC3, (obj), TYPE_USB_DWC3)
+
+#define USB_DWC3_R_MAX ((0x530 / 4) + 1)
+#define DWC3_SIZE 0x10000
+
+typedef struct USBDWC3 {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ XHCISysbusState sysbus_xhci;
+
+ uint32_t regs[USB_DWC3_R_MAX];
+ RegisterInfo regs_info[USB_DWC3_R_MAX];
+
+ struct {
+ uint8_t mode;
+ uint32_t dwc_usb3_user;
+ } cfg;
+
+} USBDWC3;
+
+#endif
diff --git a/include/hw/usb/hcd-musb.h b/include/hw/usb/hcd-musb.h
new file mode 100644
index 0000000000..4d4b1ec0fc
--- /dev/null
+++ b/include/hw/usb/hcd-musb.h
@@ -0,0 +1,49 @@
+/*
+ * "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics,
+ * USB2.0 OTG compliant core used in various chips.
+ *
+ * Only host-mode and non-DMA accesses are currently supported.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_USB_HCD_MUSB_H
+#define HW_USB_HCD_MUSB_H
+
+#include "exec/hwaddr.h"
+
+enum musb_irq_source_e {
+ musb_irq_suspend = 0,
+ musb_irq_resume,
+ musb_irq_rst_babble,
+ musb_irq_sof,
+ musb_irq_connect,
+ musb_irq_disconnect,
+ musb_irq_vbus_request,
+ musb_irq_vbus_error,
+ musb_irq_rx,
+ musb_irq_tx,
+ musb_set_vbus,
+ musb_set_session,
+ /* Add new interrupts here */
+ musb_irq_max /* total number of interrupts defined */
+};
+
+/* TODO convert hcd-musb to QOM/qdev and remove MUSBReadFunc/MUSBWriteFunc */
+typedef void MUSBWriteFunc(void *opaque, hwaddr addr, uint32_t value);
+typedef uint32_t MUSBReadFunc(void *opaque, hwaddr addr);
+extern MUSBReadFunc * const musb_read[];
+extern MUSBWriteFunc * const musb_write[];
+
+typedef struct MUSBState MUSBState;
+
+MUSBState *musb_init(DeviceState *parent_device, int gpio_base);
+void musb_reset(MUSBState *s);
+uint32_t musb_core_intr_get(MUSBState *s);
+void musb_core_intr_clear(MUSBState *s, uint32_t mask);
+void musb_set_size(MUSBState *s, int epnum, int size, int is_tx);
+
+#endif
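Editor's note: board code uses the musb_read[]/musb_write[] tables to forward MMIO accesses into the MUSB core until the QOM/qdev conversion mentioned in the TODO happens. A rough sketch of a wrapper read callback, assuming the tables are indexed by access width (0 = byte, 1 = halfword, 2 = word) as the existing board glue suggests; the wrapper itself is hypothetical:

static uint64_t my_musb_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    MUSBState *musb = opaque;

    switch (size) {
    case 1:
        return musb_read[0](musb, addr);   /* 8-bit access */
    case 2:
        return musb_read[1](musb, addr);   /* 16-bit access */
    default:
        return musb_read[2](musb, addr);   /* 32-bit access */
    }
}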
diff --git a/include/hw/usb/hid.h b/include/hw/usb/hid.h
new file mode 100644
index 0000000000..1c142584ff
--- /dev/null
+++ b/include/hw/usb/hid.h
@@ -0,0 +1,17 @@
+#ifndef HW_USB_HID_H
+#define HW_USB_HID_H
+
+/* HID interface requests */
+#define HID_GET_REPORT 0xa101
+#define HID_GET_IDLE 0xa102
+#define HID_GET_PROTOCOL 0xa103
+#define HID_SET_REPORT 0x2109
+#define HID_SET_IDLE 0x210a
+#define HID_SET_PROTOCOL 0x210b
+
+/* HID descriptor types */
+#define USB_DT_HID 0x21
+#define USB_DT_REPORT 0x22
+#define USB_DT_PHY 0x23
+
+#endif
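Editor's note: these request values pack bmRequestType into the high byte and bRequest into the low byte, which is how QEMU's USB core folds a SETUP packet into the single 'request' argument of a device's handle_control hook. A hypothetical fragment of a HID-class control handler using them (assumes "hw/usb.h"; this is not the in-tree HID code):

static void my_hid_handle_control(USBDevice *dev, USBPacket *p, int request,
                                  int value, int index, int length,
                                  uint8_t *data)
{
    switch (request) {
    case HID_GET_IDLE:
        data[0] = 0;                /* report an idle rate of 0, for example */
        p->actual_length = 1;
        break;
    case HID_SET_IDLE:
        /* the new idle rate arrives in the high byte of 'value' */
        break;
    default:
        p->status = USB_RET_STALL;  /* unsupported request */
        break;
    }
}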
diff --git a/include/hw/usb/imx-usb-phy.h b/include/hw/usb/imx-usb-phy.h
new file mode 100644
index 0000000000..d1e867b77a
--- /dev/null
+++ b/include/hw/usb/imx-usb-phy.h
@@ -0,0 +1,54 @@
+#ifndef IMX_USB_PHY_H
+#define IMX_USB_PHY_H
+
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+#include "qom/object.h"
+
+enum IMXUsbPhyRegisters {
+ USBPHY_PWD,
+ USBPHY_PWD_SET,
+ USBPHY_PWD_CLR,
+ USBPHY_PWD_TOG,
+ USBPHY_TX,
+ USBPHY_TX_SET,
+ USBPHY_TX_CLR,
+ USBPHY_TX_TOG,
+ USBPHY_RX,
+ USBPHY_RX_SET,
+ USBPHY_RX_CLR,
+ USBPHY_RX_TOG,
+ USBPHY_CTRL,
+ USBPHY_CTRL_SET,
+ USBPHY_CTRL_CLR,
+ USBPHY_CTRL_TOG,
+ USBPHY_STATUS,
+ USBPHY_DEBUG = 0x14,
+ USBPHY_DEBUG_SET,
+ USBPHY_DEBUG_CLR,
+ USBPHY_DEBUG_TOG,
+ USBPHY_DEBUG0_STATUS,
+ USBPHY_DEBUG1 = 0x1c,
+ USBPHY_DEBUG1_SET,
+ USBPHY_DEBUG1_CLR,
+ USBPHY_DEBUG1_TOG,
+ USBPHY_VERSION,
+ USBPHY_MAX
+};
+
+#define USBPHY_CTRL_SFTRST BIT(31)
+
+#define TYPE_IMX_USBPHY "imx.usbphy"
+OBJECT_DECLARE_SIMPLE_TYPE(IMXUSBPHYState, IMX_USBPHY)
+
+struct IMXUSBPHYState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+
+ uint32_t usbphy[USBPHY_MAX];
+};
+
+#endif /* IMX_USB_PHY_H */
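Editor's note: most registers in the enum come in groups of four, the base register followed by _SET, _CLR and _TOG aliases. This is the usual i.MX convention where a write to an alias sets, clears or toggles the written bits instead of replacing the register, letting drivers avoid read-modify-write sequences. A simplified sketch of that write path; the in-tree model may differ in detail:

static void usbphy_reg_write(IMXUSBPHYState *s, int base, int alias,
                             uint32_t value)
{
    switch (alias) {
    case 0:                         /* direct write to the base register */
        s->usbphy[base] = value;
        break;
    case 1:                         /* _SET: OR in the written bits */
        s->usbphy[base] |= value;
        break;
    case 2:                         /* _CLR: clear the written bits */
        s->usbphy[base] &= ~value;
        break;
    case 3:                         /* _TOG: toggle the written bits */
        s->usbphy[base] ^= value;
        break;
    }
}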
diff --git a/include/hw/usb/msd.h b/include/hw/usb/msd.h
new file mode 100644
index 0000000000..f9fd862b52
--- /dev/null
+++ b/include/hw/usb/msd.h
@@ -0,0 +1,55 @@
+/*
+ * USB Mass Storage Device emulation
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the LGPL.
+ */
+
+#include "hw/usb.h"
+#include "hw/scsi/scsi.h"
+
+enum USBMSDMode {
+ USB_MSDM_CBW, /* Command Block. */
+ USB_MSDM_DATAOUT, /* Transfer data to device. */
+ USB_MSDM_DATAIN, /* Transfer data from device. */
+ USB_MSDM_CSW /* Command Status. */
+};
+
+struct QEMU_PACKED usb_msd_csw {
+ uint32_t sig;
+ uint32_t tag;
+ uint32_t residue;
+ uint8_t status;
+};
+
+struct MSDState {
+ USBDevice dev;
+ enum USBMSDMode mode;
+ uint32_t scsi_off;
+ uint32_t scsi_len;
+ uint32_t data_len;
+ struct usb_msd_csw csw;
+ SCSIRequest *req;
+ SCSIBus bus;
+ /* For async completion. */
+ USBPacket *packet;
+ /* usb-storage only */
+ BlockConf conf;
+ bool removable;
+ bool commandlog;
+ SCSIDevice *scsi_dev;
+ bool needs_reset;
+};
+
+typedef struct MSDState MSDState;
+#define TYPE_USB_STORAGE "usb-storage-dev"
+DECLARE_INSTANCE_CHECKER(MSDState, USB_STORAGE_DEV,
+ TYPE_USB_STORAGE)
+
+void usb_msd_transfer_data(SCSIRequest *req, uint32_t len);
+void usb_msd_command_complete(SCSIRequest *req, size_t resid);
+void usb_msd_request_cancelled(SCSIRequest *req);
+void *usb_msd_load_request(QEMUFile *f, SCSIRequest *req);
+void usb_msd_handle_reset(USBDevice *dev);
diff --git a/include/hw/usb/xhci.h b/include/hw/usb/xhci.h
new file mode 100644
index 0000000000..5c90e1373e
--- /dev/null
+++ b/include/hw/usb/xhci.h
@@ -0,0 +1,21 @@
+#ifndef HW_USB_XHCI_H
+#define HW_USB_XHCI_H
+
+#define TYPE_XHCI "base-xhci"
+#define TYPE_NEC_XHCI "nec-usb-xhci"
+#define TYPE_QEMU_XHCI "qemu-xhci"
+#define TYPE_XHCI_SYSBUS "sysbus-xhci"
+
+#define XHCI_MAXPORTS_2 15
+#define XHCI_MAXPORTS_3 15
+
+#define XHCI_MAXPORTS (XHCI_MAXPORTS_2 + XHCI_MAXPORTS_3)
+#define XHCI_MAXSLOTS 64
+#define XHCI_MAXINTRS 16
+
+/* must be power of 2 */
+#define XHCI_LEN_REGS 0x4000
+
+void xhci_sysbus_build_aml(Aml *scope, uint32_t mmio, unsigned int irq);
+
+#endif
diff --git a/include/hw/usb/xlnx-usb-subsystem.h b/include/hw/usb/xlnx-usb-subsystem.h
new file mode 100644
index 0000000000..40f9e97e09
--- /dev/null
+++ b/include/hw/usb/xlnx-usb-subsystem.h
@@ -0,0 +1,47 @@
+/*
+ * QEMU model of the Xilinx USB subsystem
+ *
+ * Copyright (c) 2020 Xilinx Inc. Sai Pavan Boddu <sai.pavan.boddu@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_USB_SUBSYSTEM_H
+#define XLNX_USB_SUBSYSTEM_H
+
+#include "hw/register.h"
+#include "hw/sysbus.h"
+#include "hw/usb/xlnx-versal-usb2-ctrl-regs.h"
+#include "hw/usb/hcd-dwc3.h"
+
+#define TYPE_XILINX_VERSAL_USB2 "xlnx.versal-usb2"
+
+#define VERSAL_USB2(obj) \
+ OBJECT_CHECK(VersalUsb2, (obj), TYPE_XILINX_VERSAL_USB2)
+
+typedef struct VersalUsb2 {
+ SysBusDevice parent_obj;
+ MemoryRegion dwc3_mr;
+ MemoryRegion usb2Ctrl_mr;
+
+ VersalUsb2CtrlRegs usb2Ctrl;
+ USBDWC3 dwc3;
+} VersalUsb2;
+
+#endif
diff --git a/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h
new file mode 100644
index 0000000000..6a502006b0
--- /dev/null
+++ b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU model of the VersalUsb2CtrlRegs register control/status block for the
+ * USB2.0 controller
+ *
+ * Copyright (c) 2020 Xilinx Inc. Vikram Garhwal <fnu.vikram@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_VERSAL_USB2_CTRL_REGS_H
+#define XLNX_VERSAL_USB2_CTRL_REGS_H
+
+#include "hw/register.h"
+#include "hw/sysbus.h"
+
+#define TYPE_XILINX_VERSAL_USB2_CTRL_REGS "xlnx.versal-usb2-ctrl-regs"
+
+#define XILINX_VERSAL_USB2_CTRL_REGS(obj) \
+ OBJECT_CHECK(VersalUsb2CtrlRegs, (obj), TYPE_XILINX_VERSAL_USB2_CTRL_REGS)
+
+#define USB2_REGS_R_MAX ((0x78 / 4) + 1)
+
+typedef struct VersalUsb2CtrlRegs {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq_ir;
+
+ uint32_t regs[USB2_REGS_R_MAX];
+ RegisterInfo regs_info[USB2_REGS_R_MAX];
+} VersalUsb2CtrlRegs;
+
+#endif
diff --git a/include/hw/vfio/vfio-amd-xgbe.h b/include/hw/vfio/vfio-amd-xgbe.h
index 9fff65e99d..a894546c02 100644
--- a/include/hw/vfio/vfio-amd-xgbe.h
+++ b/include/hw/vfio/vfio-amd-xgbe.h
@@ -15,6 +15,7 @@
#define HW_VFIO_VFIO_AMD_XGBE_H
#include "hw/vfio/vfio-platform.h"
+#include "qom/object.h"
#define TYPE_VFIO_AMD_XGBE "vfio-amd-xgbe"
@@ -39,13 +40,7 @@ struct VFIOAmdXgbeDeviceClass {
typedef struct VFIOAmdXgbeDeviceClass VFIOAmdXgbeDeviceClass;
-#define VFIO_AMD_XGBE_DEVICE(obj) \
- OBJECT_CHECK(VFIOAmdXgbeDevice, (obj), TYPE_VFIO_AMD_XGBE)
-#define VFIO_AMD_XGBE_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VFIOAmdXgbeDeviceClass, (klass), \
- TYPE_VFIO_AMD_XGBE)
-#define VFIO_AMD_XGBE_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VFIOAmdXgbeDeviceClass, (obj), \
- TYPE_VFIO_AMD_XGBE)
+DECLARE_OBJ_CHECKERS(VFIOAmdXgbeDevice, VFIOAmdXgbeDeviceClass,
+ VFIO_AMD_XGBE_DEVICE, TYPE_VFIO_AMD_XGBE)
#endif
diff --git a/include/hw/vfio/vfio-calxeda-xgmac.h b/include/hw/vfio/vfio-calxeda-xgmac.h
index f994775c09..8482f151dd 100644
--- a/include/hw/vfio/vfio-calxeda-xgmac.h
+++ b/include/hw/vfio/vfio-calxeda-xgmac.h
@@ -15,6 +15,7 @@
#define HW_VFIO_VFIO_CALXEDA_XGMAC_H
#include "hw/vfio/vfio-platform.h"
+#include "qom/object.h"
#define TYPE_VFIO_CALXEDA_XGMAC "vfio-calxeda-xgmac"
@@ -23,24 +24,20 @@
* - a single MMIO region corresponding to its register space
* - 3 IRQS (main and 2 power related IRQs)
*/
-typedef struct VFIOCalxedaXgmacDevice {
+struct VFIOCalxedaXgmacDevice {
VFIOPlatformDevice vdev;
-} VFIOCalxedaXgmacDevice;
+};
+typedef struct VFIOCalxedaXgmacDevice VFIOCalxedaXgmacDevice;
-typedef struct VFIOCalxedaXgmacDeviceClass {
+struct VFIOCalxedaXgmacDeviceClass {
/*< private >*/
VFIOPlatformDeviceClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
-} VFIOCalxedaXgmacDeviceClass;
+};
+typedef struct VFIOCalxedaXgmacDeviceClass VFIOCalxedaXgmacDeviceClass;
-#define VFIO_CALXEDA_XGMAC_DEVICE(obj) \
- OBJECT_CHECK(VFIOCalxedaXgmacDevice, (obj), TYPE_VFIO_CALXEDA_XGMAC)
-#define VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VFIOCalxedaXgmacDeviceClass, (klass), \
- TYPE_VFIO_CALXEDA_XGMAC)
-#define VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VFIOCalxedaXgmacDeviceClass, (obj), \
- TYPE_VFIO_CALXEDA_XGMAC)
+DECLARE_OBJ_CHECKERS(VFIOCalxedaXgmacDevice, VFIOCalxedaXgmacDeviceClass,
+ VFIO_CALXEDA_XGMAC_DEVICE, TYPE_VFIO_CALXEDA_XGMAC)
#endif
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 7624c9f511..b9da6c08ef 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -21,7 +21,6 @@
#ifndef HW_VFIO_VFIO_COMMON_H
#define HW_VFIO_VFIO_COMMON_H
-#include "qemu-common.h"
#include "exec/memory.h"
#include "qemu/queue.h"
#include "qemu/notify.h"
@@ -30,6 +29,8 @@
#ifdef CONFIG_LINUX
#include <linux/vfio.h>
#endif
+#include "sysemu/sysemu.h"
+#include "hw/vfio/vfio-container-base.h"
#define VFIO_MSG_PREFIX "vfio %s: "
@@ -58,42 +59,29 @@ typedef struct VFIORegion {
uint8_t nr; /* cache the region number for debug */
} VFIORegion;
-typedef struct VFIOAddressSpace {
- AddressSpace *as;
- QLIST_HEAD(, VFIOContainer) containers;
- QLIST_ENTRY(VFIOAddressSpace) list;
-} VFIOAddressSpace;
+typedef struct VFIOMigration {
+ struct VFIODevice *vbasedev;
+ VMChangeStateEntry *vm_state;
+ NotifierWithReturn migration_state;
+ uint32_t device_state;
+ int data_fd;
+ void *data_buffer;
+ size_t data_buffer_size;
+ uint64_t mig_flags;
+ uint64_t precopy_init_size;
+ uint64_t precopy_dirty_size;
+ bool initial_data_sent;
+} VFIOMigration;
struct VFIOGroup;
typedef struct VFIOContainer {
- VFIOAddressSpace *space;
+ VFIOContainerBase bcontainer;
int fd; /* /dev/vfio/vfio, empowered by the attached groups */
- MemoryListener listener;
- MemoryListener prereg_listener;
unsigned iommu_type;
- int error;
- bool initialized;
- unsigned long pgsizes;
- /*
- * This assumes the host IOMMU can support only a single
- * contiguous IOVA window. We may need to generalize that in
- * future
- */
- QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
- QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
QLIST_HEAD(, VFIOGroup) group_list;
- QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
-typedef struct VFIOGuestIOMMU {
- VFIOContainer *container;
- IOMMUMemoryRegion *iommu;
- hwaddr iommu_offset;
- IOMMUNotifier n;
- QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
-} VFIOGuestIOMMU;
-
typedef struct VFIOHostDMAWindow {
hwaddr min_iova;
hwaddr max_iova;
@@ -101,11 +89,22 @@ typedef struct VFIOHostDMAWindow {
QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next;
} VFIOHostDMAWindow;
+typedef struct IOMMUFDBackend IOMMUFDBackend;
+
+typedef struct VFIOIOMMUFDContainer {
+ VFIOContainerBase bcontainer;
+ IOMMUFDBackend *be;
+ uint32_t ioas_id;
+} VFIOIOMMUFDContainer;
+
typedef struct VFIODeviceOps VFIODeviceOps;
typedef struct VFIODevice {
QLIST_ENTRY(VFIODevice) next;
+ QLIST_ENTRY(VFIODevice) container_next;
+ QLIST_ENTRY(VFIODevice) global_next;
struct VFIOGroup *group;
+ VFIOContainerBase *bcontainer;
char *sysfsdev;
char *name;
DeviceState *dev;
@@ -114,17 +113,28 @@ typedef struct VFIODevice {
bool reset_works;
bool needs_reset;
bool no_mmap;
- bool balloon_allowed;
+ bool ram_block_discard_allowed;
+ OnOffAuto enable_migration;
VFIODeviceOps *ops;
unsigned int num_irqs;
unsigned int num_regions;
unsigned int flags;
+ VFIOMigration *migration;
+ Error *migration_blocker;
+ OnOffAuto pre_copy_dirty_page_tracking;
+ bool dirty_pages_supported;
+ bool dirty_tracking;
+ int devid;
+ IOMMUFDBackend *iommufd;
} VFIODevice;
struct VFIODeviceOps {
void (*vfio_compute_needs_reset)(VFIODevice *vdev);
int (*vfio_hot_reset_multi)(VFIODevice *vdev);
void (*vfio_eoi)(VFIODevice *vdev);
+ Object *(*vfio_get_object)(VFIODevice *vdev);
+ void (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f);
+ int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f);
};
typedef struct VFIOGroup {
@@ -134,7 +144,7 @@ typedef struct VFIOGroup {
QLIST_HEAD(, VFIODevice) device_list;
QLIST_ENTRY(VFIOGroup) next;
QLIST_ENTRY(VFIOGroup) container_next;
- bool balloon_allowed;
+ bool ram_block_discard_allowed;
} VFIOGroup;
typedef struct VFIODMABuf {
@@ -148,6 +158,10 @@ typedef struct VFIODMABuf {
typedef struct VFIODisplay {
QemuConsole *con;
RAMFBState *ramfb;
+ struct vfio_region_info *edid_info;
+ struct vfio_region_gfx_edid *edid_regs;
+ uint8_t *edid_blob;
+ QEMUTimer *edid_link_timer;
struct {
VFIORegion buffer;
DisplaySurface *surface;
@@ -159,10 +173,18 @@ typedef struct VFIODisplay {
} dmabuf;
} VFIODisplay;
-void vfio_put_base_device(VFIODevice *vbasedev);
+VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
+void vfio_put_address_space(VFIOAddressSpace *space);
+
+/* SPAPR specific */
+int vfio_spapr_container_init(VFIOContainer *container, Error **errp);
+void vfio_spapr_container_deinit(VFIOContainer *container);
+
void vfio_disable_irqindex(VFIODevice *vbasedev, int index);
void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index);
void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index);
+int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
+ int action, int fd, Error **errp);
void vfio_region_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size);
uint64_t vfio_region_read(void *opaque,
@@ -171,17 +193,37 @@ int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
int index, const char *name);
int vfio_region_mmap(VFIORegion *region);
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled);
+void vfio_region_unmap(VFIORegion *region);
void vfio_region_exit(VFIORegion *region);
void vfio_region_finalize(VFIORegion *region);
void vfio_reset_handler(void *opaque);
-VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp);
-void vfio_put_group(VFIOGroup *group);
-int vfio_get_device(VFIOGroup *group, const char *name,
- VFIODevice *vbasedev, Error **errp);
+struct vfio_device_info *vfio_get_device_info(int fd);
+int vfio_attach_device(char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp);
+void vfio_detach_device(VFIODevice *vbasedev);
+
+int vfio_kvm_device_add_fd(int fd, Error **errp);
+int vfio_kvm_device_del_fd(int fd, Error **errp);
+
+int vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp);
+void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer);
extern const MemoryRegionOps vfio_region_ops;
typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
+typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList;
extern VFIOGroupList vfio_group_list;
+extern VFIODeviceList vfio_device_list;
+extern const MemoryListener vfio_memory_listener;
+extern int vfio_kvm_device_fd;
+
+bool vfio_mig_active(void);
+int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp);
+void vfio_unblock_multiple_devices_migration(void);
+bool vfio_viommu_preset(VFIODevice *vbasedev);
+int64_t vfio_mig_bytes_transferred(void);
+void vfio_reset_bytes_transferred(void);
+bool vfio_device_state_is_running(VFIODevice *vbasedev);
+bool vfio_device_state_is_precopy(VFIODevice *vbasedev);
#ifdef CONFIG_LINUX
int vfio_get_region_info(VFIODevice *vbasedev, int index,
@@ -189,13 +231,33 @@ int vfio_get_region_info(VFIODevice *vbasedev, int index,
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
uint32_t subtype, struct vfio_region_info **info);
bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type);
+struct vfio_info_cap_header *
+vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id);
+bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
+ unsigned int *avail);
+struct vfio_info_cap_header *
+vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id);
+struct vfio_info_cap_header *
+vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id);
#endif
-extern const MemoryListener vfio_prereg_listener;
-int vfio_spapr_create_window(VFIOContainer *container,
- MemoryRegionSection *section,
- hwaddr *pgsize);
-int vfio_spapr_remove_window(VFIOContainer *container,
- hwaddr offset_within_address_space);
+bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
+void vfio_migration_exit(VFIODevice *vbasedev);
+
+int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size);
+bool
+vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer);
+bool
+vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer);
+int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap, hwaddr iova,
+ hwaddr size);
+int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr);
+/* Returns 0 on success, or a negative errno. */
+int vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
+void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp);
+void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
+ DeviceState *dev, bool ram_discard);
#endif /* HW_VFIO_VFIO_COMMON_H */
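Editor's note: vfio_attach_device()/vfio_detach_device() replace the old group-centric vfio_get_group()/vfio_get_device() pair and hide the container backend behind VFIODevice. A hedged sketch of how a device model's realize path might chain them with vfio_device_get_name(); the helper is hypothetical and assumes "exec/address-spaces.h" for address_space_memory:

static void my_vfio_realize(VFIODevice *vbasedev, Error **errp)
{
    /* Derive vbasedev->name from the sysfsdev/fd properties. */
    if (vfio_device_get_name(vbasedev, errp) < 0) {
        return;
    }

    /* Pick (or create) a container for the address space and attach. */
    if (vfio_attach_device(vbasedev->name, vbasedev,
                           &address_space_memory, errp) < 0) {
        return;
    }

    /* ... region mapping and interrupt setup would follow here ... */
}

/* The matching unrealize path would call vfio_detach_device(vbasedev). */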
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
new file mode 100644
index 0000000000..3582d5f97a
--- /dev/null
+++ b/include/hw/vfio/vfio-container-base.h
@@ -0,0 +1,141 @@
+/*
+ * VFIO BASE CONTAINER
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ * Eric Auger <eric.auger@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_CONTAINER_BASE_H
+#define HW_VFIO_VFIO_CONTAINER_BASE_H
+
+#include "exec/memory.h"
+
+typedef struct VFIODevice VFIODevice;
+typedef struct VFIOIOMMUClass VFIOIOMMUClass;
+
+typedef struct {
+ unsigned long *bitmap;
+ hwaddr size;
+ hwaddr pages;
+} VFIOBitmap;
+
+typedef struct VFIOAddressSpace {
+ AddressSpace *as;
+ QLIST_HEAD(, VFIOContainerBase) containers;
+ QLIST_ENTRY(VFIOAddressSpace) list;
+} VFIOAddressSpace;
+
+/*
+ * This is the base object for vfio container backends
+ */
+typedef struct VFIOContainerBase {
+ const VFIOIOMMUClass *ops;
+ VFIOAddressSpace *space;
+ MemoryListener listener;
+ Error *error;
+ bool initialized;
+ uint64_t dirty_pgsizes;
+ uint64_t max_dirty_bitmap_size;
+ unsigned long pgsizes;
+ unsigned int dma_max_mappings;
+ bool dirty_pages_supported;
+ QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
+ QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
+ QLIST_ENTRY(VFIOContainerBase) next;
+ QLIST_HEAD(, VFIODevice) device_list;
+ GList *iova_ranges;
+ NotifierWithReturn cpr_reboot_notifier;
+} VFIOContainerBase;
+
+typedef struct VFIOGuestIOMMU {
+ VFIOContainerBase *bcontainer;
+ IOMMUMemoryRegion *iommu_mr;
+ hwaddr iommu_offset;
+ IOMMUNotifier n;
+ QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
+} VFIOGuestIOMMU;
+
+typedef struct VFIORamDiscardListener {
+ VFIOContainerBase *bcontainer;
+ MemoryRegion *mr;
+ hwaddr offset_within_address_space;
+ hwaddr size;
+ uint64_t granularity;
+ RamDiscardListener listener;
+ QLIST_ENTRY(VFIORamDiscardListener) next;
+} VFIORamDiscardListener;
+
+int vfio_container_dma_map(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly);
+int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb);
+int vfio_container_add_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp);
+void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section);
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+ bool start);
+int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size);
+
+void vfio_container_init(VFIOContainerBase *bcontainer,
+ VFIOAddressSpace *space,
+ const VFIOIOMMUClass *ops);
+void vfio_container_destroy(VFIOContainerBase *bcontainer);
+
+
+#define TYPE_VFIO_IOMMU "vfio-iommu"
+#define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy"
+#define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr"
+#define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd"
+
+/*
+ * VFIOContainerBase is not an abstract QOM object because it felt
+ * unnecessary to expose all the IOMMU backends to the QEMU machine
+ * and human interface. However, we can still abstract the IOMMU
+ * backend handlers using a QOM interface class. This provides more
+ * flexibility when referencing the various implementations.
+ */
+DECLARE_CLASS_CHECKERS(VFIOIOMMUClass, VFIO_IOMMU, TYPE_VFIO_IOMMU)
+
+struct VFIOIOMMUClass {
+ InterfaceClass parent_class;
+
+ /* basic feature */
+ int (*setup)(VFIOContainerBase *bcontainer, Error **errp);
+ int (*dma_map)(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly);
+ int (*dma_unmap)(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb);
+ int (*attach_device)(const char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp);
+ void (*detach_device)(VFIODevice *vbasedev);
+ /* migration feature */
+ int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer,
+ bool start);
+ int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size);
+ /* PCI specific */
+ int (*pci_hot_reset)(VFIODevice *vbasedev, bool single);
+
+ /* SPAPR specific */
+ int (*add_window)(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp);
+ void (*del_window)(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section);
+ void (*release)(VFIOContainerBase *bcontainer);
+};
+#endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */
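Editor's note: since the backends are modelled as QOM interface subclasses of TYPE_VFIO_IOMMU, each implementation fills in its VFIOIOMMUClass handlers from a class_init hook. A rough, hypothetical registration sketch in which the my_backend_* handlers are assumed to exist with the signatures declared above; the real legacy, spapr and iommufd backends live under hw/vfio/ and set more handlers than shown:

static void my_vfio_iommu_class_init(ObjectClass *klass, void *data)
{
    VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass);

    vioc->setup = my_backend_setup;
    vioc->dma_map = my_backend_dma_map;
    vioc->dma_unmap = my_backend_dma_unmap;
    vioc->attach_device = my_backend_attach_device;
    vioc->detach_device = my_backend_detach_device;
    /* migration, PCI and SPAPR hooks are optional for this sketch */
}

static const TypeInfo my_vfio_iommu_info = {
    .name = TYPE_VFIO_IOMMU_LEGACY,   /* or a new backend type string */
    .parent = TYPE_VFIO_IOMMU,
    .class_init = my_vfio_iommu_class_init,
};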
diff --git a/include/hw/vfio/vfio-platform.h b/include/hw/vfio/vfio-platform.h
index 30d3c28d3b..c414c3dffc 100644
--- a/include/hw/vfio/vfio-platform.h
+++ b/include/hw/vfio/vfio-platform.h
@@ -20,7 +20,7 @@
#include "hw/vfio/vfio-common.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
-#include "hw/irq.h"
+#include "qom/object.h"
#define TYPE_VFIO_PLATFORM "vfio-platform"
@@ -47,7 +47,7 @@ typedef struct VFIOINTp {
/* function type for user side eventfd handler */
typedef void (*eventfd_user_side_handler_t)(VFIOINTp *intp);
-typedef struct VFIOPlatformDevice {
+struct VFIOPlatformDevice {
SysBusDevice sbdev;
VFIODevice vbasedev; /* not a QOM object */
VFIORegion **regions;
@@ -60,19 +60,17 @@ typedef struct VFIOPlatformDevice {
QEMUTimer *mmap_timer; /* allows fast-path resume after IRQ hit */
QemuMutex intp_mutex; /* protect the intp_list IRQ state */
bool irqfd_allowed; /* debug option to force irqfd on/off */
-} VFIOPlatformDevice;
+};
+typedef struct VFIOPlatformDevice VFIOPlatformDevice;
-typedef struct VFIOPlatformDeviceClass {
+struct VFIOPlatformDeviceClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
-} VFIOPlatformDeviceClass;
+};
+typedef struct VFIOPlatformDeviceClass VFIOPlatformDeviceClass;
-#define VFIO_PLATFORM_DEVICE(obj) \
- OBJECT_CHECK(VFIOPlatformDevice, (obj), TYPE_VFIO_PLATFORM)
-#define VFIO_PLATFORM_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VFIOPlatformDeviceClass, (klass), TYPE_VFIO_PLATFORM)
-#define VFIO_PLATFORM_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VFIOPlatformDeviceClass, (obj), TYPE_VFIO_PLATFORM)
+DECLARE_OBJ_CHECKERS(VFIOPlatformDevice, VFIOPlatformDeviceClass,
+ VFIO_PLATFORM_DEVICE, TYPE_VFIO_PLATFORM)
#endif /* HW_VFIO_VFIO_PLATFORM_H */
diff --git a/include/hw/vfio/vfio.h b/include/hw/vfio/vfio.h
deleted file mode 100644
index 86248f5436..0000000000
--- a/include/hw/vfio/vfio.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef HW_VFIO_H
-#define HW_VFIO_H
-
-bool vfio_eeh_as_ok(AddressSpace *as);
-int vfio_eeh_as_op(AddressSpace *as, uint32_t op);
-
-#endif
diff --git a/include/hw/virtio/vdpa-dev.h b/include/hw/virtio/vdpa-dev.h
new file mode 100644
index 0000000000..4dbf98195c
--- /dev/null
+++ b/include/hw/virtio/vdpa-dev.h
@@ -0,0 +1,43 @@
+/*
+ * Vhost Vdpa Device
+ *
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved.
+ *
+ * Authors:
+ * Longpeng <longpeng2@huawei.com>
+ *
+ * Largely based on the "vhost-user-blk.h" implemented by:
+ * Changpeng Liu <changpeng.liu@intel.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+#ifndef _VHOST_VDPA_DEVICE_H
+#define _VHOST_VDPA_DEVICE_H
+
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-vdpa.h"
+#include "qom/object.h"
+
+
+#define TYPE_VHOST_VDPA_DEVICE "vhost-vdpa-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VhostVdpaDevice, VHOST_VDPA_DEVICE)
+
+struct VhostVdpaDevice {
+ VirtIODevice parent_obj;
+ char *vhostdev;
+ int vhostfd;
+ int32_t bootindex;
+ uint32_t vdev_id;
+ uint32_t num_queues;
+ struct vhost_dev dev;
+ struct vhost_vdpa vdpa;
+ VirtQueue **virtqs;
+ uint8_t *config;
+ int config_size;
+ uint16_t queue_size;
+ bool started;
+ int (*post_init)(VhostVdpaDevice *v, Error **errp);
+};
+
+#endif
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 81283ec50f..70c2e8ffee 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -17,24 +17,42 @@ typedef enum VhostBackendType {
VHOST_BACKEND_TYPE_NONE = 0,
VHOST_BACKEND_TYPE_KERNEL = 1,
VHOST_BACKEND_TYPE_USER = 2,
- VHOST_BACKEND_TYPE_MAX = 3,
+ VHOST_BACKEND_TYPE_VDPA = 3,
+ VHOST_BACKEND_TYPE_MAX = 4,
} VhostBackendType;
typedef enum VhostSetConfigType {
- VHOST_SET_CONFIG_TYPE_MASTER = 0,
+ VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;
+typedef enum VhostDeviceStateDirection {
+ /* Transfer state from back-end (device) to front-end */
+ VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
+ /* Transfer state from front-end to back-end (device) */
+ VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
+} VhostDeviceStateDirection;
+
+typedef enum VhostDeviceStatePhase {
+ /* The device (and all its vrings) is stopped */
+ VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
+} VhostDeviceStatePhase;
+
+struct vhost_inflight;
struct vhost_dev;
struct vhost_log;
struct vhost_memory;
struct vhost_vring_file;
struct vhost_vring_state;
struct vhost_vring_addr;
+struct vhost_vring_worker;
+struct vhost_worker_state;
struct vhost_scsi_target;
struct vhost_iotlb_msg;
+struct vhost_virtqueue;
-typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
+typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque,
+ Error **errp);
typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
@@ -65,12 +83,23 @@ typedef int (*vhost_set_vring_kick_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
typedef int (*vhost_set_vring_call_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
+typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev,
+ struct vhost_vring_file *file);
typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev,
struct vhost_vring_state *r);
+typedef int (*vhost_attach_vring_worker_op)(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker);
+typedef int (*vhost_get_vring_worker_op)(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker);
+typedef int (*vhost_new_worker_op)(struct vhost_dev *dev,
+ struct vhost_worker_state *worker);
+typedef int (*vhost_free_worker_op)(struct vhost_dev *dev,
+ struct vhost_worker_state *worker);
typedef int (*vhost_set_features_op)(struct vhost_dev *dev,
uint64_t features);
typedef int (*vhost_get_features_op)(struct vhost_dev *dev,
uint64_t *features);
+typedef int (*vhost_set_backend_cap_op)(struct vhost_dev *dev);
typedef int (*vhost_set_owner_op)(struct vhost_dev *dev);
typedef int (*vhost_reset_device_op)(struct vhost_dev *dev);
typedef int (*vhost_get_vq_index_op)(struct vhost_dev *dev, int idx);
@@ -79,9 +108,6 @@ typedef int (*vhost_set_vring_enable_op)(struct vhost_dev *dev,
typedef bool (*vhost_requires_shm_log_op)(struct vhost_dev *dev);
typedef int (*vhost_migration_done_op)(struct vhost_dev *dev,
char *mac_addr);
-typedef bool (*vhost_backend_can_merge_op)(struct vhost_dev *dev,
- uint64_t start1, uint64_t size1,
- uint64_t start2, uint64_t size2);
typedef int (*vhost_vsock_set_guest_cid_op)(struct vhost_dev *dev,
uint64_t guest_cid);
typedef int (*vhost_vsock_set_running_op)(struct vhost_dev *dev, int start);
@@ -93,7 +119,7 @@ typedef int (*vhost_set_config_op)(struct vhost_dev *dev, const uint8_t *data,
uint32_t offset, uint32_t size,
uint32_t flags);
typedef int (*vhost_get_config_op)(struct vhost_dev *dev, uint8_t *config,
- uint32_t config_len);
+ uint32_t config_len, Error **errp);
typedef int (*vhost_crypto_create_session_op)(struct vhost_dev *dev,
void *session_info,
@@ -101,14 +127,45 @@ typedef int (*vhost_crypto_create_session_op)(struct vhost_dev *dev,
typedef int (*vhost_crypto_close_session_op)(struct vhost_dev *dev,
uint64_t session_id);
-typedef bool (*vhost_backend_mem_section_filter_op)(struct vhost_dev *dev,
- MemoryRegionSection *section);
+typedef bool (*vhost_backend_no_private_memslots_op)(struct vhost_dev *dev);
+
+typedef int (*vhost_get_inflight_fd_op)(struct vhost_dev *dev,
+ uint16_t queue_size,
+ struct vhost_inflight *inflight);
+
+typedef int (*vhost_set_inflight_fd_op)(struct vhost_dev *dev,
+ struct vhost_inflight *inflight);
+
+typedef int (*vhost_dev_start_op)(struct vhost_dev *dev, bool started);
+
+typedef int (*vhost_vq_get_addr_op)(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr,
+ struct vhost_virtqueue *vq);
+
+typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
+
+typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
+
+typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
+ int fd);
+
+typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
+
+typedef bool (*vhost_supports_device_state_op)(struct vhost_dev *dev);
+typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
+ VhostDeviceStateDirection direction,
+ VhostDeviceStatePhase phase,
+ int fd,
+ int *reply_fd,
+ Error **errp);
+typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, Error **errp);
typedef struct VhostOps {
VhostBackendType backend_type;
vhost_backend_init vhost_backend_init;
vhost_backend_cleanup vhost_backend_cleanup;
vhost_backend_memslots_limit vhost_backend_memslots_limit;
+ vhost_backend_no_private_memslots_op vhost_backend_no_private_memslots;
vhost_net_set_backend_op vhost_net_set_backend;
vhost_net_set_mtu_op vhost_net_set_mtu;
vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
@@ -123,16 +180,21 @@ typedef struct VhostOps {
vhost_get_vring_base_op vhost_get_vring_base;
vhost_set_vring_kick_op vhost_set_vring_kick;
vhost_set_vring_call_op vhost_set_vring_call;
+ vhost_set_vring_err_op vhost_set_vring_err;
vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout;
+ vhost_new_worker_op vhost_new_worker;
+ vhost_free_worker_op vhost_free_worker;
+ vhost_get_vring_worker_op vhost_get_vring_worker;
+ vhost_attach_vring_worker_op vhost_attach_vring_worker;
vhost_set_features_op vhost_set_features;
vhost_get_features_op vhost_get_features;
+ vhost_set_backend_cap_op vhost_set_backend_cap;
vhost_set_owner_op vhost_set_owner;
vhost_reset_device_op vhost_reset_device;
vhost_get_vq_index_op vhost_get_vq_index;
vhost_set_vring_enable_op vhost_set_vring_enable;
vhost_requires_shm_log_op vhost_requires_shm_log;
vhost_migration_done_op vhost_migration_done;
- vhost_backend_can_merge_op vhost_backend_can_merge;
vhost_vsock_set_guest_cid_op vhost_vsock_set_guest_cid;
vhost_vsock_set_running_op vhost_vsock_set_running;
vhost_set_iotlb_callback_op vhost_set_iotlb_callback;
@@ -141,14 +203,19 @@ typedef struct VhostOps {
vhost_set_config_op vhost_set_config;
vhost_crypto_create_session_op vhost_crypto_create_session;
vhost_crypto_close_session_op vhost_crypto_close_session;
- vhost_backend_mem_section_filter_op vhost_backend_mem_section_filter;
+ vhost_get_inflight_fd_op vhost_get_inflight_fd;
+ vhost_set_inflight_fd_op vhost_set_inflight_fd;
+ vhost_dev_start_op vhost_dev_start;
+ vhost_vq_get_addr_op vhost_vq_get_addr;
+ vhost_get_device_id_op vhost_get_device_id;
+ vhost_force_iommu_op vhost_force_iommu;
+ vhost_set_config_call_op vhost_set_config_call;
+ vhost_reset_status_op vhost_reset_status;
+ vhost_supports_device_state_op vhost_supports_device_state;
+ vhost_set_device_state_fd_op vhost_set_device_state_fd;
+ vhost_check_device_state_op vhost_check_device_state;
} VhostOps;
-extern const VhostOps user_ops;
-
-int vhost_set_backend_type(struct vhost_dev *dev,
- VhostBackendType backend_type);
-
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
uint64_t iova, uint64_t uaddr,
uint64_t len,
@@ -160,4 +227,9 @@ int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *imsg);
+int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd);
+
+int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid,
+ int *dmabuf_fd);
+
#endif /* VHOST_BACKEND_H */
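Editor's note: every backend (kernel, vhost-user, vdpa) publishes one VhostOps table; handlers a backend does not implement are simply left NULL and the core checks for that before calling. A heavily trimmed, hypothetical table to show the shape (the in-tree tables set far more members):

static int my_backend_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    dev->opaque = opaque;           /* stash backend-private state */
    return 0;
}

static int my_backend_cleanup(struct vhost_dev *dev)
{
    dev->opaque = NULL;
    return 0;
}

static const VhostOps my_backend_ops = {
    .backend_type = VHOST_BACKEND_TYPE_KERNEL,  /* illustrative choice */
    .vhost_backend_init = my_backend_init,
    .vhost_backend_cleanup = my_backend_cleanup,
    /* remaining handlers intentionally left NULL for brevity */
};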
diff --git a/include/hw/virtio/vhost-scsi-common.h b/include/hw/virtio/vhost-scsi-common.h
index 57fb1d87b5..c5d2c09455 100644
--- a/include/hw/virtio/vhost-scsi-common.h
+++ b/include/hw/virtio/vhost-scsi-common.h
@@ -14,17 +14,15 @@
#ifndef VHOST_SCSI_COMMON_H
#define VHOST_SCSI_COMMON_H
-#include "qemu-common.h"
-#include "hw/qdev.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost.h"
#include "hw/fw-path-provider.h"
+#include "qom/object.h"
#define TYPE_VHOST_SCSI_COMMON "vhost-scsi-common"
-#define VHOST_SCSI_COMMON(obj) \
- OBJECT_CHECK(VHostSCSICommon, (obj), TYPE_VHOST_SCSI_COMMON)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostSCSICommon, VHOST_SCSI_COMMON)
-typedef struct VHostSCSICommon {
+struct VHostSCSICommon {
VirtIOSCSICommon parent_obj;
Error *migration_blocker;
@@ -36,9 +34,12 @@ typedef struct VHostSCSICommon {
int target;
int lun;
uint64_t host_features;
-} VHostSCSICommon;
+ bool migratable;
-int vhost_scsi_common_start(VHostSCSICommon *vsc);
+ struct vhost_inflight *inflight;
+};
+
+int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp);
void vhost_scsi_common_stop(VHostSCSICommon *vsc);
char *vhost_scsi_common_get_fw_dev_path(FWPathProvider *p, BusState *bus,
DeviceState *dev);
diff --git a/include/hw/virtio/vhost-scsi.h b/include/hw/virtio/vhost-scsi.h
index 04658d14f5..7dc2bdd69d 100644
--- a/include/hw/virtio/vhost-scsi.h
+++ b/include/hw/virtio/vhost-scsi.h
@@ -14,11 +14,10 @@
#ifndef VHOST_SCSI_H
#define VHOST_SCSI_H
-#include "qemu-common.h"
-#include "hw/qdev.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-scsi-common.h"
+#include "qom/object.h"
enum vhost_scsi_vq_list {
VHOST_SCSI_VQ_CONTROL = 0,
@@ -27,11 +26,10 @@ enum vhost_scsi_vq_list {
};
#define TYPE_VHOST_SCSI "vhost-scsi"
-#define VHOST_SCSI(obj) \
- OBJECT_CHECK(VHostSCSI, (obj), TYPE_VHOST_SCSI)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostSCSI, VHOST_SCSI)
-typedef struct VHostSCSI {
+struct VHostSCSI {
VHostSCSICommon parent_obj;
-} VHostSCSI;
+};
#endif
diff --git a/include/hw/virtio/vhost-user-base.h b/include/hw/virtio/vhost-user-base.h
new file mode 100644
index 0000000000..51d0968b89
--- /dev/null
+++ b/include/hw/virtio/vhost-user-base.h
@@ -0,0 +1,49 @@
+/*
+ * Vhost-user generic virtio device
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_BASE_H
+#define QEMU_VHOST_USER_BASE_H
+
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+
+#define TYPE_VHOST_USER_BASE "vhost-user-base"
+
+OBJECT_DECLARE_TYPE(VHostUserBase, VHostUserBaseClass, VHOST_USER_BASE)
+
+struct VHostUserBase {
+ VirtIODevice parent_obj;
+
+ /* Properties */
+ CharBackend chardev;
+ uint16_t virtio_id;
+ uint32_t num_vqs;
+ uint32_t vq_size; /* can't exceed VIRTIO_QUEUE_MAX */
+ uint32_t config_size;
+ /* State tracking */
+ VhostUserState vhost_user;
+ struct vhost_virtqueue *vhost_vq;
+ struct vhost_dev vhost_dev;
+ GPtrArray *vqs;
+ bool connected;
+};
+
+/*
+ * Needed so we can use the base realize after specialisation
+ * tweaks
+ */
+struct VHostUserBaseClass {
+ VirtioDeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+
+#define TYPE_VHOST_USER_DEVICE "vhost-user-device"
+
+#endif /* QEMU_VHOST_USER_BASE_H */
diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h
index d52944aeeb..ea085ee1ed 100644
--- a/include/hw/virtio/vhost-user-blk.h
+++ b/include/hw/virtio/vhost-user-blk.h
@@ -16,27 +16,40 @@
#define VHOST_USER_BLK_H
#include "standard-headers/linux/virtio_blk.h"
-#include "qemu-common.h"
-#include "hw/qdev.h"
#include "hw/block/block.h"
#include "chardev/char-fe.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
+#include "qom/object.h"
#define TYPE_VHOST_USER_BLK "vhost-user-blk"
-#define VHOST_USER_BLK(obj) \
- OBJECT_CHECK(VHostUserBlk, (obj), TYPE_VHOST_USER_BLK)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserBlk, VHOST_USER_BLK)
-typedef struct VHostUserBlk {
+#define VHOST_USER_BLK_AUTO_NUM_QUEUES UINT16_MAX
+
+struct VHostUserBlk {
VirtIODevice parent_obj;
CharBackend chardev;
int32_t bootindex;
struct virtio_blk_config blkcfg;
uint16_t num_queues;
uint32_t queue_size;
- uint32_t config_wce;
struct vhost_dev dev;
- VhostUserState *vhost_user;
-} VHostUserBlk;
+ struct vhost_inflight *inflight;
+ VhostUserState vhost_user;
+ struct vhost_virtqueue *vhost_vqs;
+ VirtQueue **virtqs;
+
+ /*
+ * A vhost-user device is initialized in at least two steps: a
+ * "connect" step and a "start" step. The two fields below keep
+ * those initialization phases separate.
+ */
+ /* vhost_user_blk_connect/vhost_user_blk_disconnect */
+ bool connected;
+ /* vhost_user_blk_start/vhost_user_blk_stop */
+ bool started_vu;
+};
#endif
diff --git a/include/hw/virtio/vhost-user-fs.h b/include/hw/virtio/vhost-user-fs.h
new file mode 100644
index 0000000000..94c3aaa84e
--- /dev/null
+++ b/include/hw/virtio/vhost-user-fs.h
@@ -0,0 +1,47 @@
+/*
+ * Vhost-user filesystem virtio device
+ *
+ * Copyright 2018-2019 Red Hat, Inc.
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_VHOST_USER_FS_H
+#define QEMU_VHOST_USER_FS_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "chardev/char-fe.h"
+#include "qom/object.h"
+
+#define TYPE_VHOST_USER_FS "vhost-user-fs-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserFS, VHOST_USER_FS)
+
+typedef struct {
+ CharBackend chardev;
+ char *tag;
+ uint16_t num_request_queues;
+ uint16_t queue_size;
+} VHostUserFSConf;
+
+struct VHostUserFS {
+ /*< private >*/
+ VirtIODevice parent;
+ VHostUserFSConf conf;
+ struct vhost_virtqueue *vhost_vqs;
+ struct vhost_dev vhost_dev;
+ VhostUserState vhost_user;
+ VirtQueue **req_vqs;
+ VirtQueue *hiprio_vq;
+ int32_t bootindex;
+
+ /*< public >*/
+};
+
+#endif /* QEMU_VHOST_USER_FS_H */
diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h
new file mode 100644
index 0000000000..5814a8400a
--- /dev/null
+++ b/include/hw/virtio/vhost-user-gpio.h
@@ -0,0 +1,24 @@
+/*
+ * Vhost-user GPIO virtio device
+ *
+ * Copyright (c) 2021 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _QEMU_VHOST_USER_GPIO_H
+#define _QEMU_VHOST_USER_GPIO_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_GPIO "vhost-user-gpio-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserGPIO, VHOST_USER_GPIO);
+
+struct VHostUserGPIO {
+ VHostUserBase parent_obj;
+};
+
+#endif /* _QEMU_VHOST_USER_GPIO_H */
diff --git a/include/hw/virtio/vhost-user-i2c.h b/include/hw/virtio/vhost-user-i2c.h
new file mode 100644
index 0000000000..a9b5612ad0
--- /dev/null
+++ b/include/hw/virtio/vhost-user-i2c.h
@@ -0,0 +1,25 @@
+/*
+ * Vhost-user i2c virtio device
+ *
+ * Copyright (c) 2021 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_I2C_H
+#define QEMU_VHOST_USER_I2C_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_I2C "vhost-user-i2c-device"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserI2C, VHOST_USER_I2C)
+
+struct VHostUserI2C {
+ VHostUserBase parent_obj;
+};
+
+#endif /* QEMU_VHOST_USER_I2C_H */
diff --git a/include/hw/virtio/vhost-user-rng.h b/include/hw/virtio/vhost-user-rng.h
new file mode 100644
index 0000000000..10868c7de4
--- /dev/null
+++ b/include/hw/virtio/vhost-user-rng.h
@@ -0,0 +1,24 @@
+/*
+ * Vhost-user RNG virtio device
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_RNG_H
+#define QEMU_VHOST_USER_RNG_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_RNG "vhost-user-rng"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserRNG, VHOST_USER_RNG)
+
+struct VHostUserRNG {
+ VHostUserBase parent_obj;
+};
+
+#endif /* QEMU_VHOST_USER_RNG_H */
diff --git a/include/hw/virtio/vhost-user-scmi.h b/include/hw/virtio/vhost-user-scmi.h
new file mode 100644
index 0000000000..c90db77dd5
--- /dev/null
+++ b/include/hw/virtio/vhost-user-scmi.h
@@ -0,0 +1,31 @@
+/*
+ * Vhost-user SCMI virtio device
+ *
+ * Copyright (c) 2023 Red Hat, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _QEMU_VHOST_USER_SCMI_H
+#define _QEMU_VHOST_USER_SCMI_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+
+#define TYPE_VHOST_USER_SCMI "vhost-user-scmi"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCMI, VHOST_USER_SCMI);
+
+struct VHostUserSCMI {
+ VirtIODevice parent;
+ CharBackend chardev;
+ struct vhost_virtqueue *vhost_vqs;
+ struct vhost_dev vhost_dev;
+ VhostUserState vhost_user;
+ VirtQueue *cmd_vq;
+ VirtQueue *event_vq;
+ bool connected;
+ bool started_vu;
+};
+
+#endif /* _QEMU_VHOST_USER_SCMI_H */
diff --git a/include/hw/virtio/vhost-user-scsi.h b/include/hw/virtio/vhost-user-scsi.h
index e429cacd8e..78fe616ccb 100644
--- a/include/hw/virtio/vhost-user-scsi.h
+++ b/include/hw/virtio/vhost-user-scsi.h
@@ -17,20 +17,24 @@
#ifndef VHOST_USER_SCSI_H
#define VHOST_USER_SCSI_H
-#include "qemu-common.h"
-#include "hw/qdev.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-scsi-common.h"
+#include "qom/object.h"
#define TYPE_VHOST_USER_SCSI "vhost-user-scsi"
-#define VHOST_USER_SCSI(obj) \
- OBJECT_CHECK(VHostUserSCSI, (obj), TYPE_VHOST_USER_SCSI)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCSI, VHOST_USER_SCSI)
-typedef struct VHostUserSCSI {
+struct VHostUserSCSI {
VHostSCSICommon parent_obj;
- VhostUserState *vhost_user;
-} VHostUserSCSI;
+
+ /* Properties */
+ bool connected;
+ bool started_vu;
+
+ VhostUserState vhost_user;
+ struct vhost_virtqueue *vhost_vqs;
+};
#endif /* VHOST_USER_SCSI_H */
diff --git a/include/hw/virtio/vhost-user-snd.h b/include/hw/virtio/vhost-user-snd.h
new file mode 100644
index 0000000000..f9260116a7
--- /dev/null
+++ b/include/hw/virtio/vhost-user-snd.h
@@ -0,0 +1,24 @@
+/*
+ * Vhost-user Sound virtio device
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_SND_H
+#define QEMU_VHOST_USER_SND_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_SND "vhost-user-snd"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSound, VHOST_USER_SND)
+
+struct VHostUserSound {
+ VHostUserBase parent_obj;
+};
+
+#endif /* QEMU_VHOST_USER_SND_H */
diff --git a/include/hw/virtio/vhost-user-vsock.h b/include/hw/virtio/vhost-user-vsock.h
new file mode 100644
index 0000000000..67aa46c952
--- /dev/null
+++ b/include/hw/virtio/vhost-user-vsock.h
@@ -0,0 +1,36 @@
+/*
+ * Vhost-user vsock virtio device
+ *
+ * Copyright 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_VHOST_USER_VSOCK_H
+#define QEMU_VHOST_USER_VSOCK_H
+
+#include "hw/virtio/vhost-vsock-common.h"
+#include "hw/virtio/vhost-user.h"
+#include "standard-headers/linux/virtio_vsock.h"
+#include "qom/object.h"
+
+#define TYPE_VHOST_USER_VSOCK "vhost-user-vsock-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserVSock, VHOST_USER_VSOCK)
+
+typedef struct {
+ CharBackend chardev;
+} VHostUserVSockConf;
+
+struct VHostUserVSock {
+ /*< private >*/
+ VHostVSockCommon parent;
+ VhostUserState vhost_user;
+ VHostUserVSockConf conf;
+ struct virtio_vsock_config vsockcfg;
+
+ /*< public >*/
+};
+
+#endif /* QEMU_VHOST_USER_VSOCK_H */
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index fd660393a0..d7c09ffd34 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -11,18 +11,104 @@
#include "chardev/char-fe.h"
#include "hw/virtio/virtio.h"
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
+ VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
+ VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
+ VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
+ VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
+ VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
+ VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+ VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
+ VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+ VHOST_USER_PROTOCOL_F_STATUS = 16,
+ /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
+ VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
+ VHOST_USER_PROTOCOL_F_DEVICE_STATE = 19,
+ VHOST_USER_PROTOCOL_F_MAX
+};
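Editor's note: protocol features are plain bit positions within a 64-bit word negotiated through VHOST_USER_GET/SET_PROTOCOL_FEATURES, so they can be tested like any other virtio feature bit. A small illustrative check, assuming the negotiated word is kept in vhost_dev->protocol_features:

static bool my_backend_has_mq(struct vhost_dev *dev)
{
    /* true only if multiqueue support was negotiated with the backend */
    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_MQ);
}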
+
+/**
+ * VhostUserHostNotifier - notifier information for one queue
+ * @rcu: rcu_head for cleanup
+ * @mr: memory region of notifier
+ * @addr: current mapped address
+ * @unmap_addr: address to be un-mapped
+ * @idx: virtqueue index
+ *
+ * The VhostUserHostNotifier entries are re-used. When an old mapping
+ * is to be released it is moved to @unmap_addr and @addr is replaced.
+ * Once the RCU process has completed the unmap, @unmap_addr is
+ * cleared.
+ */
typedef struct VhostUserHostNotifier {
+ struct rcu_head rcu;
MemoryRegion mr;
void *addr;
- bool set;
+ void *unmap_addr;
+ int idx;
} VhostUserHostNotifier;
+/**
+ * VhostUserState - shared state for all vhost-user devices
+ * @chr: the character backend for the socket
+ * @notifiers: GPtrArray of @VhostUserHostNotifier entries
+ * @memory_slots: number of memory slots supported by the backend
+ */
typedef struct VhostUserState {
CharBackend *chr;
- VhostUserHostNotifier notifier[VIRTIO_QUEUE_MAX];
+ GPtrArray *notifiers;
+ int memory_slots;
+ bool supports_config;
} VhostUserState;
-VhostUserState *vhost_user_init(void);
+/**
+ * vhost_user_init() - initialise shared vhost_user state
+ * @user: allocated area for storing shared state
+ * @chr: the chardev for the vhost socket
+ * @errp: error handle
+ *
+ * Users can either g_new() space for the state directly or embed
+ * VhostUserState in their larger device structure and just point to
+ * it.
+ *
+ * Return: true on success; false on error, in which case @errp is set.
+ */
+bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
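
Editor's note: a minimal sketch of the "embed it in the device state" option described above; MyDevState and my_dev_realize are made-up names, and the vhost_dev_init() call is only hinted at in a comment:

typedef struct MyDevState {
    VirtIODevice parent_obj;
    CharBackend chardev;            /* set from a chardev property */
    VhostUserState vhost_user;      /* embedded rather than allocated */
    struct vhost_dev vhost_dev;
} MyDevState;

static void my_dev_realize(MyDevState *d, Error **errp)
{
    if (!vhost_user_init(&d->vhost_user, &d->chardev, errp)) {
        return;                     /* errp has already been set */
    }
    /*
     * ... followed by something like
     * vhost_dev_init(&d->vhost_dev, &d->vhost_user,
     *                VHOST_BACKEND_TYPE_USER, 0, errp);
     */
}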
+
+/**
+ * vhost_user_cleanup() - cleanup state
+ * @user: pointer to the shared state
+ *
+ * Cleans up the shared state and notifiers; the caller is responsible
+ * for freeing the @VhostUserState memory itself.
+ */
void vhost_user_cleanup(VhostUserState *user);
+/**
+ * vhost_user_async_close() - cleanup vhost-user post connection drop
+ * @d: DeviceState for the associated device (passed to callback)
+ * @chardev: the CharBackend associated with the connection
+ * @vhost: the common vhost device
+ * @cb: the user callback function to complete the clean-up
+ *
+ * This function is used to handle the shutdown of a vhost-user
+ * connection to a backend. We handle this centrally to make sure we
+ * do all the steps and handle potential races due to VM shutdowns.
+ * Once the connection is disabled we schedule a bottom half to
+ * finish the clean-up.
+ */
+typedef void (*vu_async_close_fn)(DeviceState *cb);
+
+void vhost_user_async_close(DeviceState *d,
+ CharBackend *chardev, struct vhost_dev *vhost,
+ vu_async_close_fn cb,
+ IOEventHandler *event_cb);
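
Editor's note: a typical caller invokes this from its chardev event hook: on CHR_EVENT_CLOSED the device hands its disconnect routine to vhost_user_async_close() rather than tearing state down inline. A hedged sketch reusing the hypothetical MyDevState from the earlier example:

static void my_dev_disconnect(DeviceState *dev)
{
    /* device-specific teardown: stop the vhost dev, clear 'connected', ... */
}

static void my_dev_event(void *opaque, QEMUChrEvent event)
{
    MyDevState *d = opaque;

    switch (event) {
    case CHR_EVENT_CLOSED:
        vhost_user_async_close(DEVICE(d), &d->chardev, &d->vhost_dev,
                               my_dev_disconnect, my_dev_event);
        break;
    default:
        break;
    }
}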
+
#endif
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
new file mode 100644
index 0000000000..0a9575b469
--- /dev/null
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -0,0 +1,95 @@
+/*
+ * vhost-vdpa.h
+ *
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * Copyright(c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HW_VIRTIO_VHOST_VDPA_H
+#define HW_VIRTIO_VHOST_VDPA_H
+
+#include <gmodule.h>
+
+#include "hw/virtio/vhost-iova-tree.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
+#include "hw/virtio/virtio.h"
+#include "standard-headers/linux/vhost_types.h"
+
+/*
+ * ASID dedicated to mapping guest addresses. If SVQ is disabled it maps GPA to
+ * QEMU's IOVA. If SVQ is enabled it also maps the SVQ vrings here.
+ */
+#define VHOST_VDPA_GUEST_PA_ASID 0
+
+typedef struct VhostVDPAHostNotifier {
+ MemoryRegion mr;
+ void *addr;
+} VhostVDPAHostNotifier;
+
+typedef enum SVQTransitionState {
+ SVQ_TSTATE_DISABLING = -1,
+ SVQ_TSTATE_DONE,
+ SVQ_TSTATE_ENABLING
+} SVQTransitionState;
+
+/* Info shared by all vhost_vdpa device models */
+typedef struct vhost_vdpa_shared {
+ int device_fd;
+ MemoryListener listener;
+ struct vhost_vdpa_iova_range iova_range;
+ QLIST_HEAD(, vdpa_iommu) iommu_list;
+
+ /* IOVA mapping used by the Shadow Virtqueue */
+ VhostIOVATree *iova_tree;
+
+ /* Copy of backend features */
+ uint64_t backend_cap;
+
+ bool iotlb_batch_begin_sent;
+
+ /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
+ bool shadow_data;
+
+ /* Whether an SVQ switch is in progress or has already completed */
+ SVQTransitionState svq_switching;
+} VhostVDPAShared;
+
+typedef struct vhost_vdpa {
+ int index;
+ uint32_t address_space_id;
+ uint64_t acked_features;
+ bool shadow_vqs_enabled;
+ /* Device suspended successfully */
+ bool suspended;
+ VhostVDPAShared *shared;
+ GPtrArray *shadow_vqs;
+ const VhostShadowVirtqueueOps *shadow_vq_ops;
+ void *shadow_vq_ops_opaque;
+ struct vhost_dev *dev;
+ Error *migration_blocker;
+ VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
+ IOMMUNotifier n;
+} VhostVDPA;
+
+int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
+int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);
+
+int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly);
+int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
+ hwaddr size);
+
+typedef struct vdpa_iommu {
+ VhostVDPAShared *dev_shared;
+ IOMMUMemoryRegion *iommu_mr;
+ hwaddr iommu_offset;
+ IOMMUNotifier n;
+ QLIST_ENTRY(vdpa_iommu) iommu_next;
+} VDPAIOMMUState;
+
+
+#endif
diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
new file mode 100644
index 0000000000..75a74e8a99
--- /dev/null
+++ b/include/hw/virtio/vhost-vsock-common.h
@@ -0,0 +1,53 @@
+/*
+ * Parent class for vhost-vsock devices
+ *
+ * Copyright 2015-2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_VHOST_VSOCK_COMMON_H
+#define QEMU_VHOST_VSOCK_COMMON_H
+
+#include "qapi/qapi-types-common.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "qom/object.h"
+
+#define TYPE_VHOST_VSOCK_COMMON "vhost-vsock-common"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostVSockCommon, VHOST_VSOCK_COMMON)
+
+enum {
+ VHOST_VSOCK_SAVEVM_VERSION = 0,
+
+ VHOST_VSOCK_QUEUE_SIZE = 128,
+};
+
+struct VHostVSockCommon {
+ VirtIODevice parent;
+
+ struct vhost_virtqueue vhost_vqs[2];
+ struct vhost_dev vhost_dev;
+
+ VirtQueue *event_vq;
+ VirtQueue *recv_vq;
+ VirtQueue *trans_vq;
+
+ QEMUTimer *post_load_timer;
+
+ /* features */
+ OnOffAuto seqpacket;
+};
+
+int vhost_vsock_common_start(VirtIODevice *vdev);
+void vhost_vsock_common_stop(VirtIODevice *vdev);
+int vhost_vsock_common_pre_save(void *opaque);
+int vhost_vsock_common_post_load(void *opaque, int version_id);
+void vhost_vsock_common_realize(VirtIODevice *vdev);
+void vhost_vsock_common_unrealize(VirtIODevice *vdev);
+uint64_t vhost_vsock_common_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp);
+
+#endif /* QEMU_VHOST_VSOCK_COMMON_H */
diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
index 7b9205fe3f..84f4e727c7 100644
--- a/include/hw/virtio/vhost-vsock.h
+++ b/include/hw/virtio/vhost-vsock.h
@@ -11,31 +11,26 @@
* top-level directory.
*/
-#ifndef _QEMU_VHOST_VSOCK_H
-#define _QEMU_VHOST_VSOCK_H
+#ifndef QEMU_VHOST_VSOCK_H
+#define QEMU_VHOST_VSOCK_H
-#include "hw/virtio/virtio.h"
-#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-vsock-common.h"
+#include "qom/object.h"
#define TYPE_VHOST_VSOCK "vhost-vsock-device"
-#define VHOST_VSOCK(obj) \
- OBJECT_CHECK(VHostVSock, (obj), TYPE_VHOST_VSOCK)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
typedef struct {
uint64_t guest_cid;
char *vhostfd;
} VHostVSockConf;
-typedef struct {
+struct VHostVSock {
/*< private >*/
- VirtIODevice parent;
+ VHostVSockCommon parent;
VHostVSockConf conf;
- struct vhost_virtqueue vhost_vqs[2];
- struct vhost_dev vhost_dev;
- VirtQueue *event_vq;
- QEMUTimer *post_load_timer;
/*< public >*/
-} VHostVSock;
+};
-#endif /* _QEMU_VHOST_VSOCK_H */
+#endif /* QEMU_VHOST_VSOCK_H */
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index a7f449fa87..02477788df 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -1,12 +1,25 @@
#ifndef VHOST_H
#define VHOST_H
-#include "hw/hw.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "exec/memory.h"
+#define VHOST_F_DEVICE_IOTLB 63
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
+#define VU_REALIZE_CONN_RETRIES 3
+
/* Generic structures common for any vhost based device. */
+
+struct vhost_inflight {
+ int fd;
+ void *addr;
+ uint64_t size;
+ uint64_t offset;
+ uint16_t queue_size;
+};
+
struct vhost_virtqueue {
int kick;
int call;
@@ -21,6 +34,8 @@ struct vhost_virtqueue {
unsigned long long used_phys;
unsigned used_size;
EventNotifier masked_notifier;
+ EventNotifier error_notifier;
+ EventNotifier masked_config_notifier;
struct vhost_dev *dev;
};
@@ -29,6 +44,7 @@ typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT (0xff)
+#define VHOST_QUEUE_NUM_CONFIG_INR 0
struct vhost_log {
unsigned long long size;
@@ -53,6 +69,12 @@ typedef struct VhostDevConfigOps {
} VhostDevConfigOps;
struct vhost_memory;
+
+/**
+ * struct vhost_dev - common vhost_dev structure
+ * @vhost_ops: backend specific ops
+ * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
+ */
struct vhost_dev {
VirtIODevice *vdev;
MemoryListener memory_listener;
@@ -63,14 +85,42 @@ struct vhost_dev {
int n_tmp_sections;
MemoryRegionSection *tmp_sections;
struct vhost_virtqueue *vqs;
- int nvqs;
+ unsigned int nvqs;
/* the first virtqueue which would be used by this vhost dev */
int vq_index;
+ /* one past the last vq index for the virtio device (not vhost) */
+ int vq_index_end;
+ /* if non-zero, minimum required value for max_queues */
+ int num_queues;
+ /**
+ * vhost feature handling requires matching the feature set
+ * offered by a backend which may be a subset of the total
+ * features eventually offered to the guest.
+ *
+ * @features: available features provided by the backend
+ * @acked_features: final negotiated features with front-end driver
+ *
+ * @backend_features: this is used in a couple of places to either
+ * store VHOST_USER_F_PROTOCOL_FEATURES to apply to
+ * VHOST_USER_SET_FEATURES or VHOST_NET_F_VIRTIO_NET_HDR. Its
+ * future use should be discouraged and the variable retired as
+ * it's easy to confuse with the VirtIO backend_features.
+ */
uint64_t features;
uint64_t acked_features;
uint64_t backend_features;
+
+ /**
+ * @protocol_features: the vhost-user-only feature set, set by
+ * VHOST_USER_SET_PROTOCOL_FEATURES. Protocol features are only
+ * negotiated if VHOST_USER_F_PROTOCOL_FEATURES has been offered
+ * by the backend (see @features).
+ */
uint64_t protocol_features;
+
uint64_t max_queues;
+ uint64_t backend_cap;
+ /* @started: is the vhost device started? */
bool started;
bool log_enabled;
uint64_t log_size;
@@ -84,14 +134,154 @@ struct vhost_dev {
const VhostDevConfigOps *config_ops;
};
+extern const VhostOps kernel_ops;
+extern const VhostOps user_ops;
+extern const VhostOps vdpa_ops;
+
+struct vhost_net {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[2];
+ int backend;
+ NetClientState *nc;
+};
+
+/**
+ * vhost_dev_init() - initialise the vhost interface
+ * @hdev: the common vhost_dev structure
+ * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa)
+ * @backend_type: type of backend
+ * @busyloop_timeout: timeout for polling virtqueue
+ * @errp: error handle
+ *
+ * The initialisation of the vhost device will trigger the
+ * initialisation of the backend and potentially capability
+ * negotiation of the backend interface. Configuration of the VirtIO
+ * device itself won't happen until the interface is started.
+ *
+ * Return: 0 on success; non-zero on error, in which case @errp is set.
+ */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type,
- uint32_t busyloop_timeout);
+ uint32_t busyloop_timeout, Error **errp);
+
+/**
+ * vhost_dev_cleanup() - tear down and cleanup vhost interface
+ * @hdev: the common vhost_dev structure
+ */
void vhost_dev_cleanup(struct vhost_dev *hdev);
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
+
+/**
+ * vhost_dev_enable_notifiers() - enable event notifiers
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ *
+ * Enable notifications directly to the vhost device rather than being
+ * triggered by QEMU itself. Notifications should be enabled before
+ * the vhost device is started via @vhost_dev_start.
+ *
+ * Return: 0 on success, < 0 on error.
+ */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+
+/**
+ * vhost_dev_disable_notifiers - disable event notifications
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ *
+ * Disable direct notifications to vhost device.
+ */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+bool vhost_config_pending(struct vhost_dev *hdev);
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);
+
+/**
+ * vhost_dev_is_started() - report status of vhost device
+ * @hdev: common vhost_dev structure
+ *
+ * Return the started status of the vhost device
+ */
+static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
+{
+ return hdev->started;
+}
+
+/**
+ * vhost_dev_start() - start the vhost device
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ * @vrings: true to have vrings enabled in this call
+ *
+ * Starts the vhost device. From this point VirtIO feature negotiation
+ * can start and the device can start processing VirtIO transactions.
+ *
+ * Return: 0 on success, < 0 on error.
+ */
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+
+/**
+ * vhost_dev_stop() - stop the vhost device
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ * @vrings: true to have vrings disabled in this call
+ *
+ * Stop the vhost device. After the device is stopped the notifiers
+ * can be disabled (@vhost_dev_disable_notifiers) and the device can
+ * be torn down (@vhost_dev_cleanup).
+ */
+void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+
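Taken together, vhost_dev_init(), vhost_dev_enable_notifiers(), vhost_dev_start() and their counterparts documented above describe the lifecycle a vhost-backed device model runs through. A condensed, hedged sketch of that sequence; the backend type, queue count and zero busyloop timeout are placeholders, not requirements:

/* Illustrative bring-up/tear-down order, not part of the patch. */
static int example_vhost_bringup(struct vhost_dev *hdev, VirtIODevice *vdev,
                                 struct vhost_virtqueue *vqs, int nvqs,
                                 void *backend_opaque, Error **errp)
{
    int r;

    hdev->vqs = vqs;                    /* caller-owned virtqueue array */
    hdev->nvqs = nvqs;

    r = vhost_dev_init(hdev, backend_opaque, VHOST_BACKEND_TYPE_KERNEL,
                       0 /* busyloop timeout */, errp);
    if (r < 0) {
        return r;
    }

    r = vhost_dev_enable_notifiers(hdev, vdev);   /* before starting */
    if (r < 0) {
        goto fail_cleanup;
    }

    r = vhost_dev_start(hdev, vdev, true /* enable vrings */);
    if (r < 0) {
        goto fail_notifiers;
    }
    return 0;

fail_notifiers:
    vhost_dev_disable_notifiers(hdev, vdev);
fail_cleanup:
    vhost_dev_cleanup(hdev);
    return r;
}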
+/**
+ * DOC: vhost device configuration handling
+ *
+ * The VirtIO device configuration space is used for rarely changing
+ * or initialisation time parameters. The configuration can be updated
+ * by either the guest driver or the device itself. If the device can
+ * change the configuration over time the vhost handler should
+ * register a @VhostDevConfigOps structure with
+ * @vhost_dev_set_config_notifier so the guest can be notified. Some
+ * devices register a handler anyway and will signal an error if an
+ * unexpected config change happens.
+ */
+
+/**
+ * vhost_dev_get_config() - fetch device configuration
+ * @hdev: common vhost_dev structure
+ * @config: pointer to device appropriate config structure
+ * @config_len: size of device appropriate config structure
+ *
+ * Return: 0 on success; < 0 on error, in which case @errp is set
+ */
+int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
+ uint32_t config_len, Error **errp);
+
+/**
+ * vhost_dev_set_config() - set device configuration
+ * @dev: common vhost_dev structure
+ * @data: pointer to data to set
+ * @offset: offset into configuration space
+ * @size: length of set
+ * @flags: @VhostSetConfigType flags
+ *
+ * By use of @offset/@size a subset of the configuration space can be
+ * written to. The @flags are used to indicate if it is a normal
+ * transaction or related to migration.
+ *
+ * Return: 0 on success, non-zero on error
+ */
+int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags);
+
+/**
+ * vhost_dev_set_config_notifier() - register VhostDevConfigOps
+ * @dev: common vhost_dev structure
+ * @ops: notifier ops
+ *
+ * If the device is expected to change configuration a notifier can be
+ * setup to handle the case.
+ */
+void vhost_dev_set_config_notifier(struct vhost_dev *dev,
+ const VhostDevConfigOps *ops);
+
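If the device's configuration can change at runtime, the ops structure registered through vhost_dev_set_config_notifier() carries the callback invoked on such changes. A hedged sketch; the VhostDevConfigOps member name used here (vhost_dev_config_notifier) is an assumption, since the structure body is not part of this hunk:

/* Illustrative registration of a config-change notifier. */
static int example_config_changed(struct vhost_dev *dev)
{
    /* Re-read the backend config and refresh the VirtIO config space. */
    return 0;
}

static const VhostDevConfigOps example_config_ops = {
    .vhost_dev_config_notifier = example_config_changed,  /* assumed name */
};

/* During device setup:
 *     vhost_dev_set_config_notifier(hdev, &example_config_ops);
 */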
/* Test and clear masked event pending status.
* Should be called after unmask to avoid losing events.
@@ -102,22 +292,176 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);
*/
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
bool mask);
+
+/**
+ * vhost_get_features() - return a sanitised set of feature bits
+ * @hdev: common vhost_dev structure
+ * @feature_bits: pointer to a VHOST_INVALID_FEATURE_BIT-terminated table of feature bits
+ * @features: original feature set
+ *
+ * This returns the set of feature bits that is the intersection of what
+ * is supported by the vhost backend (hdev->features), the supported
+ * feature_bits and the requested feature set.
+ */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
uint64_t features);
+
+/**
+ * vhost_ack_features() - set vhost acked_features
+ * @hdev: common vhost_dev structure
+ * @feature_bits: pointer to a VHOST_INVALID_FEATURE_BIT-terminated table of feature bits
+ * @features: requested feature set
+ *
+ * This sets the internal hdev->acked_features to the intersection of
+ * the backend's advertised features and the supported feature_bits.
+ */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
uint64_t features);
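Both helpers are documented above as an intersection over a feature-bit table; the core of that operation can be sketched as follows (a simplified model, assuming the table is terminated by VHOST_INVALID_FEATURE_BIT and that bits absent from the table are left untouched):

/* Hedged sketch: clear requested bits that the backend does not offer. */
static uint64_t example_feature_intersect(uint64_t backend_features,
                                          const int *feature_bits,
                                          uint64_t requested)
{
    const int *bit;

    for (bit = feature_bits; *bit != VHOST_INVALID_FEATURE_BIT; bit++) {
        uint64_t mask = 1ULL << *bit;

        if (!(backend_features & mask)) {
            requested &= ~mask;
        }
    }
    return requested;
}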
-bool vhost_has_free_slot(void);
+unsigned int vhost_get_max_memslots(void);
+unsigned int vhost_get_free_memslots(void);
int vhost_net_set_backend(struct vhost_dev *hdev,
struct vhost_vring_file *file);
+void vhost_toggle_device_iotlb(VirtIODevice *vdev);
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
-int vhost_dev_get_config(struct vhost_dev *dev, uint8_t *config,
- uint32_t config_len);
-int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
- uint32_t offset, uint32_t size, uint32_t flags);
-/* notifier callback in case vhost device config space changed
+
+int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq, unsigned idx);
+void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq, unsigned idx);
+
+void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
+void vhost_dev_free_inflight(struct vhost_inflight *inflight);
+void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
+int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
+int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
+int vhost_dev_set_inflight(struct vhost_dev *dev,
+ struct vhost_inflight *inflight);
+int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
+ struct vhost_inflight *inflight);
+bool vhost_dev_has_iommu(struct vhost_dev *dev);
+
+#ifdef CONFIG_VHOST
+int vhost_reset_device(struct vhost_dev *hdev);
+#else
+static inline int vhost_reset_device(struct vhost_dev *hdev)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_VHOST */
+
+/**
+ * vhost_supports_device_state(): Checks whether the back-end supports
+ * transferring internal device state for the purpose of migration.
+ * Support for this feature is required for vhost_set_device_state_fd()
+ * and vhost_check_device_state().
+ *
+ * @dev: The vhost device
+ *
+ * Returns true if the device supports these commands, and false if it
+ * does not.
*/
-void vhost_dev_set_config_notifier(struct vhost_dev *dev,
- const VhostDevConfigOps *ops);
+bool vhost_supports_device_state(struct vhost_dev *dev);
+
+/**
+ * vhost_set_device_state_fd(): Begin transfer of internal state from/to
+ * the back-end for the purpose of migration. Data is to be transferred
+ * over a pipe according to @direction and @phase. The sending end must
+ * only write to the pipe, and the receiving end must only read from it.
+ * Once the sending end is done, it closes its FD. The receiving end
+ * must take this as the end-of-transfer signal and close its FD, too.
+ *
+ * @fd is the back-end's end of the pipe: The write FD for SAVE, and the
+ * read FD for LOAD. This function transfers ownership of @fd to the
+ * back-end, i.e. closes it in the front-end.
+ *
+ * The back-end may optionally reply with an FD of its own, if this
+ * improves efficiency on its end. In this case, the returned FD is
+ * stored in *reply_fd. The back-end will discard the FD sent to it,
+ * and the front-end must use *reply_fd for transferring state to/from
+ * the back-end.
+ *
+ * @dev: The vhost device
+ * @direction: The direction in which the state is to be transferred.
+ * For outgoing migrations, this is SAVE, and data is read
+ * from the back-end and stored by the front-end in the
+ * migration stream.
+ * For incoming migrations, this is LOAD, and data is read
+ * by the front-end from the migration stream and sent to
+ * the back-end to restore the saved state.
+ * @phase: Which migration phase we are in. Currently, there is only
+ * STOPPED (device and all vrings are stopped), in the future,
+ * more phases such as PRE_COPY or POST_COPY may be added.
+ * @fd: Back-end's end of the pipe through which to transfer state; note
+ * that ownership is transferred to the back-end, so this function
+ * closes @fd in the front-end.
+ * @reply_fd: If the back-end wishes to use a different pipe for state
+ * transfer, this will contain an FD for the front-end to
+ * use. Otherwise, -1 is stored here.
+ * @errp: Potential error description
+ *
+ * Returns 0 on success, and -errno on failure.
+ */
+int vhost_set_device_state_fd(struct vhost_dev *dev,
+ VhostDeviceStateDirection direction,
+ VhostDeviceStatePhase phase,
+ int fd,
+ int *reply_fd,
+ Error **errp);
+
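On the front-end side, the handoff described above boils down to: create a pipe, give one end to the back-end (which takes ownership), keep the other, and switch to *reply_fd if the back-end returns one. A hedged sketch for the SAVE direction; the VHOST_TRANSFER_STATE_DIRECTION_SAVE constant name is an assumption, while VHOST_TRANSFER_STATE_PHASE_STOPPED is the phase named later in this header:

/* Illustrative SAVE-direction FD handoff (needs <unistd.h>, <errno.h>). */
static int example_open_save_channel(struct vhost_dev *dev, int *read_fd,
                                     Error **errp)
{
    int fds[2];
    int reply_fd = -1;

    if (pipe(fds) < 0) {
        return -errno;
    }

    /* The back-end receives the write end and now owns it. */
    if (vhost_set_device_state_fd(dev, VHOST_TRANSFER_STATE_DIRECTION_SAVE,
                                  VHOST_TRANSFER_STATE_PHASE_STOPPED,
                                  fds[1], &reply_fd, errp) < 0) {
        close(fds[0]);
        return -1;
    }

    /* Prefer the back-end's replacement FD if one was returned. */
    if (reply_fd >= 0) {
        close(fds[0]);
        *read_fd = reply_fd;
    } else {
        *read_fd = fds[0];
    }
    return 0;
}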
+/**
+ * vhost_check_device_state(): After transferring state from/to the
+ * back-end via vhost_set_device_state_fd(), i.e. once the sending end
+ * has closed the pipe, inquire the back-end to report any potential
+ * errors that have occurred on its side. This allows detecting errors
+ * such as:
+ * - During outgoing migration, when the source side had already started
+ * to produce its state, something went wrong and it failed to finish
+ * - During incoming migration, when the received state is somehow
+ * invalid and cannot be processed by the back-end
+ *
+ * @dev: The vhost device
+ * @errp: Potential error description
+ *
+ * Returns 0 when the back-end reports successful state transfer and
+ * processing, and -errno when an error occurred somewhere.
+ */
+int vhost_check_device_state(struct vhost_dev *dev, Error **errp);
+
+/**
+ * vhost_save_backend_state(): High-level function to receive a vhost
+ * back-end's state, and save it in @f. Uses
+ * `vhost_set_device_state_fd()` to get the data from the back-end, and
+ * stores it in consecutive chunks that are each prefixed by their
+ * respective length (be32). The end is marked by a 0-length chunk.
+ *
+ * Must only be called while the device and all its vrings are stopped
+ * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`).
+ *
+ * @dev: The vhost device from which to save the state
+ * @f: Migration stream in which to save the state
+ * @errp: Potential error message
+ *
+ * Returns 0 on success, and -errno otherwise.
+ */
+int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
+
+/**
+ * vhost_load_backend_state(): High-level function to load a vhost
+ * back-end's state from @f, and send it over to the back-end. Reads
+ * the data from @f in the format used by `vhost_save_backend_state()`, and uses
+ * `vhost_set_device_state_fd()` to transfer it to the back-end.
+ *
+ * Must only be called while the device and all its vrings are stopped
+ * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`).
+ *
+ * @dev: The vhost device to which to send the state
+ * @f: Migration stream from which to load the state
+ * @errp: Potential error message
+ *
+ * Returns 0 on success, and -errno otherwise.
+ */
+int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
+
#endif
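The stream format described for vhost_save_backend_state()/vhost_load_backend_state() is: a big-endian 32-bit length, that many bytes of payload, repeated, with a zero length marking the end. A hedged reader sketch for that framing, assuming the usual QEMUFile accessors qemu_get_be32() and qemu_get_buffer():

/* Illustrative chunk-stream reader, not part of the patch. */
static int example_read_backend_state_chunks(QEMUFile *f)
{
    for (;;) {
        uint32_t len = qemu_get_be32(f);
        g_autofree uint8_t *chunk = NULL;

        if (len == 0) {
            return 0;                   /* end-of-stream marker */
        }

        chunk = g_malloc(len);
        if (qemu_get_buffer(f, chunk, len) != len) {
            return -EINVAL;             /* truncated stream */
        }

        /* ... forward the chunk to the back-end ... */
    }
}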
diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h
index bdf58f3119..07aae69042 100644
--- a/include/hw/virtio/virtio-access.h
+++ b/include/hw/virtio/virtio-access.h
@@ -16,6 +16,7 @@
#ifndef QEMU_VIRTIO_ACCESS_H
#define QEMU_VIRTIO_ACCESS_H
+#include "exec/hwaddr.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
@@ -27,7 +28,7 @@ static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
{
#if defined(LEGACY_VIRTIO_IS_BIENDIAN)
return virtio_is_big_endian(vdev);
-#elif defined(TARGET_WORDS_BIGENDIAN)
+#elif TARGET_BIG_ENDIAN
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return false;
@@ -148,7 +149,7 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr)
static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
{
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
return virtio_access_is_big_endian(vdev) ? s : bswap16(s);
#else
return virtio_access_is_big_endian(vdev) ? bswap16(s) : s;
@@ -214,7 +215,7 @@ static inline void virtio_tswap16s(VirtIODevice *vdev, uint16_t *s)
static inline uint32_t virtio_tswap32(VirtIODevice *vdev, uint32_t s)
{
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
return virtio_access_is_big_endian(vdev) ? s : bswap32(s);
#else
return virtio_access_is_big_endian(vdev) ? bswap32(s) : s;
@@ -228,7 +229,7 @@ static inline void virtio_tswap32s(VirtIODevice *vdev, uint32_t *s)
static inline uint64_t virtio_tswap64(VirtIODevice *vdev, uint64_t s)
{
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
return virtio_access_is_big_endian(vdev) ? s : bswap64(s);
#else
return virtio_access_is_big_endian(vdev) ? bswap64(s) : s;
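The pattern in these helpers is the same for every width: on a big-endian host the value is swapped unless the device is big-endian, and on a little-endian host it is swapped only when the device is big-endian; in other words, values are converted between host byte order and the device's byte order. A tiny usage sketch with a hypothetical config struct:

/* Illustrative only: example_cfg and its field are made up. */
struct example_cfg {
    uint16_t queue_size;        /* stored in the device's byte order */
};

static uint16_t example_read_queue_size(VirtIODevice *vdev,
                                        const struct example_cfg *cfg)
{
    /* Convert from device byte order to host byte order. */
    return virtio_tswap16(vdev, cfg->queue_size);
}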
diff --git a/include/hw/virtio/virtio-acpi.h b/include/hw/virtio/virtio-acpi.h
new file mode 100644
index 0000000000..cace2a315f
--- /dev/null
+++ b/include/hw/virtio/virtio-acpi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ACPI support for virtio
+ */
+
+#ifndef VIRTIO_ACPI_H
+#define VIRTIO_ACPI_H
+
+#include "exec/hwaddr.h"
+
+void virtio_acpi_dsdt_add(Aml *scope, const hwaddr virtio_mmio_base,
+ const hwaddr virtio_mmio_size, uint32_t mmio_irq,
+ long int start_index, int num);
+
+#endif
diff --git a/include/hw/virtio/virtio-balloon.h b/include/hw/virtio/virtio-balloon.h
index e0df3528c8..5139cf8ab6 100644
--- a/include/hw/virtio/virtio-balloon.h
+++ b/include/hw/virtio/virtio-balloon.h
@@ -17,10 +17,13 @@
#include "standard-headers/linux/virtio_balloon.h"
#include "hw/virtio/virtio.h"
+#include "sysemu/iothread.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_BALLOON "virtio-balloon-device"
-#define VIRTIO_BALLOON(obj) \
- OBJECT_CHECK(VirtIOBalloon, (obj), TYPE_VIRTIO_BALLOON)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBalloon, VIRTIO_BALLOON)
+
+#define VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN 0x80000000
typedef struct virtio_balloon_stat VirtIOBalloonStat;
@@ -30,18 +33,44 @@ typedef struct virtio_balloon_stat_modern {
uint64_t val;
} VirtIOBalloonStatModern;
-typedef struct VirtIOBalloon {
+enum virtio_balloon_free_page_hint_status {
+ FREE_PAGE_HINT_S_STOP = 0,
+ FREE_PAGE_HINT_S_REQUESTED = 1,
+ FREE_PAGE_HINT_S_START = 2,
+ FREE_PAGE_HINT_S_DONE = 3,
+};
+
+struct VirtIOBalloon {
VirtIODevice parent_obj;
- VirtQueue *ivq, *dvq, *svq;
+ VirtQueue *ivq, *dvq, *svq, *free_page_vq, *reporting_vq;
+ uint32_t free_page_hint_status;
uint32_t num_pages;
uint32_t actual;
+ uint32_t free_page_hint_cmd_id;
uint64_t stats[VIRTIO_BALLOON_S_NR];
VirtQueueElement *stats_vq_elem;
size_t stats_vq_offset;
QEMUTimer *stats_timer;
+ IOThread *iothread;
+ QEMUBH *free_page_bh;
+ /*
+ * Lock to synchronize threads accessing the free page reporting related
+ * fields (e.g. free_page_hint_status).
+ */
+ QemuMutex free_page_lock;
+ QemuCond free_page_cond;
+ /*
+ * Set to prevent the iothread from continuing to read free page hints
+ * while the VM is stopped.
+ */
+ bool block_iothread;
+ NotifierWithReturn free_page_hint_notify;
int64_t stats_last_update;
int64_t stats_poll_interval;
uint32_t host_features;
-} VirtIOBalloon;
+
+ bool qemu_4_0_config_size;
+ uint32_t poison_val;
+};
#endif
diff --git a/include/hw/virtio/virtio-blk-common.h b/include/hw/virtio/virtio-blk-common.h
new file mode 100644
index 0000000000..31daada3e3
--- /dev/null
+++ b/include/hw/virtio/virtio-blk-common.h
@@ -0,0 +1,20 @@
+/*
+ * Virtio Block Device common helpers
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef VIRTIO_BLK_COMMON_H
+#define VIRTIO_BLK_COMMON_H
+
+#include "hw/virtio/virtio.h"
+
+extern const VirtIOConfigSizeParams virtio_blk_cfg_size_params;
+
+#endif
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 5117431d96..5c14110c4b 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -19,10 +19,12 @@
#include "hw/block/block.h"
#include "sysemu/iothread.h"
#include "sysemu/block-backend.h"
+#include "sysemu/block-ram-registrar.h"
+#include "qom/object.h"
+#include "qapi/qapi-types-virtio.h"
#define TYPE_VIRTIO_BLK "virtio-blk-device"
-#define VIRTIO_BLK(obj) \
- OBJECT_CHECK(VirtIOBlock, (obj), TYPE_VIRTIO_BLK)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK)
/* This is the last element of the write scatter-gather list */
struct virtio_blk_inhdr
@@ -30,40 +32,57 @@ struct virtio_blk_inhdr
unsigned char status;
};
+#define VIRTIO_BLK_AUTO_NUM_QUEUES UINT16_MAX
+
struct VirtIOBlkConf
{
BlockConf conf;
IOThread *iothread;
+ IOThreadVirtQueueMappingList *iothread_vq_mapping_list;
char *serial;
- uint32_t scsi;
- uint32_t config_wce;
uint32_t request_merging;
uint16_t num_queues;
uint16_t queue_size;
+ bool seg_max_adjust;
+ bool report_discard_granularity;
+ uint32_t max_discard_sectors;
+ uint32_t max_write_zeroes_sectors;
+ bool x_enable_wce_if_config_wce;
};
-struct VirtIOBlockDataPlane;
-
struct VirtIOBlockReq;
-typedef struct VirtIOBlock {
+struct VirtIOBlock {
VirtIODevice parent_obj;
BlockBackend *blk;
- void *rq;
- QEMUBH *bh;
+ QemuMutex rq_lock;
+ struct VirtIOBlockReq *rq; /* protected by rq_lock */
VirtIOBlkConf conf;
unsigned short sector_mask;
bool original_wce;
VMChangeStateEntry *change;
- bool dataplane_disabled;
- bool dataplane_started;
- struct VirtIOBlockDataPlane *dataplane;
-} VirtIOBlock;
+ bool ioeventfd_disabled;
+ bool ioeventfd_started;
+ bool ioeventfd_starting;
+ bool ioeventfd_stopping;
+
+ /*
+ * The AioContext for each virtqueue. The BlockDriverState will use the
+ * first element as its AioContext.
+ */
+ AioContext **vq_aio_context;
+
+ uint64_t host_features;
+ size_t config_size;
+ BlockRAMRegistrar blk_ram_registrar;
+};
typedef struct VirtIOBlockReq {
VirtQueueElement elem;
int64_t sector_num;
VirtIOBlock *dev;
VirtQueue *vq;
+ IOVDiscardUndo inhdr_undo;
+ IOVDiscardUndo outhdr_undo;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr out;
QEMUIOVector qiov;
@@ -81,6 +100,6 @@ typedef struct MultiReqBuffer {
bool is_write;
} MultiReqBuffer;
-bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
#endif
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 7fec9dc929..7ab8c9dab0 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -25,20 +25,18 @@
#ifndef VIRTIO_BUS_H
#define VIRTIO_BUS_H
-#include "hw/qdev.h"
-#include "sysemu/sysemu.h"
+#include "hw/qdev-core.h"
#include "hw/virtio/virtio.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_BUS "virtio-bus"
-#define VIRTIO_BUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtioBusClass, obj, TYPE_VIRTIO_BUS)
-#define VIRTIO_BUS_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtioBusClass, klass, TYPE_VIRTIO_BUS)
-#define VIRTIO_BUS(obj) OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_BUS)
-
+typedef struct VirtioBusClass VirtioBusClass;
typedef struct VirtioBusState VirtioBusState;
+DECLARE_OBJ_CHECKERS(VirtioBusState, VirtioBusClass,
+ VIRTIO_BUS, TYPE_VIRTIO_BUS)
+
-typedef struct VirtioBusClass {
+struct VirtioBusClass {
/* This is what a VirtioBus must implement */
BusClass parent;
void (*notify)(DeviceState *d, uint16_t vector);
@@ -85,13 +83,18 @@ typedef struct VirtioBusClass {
int (*ioeventfd_assign)(DeviceState *d, EventNotifier *notifier,
int n, bool assign);
/*
+ * Whether queue number n is enabled.
+ */
+ bool (*queue_enabled)(DeviceState *d, int n);
+ /*
* Does the transport have variable vring alignment?
* (ie can it ever call virtio_queue_set_align()?)
* Note that changing this will break migration for this transport.
*/
bool has_variable_vring_alignment;
AddressSpace *(*get_dma_as)(DeviceState *d);
-} VirtioBusClass;
+ bool (*iommu_enabled)(DeviceState *d);
+};
struct VirtioBusState {
BusState parent_obj;
@@ -152,5 +155,6 @@ void virtio_bus_release_ioeventfd(VirtioBusState *bus);
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign);
/* Tell the bus that the ioeventfd handler is no longer required. */
void virtio_bus_cleanup_host_notifier(VirtioBusState *bus, int n);
-
+/* Whether the IOMMU is enabled for this device */
+bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev);
#endif /* VIRTIO_BUS_H */
diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
index ca3a04938e..348749f5d5 100644
--- a/include/hw/virtio/virtio-crypto.h
+++ b/include/hw/virtio/virtio-crypto.h
@@ -11,13 +11,14 @@
* top-level directory.
*/
-#ifndef _QEMU_VIRTIO_CRYPTO_H
-#define _QEMU_VIRTIO_CRYPTO_H
+#ifndef QEMU_VIRTIO_CRYPTO_H
+#define QEMU_VIRTIO_CRYPTO_H
#include "standard-headers/linux/virtio_crypto.h"
#include "hw/virtio/virtio.h"
#include "sysemu/iothread.h"
#include "sysemu/cryptodev.h"
+#include "qom/object.h"
#define DEBUG_VIRTIO_CRYPTO 0
@@ -31,8 +32,7 @@ do { \
#define TYPE_VIRTIO_CRYPTO "virtio-crypto-device"
-#define VIRTIO_CRYPTO(obj) \
- OBJECT_CHECK(VirtIOCrypto, (obj), TYPE_VIRTIO_CRYPTO)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOCrypto, VIRTIO_CRYPTO)
#define VIRTIO_CRYPTO_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_CRYPTO)
@@ -50,6 +50,7 @@ typedef struct VirtIOCryptoConf {
uint32_t mac_algo_l;
uint32_t mac_algo_h;
uint32_t aead_algo;
+ uint32_t akcipher_algo;
/* Maximum length of cipher key */
uint32_t max_cipher_key_len;
@@ -71,9 +72,7 @@ typedef struct VirtIOCryptoReq {
size_t in_len;
VirtQueue *vq;
struct VirtIOCrypto *vcrypto;
- union {
- CryptoDevBackendSymOpInfo *sym_op_info;
- } u;
+ CryptoDevBackendOpInfo op_info;
} VirtIOCryptoReq;
typedef struct VirtIOCryptoQueue {
@@ -82,7 +81,7 @@ typedef struct VirtIOCryptoQueue {
struct VirtIOCrypto *vcrypto;
} VirtIOCryptoQueue;
-typedef struct VirtIOCrypto {
+struct VirtIOCrypto {
VirtIODevice parent_obj;
VirtQueue *ctrl_vq;
@@ -97,6 +96,6 @@ typedef struct VirtIOCrypto {
uint32_t curr_queues;
size_t config_size;
uint8_t vhost_started;
-} VirtIOCrypto;
+};
-#endif /* _QEMU_VIRTIO_CRYPTO_H */
+#endif /* QEMU_VIRTIO_CRYPTO_H */
diff --git a/include/hw/virtio/virtio-dmabuf.h b/include/hw/virtio/virtio-dmabuf.h
new file mode 100644
index 0000000000..627c3b6db7
--- /dev/null
+++ b/include/hw/virtio/virtio-dmabuf.h
@@ -0,0 +1,100 @@
+/*
+ * Virtio Shared dma-buf
+ *
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors:
+ * Albert Esteve <aesteve@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef VIRTIO_DMABUF_H
+#define VIRTIO_DMABUF_H
+
+#include "qemu/uuid.h"
+#include "vhost.h"
+
+typedef enum SharedObjectType {
+ TYPE_INVALID = 0,
+ TYPE_DMABUF,
+ TYPE_VHOST_DEV,
+} SharedObjectType;
+
+typedef struct VirtioSharedObject {
+ SharedObjectType type;
+ gpointer value;
+} VirtioSharedObject;
+
+/**
+ * virtio_add_dmabuf() - Add a new dma-buf resource to the lookup table
+ * @uuid: new resource's UUID
+ * @dmabuf_fd: the dma-buf descriptor that will be stored and shared with
+ * other virtio devices. The caller retains ownership over the
+ * descriptor and its lifecycle.
+ *
+ * Note: @dmabuf_fd must be a valid (non-negative) file descriptor.
+ *
+ * Return: true if the UUID did not exist and the resource has been added,
+ * false if another resource with the same UUID already existed.
+ * Note that if it finds a repeated UUID, the resource is not inserted in
+ * the lookup table.
+ */
+bool virtio_add_dmabuf(QemuUUID *uuid, int dmabuf_fd);
+
+/**
+ * virtio_add_vhost_device() - Add a new exporter vhost device that holds the
+ * resource with the associated UUID
+ * @uuid: new resource's UUID
+ * @dev: the pointer to the vhost device that holds the resource. The caller
+ * retains ownership over the device struct and its lifecycle.
+ *
+ * Return: true if the UUID did not exist and the device has been tracked,
+ * false if another resource with the same UUID already existed.
+ * Note that if a duplicate UUID is found, the resource is not inserted in
+ * the lookup table.
+ */
+bool virtio_add_vhost_device(QemuUUID *uuid, struct vhost_dev *dev);
+
+/**
+ * virtio_remove_resource() - Removes a resource from the lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: true if the UUID has been found and removed from the lookup table.
+ */
+bool virtio_remove_resource(const QemuUUID *uuid);
+
+/**
+ * virtio_lookup_dmabuf() - Looks for a dma-buf resource in the lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: the dma-buf file descriptor integer, or -1 if the key is not found.
+ */
+int virtio_lookup_dmabuf(const QemuUUID *uuid);
+
+/**
+ * virtio_lookup_vhost_device() - Looks for an exporter vhost device in the
+ * lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: pointer to the vhost_dev struct, or NULL if the key is not found.
+ */
+struct vhost_dev *virtio_lookup_vhost_device(const QemuUUID *uuid);
+
+/**
+ * virtio_object_type() - Looks for the type of resource in the lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: the type of resource associated with the UUID, or TYPE_INVALID if
+ * the key is not found.
+ */
+SharedObjectType virtio_object_type(const QemuUUID *uuid);
+
+/**
+ * virtio_free_resources() - Destroys all keys and values of the shared
+ * resources lookup table, and frees them
+ */
+void virtio_free_resources(void);
+
+#endif /* VIRTIO_DMABUF_H */
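A typical round trip through this lookup table: register a descriptor under a fresh UUID, let an importer resolve it, then drop the entry. A hedged sketch; qemu_uuid_generate() from qemu/uuid.h is assumed for UUID creation:

/* Illustrative sharing flow; the caller keeps ownership of dmabuf_fd. */
static int example_share_dmabuf(int dmabuf_fd, QemuUUID *uuid_out)
{
    qemu_uuid_generate(uuid_out);

    if (!virtio_add_dmabuf(uuid_out, dmabuf_fd)) {
        return -EEXIST;                 /* UUID already present */
    }

    /* Elsewhere, an importer resolves the UUID back to the descriptor. */
    if (virtio_object_type(uuid_out) == TYPE_DMABUF) {
        int fd = virtio_lookup_dmabuf(uuid_out);
        (void)fd;                       /* use the descriptor */
    }

    /* Remove the entry once it should no longer be shared. */
    virtio_remove_resource(uuid_out);
    return 0;
}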
diff --git a/include/hw/virtio/virtio-gpu-bswap.h b/include/hw/virtio/virtio-gpu-bswap.h
new file mode 100644
index 0000000000..dd1975e2d4
--- /dev/null
+++ b/include/hw/virtio/virtio-gpu-bswap.h
@@ -0,0 +1,95 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_GPU_BSWAP_H
+#define HW_VIRTIO_GPU_BSWAP_H
+
+#include "qemu/bswap.h"
+#include "standard-headers/linux/virtio_gpu.h"
+
+static inline void
+virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
+{
+ le32_to_cpus(&hdr->type);
+ le32_to_cpus(&hdr->flags);
+ le64_to_cpus(&hdr->fence_id);
+ le32_to_cpus(&hdr->ctx_id);
+}
+
+static inline void
+virtio_gpu_bswap_32(void *ptr, size_t size)
+{
+#if HOST_BIG_ENDIAN
+
+ size_t i;
+ struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;
+
+ virtio_gpu_ctrl_hdr_bswap(hdr);
+
+ i = sizeof(struct virtio_gpu_ctrl_hdr);
+ while (i < size) {
+ le32_to_cpus((uint32_t *)(ptr + i));
+ i = i + sizeof(uint32_t);
+ }
+
+#endif
+}
+
+static inline void
+virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
+{
+ virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
+ le32_to_cpus(&t2d->r.x);
+ le32_to_cpus(&t2d->r.y);
+ le32_to_cpus(&t2d->r.width);
+ le32_to_cpus(&t2d->r.height);
+ le64_to_cpus(&t2d->offset);
+ le32_to_cpus(&t2d->resource_id);
+ le32_to_cpus(&t2d->padding);
+}
+
+static inline void
+virtio_gpu_create_blob_bswap(struct virtio_gpu_resource_create_blob *cblob)
+{
+ virtio_gpu_ctrl_hdr_bswap(&cblob->hdr);
+ le32_to_cpus(&cblob->resource_id);
+ le32_to_cpus(&cblob->blob_mem);
+ le32_to_cpus(&cblob->blob_flags);
+ le32_to_cpus(&cblob->nr_entries);
+ le64_to_cpus(&cblob->blob_id);
+ le64_to_cpus(&cblob->size);
+}
+
+static inline void
+virtio_gpu_map_blob_bswap(struct virtio_gpu_resource_map_blob *mblob)
+{
+ virtio_gpu_ctrl_hdr_bswap(&mblob->hdr);
+ le32_to_cpus(&mblob->resource_id);
+ le64_to_cpus(&mblob->offset);
+}
+
+static inline void
+virtio_gpu_unmap_blob_bswap(struct virtio_gpu_resource_unmap_blob *ublob)
+{
+ virtio_gpu_ctrl_hdr_bswap(&ublob->hdr);
+ le32_to_cpus(&ublob->resource_id);
+}
+
+static inline void
+virtio_gpu_scanout_blob_bswap(struct virtio_gpu_set_scanout_blob *ssb)
+{
+ virtio_gpu_bswap_32(ssb, sizeof(*ssb) - sizeof(ssb->offsets[3]));
+ le32_to_cpus(&ssb->offsets[3]);
+}
+
+#endif
diff --git a/include/hw/virtio/virtio-gpu-pci.h b/include/hw/virtio/virtio-gpu-pci.h
new file mode 100644
index 0000000000..225cbbc2e4
--- /dev/null
+++ b/include/hw/virtio/virtio-gpu-pci.h
@@ -0,0 +1,39 @@
+/*
+ * Virtio GPU PCI Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_GPU_PCI_H
+#define HW_VIRTIO_GPU_PCI_H
+
+#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-gpu.h"
+#include "qom/object.h"
+
+
+/*
+ * virtio-gpu-pci-base: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_GPU_PCI_BASE "virtio-gpu-pci-base"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUPCIBase, VIRTIO_GPU_PCI_BASE)
+
+struct VirtIOGPUPCIBase {
+ VirtIOPCIProxy parent_obj;
+ VirtIOGPUBase *vgpu;
+};
+
+/* to share between PCI and VGA */
+#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state) \
+ DEFINE_PROP_BIT("ioeventfd", _state, flags, \
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
+ DEFINE_PROP_UINT32("vectors", _state, nvectors, 3)
+
+#endif /* HW_VIRTIO_GPU_PCI_H */
diff --git a/include/hw/virtio/virtio-gpu-pixman.h b/include/hw/virtio/virtio-gpu-pixman.h
new file mode 100644
index 0000000000..4dba782758
--- /dev/null
+++ b/include/hw/virtio/virtio-gpu-pixman.h
@@ -0,0 +1,45 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_GPU_PIXMAN_H
+#define HW_VIRTIO_GPU_PIXMAN_H
+
+#include "ui/qemu-pixman.h"
+#include "standard-headers/linux/virtio_gpu.h"
+
+static inline pixman_format_code_t
+virtio_gpu_get_pixman_format(uint32_t virtio_gpu_format)
+{
+ switch (virtio_gpu_format) {
+ case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
+ return PIXMAN_BE_b8g8r8x8;
+ case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
+ return PIXMAN_BE_b8g8r8a8;
+ case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
+ return PIXMAN_BE_x8r8g8b8;
+ case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
+ return PIXMAN_BE_a8r8g8b8;
+ case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
+ return PIXMAN_BE_r8g8b8x8;
+ case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
+ return PIXMAN_BE_r8g8b8a8;
+ case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
+ return PIXMAN_BE_x8b8g8r8;
+ case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
+ return PIXMAN_BE_a8b8g8r8;
+ default:
+ return 0;
+ }
+}
+
+#endif
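virtio_gpu_get_pixman_format() returns 0 for format codes it does not recognise, so callers should check the result before building an image. A hedged sketch using the standard pixman constructor pixman_image_create_bits(), letting pixman allocate the backing storage:

/* Illustrative helper, not part of the patch. */
static pixman_image_t *example_create_image(uint32_t virtio_format,
                                            int width, int height)
{
    pixman_format_code_t pformat;

    pformat = virtio_gpu_get_pixman_format(virtio_format);
    if (!pformat) {
        return NULL;                    /* unknown/unsupported format */
    }

    /* NULL bits and zero stride ask pixman to allocate the storage. */
    return pixman_image_create_bits(pformat, width, height, NULL, 0);
}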
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index c8c599f1b9..ed44cdad6b 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -19,14 +19,27 @@
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "qemu/log.h"
+#include "sysemu/vhost-user-backend.h"
#include "standard-headers/linux/virtio_gpu.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "qom/object.h"
+
+#define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base"
+OBJECT_DECLARE_TYPE(VirtIOGPUBase, VirtIOGPUBaseClass,
+ VIRTIO_GPU_BASE)
#define TYPE_VIRTIO_GPU "virtio-gpu-device"
-#define VIRTIO_GPU(obj) \
- OBJECT_CHECK(VirtIOGPU, (obj), TYPE_VIRTIO_GPU)
+OBJECT_DECLARE_TYPE(VirtIOGPU, VirtIOGPUClass, VIRTIO_GPU)
+
+#define TYPE_VIRTIO_GPU_GL "virtio-gpu-gl-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUGL, VIRTIO_GPU_GL)
+
+#define TYPE_VHOST_USER_GPU "vhost-user-gpu"
+OBJECT_DECLARE_SIMPLE_TYPE(VhostUserGPU, VHOST_USER_GPU)
-#define VIRTIO_ID_GPU 16
+#define TYPE_VIRTIO_GPU_RUTABAGA "virtio-gpu-rutabaga-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPURutabaga, VIRTIO_GPU_RUTABAGA)
struct virtio_gpu_simple_resource {
uint32_t resource_id;
@@ -38,10 +51,27 @@ struct virtio_gpu_simple_resource {
unsigned int iov_cnt;
uint32_t scanout_bitmask;
pixman_image_t *image;
+#ifdef WIN32
+ HANDLE handle;
+#endif
uint64_t hostmem;
+
+ uint64_t blob_size;
+ void *blob;
+ int dmabuf_fd;
+ uint8_t *remapped;
+
QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};
+struct virtio_gpu_framebuffer {
+ pixman_format_code_t format;
+ uint32_t bytes_pp;
+ uint32_t width, height;
+ uint32_t stride;
+ uint32_t offset;
+};
+
struct virtio_gpu_scanout {
QemuConsole *con;
DisplaySurface *ds;
@@ -51,29 +81,49 @@ struct virtio_gpu_scanout {
uint32_t resource_id;
struct virtio_gpu_update_cursor cursor;
QEMUCursor *current_cursor;
+ struct virtio_gpu_framebuffer fb;
};
struct virtio_gpu_requested_state {
+ uint16_t width_mm, height_mm;
uint32_t width, height;
+ uint32_t refresh_rate;
int x, y;
};
-enum virtio_gpu_conf_flags {
+enum virtio_gpu_base_conf_flags {
VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
VIRTIO_GPU_FLAG_STATS_ENABLED,
+ VIRTIO_GPU_FLAG_EDID_ENABLED,
+ VIRTIO_GPU_FLAG_DMABUF_ENABLED,
+ VIRTIO_GPU_FLAG_BLOB_ENABLED,
+ VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED,
+ VIRTIO_GPU_FLAG_RUTABAGA_ENABLED,
};
#define virtio_gpu_virgl_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED))
#define virtio_gpu_stats_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))
+#define virtio_gpu_edid_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_EDID_ENABLED))
+#define virtio_gpu_dmabuf_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED))
+#define virtio_gpu_blob_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED))
+#define virtio_gpu_context_init_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED))
+#define virtio_gpu_rutabaga_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED))
+#define virtio_gpu_hostmem_enabled(_cfg) \
+ (_cfg.hostmem > 0)
-struct virtio_gpu_conf {
- uint64_t max_hostmem;
+struct virtio_gpu_base_conf {
uint32_t max_outputs;
uint32_t flags;
uint32_t xres;
uint32_t yres;
+ uint64_t hostmem;
};
struct virtio_gpu_ctrl_command {
@@ -81,39 +131,70 @@ struct virtio_gpu_ctrl_command {
VirtQueue *vq;
struct virtio_gpu_ctrl_hdr cmd_hdr;
uint32_t error;
- bool waiting;
bool finished;
QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};
-typedef struct VirtIOGPU {
+struct VirtIOGPUBase {
VirtIODevice parent_obj;
- QEMUBH *ctrl_bh;
- QEMUBH *cursor_bh;
- VirtQueue *ctrl_vq;
- VirtQueue *cursor_vq;
+ Error *migration_blocker;
+
+ struct virtio_gpu_base_conf conf;
+ struct virtio_gpu_config virtio_config;
+ const GraphicHwOps *hw_ops;
+ int renderer_blocked;
int enable;
- int config_size;
- DeviceState *qdev;
+ MemoryRegion hostmem;
+
+ struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
+
+ int enabled_output_bitmask;
+ struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
+};
+
+struct VirtIOGPUBaseClass {
+ VirtioDeviceClass parent;
+
+ void (*gl_flushed)(VirtIOGPUBase *g);
+};
+
+#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf) \
+ DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \
+ DEFINE_PROP_BIT("edid", _state, _conf.flags, \
+ VIRTIO_GPU_FLAG_EDID_ENABLED, true), \
+ DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1280), \
+ DEFINE_PROP_UINT32("yres", _state, _conf.yres, 800)
+
+typedef struct VGPUDMABuf {
+ QemuDmaBuf buf;
+ uint32_t scanout_id;
+ QTAILQ_ENTRY(VGPUDMABuf) next;
+} VGPUDMABuf;
+
+struct VirtIOGPU {
+ VirtIOGPUBase parent_obj;
+
+ uint64_t conf_max_hostmem;
+
+ VirtQueue *ctrl_vq;
+ VirtQueue *cursor_vq;
+
+ QEMUBH *ctrl_bh;
+ QEMUBH *cursor_bh;
+ QEMUBH *reset_bh;
+ QemuCond reset_cond;
+ bool reset_finished;
QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;
- struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
- struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
-
- struct virtio_gpu_conf conf;
uint64_t hostmem;
- int enabled_output_bitmask;
- struct virtio_gpu_config virtio_config;
- bool use_virgl_renderer;
- bool renderer_inited;
- int renderer_blocked;
+ bool processing_cmdq;
QEMUTimer *fence_poll;
QEMUTimer *print_stats;
@@ -125,31 +206,91 @@ typedef struct VirtIOGPU {
uint32_t bytes_3d;
} stats;
- Error *migration_blocker;
-} VirtIOGPU;
+ struct {
+ QTAILQ_HEAD(, VGPUDMABuf) bufs;
+ VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS];
+ } dmabuf;
+};
+
+struct VirtIOGPUClass {
+ VirtIOGPUBaseClass parent;
+
+ void (*handle_ctrl)(VirtIODevice *vdev, VirtQueue *vq);
+ void (*process_cmd)(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
+ void (*update_cursor_data)(VirtIOGPU *g,
+ struct virtio_gpu_scanout *s,
+ uint32_t resource_id);
+ void (*resource_destroy)(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res,
+ Error **errp);
+};
+
+struct VirtIOGPUGL {
+ struct VirtIOGPU parent_obj;
-extern const GraphicHwOps virtio_gpu_ops;
+ bool renderer_inited;
+ bool renderer_reset;
+};
+
+struct VhostUserGPU {
+ VirtIOGPUBase parent_obj;
+
+ VhostUserBackend *vhost;
+ int vhost_gpu_fd; /* closed by the chardev */
+ CharBackend vhost_chr;
+ QemuDmaBuf dmabuf[VIRTIO_GPU_MAX_SCANOUTS];
+ bool backend_blocked;
+};
+
+#define MAX_SLOTS 4096
+
+struct MemoryRegionInfo {
+ int used;
+ MemoryRegion mr;
+ uint32_t resource_id;
+};
-/* to share between PCI and VGA */
-#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state) \
- DEFINE_PROP_BIT("ioeventfd", _state, flags, \
- VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
- DEFINE_PROP_UINT32("vectors", _state, nvectors, 3)
+struct rutabaga;
+
+struct VirtIOGPURutabaga {
+ VirtIOGPU parent_obj;
+ struct MemoryRegionInfo memory_regions[MAX_SLOTS];
+ uint64_t capset_mask;
+ char *wayland_socket_path;
+ char *wsi;
+ bool headless;
+ uint32_t num_capsets;
+ struct rutabaga *rutabaga;
+};
#define VIRTIO_GPU_FILL_CMD(out) do { \
- size_t s; \
- s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0, \
+ size_t virtiogpufillcmd_s_ = \
+ iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0, \
&out, sizeof(out)); \
- if (s != sizeof(out)) { \
+ if (virtiogpufillcmd_s_ != sizeof(out)) { \
qemu_log_mask(LOG_GUEST_ERROR, \
"%s: command size incorrect %zu vs %zu\n", \
- __func__, s, sizeof(out)); \
+ __func__, virtiogpufillcmd_s_, sizeof(out)); \
return; \
} \
} while (0)
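VIRTIO_GPU_FILL_CMD() copies the request out of the command's out scatter-gather list into a local struct and returns from the calling function if the guest supplied too few bytes; the usual next step is the matching byte-swap helper from virtio-gpu-bswap.h. A hedged sketch of that pattern (the handler name is illustrative):

/* Illustrative control-command handler shape; 'cmd' must be in scope
 * because the macro dereferences cmd->elem. */
static void example_handle_transfer_to_host_2d(VirtIOGPU *g,
                                               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);           /* returns early on short command */
    virtio_gpu_t2d_bswap(&t2d);         /* from virtio-gpu-bswap.h */

    /* ... look up t2d.resource_id and copy the 2D region ... */
}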
+/* virtio-gpu-base.c */
+bool virtio_gpu_base_device_realize(DeviceState *qdev,
+ VirtIOHandleOutput ctrl_cb,
+ VirtIOHandleOutput cursor_cb,
+ Error **errp);
+void virtio_gpu_base_device_unrealize(DeviceState *qdev);
+void virtio_gpu_base_reset(VirtIOGPUBase *g);
+void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
+ struct virtio_gpu_resp_display_info *dpy_info);
+
+void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
+ struct virtio_gpu_resp_edid *edid);
/* virtio-gpu.c */
-void virtio_gpu_reset(VirtIODevice *vdev);
+struct virtio_gpu_simple_resource *
+virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
+
void virtio_gpu_ctrl_response(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
struct virtio_gpu_ctrl_hdr *resp,
@@ -159,20 +300,42 @@ void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
enum virtio_gpu_ctrl_type type);
void virtio_gpu_get_display_info(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
+void virtio_gpu_get_edid(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd);
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
- struct virtio_gpu_resource_attach_backing *ab,
+ uint32_t nr_entries, uint32_t offset,
struct virtio_gpu_ctrl_command *cmd,
- uint64_t **addr, struct iovec **iov);
+ uint64_t **addr, struct iovec **iov,
+ uint32_t *niov);
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
struct iovec *iov, uint32_t count);
+void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res);
void virtio_gpu_process_cmdq(VirtIOGPU *g);
+void virtio_gpu_device_realize(DeviceState *qdev, Error **errp);
+void virtio_gpu_reset(VirtIODevice *vdev);
+void virtio_gpu_simple_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
+void virtio_gpu_update_cursor_data(VirtIOGPU *g,
+ struct virtio_gpu_scanout *s,
+ uint32_t resource_id);
+
+/* virtio-gpu-udmabuf.c */
+bool virtio_gpu_have_udmabuf(void);
+void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res);
+void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res);
+int virtio_gpu_update_dmabuf(VirtIOGPU *g,
+ uint32_t scanout_id,
+ struct virtio_gpu_simple_resource *res,
+ struct virtio_gpu_framebuffer *fb,
+ struct virtio_gpu_rect *r);
/* virtio-gpu-3d.c */
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
+void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
-void virtio_gpu_gl_block(void *opaque, bool block);
int virtio_gpu_virgl_init(VirtIOGPU *g);
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g);
+
#endif
diff --git a/include/hw/virtio/virtio-input.h b/include/hw/virtio/virtio-input.h
index 054c38836f..e69c0aeca3 100644
--- a/include/hw/virtio/virtio-input.h
+++ b/include/hw/virtio/virtio-input.h
@@ -1,13 +1,17 @@
#ifndef QEMU_VIRTIO_INPUT_H
#define QEMU_VIRTIO_INPUT_H
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
#include "ui/input.h"
+#include "sysemu/vhost-user-backend.h"
/* ----------------------------------------------------------------- */
/* virtio input protocol */
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_input.h"
+#include "qom/object.h"
typedef struct virtio_input_absinfo virtio_input_absinfo;
typedef struct virtio_input_config virtio_input_config;
@@ -17,36 +21,32 @@ typedef struct virtio_input_event virtio_input_event;
/* qemu internals */
#define TYPE_VIRTIO_INPUT "virtio-input-device"
-#define VIRTIO_INPUT(obj) \
- OBJECT_CHECK(VirtIOInput, (obj), TYPE_VIRTIO_INPUT)
+OBJECT_DECLARE_TYPE(VirtIOInput, VirtIOInputClass,
+ VIRTIO_INPUT)
#define VIRTIO_INPUT_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT)
-#define VIRTIO_INPUT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOInputClass, obj, TYPE_VIRTIO_INPUT)
-#define VIRTIO_INPUT_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtIOInputClass, klass, TYPE_VIRTIO_INPUT)
-
-#define TYPE_VIRTIO_INPUT_HID "virtio-input-hid-device"
-#define TYPE_VIRTIO_KEYBOARD "virtio-keyboard-device"
-#define TYPE_VIRTIO_MOUSE "virtio-mouse-device"
-#define TYPE_VIRTIO_TABLET "virtio-tablet-device"
-
-#define VIRTIO_INPUT_HID(obj) \
- OBJECT_CHECK(VirtIOInputHID, (obj), TYPE_VIRTIO_INPUT_HID)
+
+#define TYPE_VIRTIO_INPUT_HID "virtio-input-hid-device"
+#define TYPE_VIRTIO_KEYBOARD "virtio-keyboard-device"
+#define TYPE_VIRTIO_MOUSE "virtio-mouse-device"
+#define TYPE_VIRTIO_TABLET "virtio-tablet-device"
+#define TYPE_VIRTIO_MULTITOUCH "virtio-multitouch-device"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHID, VIRTIO_INPUT_HID)
#define VIRTIO_INPUT_HID_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT_HID)
#define TYPE_VIRTIO_INPUT_HOST "virtio-input-host-device"
-#define VIRTIO_INPUT_HOST(obj) \
- OBJECT_CHECK(VirtIOInputHost, (obj), TYPE_VIRTIO_INPUT_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHost, VIRTIO_INPUT_HOST)
#define VIRTIO_INPUT_HOST_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT_HOST)
-typedef struct VirtIOInput VirtIOInput;
-typedef struct VirtIOInputClass VirtIOInputClass;
+#define TYPE_VHOST_USER_INPUT "vhost-user-input"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserInput, VHOST_USER_INPUT)
+#define VHOST_USER_INPUT_GET_PARENT_CLASS(obj) \
+ OBJECT_GET_PARENT_CLASS(obj, TYPE_VHOST_USER_INPUT)
+
typedef struct VirtIOInputConfig VirtIOInputConfig;
-typedef struct VirtIOInputHID VirtIOInputHID;
-typedef struct VirtIOInputHost VirtIOInputHost;
struct VirtIOInputConfig {
virtio_input_config config;
@@ -86,7 +86,7 @@ struct VirtIOInputHID {
VirtIOInput parent_obj;
char *display;
uint32_t head;
- QemuInputHandler *handler;
+ const QemuInputHandler *handler;
QemuInputHandlerState *hs;
int ledstate;
bool wheel_axis;
@@ -98,6 +98,10 @@ struct VirtIOInputHost {
int fd;
};
+struct VHostUserInput {
+ VHostUserBase parent_obj;
+};
+
void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event);
void virtio_input_init_config(VirtIOInput *vinput,
virtio_input_config *config);
diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
new file mode 100644
index 0000000000..83a52cc446
--- /dev/null
+++ b/include/hw/virtio/virtio-iommu.h
@@ -0,0 +1,74 @@
+/*
+ * virtio-iommu device
+ *
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QEMU_VIRTIO_IOMMU_H
+#define QEMU_VIRTIO_IOMMU_H
+
+#include "standard-headers/linux/virtio_iommu.h"
+#include "hw/virtio/virtio.h"
+#include "hw/pci/pci.h"
+#include "qom/object.h"
+#include "qapi/qapi-types-virtio.h"
+
+#define TYPE_VIRTIO_IOMMU "virtio-iommu-device"
+#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOIOMMU, VIRTIO_IOMMU)
+
+#define TYPE_VIRTIO_IOMMU_MEMORY_REGION "virtio-iommu-memory-region"
+
+typedef struct IOMMUDevice {
+ void *viommu;
+ PCIBus *bus;
+ int devfn;
+ IOMMUMemoryRegion iommu_mr;
+ AddressSpace as;
+ MemoryRegion root; /* The root container of the device */
+ MemoryRegion bypass_mr; /* The alias of shared memory MR */
+ GList *resv_regions;
+ GList *host_resv_ranges;
+ bool probe_done;
+} IOMMUDevice;
+
+typedef struct IOMMUPciBus {
+ PCIBus *bus;
+ IOMMUDevice *pbdev[]; /* Parent array is sparse, so dynamically alloc */
+} IOMMUPciBus;
+
+struct VirtIOIOMMU {
+ VirtIODevice parent_obj;
+ VirtQueue *req_vq;
+ VirtQueue *event_vq;
+ struct virtio_iommu_config config;
+ uint64_t features;
+ GHashTable *as_by_busptr;
+ IOMMUPciBus *iommu_pcibus_by_bus_num[PCI_BUS_MAX];
+ PCIBus *primary_bus;
+ ReservedRegion *prop_resv_regions;
+ uint32_t nr_prop_resv_regions;
+ GTree *domains;
+ QemuRecMutex mutex;
+ GTree *endpoints;
+ bool boot_bypass;
+ Notifier machine_done;
+ bool granule_frozen;
+ GranuleMode granule_mode;
+ uint8_t aw_bits;
+};
+
+#endif
diff --git a/include/hw/virtio/virtio-md-pci.h b/include/hw/virtio/virtio-md-pci.h
new file mode 100644
index 0000000000..5912e16674
--- /dev/null
+++ b/include/hw/virtio/virtio-md-pci.h
@@ -0,0 +1,44 @@
+/*
+ * Abstract virtio based memory device
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_MD_PCI_H
+#define HW_VIRTIO_MD_PCI_H
+
+#include "hw/virtio/virtio-pci.h"
+#include "qom/object.h"
+
+/*
+ * virtio-md-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_MD_PCI "virtio-md-pci"
+
+OBJECT_DECLARE_TYPE(VirtIOMDPCI, VirtIOMDPCIClass, VIRTIO_MD_PCI)
+
+struct VirtIOMDPCIClass {
+ /* private */
+ VirtioPCIClass parent;
+
+ /* public */
+ void (*unplug_request_check)(VirtIOMDPCI *vmd, Error **errp);
+};
+
+struct VirtIOMDPCI {
+ VirtIOPCIProxy parent_obj;
+};
+
+void virtio_md_pci_pre_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+void virtio_md_pci_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+void virtio_md_pci_unplug_request(VirtIOMDPCI *vmd, MachineState *ms,
+ Error **errp);
+void virtio_md_pci_unplug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+
+#endif
diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h
new file mode 100644
index 0000000000..5f5b02b8f9
--- /dev/null
+++ b/include/hw/virtio/virtio-mem.h
@@ -0,0 +1,134 @@
+/*
+ * Virtio MEM device
+ *
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_MEM_H
+#define HW_VIRTIO_MEM_H
+
+#include "standard-headers/linux/virtio_mem.h"
+#include "hw/virtio/virtio.h"
+#include "qapi/qapi-types-misc.h"
+#include "sysemu/hostmem.h"
+#include "qom/object.h"
+
+#define TYPE_VIRTIO_MEM "virtio-mem"
+
+OBJECT_DECLARE_TYPE(VirtIOMEM, VirtIOMEMClass,
+ VIRTIO_MEM)
+
+#define VIRTIO_MEM_MEMDEV_PROP "memdev"
+#define VIRTIO_MEM_NODE_PROP "node"
+#define VIRTIO_MEM_SIZE_PROP "size"
+#define VIRTIO_MEM_REQUESTED_SIZE_PROP "requested-size"
+#define VIRTIO_MEM_BLOCK_SIZE_PROP "block-size"
+#define VIRTIO_MEM_ADDR_PROP "memaddr"
+#define VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP "unplugged-inaccessible"
+#define VIRTIO_MEM_EARLY_MIGRATION_PROP "x-early-migration"
+#define VIRTIO_MEM_PREALLOC_PROP "prealloc"
+#define VIRTIO_MEM_DYNAMIC_MEMSLOTS_PROP "dynamic-memslots"
+
+struct VirtIOMEM {
+ VirtIODevice parent_obj;
+
+ /* guest -> host request queue */
+ VirtQueue *vq;
+
+ /* bitmap used to track unplugged memory */
+ int32_t bitmap_size;
+ unsigned long *bitmap;
+
+ /*
+ * With "dynamic-memslots=on": Device memory region in which we dynamically
+ * map the memslots.
+ */
+ MemoryRegion *mr;
+
+ /*
+ * With "dynamic-memslots=on": The individual memslots (aliases into the
+ * memory backend).
+ */
+ MemoryRegion *memslots;
+
+ /* With "dynamic-memslots=on": The total number of memslots. */
+ uint16_t nb_memslots;
+
+ /*
+ * With "dynamic-memslots=on": Size of one memslot (the size of the
+ * last one can differ).
+ */
+ uint64_t memslot_size;
+
+ /* Assigned memory backend with the RAM memory region. */
+ HostMemoryBackend *memdev;
+
+ /* NUMA node */
+ uint32_t node;
+
+ /* assigned address of the region in guest physical memory */
+ uint64_t addr;
+
+ /* usable region size (<= region_size) */
+ uint64_t usable_region_size;
+
+ /* actual size (how much the guest plugged) */
+ uint64_t size;
+
+ /* requested size */
+ uint64_t requested_size;
+
+ /* block size and alignment */
+ uint64_t block_size;
+
+ /*
+ * Whether we indicate VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE to the guest.
+ * For !x86 targets this will always be "on" and consequently indicate
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE.
+ */
+ OnOffAuto unplugged_inaccessible;
+
+ /* whether to prealloc memory when plugging new blocks */
+ bool prealloc;
+
+ /*
+ * Whether we migrate properties that are immutable while migration is
+ * active early, before the state of other devices and, especially, before
+ * migrating any RAM content.
+ */
+ bool early_migration;
+
+ /*
+ * Whether we dynamically map (multiple, if possible) memslots instead of
+ * statically mapping the whole RAM memory region.
+ */
+ bool dynamic_memslots;
+
+ /* notifiers to notify when "size" changes */
+ NotifierList size_change_notifiers;
+
+ /* listeners to notify on plug/unplug activity. */
+ QLIST_HEAD(, RamDiscardListener) rdl_list;
+};
+
+struct VirtIOMEMClass {
+ /* private */
+ VirtIODevice parent;
+
+ /* public */
+ void (*fill_device_info)(const VirtIOMEM *vmem, VirtioMEMDeviceInfo *vi);
+ MemoryRegion *(*get_memory_region)(VirtIOMEM *vmem, Error **errp);
+ void (*decide_memslots)(VirtIOMEM *vmem, unsigned int limit);
+ unsigned int (*get_memslots)(VirtIOMEM *vmem);
+ void (*add_size_change_notifier)(VirtIOMEM *vmem, Notifier *notifier);
+ void (*remove_size_change_notifier)(VirtIOMEM *vmem, Notifier *notifier);
+ void (*unplug_request_check)(VirtIOMEM *vmem, Error **errp);
+};
+
+#endif
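The VirtIOMEMClass hooks above are how the transport proxies and machine hotplug code interact with the device instead of poking its fields directly. A minimal sketch of watching for guest-driven size changes through those hooks; the callback and variable names are illustrative, and the assumption that the notifier payload points at the new size is flagged in the comment:

/* Illustration only: observe "size" changes via the class callback. */
static uint64_t observed_size;

static void my_vmem_size_changed(Notifier *notifier, void *data)
{
    /* Assumption: the notifier payload points at the new size. */
    observed_size = *(const uint64_t *)data;
}

static Notifier my_size_notifier = { .notify = my_vmem_size_changed };

static void watch_vmem(VirtIOMEM *vmem)
{
    VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem);

    vmc->add_size_change_notifier(vmem, &my_size_notifier);
}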
diff --git a/include/hw/virtio/virtio-mmio.h b/include/hw/virtio/virtio-mmio.h
new file mode 100644
index 0000000000..aa49262022
--- /dev/null
+++ b/include/hw/virtio/virtio-mmio.h
@@ -0,0 +1,75 @@
+/*
+ * Virtio MMIO bindings
+ *
+ * Copyright (c) 2011 Linaro Limited
+ *
+ * Author:
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_VIRTIO_MMIO_H
+#define HW_VIRTIO_MMIO_H
+
+#include "hw/sysbus.h"
+#include "hw/virtio/virtio-bus.h"
+
+/* QOM macros */
+/* virtio-mmio-bus */
+#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
+/* This is reusing the VirtioBusState typedef from TYPE_VIRTIO_BUS */
+DECLARE_OBJ_CHECKERS(VirtioBusState, VirtioBusClass,
+ VIRTIO_MMIO_BUS, TYPE_VIRTIO_MMIO_BUS)
+
+/* virtio-mmio */
+#define TYPE_VIRTIO_MMIO "virtio-mmio"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOMMIOProxy, VIRTIO_MMIO)
+
+#define VIRT_MAGIC 0x74726976 /* 'virt' */
+#define VIRT_VERSION 2
+#define VIRT_VERSION_LEGACY 1
+#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
+
+typedef struct VirtIOMMIOQueue {
+ uint16_t num;
+ bool enabled;
+ uint32_t desc[2];
+ uint32_t avail[2];
+ uint32_t used[2];
+} VirtIOMMIOQueue;
+
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT 1
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD \
+ (1 << VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT)
+
+struct VirtIOMMIOProxy {
+ /* Generic */
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq;
+ bool legacy;
+ uint32_t flags;
+ /* Guest accessible state needing migration and reset */
+ uint32_t host_features_sel;
+ uint32_t guest_features_sel;
+ uint32_t guest_page_shift;
+ /* virtio-bus */
+ VirtioBusState bus;
+ bool format_transport_address;
+ /* Fields only used for non-legacy (v2) devices */
+ uint32_t guest_features[2];
+ VirtIOMMIOQueue vqs[VIRTIO_QUEUE_MAX];
+};
+
+#endif
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 4d7f3c82ca..060c23c04d 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -17,10 +17,14 @@
#include "qemu/units.h"
#include "standard-headers/linux/virtio_net.h"
#include "hw/virtio/virtio.h"
+#include "net/announce.h"
+#include "qemu/option_int.h"
+#include "qom/object.h"
+
+#include "ebpf/ebpf_rss.h"
#define TYPE_VIRTIO_NET "virtio-net-device"
-#define VIRTIO_NET(obj) \
- OBJECT_CHECK(VirtIONet, (obj), TYPE_VIRTIO_NET)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIONet, VIRTIO_NET)
#define TX_TIMER_INTERVAL 150000 /* 150 us */
@@ -31,6 +35,15 @@
* and latency. */
#define TX_BURST 256
+/* Maximum VIRTIO_NET_CTRL_MAC_TABLE_SET unicast + multicast entries. */
+#define MAC_TABLE_ENTRIES 64
+
+/*
+ * The maximum number of VLANs in the VLAN filter table
+ * added by VIRTIO_NET_CTRL_VLAN_ADD
+ */
+#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
+
typedef struct virtio_net_conf
{
uint32_t txtimer;
@@ -42,11 +55,102 @@ typedef struct virtio_net_conf
int32_t speed;
char *duplex_str;
uint8_t duplex;
+ char *primary_id_str;
} virtio_net_conf;
+/* Coalesced packets type & status */
+typedef enum {
+ RSC_COALESCE, /* Data has been coalesced */
+ RSC_FINAL, /* Will terminate the current connection */
+ RSC_NO_MATCH, /* No match in the buffer pool */
+ RSC_BYPASS, /* Packet to be bypassed: not TCP, TCP control, etc. */
+ RSC_CANDIDATE /* Data is a candidate for coalescing */
+} CoalesceStatus;
+
+typedef struct VirtioNetRscStat {
+ uint32_t received;
+ uint32_t coalesced;
+ uint32_t over_size;
+ uint32_t cache;
+ uint32_t empty_cache;
+ uint32_t no_match_cache;
+ uint32_t win_update;
+ uint32_t no_match;
+ uint32_t tcp_syn;
+ uint32_t tcp_ctrl_drain;
+ uint32_t dup_ack;
+ uint32_t dup_ack1;
+ uint32_t dup_ack2;
+ uint32_t pure_ack;
+ uint32_t ack_out_of_win;
+ uint32_t data_out_of_win;
+ uint32_t data_out_of_order;
+ uint32_t data_after_pure_ack;
+ uint32_t bypass_not_tcp;
+ uint32_t tcp_option;
+ uint32_t tcp_all_opt;
+ uint32_t ip_frag;
+ uint32_t ip_ecn;
+ uint32_t ip_hacked;
+ uint32_t ip_option;
+ uint32_t purge_failed;
+ uint32_t drain_failed;
+ uint32_t final_failed;
+ int64_t timer;
+} VirtioNetRscStat;
+
+/* General RSC unit info used to check whether coalescing is possible */
+typedef struct VirtioNetRscUnit {
+ void *ip; /* ip header */
+ uint16_t *ip_plen; /* data len pointer in ip header field */
+ struct tcp_header *tcp; /* tcp header */
+ uint16_t tcp_hdrlen; /* tcp header len */
+ uint16_t payload; /* pure payload without virtio/eth/ip/tcp */
+} VirtioNetRscUnit;
+
+/* Coalesced segment */
+typedef struct VirtioNetRscSeg {
+ QTAILQ_ENTRY(VirtioNetRscSeg) next;
+ void *buf;
+ size_t size;
+ uint16_t packets;
+ uint16_t dup_ack;
+ bool is_coalesced; /* the IPv4 header checksum needs recalculating; marked here */
+ VirtioNetRscUnit unit;
+ NetClientState *nc;
+} VirtioNetRscSeg;
+
+
+/* Chains are divided by protocol (IPv4/v6) and NetClientInfo */
+typedef struct VirtioNetRscChain {
+ QTAILQ_ENTRY(VirtioNetRscChain) next;
+ VirtIONet *n; /* VirtIONet */
+ uint16_t proto;
+ uint8_t gso_type;
+ uint16_t max_payload;
+ QEMUTimer *drain_timer;
+ QTAILQ_HEAD(, VirtioNetRscSeg) buffers;
+ VirtioNetRscStat stat;
+} VirtioNetRscChain;
+
/* Maximum packet size we can receive from tap device: header + 64k */
#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 * KiB))
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
+
+typedef struct VirtioNetRssData {
+ bool enabled;
+ bool enabled_software_rss;
+ bool redirect;
+ bool populate_hash;
+ uint32_t hash_types;
+ uint8_t key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+ uint16_t indirections_len;
+ uint16_t *indirections_table;
+ uint16_t default_queue;
+} VirtioNetRssData;
+
typedef struct VirtIONetQueue {
VirtQueue *rx_vq;
VirtQueue *tx_vq;
@@ -59,19 +163,25 @@ typedef struct VirtIONetQueue {
struct VirtIONet *n;
} VirtIONetQueue;
-typedef struct VirtIONet {
+struct VirtIONet {
VirtIODevice parent_obj;
uint8_t mac[ETH_ALEN];
uint16_t status;
VirtIONetQueue *vqs;
VirtQueue *ctrl_vq;
NICState *nic;
+ /* RSC chains - temporary storage for coalesced data;
+ all of it is lost across migration */
+ QTAILQ_HEAD(, VirtioNetRscChain) rsc_chains;
uint32_t tx_timeout;
int32_t tx_burst;
uint32_t has_vnet_hdr;
size_t host_hdr_len;
size_t guest_hdr_len;
uint64_t host_features;
+ uint32_t rsc_timeout;
+ uint8_t rsc4_enabled;
+ uint8_t rsc6_enabled;
uint8_t has_ufo;
uint32_t mergeable_rx_bufs;
uint8_t promisc;
@@ -93,19 +203,38 @@ typedef struct VirtIONet {
NICConf nic_conf;
DeviceState *qdev;
int multiqueue;
- uint16_t max_queues;
- uint16_t curr_queues;
+ uint16_t max_queue_pairs;
+ uint16_t curr_queue_pairs;
+ uint16_t max_ncs;
size_t config_size;
char *netclient_name;
char *netclient_type;
uint64_t curr_guest_offloads;
- QEMUTimer *announce_timer;
- int announce_counter;
+ /* used on saved state restore phase to preserve the curr_guest_offloads */
+ uint64_t saved_guest_offloads;
+ AnnounceTimer announce_timer;
bool needs_vnet_hdr_swap;
bool mtu_bypass_backend;
-} VirtIONet;
+ /* primary failover device is hidden */
+ bool failover_primary_hidden;
+ bool failover;
+ DeviceListener primary_listener;
+ QDict *primary_opts;
+ bool primary_opts_from_json;
+ NotifierWithReturn migration_state;
+ VirtioNetRssData rss_data;
+ struct NetRxPkt *rx_pkt;
+ struct EBPFRSSContext ebpf_rss;
+ uint32_t nr_ebpf_rss_fds;
+ char **ebpf_rss_fds;
+};
+size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
+ const struct iovec *in_sg, unsigned in_num,
+ const struct iovec *out_sg,
+ unsigned out_num);
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
const char *type);
+uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n);
#endif
diff --git a/include/hw/virtio/virtio-pci.h b/include/hw/virtio/virtio-pci.h
new file mode 100644
index 0000000000..59d88018c1
--- /dev/null
+++ b/include/hw/virtio/virtio-pci.h
@@ -0,0 +1,272 @@
+/*
+ * Virtio PCI Bindings
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2009 CodeSourcery
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Paul Brook <paul@codesourcery.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_VIRTIO_PCI_H
+#define QEMU_VIRTIO_PCI_H
+
+#include "hw/pci/msi.h"
+#include "hw/virtio/virtio-bus.h"
+#include "qom/object.h"
+
+
+/* virtio-pci-bus */
+
+typedef struct VirtioBusState VirtioPCIBusState;
+typedef struct VirtioBusClass VirtioPCIBusClass;
+
+#define TYPE_VIRTIO_PCI_BUS "virtio-pci-bus"
+DECLARE_OBJ_CHECKERS(VirtioPCIBusState, VirtioPCIBusClass,
+ VIRTIO_PCI_BUS, TYPE_VIRTIO_PCI_BUS)
+
+enum {
+ VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT,
+ VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
+ VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
+ VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT,
+ VIRTIO_PCI_FLAG_ATS_BIT,
+ VIRTIO_PCI_FLAG_INIT_DEVERR_BIT,
+ VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT,
+ VIRTIO_PCI_FLAG_INIT_PM_BIT,
+ VIRTIO_PCI_FLAG_INIT_FLR_BIT,
+ VIRTIO_PCI_FLAG_AER_BIT,
+ VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT,
+};
+
+/* Need to activate work-arounds for buggy guests at vmstate load. */
+#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION \
+ (1 << VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT)
+
+/* Performance improves when virtqueue kick processing is decoupled from the
+ * vcpu thread using ioeventfd for some devices. */
+#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
+
+/* virtio version flags */
+#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
+
+/* migrate extra state */
+#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
+
+/* have PIO notification for modern devices? */
+#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
+ (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
+
+/* page per vq flag to be used by split drivers within guests */
+#define VIRTIO_PCI_FLAG_PAGE_PER_VQ \
+ (1 << VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT)
+
+/* address space translation service */
+#define VIRTIO_PCI_FLAG_ATS (1 << VIRTIO_PCI_FLAG_ATS_BIT)
+
+/* Init error enabling flags */
+#define VIRTIO_PCI_FLAG_INIT_DEVERR (1 << VIRTIO_PCI_FLAG_INIT_DEVERR_BIT)
+
+/* Init Link Control register */
+#define VIRTIO_PCI_FLAG_INIT_LNKCTL (1 << VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT)
+
+/* Init Power Management */
+#define VIRTIO_PCI_FLAG_INIT_PM (1 << VIRTIO_PCI_FLAG_INIT_PM_BIT)
+
+/* Init Function Level Reset capability */
+#define VIRTIO_PCI_FLAG_INIT_FLR (1 << VIRTIO_PCI_FLAG_INIT_FLR_BIT)
+
+/* Advanced Error Reporting capability */
+#define VIRTIO_PCI_FLAG_AER (1 << VIRTIO_PCI_FLAG_AER_BIT)
+
+/* Page Aligned Address space Translation Service */
+#define VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED \
+ (1 << VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT)
+
+typedef struct {
+ MSIMessage msg;
+ int virq;
+ unsigned int users;
+} VirtIOIRQFD;
+
+/*
+ * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
+ */
+#define TYPE_VIRTIO_PCI "virtio-pci"
+OBJECT_DECLARE_TYPE(VirtIOPCIProxy, VirtioPCIClass, VIRTIO_PCI)
+
+struct VirtioPCIClass {
+ PCIDeviceClass parent_class;
+ DeviceRealize parent_dc_realize;
+ void (*realize)(VirtIOPCIProxy *vpci_dev, Error **errp);
+};
+
+typedef struct VirtIOPCIRegion {
+ MemoryRegion mr;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t type;
+} VirtIOPCIRegion;
+
+typedef struct VirtIOPCIQueue {
+ uint16_t num;
+ bool enabled;
+ /*
+ * No need to migrate the reset status, because it is always 0
+ * when the migration starts.
+ */
+ bool reset;
+ uint32_t desc[2];
+ uint32_t avail[2];
+ uint32_t used[2];
+} VirtIOPCIQueue;
+
+struct VirtIOPCIProxy {
+ PCIDevice pci_dev;
+ MemoryRegion bar;
+ union {
+ struct {
+ VirtIOPCIRegion common;
+ VirtIOPCIRegion isr;
+ VirtIOPCIRegion device;
+ VirtIOPCIRegion notify;
+ VirtIOPCIRegion notify_pio;
+ };
+ VirtIOPCIRegion regs[5];
+ };
+ MemoryRegion modern_bar;
+ MemoryRegion io_bar;
+ uint32_t legacy_io_bar_idx;
+ uint32_t msix_bar_idx;
+ uint32_t modern_io_bar_idx;
+ uint32_t modern_mem_bar_idx;
+ int config_cap;
+ uint32_t flags;
+ bool disable_modern;
+ bool ignore_backend_features;
+ OnOffAuto disable_legacy;
+ /* Transitional device id */
+ uint16_t trans_devid;
+ uint32_t class_code;
+ uint32_t nvectors;
+ uint32_t dfselect;
+ uint32_t gfselect;
+ uint32_t guest_features[2];
+ VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX];
+
+ VirtIOIRQFD *vector_irqfd;
+ int nvqs_with_notifiers;
+ VirtioBusState bus;
+};
+
+static inline bool virtio_pci_modern(VirtIOPCIProxy *proxy)
+{
+ return !proxy->disable_modern;
+}
+
+static inline bool virtio_pci_legacy(VirtIOPCIProxy *proxy)
+{
+ return proxy->disable_legacy == ON_OFF_AUTO_OFF;
+}
+
+static inline void virtio_pci_force_virtio_1(VirtIOPCIProxy *proxy)
+{
+ proxy->disable_modern = false;
+ proxy->disable_legacy = ON_OFF_AUTO_ON;
+}
+
+static inline void virtio_pci_disable_modern(VirtIOPCIProxy *proxy)
+{
+ proxy->disable_modern = true;
+}
+
+uint16_t virtio_pci_get_trans_devid(uint16_t device_id);
+uint16_t virtio_pci_get_class_id(uint16_t device_id);
+
+/*
+ * virtio-input-pci: This extends VirtIOPCIProxy.
+ */
+#define TYPE_VIRTIO_INPUT_PCI "virtio-input-pci"
+
+/* Virtio ABI version, if we increment this, we break the guest driver. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/* Input for virtio_pci_types_register() */
+typedef struct VirtioPCIDeviceTypeInfo {
+ /*
+ * Common base class for the subclasses below.
+ *
+ * Required only if transitional_name or non_transitional_name is set.
+ *
+ * We need a separate base type instead of making all types
+ * inherit from generic_name for two reasons:
+ * 1) generic_name implements INTERFACE_PCIE_DEVICE, but
+ * transitional_name does not.
+ * 2) generic_name has the "disable-legacy" and "disable-modern"
+ * properties; transitional_name and non_transitional_name don't.
+ */
+ const char *base_name;
+ /*
+ * Generic device type. Optional.
+ *
+ * Supports both transitional and non-transitional modes,
+ * using the disable-legacy and disable-modern properties.
+ * If disable-legacy=auto, (non-)transitional mode is selected
+ * depending on the bus where the device is plugged.
+ *
+ * Implements both INTERFACE_PCIE_DEVICE and INTERFACE_CONVENTIONAL_PCI_DEVICE,
+ * but PCI Express is supported only in non-transitional mode.
+ *
+ * The only type implemented by QEMU 3.1 and older.
+ */
+ const char *generic_name;
+ /*
+ * The transitional device type. Optional.
+ *
+ * Implements both INTERFACE_PCIE_DEVICE and INTERFACE_CONVENTIONAL_PCI_DEVICE.
+ */
+ const char *transitional_name;
+ /*
+ * The non-transitional device type. Optional.
+ *
+ * Implements INTERFACE_CONVENTIONAL_PCI_DEVICE only.
+ */
+ const char *non_transitional_name;
+
+ /* Parent type. If NULL, TYPE_VIRTIO_PCI is used */
+ const char *parent;
+
+ /* Same as TypeInfo fields: */
+ size_t instance_size;
+ size_t class_size;
+ void (*instance_init)(Object *obj);
+ void (*instance_finalize)(Object *obj);
+ void (*class_init)(ObjectClass *klass, void *data);
+ InterfaceInfo *interfaces;
+} VirtioPCIDeviceTypeInfo;
+
+/* Register virtio-pci type(s). @t must be static. */
+void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t);
+
+/**
+ * virtio_pci_optimal_num_queues:
+ * @fixed_queues: number of queues that are always present
+ *
+ * Returns: The optimal number of queues for a multi-queue device, excluding
+ * @fixed_queues.
+ */
+unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues);
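For context, a hedged sketch of how a multi-queue proxy might use this helper when the user leaves the queue count on "auto". The function name example_scsi_pci_realize is illustrative; VIRTIO_SCSI_AUTO_NUM_QUEUES and VIRTIO_SCSI_VQ_NUM_FIXED come from the virtio-scsi.h changes shown earlier in this diff:

/* Sketch: pick a queue count when the user asked for "auto". */
static void example_scsi_pci_realize(VirtIOSCSIConf *conf)
{
    if (conf->num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        conf->num_queues =
            virtio_pci_optimal_num_queues(VIRTIO_SCSI_VQ_NUM_FIXED);
    }
}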
+void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
+ int n, bool assign,
+ bool with_irqfd);
+
+int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy, uint8_t bar, uint64_t offset,
+ uint64_t length, uint8_t id);
+
+#endif
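A hedged sketch of how a device front-end typically fills in VirtioPCIDeviceTypeInfo and registers the generic, transitional and non-transitional variants in one call. The virtio-rng-pci type names, VirtIORngPCI struct and the two callbacks are illustrative assumptions, not taken from this header; type_init() is QEMU's module registration macro:

/* Sketch only: VirtIORngPCI, its instance_init and class_init are assumed. */
static const VirtioPCIDeviceTypeInfo virtio_rng_pci_info = {
    .base_name              = "virtio-rng-pci-base",
    .generic_name           = "virtio-rng-pci",
    .transitional_name      = "virtio-rng-pci-transitional",
    .non_transitional_name  = "virtio-rng-pci-non-transitional",
    .instance_size = sizeof(VirtIORngPCI),
    .instance_init = virtio_rng_pci_instance_init,
    .class_init    = virtio_rng_pci_class_init,
};

static void virtio_rng_pci_register_types(void)
{
    virtio_pci_types_register(&virtio_rng_pci_info);
}
type_init(virtio_rng_pci_register_types);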
diff --git a/include/hw/virtio/virtio-pmem.h b/include/hw/virtio/virtio-pmem.h
new file mode 100644
index 0000000000..fc4fd1f7fe
--- /dev/null
+++ b/include/hw/virtio/virtio-pmem.h
@@ -0,0 +1,46 @@
+/*
+ * Virtio PMEM device
+ *
+ * Copyright (C) 2018-2019 Red Hat, Inc.
+ *
+ * Authors:
+ * Pankaj Gupta <pagupta@redhat.com>
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_PMEM_H
+#define HW_VIRTIO_PMEM_H
+
+#include "hw/virtio/virtio.h"
+#include "qapi/qapi-types-machine.h"
+#include "qom/object.h"
+
+#define TYPE_VIRTIO_PMEM "virtio-pmem"
+
+OBJECT_DECLARE_TYPE(VirtIOPMEM, VirtIOPMEMClass,
+ VIRTIO_PMEM)
+
+#define VIRTIO_PMEM_ADDR_PROP "memaddr"
+#define VIRTIO_PMEM_MEMDEV_PROP "memdev"
+
+struct VirtIOPMEM {
+ VirtIODevice parent_obj;
+
+ VirtQueue *rq_vq;
+ uint64_t start;
+ HostMemoryBackend *memdev;
+};
+
+struct VirtIOPMEMClass {
+ /* private */
+ VirtIODevice parent;
+
+ /* public */
+ void (*fill_device_info)(const VirtIOPMEM *pmem, VirtioPMEMDeviceInfo *vi);
+ MemoryRegion *(*get_memory_region)(VirtIOPMEM *pmem, Error **errp);
+};
+
+#endif
diff --git a/include/hw/virtio/virtio-rng.h b/include/hw/virtio/virtio-rng.h
index 922dce7cac..82734255d9 100644
--- a/include/hw/virtio/virtio-rng.h
+++ b/include/hw/virtio/virtio-rng.h
@@ -12,13 +12,13 @@
#ifndef QEMU_VIRTIO_RNG_H
#define QEMU_VIRTIO_RNG_H
+#include "hw/virtio/virtio.h"
#include "sysemu/rng.h"
-#include "sysemu/rng-random.h"
#include "standard-headers/linux/virtio_rng.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_RNG "virtio-rng-device"
-#define VIRTIO_RNG(obj) \
- OBJECT_CHECK(VirtIORNG, (obj), TYPE_VIRTIO_RNG)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIORNG, VIRTIO_RNG)
#define VIRTIO_RNG_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_RNG)
@@ -26,10 +26,9 @@ struct VirtIORNGConf {
RngBackend *rng;
uint64_t max_bytes;
uint32_t period_ms;
- RngRandom *default_backend;
};
-typedef struct VirtIORNG {
+struct VirtIORNG {
VirtIODevice parent_obj;
/* Only one vq - guest puts buffer(s) on it when it needs entropy */
@@ -47,6 +46,6 @@ typedef struct VirtIORNG {
bool activate_timer;
VMChangeStateEntry *vmstate;
-} VirtIORNG;
+};
#endif
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 4c0bcdb788..7be0105918 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -13,29 +13,32 @@
#ifndef QEMU_VIRTIO_SCSI_H
#define QEMU_VIRTIO_SCSI_H
+#include "qom/object.h"
/* Override CDB/sense data size: they are dynamic (guest controlled) in QEMU */
#define VIRTIO_SCSI_CDB_SIZE 0
#define VIRTIO_SCSI_SENSE_SIZE 0
#include "standard-headers/linux/virtio_scsi.h"
#include "hw/virtio/virtio.h"
-#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "chardev/char-fe.h"
#include "sysemu/iothread.h"
#define TYPE_VIRTIO_SCSI_COMMON "virtio-scsi-common"
-#define VIRTIO_SCSI_COMMON(obj) \
- OBJECT_CHECK(VirtIOSCSICommon, (obj), TYPE_VIRTIO_SCSI_COMMON)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSICommon, VIRTIO_SCSI_COMMON)
#define TYPE_VIRTIO_SCSI "virtio-scsi-device"
-#define VIRTIO_SCSI(obj) \
- OBJECT_CHECK(VirtIOSCSI, (obj), TYPE_VIRTIO_SCSI)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSI, VIRTIO_SCSI)
#define VIRTIO_SCSI_MAX_CHANNEL 0
#define VIRTIO_SCSI_MAX_TARGET 255
#define VIRTIO_SCSI_MAX_LUN 16383
+/* Number of virtqueues that are always present */
+#define VIRTIO_SCSI_VQ_NUM_FIXED 2
+
+#define VIRTIO_SCSI_AUTO_NUM_QUEUES UINT32_MAX
+
typedef struct virtio_scsi_cmd_req VirtIOSCSICmdReq;
typedef struct virtio_scsi_cmd_resp VirtIOSCSICmdResp;
typedef struct virtio_scsi_ctrl_tmf_req VirtIOSCSICtrlTMFReq;
@@ -48,12 +51,12 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
struct VirtIOSCSIConf {
uint32_t num_queues;
uint32_t virtqueue_size;
+ bool worker_per_virtqueue;
+ bool seg_max_adjust;
uint32_t max_sectors;
uint32_t cmd_per_lun;
-#ifdef CONFIG_VHOST_SCSI
char *vhostfd;
char *wwpn;
-#endif
CharBackend chardev;
uint32_t boot_tpgt;
IOThread *iothread;
@@ -61,7 +64,7 @@ struct VirtIOSCSIConf {
struct VirtIOSCSI;
-typedef struct VirtIOSCSICommon {
+struct VirtIOSCSICommon {
VirtIODevice parent_obj;
VirtIOSCSIConf conf;
@@ -70,15 +73,25 @@ typedef struct VirtIOSCSICommon {
VirtQueue *ctrl_vq;
VirtQueue *event_vq;
VirtQueue **cmd_vqs;
-} VirtIOSCSICommon;
+};
-typedef struct VirtIOSCSI {
+struct VirtIOSCSIReq;
+
+struct VirtIOSCSI {
VirtIOSCSICommon parent_obj;
SCSIBus bus;
- int resetting;
+ int resetting; /* written from main loop thread, read from any thread */
bool events_dropped;
+ /*
+ * TMFs deferred to main loop BH. These fields are protected by
+ * tmf_bh_lock.
+ */
+ QemuMutex tmf_bh_lock;
+ QEMUBH *tmf_bh;
+ QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list;
+
/* Fields for dataplane below */
AioContext *ctx; /* one iothread per virtio-scsi-pci for now */
@@ -87,57 +100,7 @@ typedef struct VirtIOSCSI {
bool dataplane_stopping;
bool dataplane_fenced;
uint32_t host_features;
-} VirtIOSCSI;
-
-typedef struct VirtIOSCSIReq {
- /* Note:
- * - fields up to resp_iov are initialized by virtio_scsi_init_req;
- * - fields starting at vring are zeroed by virtio_scsi_init_req.
- * */
- VirtQueueElement elem;
-
- VirtIOSCSI *dev;
- VirtQueue *vq;
- QEMUSGList qsgl;
- QEMUIOVector resp_iov;
-
- union {
- /* Used for two-stage request submission */
- QTAILQ_ENTRY(VirtIOSCSIReq) next;
-
- /* Used for cancellation of request during TMFs */
- int remaining;
- };
-
- SCSIRequest *sreq;
- size_t resp_size;
- enum SCSIXferMode mode;
- union {
- VirtIOSCSICmdResp cmd;
- VirtIOSCSICtrlTMFResp tmf;
- VirtIOSCSICtrlANResp an;
- VirtIOSCSIEvent event;
- } resp;
- union {
- VirtIOSCSICmdReq cmd;
- VirtIOSCSICtrlTMFReq tmf;
- VirtIOSCSICtrlANReq an;
- } req;
-} VirtIOSCSIReq;
-
-static inline void virtio_scsi_acquire(VirtIOSCSI *s)
-{
- if (s->ctx) {
- aio_context_acquire(s->ctx);
- }
-}
-
-static inline void virtio_scsi_release(VirtIOSCSI *s)
-{
- if (s->ctx) {
- aio_context_release(s->ctx);
- }
-}
+};
void virtio_scsi_common_realize(DeviceState *dev,
VirtIOHandleOutput ctrl,
@@ -145,14 +108,7 @@ void virtio_scsi_common_realize(DeviceState *dev,
VirtIOHandleOutput cmd,
Error **errp);
-void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp);
-bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq);
-bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq);
-bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq);
-void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req);
-void virtio_scsi_free_req(VirtIOSCSIReq *req);
-void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
- uint32_t event, uint32_t reason);
+void virtio_scsi_common_unrealize(DeviceState *dev);
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp);
int virtio_scsi_dataplane_start(VirtIODevice *s);
diff --git a/include/hw/virtio/virtio-serial.h b/include/hw/virtio/virtio-serial.h
index 12657a9f39..d87c62eab7 100644
--- a/include/hw/virtio/virtio-serial.h
+++ b/include/hw/virtio/virtio-serial.h
@@ -17,8 +17,8 @@
#define QEMU_VIRTIO_SERIAL_H
#include "standard-headers/linux/virtio_console.h"
-#include "hw/qdev.h"
#include "hw/virtio/virtio.h"
+#include "qom/object.h"
struct virtio_serial_conf {
/* Max. number of ports we can have for a virtio-serial device */
@@ -26,18 +26,16 @@ struct virtio_serial_conf {
};
#define TYPE_VIRTIO_SERIAL_PORT "virtio-serial-port"
-#define VIRTIO_SERIAL_PORT(obj) \
- OBJECT_CHECK(VirtIOSerialPort, (obj), TYPE_VIRTIO_SERIAL_PORT)
-#define VIRTIO_SERIAL_PORT_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtIOSerialPortClass, (klass), TYPE_VIRTIO_SERIAL_PORT)
-#define VIRTIO_SERIAL_PORT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOSerialPortClass, (obj), TYPE_VIRTIO_SERIAL_PORT)
+OBJECT_DECLARE_TYPE(VirtIOSerialPort, VirtIOSerialPortClass,
+ VIRTIO_SERIAL_PORT)
typedef struct VirtIOSerial VirtIOSerial;
-typedef struct VirtIOSerialBus VirtIOSerialBus;
-typedef struct VirtIOSerialPort VirtIOSerialPort;
-typedef struct VirtIOSerialPortClass {
+#define TYPE_VIRTIO_SERIAL_BUS "virtio-serial-bus"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSerialBus, VIRTIO_SERIAL_BUS)
+
+
+struct VirtIOSerialPortClass {
DeviceClass parent_class;
/* Is this a device that binds with hvc in the guest? */
@@ -82,7 +80,7 @@ typedef struct VirtIOSerialPortClass {
*/
ssize_t (*have_data)(VirtIOSerialPort *port, const uint8_t *buf,
ssize_t len);
-} VirtIOSerialPortClass;
+};
/*
* This is the state that's shared between all the ports. Some of the
@@ -224,7 +222,6 @@ size_t virtio_serial_guest_ready(VirtIOSerialPort *port);
void virtio_serial_throttle_port(VirtIOSerialPort *port, bool throttle);
#define TYPE_VIRTIO_SERIAL "virtio-serial-device"
-#define VIRTIO_SERIAL(obj) \
- OBJECT_CHECK(VirtIOSerial, (obj), TYPE_VIRTIO_SERIAL)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSerial, VIRTIO_SERIAL)
#endif
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 9c1fa07d6d..7d5ffdc145 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -14,16 +14,23 @@
#ifndef QEMU_VIRTIO_H
#define QEMU_VIRTIO_H
-#include "hw/hw.h"
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
#include "net/net.h"
-#include "hw/qdev.h"
-#include "sysemu/sysemu.h"
+#include "migration/vmstate.h"
#include "qemu/event_notifier.h"
#include "standard-headers/linux/virtio_config.h"
#include "standard-headers/linux/virtio_ring.h"
+#include "qom/object.h"
+#include "block/aio.h"
-/* A guest should never accept this. It implies negotiation is broken. */
-#define VIRTIO_F_BAD_FEATURE 30
+/*
+ * A guest should never accept this. It implies negotiation is broken
+ * between the driver frontend and the device. This bit is re-used for
+ * vhost-user to advertise VHOST_USER_F_PROTOCOL_FEATURES between QEMU
+ * and a vhost-user backend.
+ */
+#define VIRTIO_F_BAD_FEATURE 30
#define VIRTIO_LEGACY_FEATURES ((0x1ULL << VIRTIO_F_BAD_FEATURE) | \
(0x1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
@@ -37,6 +44,20 @@ static inline hwaddr vring_align(hwaddr addr,
return QEMU_ALIGN_UP(addr, align);
}
+typedef struct VirtIOFeature {
+ uint64_t flags;
+ size_t end;
+} VirtIOFeature;
+
+typedef struct VirtIOConfigSizeParams {
+ size_t min_size;
+ size_t max_size;
+ const VirtIOFeature *feature_sizes;
+} VirtIOConfigSizeParams;
+
+size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
+ uint64_t host_features);
+
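A hedged sketch of how these two types are meant to be used: the feature_sizes table maps feature bits to the end offset of the last config field they unlock, and virtio_get_config_size() clamps the result between min_size and max_size. The virtio-net config fields and feature bits come from the standard headers; the endof() helper is an assumption based on existing QEMU usage:

/* Sketch: config space grows only when the corresponding feature is offered. */
static const VirtIOFeature net_feature_sizes[] = {
    { .flags = 1ULL << VIRTIO_NET_F_MTU,
      .end   = endof(struct virtio_net_config, mtu) },
    { .flags = 1ULL << VIRTIO_NET_F_RSS,
      .end   = endof(struct virtio_net_config, supported_hash_types) },
    {}
};

static const VirtIOConfigSizeParams net_cfg_size_params = {
    .min_size      = endof(struct virtio_net_config, mac),
    .max_size      = sizeof(struct virtio_net_config),
    .feature_sizes = net_feature_sizes,
};

A device's realize would then set its config_size to virtio_get_config_size(&net_cfg_size_params, host_features).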
typedef struct VirtQueue VirtQueue;
#define VIRTQUEUE_MAX_SIZE 1024
@@ -44,6 +65,8 @@ typedef struct VirtQueue VirtQueue;
typedef struct VirtQueueElement
{
unsigned int index;
+ unsigned int len;
+ unsigned int ndescs;
unsigned int out_num;
unsigned int in_num;
hwaddr *in_addr;
@@ -56,13 +79,16 @@ typedef struct VirtQueueElement
#define VIRTIO_NO_VECTOR 0xffff
+/* special index value used internally for config irqs */
+#define VIRTIO_CONFIG_IRQ_IDX -1
+
#define TYPE_VIRTIO_DEVICE "virtio-device"
-#define VIRTIO_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtioDeviceClass, obj, TYPE_VIRTIO_DEVICE)
-#define VIRTIO_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtioDeviceClass, klass, TYPE_VIRTIO_DEVICE)
-#define VIRTIO_DEVICE(obj) \
- OBJECT_CHECK(VirtIODevice, (obj), TYPE_VIRTIO_DEVICE)
+OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE)
+
+typedef struct {
+ int virtio_bit;
+ const char *feature_desc;
+} qmp_virtio_feature_map_t;
enum virtio_device_endian {
VIRTIO_DEVICE_ENDIAN_UNKNOWN,
@@ -70,6 +96,12 @@ enum virtio_device_endian {
VIRTIO_DEVICE_ENDIAN_BIG,
};
+/**
+ * struct VirtIODevice - common VirtIO structure
+ * @name: name of the device
+ * @status: VirtIO Device Status field
+ *
+ */
struct VirtIODevice
{
DeviceState parent_obj;
@@ -77,9 +109,20 @@ struct VirtIODevice
uint8_t status;
uint8_t isr;
uint16_t queue_sel;
- uint64_t guest_features;
+ /**
+ * These fields represent a set of VirtIO features at various
+ * levels of the stack. @host_features indicates the complete
+ * feature set the VirtIO device can offer to the driver.
+ * @guest_features indicates which features the VirtIO driver has
+ * selected by writing to the feature register. Finally
+ * @backend_features represents everything supported by the
+ * backend (e.g. vhost) and could potentially be a subset of the
+ * total feature set offered by QEMU.
+ */
uint64_t host_features;
+ uint64_t guest_features;
uint64_t backend_features;
+
size_t config_len;
void *config;
uint16_t config_vector;
@@ -88,17 +131,43 @@ struct VirtIODevice
VirtQueue *vq;
MemoryListener listener;
uint16_t device_id;
+ /* @vm_running: current VM running state via virtio_vmstate_change() */
bool vm_running;
bool broken; /* device in invalid state, needs reset */
+ bool use_disabled_flag; /* allow use of 'disable' flag when needed */
+ bool disabled; /* device in temporarily disabled state */
+ /**
+ * @use_started: true if the @started flag should be used to check the
+ * current state of the VirtIO device. Otherwise the status bits
+ * should be checked for the current status of the device.
+ * @use_started is only set via QMP and defaults to true for all
+ * modern machines (since 4.1).
+ */
+ bool use_started;
+ bool started;
+ bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
+ bool disable_legacy_check;
+ bool vhost_started;
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
+ /**
+ * @use_guest_notifier_mask: gate usage of the ->guest_notifier_mask() callback.
+ * This is used to suppress the masking of guest updates for
+ * vhost-user devices which are asynchronous by design.
+ */
bool use_guest_notifier_mask;
AddressSpace *dma_as;
QLIST_HEAD(, VirtQueue) *vector_queues;
+ QTAILQ_ENTRY(VirtIODevice) next;
+ /**
+ * @config_notifier: the event notifier that handles config events
+ */
+ EventNotifier config_notifier;
+ bool device_iotlb_enabled;
};
-typedef struct VirtioDeviceClass {
+struct VirtioDeviceClass {
/*< private >*/
DeviceClass parent;
/*< public >*/
@@ -116,6 +185,10 @@ typedef struct VirtioDeviceClass {
void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
void (*reset)(VirtIODevice *vdev);
void (*set_status)(VirtIODevice *vdev, uint8_t val);
+ /* Device must validate queue_index. */
+ void (*queue_reset)(VirtIODevice *vdev, uint32_t queue_index);
+ /* Device must validate queue_index. */
+ void (*queue_enable)(VirtIODevice *vdev, uint32_t queue_index);
/* For transitional devices, this is a bitmap of features
* that are only exposed on the legacy interface but not
* the modern one.
@@ -140,29 +213,45 @@ typedef struct VirtioDeviceClass {
*/
void (*save)(VirtIODevice *vdev, QEMUFile *f);
int (*load)(VirtIODevice *vdev, QEMUFile *f, int version_id);
+ /* The post_load hook in vmsd is called early, while the device is still
+ * being processed and the VirtIODevice isn't fully initialized. Devices
+ * should use this hook instead, unless they specifically need to inspect
+ * the migration stream as it is processed, e.g. for bounds checking.
+ */
+ int (*post_load)(VirtIODevice *vdev);
const VMStateDescription *vmsd;
-} VirtioDeviceClass;
+ bool (*primary_unplug_pending)(void *opaque);
+ struct vhost_dev *(*get_vhost)(VirtIODevice *vdev);
+ void (*toggle_device_iotlb)(VirtIODevice *vdev);
+};
void virtio_instance_init_common(Object *proxy_obj, void *data,
size_t vdev_size, const char *vdev_name);
-void virtio_init(VirtIODevice *vdev, const char *name,
- uint16_t device_id, size_t config_size);
+/**
+ * virtio_init() - initialise the common VirtIODevice structure
+ * @vdev: pointer to VirtIODevice
+ * @device_id: the VirtIO device ID (see virtio_ids.h)
+ * @config_size: size of the config space
+ */
+void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size);
+
void virtio_cleanup(VirtIODevice *vdev);
-void virtio_error(VirtIODevice *vdev, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
+void virtio_error(VirtIODevice *vdev, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
/* Set the child bus name. */
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name);
typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
-typedef bool (*VirtIOHandleAIOOutput)(VirtIODevice *, VirtQueue *);
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
VirtIOHandleOutput handle_output);
void virtio_del_queue(VirtIODevice *vdev, int n);
+void virtio_delete_queue(VirtQueue *vq);
+
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len);
void virtqueue_flush(VirtQueue *vq, unsigned int count);
@@ -178,7 +267,8 @@ void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem);
void *virtqueue_pop(VirtQueue *vq, size_t sz);
unsigned int virtqueue_drop_all(VirtQueue *vq);
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz);
-void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem);
+void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
+ VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
unsigned int out_bytes);
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
@@ -201,8 +291,16 @@ extern const VMStateInfo virtio_vmstate_info;
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id);
+/**
+ * virtio_notify_config() - signal a change to device config
+ * @vdev: the virtio device
+ *
+ * Assuming the virtio device is up (VIRTIO_CONFIG_S_DRIVER_OK) this
+ * will trigger a guest interrupt and update the config version.
+ */
void virtio_notify_config(VirtIODevice *vdev);
+bool virtio_queue_get_notification(VirtQueue *vq);
void virtio_queue_set_notification(VirtQueue *vq, int enable);
int virtio_queue_ready(VirtQueue *vq);
@@ -235,6 +333,7 @@ int virtio_get_num_queues(VirtIODevice *vdev);
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used);
void virtio_queue_update_rings(VirtIODevice *vdev, int n);
+void virtio_init_region_cache(VirtIODevice *vdev, int n);
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align);
void virtio_queue_notify(VirtIODevice *vdev, int n);
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
@@ -243,6 +342,8 @@ int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
MemoryRegion *mr, bool assign);
int virtio_set_status(VirtIODevice *vdev, uint8_t val);
void virtio_reset(void *opaque);
+void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index);
+void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index);
void virtio_update_irq(VirtIODevice *vdev);
int virtio_set_features(VirtIODevice *vdev, uint64_t val);
@@ -264,16 +365,23 @@ typedef struct VirtIORNGConf VirtIORNGConf;
DEFINE_PROP_BIT64("any_layout", _state, _field, \
VIRTIO_F_ANY_LAYOUT, true), \
DEFINE_PROP_BIT64("iommu_platform", _state, _field, \
- VIRTIO_F_IOMMU_PLATFORM, false)
+ VIRTIO_F_IOMMU_PLATFORM, false), \
+ DEFINE_PROP_BIT64("packed", _state, _field, \
+ VIRTIO_F_RING_PACKED, false), \
+ DEFINE_PROP_BIT64("queue_reset", _state, _field, \
+ VIRTIO_F_RING_RESET, true)
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
+bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n);
+bool virtio_queue_enabled(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
-uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
-void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
+unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
+void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
+ unsigned int idx);
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n);
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);
@@ -283,16 +391,20 @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
bool with_irqfd);
int virtio_device_start_ioeventfd(VirtIODevice *vdev);
-void virtio_device_stop_ioeventfd(VirtIODevice *vdev);
int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
void virtio_device_release_ioeventfd(VirtIODevice *vdev);
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
void virtio_queue_host_notifier_read(EventNotifier *n);
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- VirtIOHandleAIOOutput handle_output);
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx);
+void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx);
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
+EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev);
+void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
+ bool assign, bool with_irqfd);
static inline void virtio_add_feature(uint64_t *features, unsigned int fbit)
{
@@ -312,7 +424,7 @@ static inline bool virtio_has_feature(uint64_t features, unsigned int fbit)
return !!(features & (1ULL << fbit));
}
-static inline bool virtio_vdev_has_feature(VirtIODevice *vdev,
+static inline bool virtio_vdev_has_feature(const VirtIODevice *vdev,
unsigned int fbit)
{
return virtio_has_feature(vdev->guest_features, fbit);
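To make the host/guest/backend feature plumbing above concrete, a minimal sketch of the usual pattern: a device offers bits by OR-ing into the set passed to its get_features hook, then checks what the driver actually negotiated with virtio_vdev_has_feature(). The function names are illustrative, VIRTIO_RING_F_EVENT_IDX comes from the standard ring header, and the get_features signature assumed here mirrors the VirtioDeviceClass hook:

/* Sketch: offer a feature, then honour it only if the driver accepted it. */
static uint64_t example_get_features(VirtIODevice *vdev, uint64_t features,
                                     Error **errp)
{
    virtio_add_feature(&features, VIRTIO_RING_F_EVENT_IDX);
    return features;
}

static void example_use_features(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        /* take the event-index path */
    }
}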
@@ -333,4 +445,74 @@ static inline bool virtio_is_big_endian(VirtIODevice *vdev)
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return false;
}
+
+/**
+ * virtio_device_started() - check if device started
+ * @vdev - the VirtIO device
+ * @status - the device's status bits
+ *
+ * Check if the device is started. For most modern machines this is
+ * tracked via the @vdev->started field (to support migration),
+ * otherwise we check for the final negotiated status bit that
+ * indicates everything is ready.
+ */
+static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
+{
+ if (vdev->use_started) {
+ return vdev->started;
+ }
+
+ return status & VIRTIO_CONFIG_S_DRIVER_OK;
+}
+
+/**
+ * virtio_device_should_start() - check if device startable
+ * @vdev - the VirtIO device
+ * @status - the device's status bits
+ *
+ * This is similar to virtio_device_started() but also encapsulates a
+ * check on the VM status which would prevent a device starting
+ * anyway.
+ */
+static inline bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status)
+{
+ if (!vdev->vm_running) {
+ return false;
+ }
+
+ return virtio_device_started(vdev, status);
+}
+
+static inline void virtio_set_started(VirtIODevice *vdev, bool started)
+{
+ if (started) {
+ vdev->start_on_kick = false;
+ }
+
+ if (vdev->use_started) {
+ vdev->started = started;
+ }
+}
+
+static inline void virtio_set_disabled(VirtIODevice *vdev, bool disable)
+{
+ if (vdev->use_disabled_flag) {
+ vdev->disabled = disable;
+ }
+}
+
+static inline bool virtio_device_disabled(VirtIODevice *vdev)
+{
+ return unlikely(vdev->disabled || vdev->broken);
+}
+
+bool virtio_legacy_allowed(VirtIODevice *vdev);
+bool virtio_legacy_check_disabled(VirtIODevice *vdev);
+
+QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
+ QEMUBHFunc *cb, void *opaque,
+ const char *name);
+#define virtio_bh_new_guarded(dev, cb, opaque) \
+ virtio_bh_new_guarded_full((dev), (cb), (opaque), (stringify(cb)))
+
#endif
diff --git a/include/hw/vmstate-if.h b/include/hw/vmstate-if.h
new file mode 100644
index 0000000000..52df571d17
--- /dev/null
+++ b/include/hw/vmstate-if.h
@@ -0,0 +1,39 @@
+/*
+ * VMState interface
+ *
+ * Copyright (c) 2009-2019 Red Hat Inc
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef VMSTATE_IF_H
+#define VMSTATE_IF_H
+
+#include "qom/object.h"
+
+#define TYPE_VMSTATE_IF "vmstate-if"
+
+typedef struct VMStateIfClass VMStateIfClass;
+DECLARE_CLASS_CHECKERS(VMStateIfClass, VMSTATE_IF,
+ TYPE_VMSTATE_IF)
+#define VMSTATE_IF(obj) \
+ INTERFACE_CHECK(VMStateIf, (obj), TYPE_VMSTATE_IF)
+
+typedef struct VMStateIf VMStateIf;
+
+struct VMStateIfClass {
+ InterfaceClass parent_class;
+
+ char * (*get_id)(VMStateIf *obj);
+};
+
+static inline char *vmstate_if_get_id(VMStateIf *vmif)
+{
+ if (!vmif) {
+ return NULL;
+ }
+
+ return VMSTATE_IF_GET_CLASS(vmif)->get_id(vmif);
+}
+
+#endif /* VMSTATE_IF_H */
diff --git a/include/hw/watchdog/allwinner-wdt.h b/include/hw/watchdog/allwinner-wdt.h
new file mode 100644
index 0000000000..7fe41e20f2
--- /dev/null
+++ b/include/hw/watchdog/allwinner-wdt.h
@@ -0,0 +1,123 @@
+/*
+ * Allwinner Watchdog emulation
+ *
+ * Copyright (C) 2023 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from Allwinner RTC,
+ * by Niek Linnenbank.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_WATCHDOG_ALLWINNER_WDT_H
+#define HW_WATCHDOG_ALLWINNER_WDT_H
+
+#include "qom/object.h"
+#include "hw/ptimer.h"
+#include "hw/sysbus.h"
+
+/*
+ * This is a model of the Allwinner watchdog.
+ * Since the watchdog registers belong to the timer module (and are shared with
+ * the RTC module), the interrupt line from the watchdog is not handled right now.
+ * In QEMU, we just wire up the watchdog reset to watchdog_perform_action(),
+ * at least for the moment.
+ */
+
+#define TYPE_AW_WDT "allwinner-wdt"
+
+/** Allwinner WDT sun4i family (A10, A12), also sun7i (A20) */
+#define TYPE_AW_WDT_SUN4I TYPE_AW_WDT "-sun4i"
+
+/** Allwinner WDT sun6i family and newer (A31, H2+, H3, etc) */
+#define TYPE_AW_WDT_SUN6I TYPE_AW_WDT "-sun6i"
+
+/** Number of WDT registers */
+#define AW_WDT_REGS_NUM (5)
+
+OBJECT_DECLARE_TYPE(AwWdtState, AwWdtClass, AW_WDT)
+
+/**
+ * Allwinner WDT object instance state.
+ */
+struct AwWdtState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ struct ptimer_state *timer;
+
+ uint32_t regs[AW_WDT_REGS_NUM];
+};
+
+/**
+ * Allwinner WDT class-level struct.
+ *
+ * This struct is filled in by the device-specific code for each
+ * sunxi variant, so that the generic code can support all of the
+ * devices through it.
+ */
+struct AwWdtClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+
+ /** Defines device specific register map */
+ const uint8_t *regmap;
+
+ /** Size of the regmap in bytes */
+ size_t regmap_size;
+
+ /**
+ * Read device specific register
+ *
+ * @offset: register offset to read
+ * @return true if register read successful, false otherwise
+ */
+ bool (*read)(AwWdtState *s, uint32_t offset);
+
+ /**
+ * Write device specific register
+ *
+ * @offset: register offset to write
+ * @data: value to set in register
+ * @return true if register write successful, false otherwise
+ */
+ bool (*write)(AwWdtState *s, uint32_t offset, uint32_t data);
+
+ /**
+ * Check if watchdog can generate system reset
+ *
+ * @return true if watchdog can generate system reset
+ */
+ bool (*can_reset_system)(AwWdtState *s);
+
+ /**
+ * Check if provided key is valid
+ *
+ * @value: value written to register
+ * @return true if key is valid, false otherwise
+ */
+ bool (*is_key_valid)(AwWdtState *s, uint32_t val);
+
+ /**
+ * Get current INTV_VALUE setting
+ *
+ * @return current INTV_VALUE (0-15)
+ */
+ uint8_t (*get_intv_value)(AwWdtState *s);
+};
+
+#endif /* HW_WATCHDOG_ALLWINNER_WDT_H */
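The class-level hooks let one model cover both register layouts, so picking a variant is an ordinary sysbus instantiation. A hedged board-wiring sketch, assuming the standard qdev/sysbus helpers; the MMIO base address is an assumption, not taken from this header:

/* Sketch: map a sun4i watchdog at an assumed base address. */
static void board_add_wdt(void)
{
    DeviceState *dev = qdev_new(TYPE_AW_WDT_SUN4I);

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x01c20c90 /* assumed base */);
}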
diff --git a/include/hw/watchdog/cmsdk-apb-watchdog.h b/include/hw/watchdog/cmsdk-apb-watchdog.h
index ab8b5987a1..c6b3e78731 100644
--- a/include/hw/watchdog/cmsdk-apb-watchdog.h
+++ b/include/hw/watchdog/cmsdk-apb-watchdog.h
@@ -16,7 +16,7 @@
* https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit
*
* QEMU interface:
- * + QOM property "wdogclk-frq": frequency at which the watchdog is clocked
+ * + Clock input "WDOGCLK": clock for the watchdog's timer
* + sysbus MMIO region 0: the register bank
* + sysbus IRQ 0: watchdog interrupt
*
@@ -33,20 +33,28 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_WATCHDOG "cmsdk-apb-watchdog"
-#define CMSDK_APB_WATCHDOG(obj) OBJECT_CHECK(CMSDKAPBWatchdog, (obj), \
- TYPE_CMSDK_APB_WATCHDOG)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBWatchdog, CMSDK_APB_WATCHDOG)
-typedef struct CMSDKAPBWatchdog {
+/*
+ * This shares the same struct (and cast macro) as the base
+ * cmsdk-apb-watchdog device.
+ */
+#define TYPE_LUMINARY_WATCHDOG "luminary-watchdog"
+
+struct CMSDKAPBWatchdog {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
qemu_irq wdogint;
- uint32_t wdogclk_frq;
+ bool is_luminary;
struct ptimer_state *timer;
+ Clock *wdogclk;
uint32_t control;
uint32_t intstatus;
@@ -54,6 +62,7 @@ typedef struct CMSDKAPBWatchdog {
uint32_t itcr;
uint32_t itop;
uint32_t resetstatus;
-} CMSDKAPBWatchdog;
+ const uint32_t *id;
+};
#endif
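Since the "wdogclk-frq" property has been replaced by the "WDOGCLK" clock input, board code now hands the watchdog a Clock object instead of a number. A minimal sketch assuming the qdev clock helpers; the 25 MHz frequency and function name are illustrative:

/* Sketch: feed the watchdog a 25 MHz WDOGCLK (frequency is an assumption). */
static void board_wire_wdog(DeviceState *wdog, Object *board)
{
    Clock *wdogclk = clock_new(board, "WDOGCLK");

    clock_set_hz(wdogclk, 25 * 1000 * 1000);
    qdev_connect_clock_in(wdog, "WDOGCLK", wdogclk);
}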
diff --git a/include/hw/watchdog/sbsa_gwdt.h b/include/hw/watchdog/sbsa_gwdt.h
new file mode 100644
index 0000000000..70b137de30
--- /dev/null
+++ b/include/hw/watchdog/sbsa_gwdt.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Linaro Limited
+ *
+ * Authors:
+ * Shashi Mallela <shashi.mallela@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef WDT_SBSA_GWDT_H
+#define WDT_SBSA_GWDT_H
+
+#include "qemu/bitops.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+
+#define TYPE_WDT_SBSA "sbsa_gwdt"
+#define SBSA_GWDT(obj) \
+ OBJECT_CHECK(SBSA_GWDTState, (obj), TYPE_WDT_SBSA)
+#define SBSA_GWDT_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SBSA_GWDTClass, (klass), TYPE_WDT_SBSA)
+#define SBSA_GWDT_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SBSA_GWDTClass, (obj), TYPE_WDT_SBSA)
+
+/* SBSA Generic Watchdog register definitions */
+/* refresh frame */
+#define SBSA_GWDT_WRR 0x000
+
+/* control frame */
+#define SBSA_GWDT_WCS 0x000
+#define SBSA_GWDT_WOR 0x008
+#define SBSA_GWDT_WORU 0x00C
+#define SBSA_GWDT_WCV 0x010
+#define SBSA_GWDT_WCVU 0x014
+
+/* Watchdog Interface Identification Register */
+#define SBSA_GWDT_W_IIDR 0xFCC
+
+/* Watchdog Control and Status Register Bits */
+#define SBSA_GWDT_WCS_EN BIT(0)
+#define SBSA_GWDT_WCS_WS0 BIT(1)
+#define SBSA_GWDT_WCS_WS1 BIT(2)
+
+#define SBSA_GWDT_WOR_MASK 0x0000FFFF
+
+/*
+ * Watchdog Interface Identification Register definition
+ * considering JEP106 code for ARM in Bits [11:0]
+ */
+#define SBSA_GWDT_ID 0x1043B
+
+/* Two separate memory regions, one each for the refresh and control register frames */
+#define SBSA_GWDT_RMMIO_SIZE 0x1000
+#define SBSA_GWDT_CMMIO_SIZE 0x1000
+
+#define SBSA_TIMER_FREQ 62500000 /* Hz */
+
+typedef struct SBSA_GWDTState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion rmmio;
+ MemoryRegion cmmio;
+ qemu_irq irq;
+
+ QEMUTimer *timer;
+
+ uint32_t id;
+ uint32_t wcs;
+ uint32_t worl;
+ uint32_t woru;
+ uint32_t wcvl;
+ uint32_t wcvu;
+} SBSA_GWDTState;
+
+#endif /* WDT_SBSA_GWDT_H */
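The watchdog offset register pair (WOR/WORU) holds a tick count against SBSA_TIMER_FREQ, so converting a timeout in seconds is plain arithmetic. A small illustrative helper, not part of the model:

/* Sketch: split a timeout in seconds into the WOR/WORU register pair. */
static void sbsa_gwdt_timeout_to_wor(unsigned int seconds,
                                     uint32_t *worl, uint32_t *woru)
{
    uint64_t ticks = (uint64_t)seconds * SBSA_TIMER_FREQ;

    *worl = (uint32_t)ticks;         /* low 32 bits  -> SBSA_GWDT_WOR  */
    *woru = (uint32_t)(ticks >> 32); /* high bits    -> SBSA_GWDT_WORU */
}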
diff --git a/include/hw/watchdog/wdt_aspeed.h b/include/hw/watchdog/wdt_aspeed.h
index 7de3e5c224..e90ef86651 100644
--- a/include/hw/watchdog/wdt_aspeed.h
+++ b/include/hw/watchdog/wdt_aspeed.h
@@ -6,18 +6,24 @@
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*/
-#ifndef ASPEED_WDT_H
-#define ASPEED_WDT_H
+#ifndef WDT_ASPEED_H
+#define WDT_ASPEED_H
+
+#include "hw/misc/aspeed_scu.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_WDT "aspeed.wdt"
-#define ASPEED_WDT(obj) \
- OBJECT_CHECK(AspeedWDTState, (obj), TYPE_ASPEED_WDT)
+OBJECT_DECLARE_TYPE(AspeedWDTState, AspeedWDTClass, ASPEED_WDT)
+#define TYPE_ASPEED_2400_WDT TYPE_ASPEED_WDT "-ast2400"
+#define TYPE_ASPEED_2500_WDT TYPE_ASPEED_WDT "-ast2500"
+#define TYPE_ASPEED_2600_WDT TYPE_ASPEED_WDT "-ast2600"
+#define TYPE_ASPEED_1030_WDT TYPE_ASPEED_WDT "-ast1030"
-#define ASPEED_WDT_REGS_MAX (0x20 / 4)
+#define ASPEED_WDT_REGS_MAX (0x30 / 4)
-typedef struct AspeedWDTState {
+struct AspeedWDTState {
/*< private >*/
SysBusDevice parent_obj;
QEMUTimer *timer;
@@ -26,9 +32,22 @@ typedef struct AspeedWDTState {
MemoryRegion iomem;
uint32_t regs[ASPEED_WDT_REGS_MAX];
+ AspeedSCUState *scu;
uint32_t pclk_freq;
- uint32_t silicon_rev;
+};
+
+
+struct AspeedWDTClass {
+ SysBusDeviceClass parent_class;
+
+ uint32_t iosize;
uint32_t ext_pulse_width_mask;
-} AspeedWDTState;
+ uint32_t reset_ctrl_reg;
+ void (*reset_pulse)(AspeedWDTState *s, uint32_t property);
+ void (*wdt_reload)(AspeedWDTState *s);
+ uint64_t (*sanitize_ctrl)(uint64_t data);
+ uint32_t default_status;
+ uint32_t default_reload_value;
+};
-#endif /* ASPEED_WDT_H */
+#endif /* WDT_ASPEED_H */
diff --git a/include/hw/watchdog/wdt_diag288.h b/include/hw/watchdog/wdt_diag288.h
index 706d96b753..f72c1d3318 100644
--- a/include/hw/watchdog/wdt_diag288.h
+++ b/include/hw/watchdog/wdt_diag288.h
@@ -1,36 +1,35 @@
#ifndef WDT_DIAG288_H
#define WDT_DIAG288_H
-#include "hw/qdev.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
#define TYPE_WDT_DIAG288 "diag288"
-#define DIAG288(obj) \
- OBJECT_CHECK(DIAG288State, (obj), TYPE_WDT_DIAG288)
-#define DIAG288_CLASS(klass) \
- OBJECT_CLASS_CHECK(DIAG288Class, (klass), TYPE_WDT_DIAG288)
-#define DIAG288_GET_CLASS(obj) \
- OBJECT_GET_CLASS(DIAG288Class, (obj), TYPE_WDT_DIAG288)
+typedef struct DIAG288Class DIAG288Class;
+typedef struct DIAG288State DIAG288State;
+DECLARE_OBJ_CHECKERS(DIAG288State, DIAG288Class,
+ DIAG288, TYPE_WDT_DIAG288)
#define WDT_DIAG288_INIT 0
#define WDT_DIAG288_CHANGE 1
#define WDT_DIAG288_CANCEL 2
-typedef struct DIAG288State {
+struct DIAG288State {
/*< private >*/
DeviceState parent_obj;
QEMUTimer *timer;
bool enabled;
/*< public >*/
-} DIAG288State;
+};
-typedef struct DIAG288Class {
+struct DIAG288Class {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
int (*handle_timer)(DIAG288State *dev,
uint64_t func, uint64_t timeout);
-} DIAG288Class;
+};
#endif /* WDT_DIAG288_H */
diff --git a/include/hw/watchdog/wdt_imx2.h b/include/hw/watchdog/wdt_imx2.h
new file mode 100644
index 0000000000..600a552d2e
--- /dev/null
+++ b/include/hw/watchdog/wdt_imx2.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017, Impinj, Inc.
+ *
+ * i.MX2 Watchdog IP block
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef WDT_IMX2_H
+#define WDT_IMX2_H
+
+#include "qemu/bitops.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "hw/ptimer.h"
+#include "qom/object.h"
+
+#define TYPE_IMX2_WDT "imx2.wdt"
+OBJECT_DECLARE_SIMPLE_TYPE(IMX2WdtState, IMX2_WDT)
+
+enum IMX2WdtRegisters {
+ IMX2_WDT_WCR = 0x0000, /* Control Register */
+ IMX2_WDT_WSR = 0x0002, /* Service Register */
+ IMX2_WDT_WRSR = 0x0004, /* Reset Status Register */
+ IMX2_WDT_WICR = 0x0006, /* Interrupt Control Register */
+ IMX2_WDT_WMCR = 0x0008, /* Misc Register */
+};
+
+#define IMX2_WDT_MMIO_SIZE 0x000a
+
+/* Control Register definitions */
+#define IMX2_WDT_WCR_WT (0xFF << 8) /* Watchdog Timeout Field */
+#define IMX2_WDT_WCR_WDW BIT(7) /* WDOG Disable for Wait */
+#define IMX2_WDT_WCR_WDA BIT(5) /* WDOG Assertion */
+#define IMX2_WDT_WCR_SRS BIT(4) /* Software Reset Signal */
+#define IMX2_WDT_WCR_WDT BIT(3) /* WDOG Timeout Assertion */
+#define IMX2_WDT_WCR_WDE BIT(2) /* Watchdog Enable */
+#define IMX2_WDT_WCR_WDBG BIT(1) /* Watchdog Debug Enable */
+#define IMX2_WDT_WCR_WDZST BIT(0) /* Watchdog Timer Suspend */
+
+#define IMX2_WDT_WCR_LOCK_MASK (IMX2_WDT_WCR_WDZST | IMX2_WDT_WCR_WDBG \
+ | IMX2_WDT_WCR_WDW)
+
+/* Service Register definitions */
+#define IMX2_WDT_SEQ1 0x5555 /* service sequence 1 */
+#define IMX2_WDT_SEQ2 0xAAAA /* service sequence 2 */
+
+/* Reset Status Register definitions */
+#define IMX2_WDT_WRSR_TOUT BIT(1) /* Reset due to Timeout */
+#define IMX2_WDT_WRSR_SFTW BIT(0) /* Reset due to software reset */
+
+/* Interrupt Control Register definitions */
+#define IMX2_WDT_WICR_WIE BIT(15) /* Interrupt Enable */
+#define IMX2_WDT_WICR_WTIS BIT(14) /* Interrupt Status */
+#define IMX2_WDT_WICR_WICT 0xff /* Interrupt Timeout */
+#define IMX2_WDT_WICR_WICT_DEF 0x04 /* Default interrupt timeout (2s) */
+
+#define IMX2_WDT_WICR_LOCK_MASK (IMX2_WDT_WICR_WIE | IMX2_WDT_WICR_WICT)
+
+/* Misc Control Register definitions */
+#define IMX2_WDT_WMCR_PDE BIT(0) /* Power-Down Enable */
+
+struct IMX2WdtState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ qemu_irq irq;
+
+ struct ptimer_state *timer;
+ struct ptimer_state *itimer;
+
+ bool pretimeout_support;
+ bool wicr_locked;
+
+ uint16_t wcr;
+ uint16_t wsr;
+ uint16_t wrsr;
+ uint16_t wicr;
+ uint16_t wmcr;
+
+ bool wcr_locked; /* affects WDZST, WDBG, and WDW */
+ bool wcr_wde_locked; /* affects WDE */
+ bool wcr_wdt_locked; /* affects WDT (never cleared) */
+};
+
+#endif /* WDT_IMX2_H */
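The service register definitions encode the usual i.MX ping protocol: the guest keeps the watchdog alive by writing the two magic sequences to WSR, in order. A hedged guest-side sketch; the register pointer is an illustration of a mapped register bank, not part of this header:

/* Sketch: service (ping) the watchdog so it does not fire. */
static void imx2_wdt_ping(volatile uint16_t *wdt_regs)
{
    /* Register offsets are in bytes; WSR sits at offset 2, i.e. index 1. */
    wdt_regs[IMX2_WDT_WSR / 2] = IMX2_WDT_SEQ1;
    wdt_regs[IMX2_WDT_WSR / 2] = IMX2_WDT_SEQ2;
}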
diff --git a/include/hw/xen/arch_hvm.h b/include/hw/xen/arch_hvm.h
new file mode 100644
index 0000000000..c7c515220d
--- /dev/null
+++ b/include/hw/xen/arch_hvm.h
@@ -0,0 +1,5 @@
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
+#include "hw/i386/xen_arch_hvm.h"
+#elif defined(TARGET_ARM) || defined(TARGET_ARM_64)
+#include "hw/arm/xen_arch_hvm.h"
+#endif
diff --git a/include/hw/xen/interface/arch-arm.h b/include/hw/xen/interface/arch-arm.h
new file mode 100644
index 0000000000..1528ced509
--- /dev/null
+++ b/include/hw/xen/interface/arch-arm.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * arch-arm.h
+ *
+ * Guest OS interface to ARM Xen.
+ *
+ * Copyright 2011 (C) Citrix Systems
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_ARM_H__
+#define __XEN_PUBLIC_ARCH_ARM_H__
+
+/*
+ * `incontents 50 arm_abi Hypercall Calling Convention
+ *
+ * A hypercall is issued using the ARM HVC instruction.
+ *
+ * A hypercall can take up to 5 arguments. These are passed in
+ * registers, the first argument in x0/r0 (for arm64/arm32 guests
+ * respectively irrespective of whether the underlying hypervisor is
+ * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2,
+ * the forth in x3/r3 and the fifth in x4/r4.
+ *
+ * The hypercall number is passed in r12 (arm) or x16 (arm64). In both
+ * cases the relevant ARM procedure calling convention specifies this
+ * is an inter-procedure-call scratch register (e.g. for use in linker
+ * stubs). This use does not conflict with use during a hypercall.
+ *
+ * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG.
+ *
+ * The return value is in x0/r0.
+ *
+ * The hypercall will clobber x16/r12 and the argument registers used
+ * by that hypercall (except r0 which is the return value) i.e. in
+ * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a
+ * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3.
+ *
+ * Parameter structs passed to hypercalls are laid out according to
+ * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA
+ * EABI) and Procedure Call Standard for the ARM 64-bit Architecture
+ * (AAPCS64). Where there is a conflict the 64-bit standard should be
+ * used regardless of guest type. Structures which are passed as
+ * hypercall arguments are always little endian.
+ *
+ * All memory which is shared with other entities in the system
+ * (including the hypervisor and other guests) must reside in memory
+ * which is mapped as Normal Inner Write-Back Outer Write-Back Inner-Shareable.
+ * This applies to:
+ * - hypercall arguments passed via a pointer to guest memory.
+ * - memory shared via the grant table mechanism (including PV I/O
+ * rings etc).
+ * - memory shared with the hypervisor (struct shared_info, struct
+ * vcpu_info, the grant table, etc).
+ *
+ * Any cache allocation hints are acceptable.
+ */
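To make the convention concrete, a two-argument hypercall wrapper for an AArch64 guest could look like the sketch below (GCC inline assembly; the hypercall number constants from xen.h are assumed to be in scope, and the immediate 0xea1 is XEN_HYPERCALL_TAG defined later in this header):

    static inline long xen_hypercall2(unsigned long nr,
                                      unsigned long arg1, unsigned long arg2)
    {
        register unsigned long x0  asm("x0")  = arg1;  /* arg 1 / return value */
        register unsigned long x1  asm("x1")  = arg2;  /* arg 2 (clobbered)    */
        register unsigned long x16 asm("x16") = nr;    /* hypercall number     */

        asm volatile ("hvc #0xea1"                     /* XEN_HYPERCALL_TAG */
                      : "+r" (x0), "+r" (x1), "+r" (x16)
                      :
                      : "memory");
        return x0;
    }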
+
+/*
+ * `incontents 55 arm_hcall Supported Hypercalls
+ *
+ * Xen on ARM makes extensive use of hardware facilities and therefore
+ * only a subset of the potential hypercalls are required.
+ *
+ * Since ARM uses second stage paging any machine/physical addresses
+ * passed to hypercalls are Guest Physical Addresses (Intermediate
+ * Physical Addresses) unless otherwise noted.
+ *
+ * The following hypercalls (and sub operations) are supported on the
+ * ARM platform. Other hypercalls should be considered
+ * unavailable/unsupported.
+ *
+ * HYPERVISOR_memory_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_domctl
+ * All generic sub-operations, with the exception of:
+ * * XEN_DOMCTL_irq_permission (not yet implemented)
+ *
+ * HYPERVISOR_sched_op
+ * All generic sub-operations, with the exception of:
+ * * SCHEDOP_block -- prefer wfi hardware instruction
+ *
+ * HYPERVISOR_console_io
+ * All generic sub-operations
+ *
+ * HYPERVISOR_xen_version
+ * All generic sub-operations
+ *
+ * HYPERVISOR_event_channel_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_physdev_op
+ * Exactly these sub-operations are supported:
+ * PHYSDEVOP_pci_device_add
+ * PHYSDEVOP_pci_device_remove
+ *
+ * HYPERVISOR_sysctl
+ * All generic sub-operations, with the exception of:
+ * * XEN_SYSCTL_page_offline_op
+ * * XEN_SYSCTL_get_pmstat
+ * * XEN_SYSCTL_pm_op
+ *
+ * HYPERVISOR_hvm_op
+ * Exactly these sub-operations are supported:
+ * * HVMOP_set_param
+ * * HVMOP_get_param
+ *
+ * HYPERVISOR_grant_table_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_vcpu_op
+ * Exactly these sub-operations are supported:
+ * * VCPUOP_register_vcpu_info
+ * * VCPUOP_register_runstate_memory_area
+ *
+ * HYPERVISOR_argo_op
+ * All generic sub-operations
+ *
+ * Other notes on the ARM ABI:
+ *
+ * - struct start_info is not exported to ARM guests.
+ *
+ * - struct shared_info is mapped by ARM guests using the
+ * HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing
+ * XENMAPSPACE_shared_info as space parameter.
+ *
+ * - All the per-cpu struct vcpu_info are mapped by ARM guests using the
+ * HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including cpu0
+ * struct vcpu_info.
+ *
+ * - The grant table is mapped using the HYPERVISOR_memory_op sub-op
+ * XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space
+ * parameter. The memory range specified under the Xen compatible
+ * hypervisor node on device tree can be used as target gpfn for the
+ * mapping.
+ *
+ * - Xenstore is initialized by using the two hvm_params
+ * HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read
+ * with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
+ *
+ * - The paravirtualized console is initialized by using the two
+ * hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN. They
+ * can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
+ *
+ * - Event channel notifications are delivered using the percpu GIC
+ * interrupt specified under the Xen compatible hypervisor node on
+ * device tree.
+ *
+ * - The device tree Xen compatible node is fully described under Linux
+ * at Documentation/devicetree/bindings/arm/xen.txt.
+ */
+
+#define XEN_HYPERCALL_TAG 0xEA1
+
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+
+#ifndef __ASSEMBLY__
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef union { type *p; unsigned long q; } \
+ __guest_handle_ ## name; \
+ typedef union { type *p; uint64_aligned_t q; } \
+ __guest_handle_64_ ## name
+
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory. On ARM it is always 8 bytes in size and 8-byte
+ * aligned.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64.
+ */
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
+#define set_xen_guest_handle_raw(hnd, val) \
+ do { \
+ __typeof__(&(hnd)) _sxghr_tmp = &(hnd); \
+ _sxghr_tmp->q = 0; \
+ _sxghr_tmp->p = val; \
+ } while ( 0 )
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
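The macros above are easiest to read next to a usage sketch. The struct and helper below are purely illustrative (they are not part of any Xen ABI); they show a handle embedded in a hypercall argument structure and initialised with set_xen_guest_handle():

    typedef uint32_t demo_val_t;
    DEFINE_XEN_GUEST_HANDLE(demo_val_t);

    struct demo_op {
        uint32_t count;                    /* IN: number of elements          */
        uint32_t _pad;
        XEN_GUEST_HANDLE(demo_val_t) buf;  /* IN: guest buffer, 8-byte handle */
    };

    static inline void demo_op_init(struct demo_op *op,
                                    demo_val_t *vals, uint32_t n)
    {
        op->count = n;
        op->_pad = 0;
        set_xen_guest_handle(op->buf, vals);  /* zeroes .q, then stores .p */
    }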
+
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn PRIx64
+#define PRIu_xen_pfn PRIu64
+
+/*
+ * Maximum number of virtual CPUs in legacy multi-processor guests.
+ * Only one. All other VCPUs must use VCPUOP_register_vcpu_info.
+ */
+#define XEN_LEGACY_MAX_VCPUS 1
+
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong PRIx64
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
+# define __DECL_REG(n64, n32) union { \
+ uint64_t n64; \
+ uint32_t n32; \
+ }
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */
+#define __DECL_REG(n64, n32) uint64_t n64
+#endif
+
+struct vcpu_guest_core_regs
+{
+ /* AArch64 AArch32 */
+ __DECL_REG(x0, r0_usr);
+ __DECL_REG(x1, r1_usr);
+ __DECL_REG(x2, r2_usr);
+ __DECL_REG(x3, r3_usr);
+ __DECL_REG(x4, r4_usr);
+ __DECL_REG(x5, r5_usr);
+ __DECL_REG(x6, r6_usr);
+ __DECL_REG(x7, r7_usr);
+ __DECL_REG(x8, r8_usr);
+ __DECL_REG(x9, r9_usr);
+ __DECL_REG(x10, r10_usr);
+ __DECL_REG(x11, r11_usr);
+ __DECL_REG(x12, r12_usr);
+
+ __DECL_REG(x13, sp_usr);
+ __DECL_REG(x14, lr_usr);
+
+ __DECL_REG(x15, __unused_sp_hyp);
+
+ __DECL_REG(x16, lr_irq);
+ __DECL_REG(x17, sp_irq);
+
+ __DECL_REG(x18, lr_svc);
+ __DECL_REG(x19, sp_svc);
+
+ __DECL_REG(x20, lr_abt);
+ __DECL_REG(x21, sp_abt);
+
+ __DECL_REG(x22, lr_und);
+ __DECL_REG(x23, sp_und);
+
+ __DECL_REG(x24, r8_fiq);
+ __DECL_REG(x25, r9_fiq);
+ __DECL_REG(x26, r10_fiq);
+ __DECL_REG(x27, r11_fiq);
+ __DECL_REG(x28, r12_fiq);
+
+ __DECL_REG(x29, sp_fiq);
+ __DECL_REG(x30, lr_fiq);
+
+ /* Return address and mode */
+ __DECL_REG(pc64, pc32); /* ELR_EL2 */
+ uint64_t cpsr; /* SPSR_EL2 */
+
+ union {
+ uint64_t spsr_el1; /* AArch64 */
+ uint32_t spsr_svc; /* AArch32 */
+ };
+
+ /* AArch32 guests only */
+ uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
+
+ /* AArch64 guests only */
+ uint64_t sp_el0;
+ uint64_t sp_el1, elr_el1;
+};
+typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);
+
+#undef __DECL_REG
+
+struct vcpu_guest_context {
+#define _VGCF_online 0
+#define VGCF_online (1<<_VGCF_online)
+ uint32_t flags; /* VGCF_* */
+
+ struct vcpu_guest_core_regs user_regs; /* Core CPU registers */
+
+ uint64_t sctlr;
+ uint64_t ttbcr, ttbr0, ttbr1;
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+#define XEN_DOMCTL_CONFIG_GIC_NATIVE 0
+#define XEN_DOMCTL_CONFIG_GIC_V2 1
+#define XEN_DOMCTL_CONFIG_GIC_V3 2
+
+#define XEN_DOMCTL_CONFIG_TEE_NONE 0
+#define XEN_DOMCTL_CONFIG_TEE_OPTEE 1
+
+struct xen_arch_domainconfig {
+ /* IN/OUT */
+ uint8_t gic_version;
+ /* IN */
+ uint16_t tee_type;
+ /* IN */
+ uint32_t nr_spis;
+ /*
+ * OUT
+ * Based on the property clock-frequency in the DT timer node.
+ * The property may be present when the bootloader/firmware doesn't
+ * correctly set CNTFRQ, which holds the timer frequency.
+ *
+ * As it's not possible to trap this register, we have to replicate
+ * the value in the guest DT.
+ *
+ * = 0 => property not present
+ * > 0 => Value of the property
+ *
+ */
+ uint32_t clock_frequency;
+};
+#endif /* __XEN__ || __XEN_TOOLS__ */
+
+struct arch_vcpu_info {
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct arch_shared_info {
+};
+typedef struct arch_shared_info arch_shared_info_t;
+typedef uint64_t xen_callback_t;
+
+#endif
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/* PSR bits (CPSR, SPSR) */
+
+#define PSR_THUMB (1<<5) /* Thumb Mode enable */
+#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
+#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
+#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
+#define PSR_BIG_ENDIAN (1<<9) /* arm32: Big Endian Mode */
+#define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */
+#define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */
+#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
+#define PSR_Z (1<<30) /* Zero condition flag */
+
+/* 32 bit modes */
+#define PSR_MODE_USR 0x10
+#define PSR_MODE_FIQ 0x11
+#define PSR_MODE_IRQ 0x12
+#define PSR_MODE_SVC 0x13
+#define PSR_MODE_MON 0x16
+#define PSR_MODE_ABT 0x17
+#define PSR_MODE_HYP 0x1a
+#define PSR_MODE_UND 0x1b
+#define PSR_MODE_SYS 0x1f
+
+/* 64 bit modes */
+#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */
+#define PSR_MODE_EL3h 0x0d
+#define PSR_MODE_EL3t 0x0c
+#define PSR_MODE_EL2h 0x09
+#define PSR_MODE_EL2t 0x08
+#define PSR_MODE_EL1h 0x05
+#define PSR_MODE_EL1t 0x04
+#define PSR_MODE_EL0t 0x00
+
+/*
+ * We set PSR_Z to be able to boot Linux kernel versions with an invalid
+ * encoding of the first 8 NOP instructions. See commit a92882a4d270 in
+ * Linux.
+ *
+ * Note that PSR_Z is also set by U-Boot and QEMU -kernel when loading
+ * zImage kernels on aarch32.
+ */
+#define PSR_GUEST32_INIT (PSR_Z|PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)
+#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)
+
+#define SCTLR_GUEST_INIT xen_mk_ullong(0x00c50078)
+
+/*
+ * Virtual machine platform (memory layout, interrupts)
+ *
+ * These are defined for consistency between the tools and the
+ * hypervisor. Guests must not rely on these hardcoded values but
+ * should instead use the FDT.
+ */
+
+/* Physical Address Space */
+
+/* Virtio MMIO mappings */
+#define GUEST_VIRTIO_MMIO_BASE xen_mk_ullong(0x02000000)
+#define GUEST_VIRTIO_MMIO_SIZE xen_mk_ullong(0x00100000)
+
+/*
+ * vGIC mappings: Only one set of mapping is used by the guest.
+ * Therefore they can overlap.
+ */
+
+/* vGIC v2 mappings */
+#define GUEST_GICD_BASE xen_mk_ullong(0x03001000)
+#define GUEST_GICD_SIZE xen_mk_ullong(0x00001000)
+#define GUEST_GICC_BASE xen_mk_ullong(0x03002000)
+#define GUEST_GICC_SIZE xen_mk_ullong(0x00002000)
+
+/* vGIC v3 mappings */
+#define GUEST_GICV3_GICD_BASE xen_mk_ullong(0x03001000)
+#define GUEST_GICV3_GICD_SIZE xen_mk_ullong(0x00010000)
+
+#define GUEST_GICV3_RDIST_REGIONS 1
+
+#define GUEST_GICV3_GICR0_BASE xen_mk_ullong(0x03020000) /* vCPU0..127 */
+#define GUEST_GICV3_GICR0_SIZE xen_mk_ullong(0x01000000)
+
+/*
+ * 256 MB is reserved for VPCI configuration space based on calculation
+ * 256 buses x 32 devices x 8 functions x 4 KB = 256 MB
+ */
+#define GUEST_VPCI_ECAM_BASE xen_mk_ullong(0x10000000)
+#define GUEST_VPCI_ECAM_SIZE xen_mk_ullong(0x10000000)
+
+/* ACPI tables physical address */
+#define GUEST_ACPI_BASE xen_mk_ullong(0x20000000)
+#define GUEST_ACPI_SIZE xen_mk_ullong(0x02000000)
+
+/* PL011 mappings */
+#define GUEST_PL011_BASE xen_mk_ullong(0x22000000)
+#define GUEST_PL011_SIZE xen_mk_ullong(0x00001000)
+
+/* Guest PCI-PCIe memory space where config space and BARs will be available. */
+#define GUEST_VPCI_ADDR_TYPE_MEM xen_mk_ullong(0x02000000)
+#define GUEST_VPCI_MEM_ADDR xen_mk_ullong(0x23000000)
+#define GUEST_VPCI_MEM_SIZE xen_mk_ullong(0x10000000)
+
+/*
+ * 16MB == 4096 pages reserved for guest to use as a region to map its
+ * grant table in.
+ */
+#define GUEST_GNTTAB_BASE xen_mk_ullong(0x38000000)
+#define GUEST_GNTTAB_SIZE xen_mk_ullong(0x01000000)
+
+#define GUEST_MAGIC_BASE xen_mk_ullong(0x39000000)
+#define GUEST_MAGIC_SIZE xen_mk_ullong(0x01000000)
+
+#define GUEST_RAM_BANKS 2
+
+/*
+ * The way to find the extended regions (to be exposed to the guest as unused
+ * address space) relies on the fact that the regions reserved for the RAM
+ * below are big enough to also accommodate such regions.
+ */
+#define GUEST_RAM0_BASE xen_mk_ullong(0x40000000) /* 3GB of low RAM @ 1GB */
+#define GUEST_RAM0_SIZE xen_mk_ullong(0xc0000000)
+
+/* 4GB @ 4GB Prefetch Memory for VPCI */
+#define GUEST_VPCI_ADDR_TYPE_PREFETCH_MEM xen_mk_ullong(0x42000000)
+#define GUEST_VPCI_PREFETCH_MEM_ADDR xen_mk_ullong(0x100000000)
+#define GUEST_VPCI_PREFETCH_MEM_SIZE xen_mk_ullong(0x100000000)
+
+#define GUEST_RAM1_BASE xen_mk_ullong(0x0200000000) /* 1016GB of RAM @ 8GB */
+#define GUEST_RAM1_SIZE xen_mk_ullong(0xfe00000000)
+
+#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */
+/* Largest amount of actual RAM, not including holes */
+#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)
+/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */
+#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
+#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }
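As the comment above suggests, the *_BANK_* initialisers are meant to be dropped straight into arrays. A small sketch of the kind of helper this enables, checking whether a guest-physical address falls inside one of the RAM banks (assumes this header is included for the GUEST_RAM_* macros):

    #include <stdbool.h>
    #include <stdint.h>

    static bool gpa_is_guest_ram(uint64_t gpa)
    {
        static const uint64_t bases[GUEST_RAM_BANKS] = GUEST_RAM_BANK_BASES;
        static const uint64_t sizes[GUEST_RAM_BANKS] = GUEST_RAM_BANK_SIZES;

        for (unsigned int i = 0; i < GUEST_RAM_BANKS; i++) {
            if (gpa >= bases[i] && gpa - bases[i] < sizes[i]) {
                return true;
            }
        }
        return false;
    }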
+
+/* Current supported guest VCPUs */
+#define GUEST_MAX_VCPUS 128
+
+/* Interrupts */
+#define GUEST_TIMER_VIRT_PPI 27
+#define GUEST_TIMER_PHYS_S_PPI 29
+#define GUEST_TIMER_PHYS_NS_PPI 30
+#define GUEST_EVTCHN_PPI 31
+
+#define GUEST_VPL011_SPI 32
+
+#define GUEST_VIRTIO_MMIO_SPI_FIRST 33
+#define GUEST_VIRTIO_MMIO_SPI_LAST 43
+
+/* PSCI functions */
+#define PSCI_cpu_suspend 0
+#define PSCI_cpu_off 1
+#define PSCI_cpu_on 2
+#define PSCI_migrate 3
+
+#endif
+
+#ifndef __ASSEMBLY__
+/* Stub definition of PMU structure */
+typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
+#endif
+
+#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/arch-x86/cpuid.h b/include/hw/xen/interface/arch-x86/cpuid.h
new file mode 100644
index 0000000000..7ecd16ae05
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/cpuid.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * arch-x86/cpuid.h
+ *
+ * CPUID interface to Xen.
+ *
+ * Copyright (c) 2007 Citrix Systems, Inc.
+ *
+ * Authors:
+ * Keir Fraser <keir@xen.org>
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
+#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
+
+/*
+ * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
+ * can be found at the first otherwise unused 0x100 aligned boundary starting
+ * from 0x40000000.
+ *
+ * e.g. if viridian extensions are enabled for an HVM domain, the Xen cpuid
+ * leaves will start at 0x40000100.
+ */
+
+#define XEN_CPUID_FIRST_LEAF 0x40000000
+#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
+
+/*
+ * Leaf 1 (0x40000x00)
+ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
+ * are supported by the Xen host.
+ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
+ * of a Xen host.
+ */
+#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
+#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
+#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
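A guest detects Xen by scanning the 0x100-aligned leaves for this signature, as described above. A minimal sketch using the GCC/clang <cpuid.h> helper (illustrative; the scan range mirrors common guest code and is an assumption):

    #include <cpuid.h>
    #include <stdint.h>

    /* Returns the base of the Xen leaf range, or 0 if Xen is not detected. */
    static uint32_t xen_cpuid_base(void)
    {
        uint32_t eax, ebx, ecx, edx;

        for (uint32_t base = XEN_CPUID_FIRST_LEAF;
             base < XEN_CPUID_FIRST_LEAF + 0x10000; base += 0x100) {
            __cpuid(base, eax, ebx, ecx, edx);
            if (ebx == XEN_CPUID_SIGNATURE_EBX &&
                ecx == XEN_CPUID_SIGNATURE_ECX &&
                edx == XEN_CPUID_SIGNATURE_EDX) {
                return base;  /* leaves base .. eax are valid */
            }
        }
        return 0;
    }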
+
+/*
+ * Leaf 2 (0x40000x01)
+ * EAX[31:16]: Xen major version.
+ * EAX[15: 0]: Xen minor version.
+ * EBX-EDX: Reserved (currently all zeroes).
+ */
+
+/*
+ * Leaf 3 (0x40000x02)
+ * EAX: Number of hypercall transfer pages. This register is always guaranteed
+ * to specify one hypercall page.
+ * EBX: Base address of Xen-specific MSRs.
+ * ECX: Features 1. Unused bits are set to zero.
+ * EDX: Features 2. Unused bits are set to zero.
+ */
+
+/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
+#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
+#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
+
+/*
+ * Leaf 4 (0x40000x03)
+ * Sub-leaf 0: EAX: bit 0: emulated tsc
+ * bit 1: host tsc is known to be reliable
+ * bit 2: RDTSCP instruction available
+ * EBX: tsc_mode: 0=default (emulate if necessary), 1=emulate,
+ * 2=no emulation, 3=no emulation + TSC_AUX support
+ * ECX: guest tsc frequency in kHz
+ * EDX: guest tsc incarnation (migration count)
+ * Sub-leaf 1: EAX: tsc offset low part
+ * EBX: tsc offset high part
+ * ECX: multiplier for tsc->ns conversion
+ * EDX: shift amount for tsc->ns conversion
+ * Sub-leaf 2: EAX: host tsc frequency in kHz
+ */
+
+/*
+ * Leaf 5 (0x40000x04)
+ * HVM-specific features
+ * Sub-leaf 0: EAX: Features
+ * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
+ * Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag)
+ */
+#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
+#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */
+/* Memory mapped from other domains has valid IOMMU entries */
+#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
+/*
+ * With interrupt format set to 0 (non-remappable) bits 55:49 from the
+ * IO-APIC RTE and bits 11:5 from the MSI address can be used to store
+ * high bits for the Destination ID. This expands the Destination ID
+ * field from 8 to 15 bits, allowing APIC IDs up to 32768 to be targeted.
+ */
+#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
+/*
+ * Per-vCPU event channel upcalls work correctly with physical IRQs
+ * bound to event channels.
+ */
+#define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6)
+
+/*
+ * Leaf 6 (0x40000x05)
+ * PV-specific parameters
+ * Sub-leaf 0: EAX: max available sub-leaf
+ * Sub-leaf 0: EBX: bits 0-7: max machine address width
+ */
+
+/* Max. address width in bits taking memory hotplug into account. */
+#define XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK (0xffu << 0)
+
+#define XEN_CPUID_MAX_NUM_LEAVES 5
+
+#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
diff --git a/include/hw/xen/interface/arch-x86/xen-x86_32.h b/include/hw/xen/interface/arch-x86/xen-x86_32.h
new file mode 100644
index 0000000000..139438e835
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/xen-x86_32.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Copyright (c) 2004-2007, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
+
+/*
+ * Hypercall interface:
+ * Input: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6)
+ * Output: %eax
+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
+ * call hypercall_page + hypercall-number * 32
+ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
+ */
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
+
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
+#define FLAT_USER_CS FLAT_RING3_CS
+#define FLAT_USER_DS FLAT_RING3_DS
+#define FLAT_USER_SS FLAT_RING3_SS
+
+#define __HYPERVISOR_VIRT_START_PAE 0xF5800000
+#define __MACH2PHYS_VIRT_START_PAE 0xF5800000
+#define __MACH2PHYS_VIRT_END_PAE 0xF6800000
+#define HYPERVISOR_VIRT_START_PAE xen_mk_ulong(__HYPERVISOR_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_START_PAE xen_mk_ulong(__MACH2PHYS_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_END_PAE xen_mk_ulong(__MACH2PHYS_VIRT_END_PAE)
+
+/* Non-PAE bounds are obsolete. */
+#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
+#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
+#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
+#define HYPERVISOR_VIRT_START_NONPAE \
+ xen_mk_ulong(__HYPERVISOR_VIRT_START_NONPAE)
+#define MACH2PHYS_VIRT_START_NONPAE \
+ xen_mk_ulong(__MACH2PHYS_VIRT_START_NONPAE)
+#define MACH2PHYS_VIRT_END_NONPAE \
+ xen_mk_ulong(__MACH2PHYS_VIRT_END_NONPAE)
+
+#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
+#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
+#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
+#endif
+
+#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
+#endif
+
+/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#undef ___DEFINE_XEN_GUEST_HANDLE
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } \
+ __guest_handle_ ## name; \
+ typedef struct { union { type *p; uint64_aligned_t q; }; } \
+ __guest_handle_64_ ## name
+#undef set_xen_guest_handle_raw
+#define set_xen_guest_handle_raw(hnd, val) \
+ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
+ (hnd).p = val; \
+ } while ( 0 )
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
+#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#if defined(XEN_GENERATING_COMPAT_HEADERS)
+/* nothing */
+#elif defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax). */
+#define __DECL_REG_LO8(which) union { \
+ uint32_t e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint32_t e ## name, _e ## name; \
+ uint16_t name; \
+}
+#else
+/* Other sources must always use the proper 32-bit name (e.g., eax). */
+#define __DECL_REG_LO8(which) uint32_t e ## which ## x
+#define __DECL_REG_LO16(name) uint32_t e ## name
+#endif
+
+struct cpu_user_regs {
+ __DECL_REG_LO8(b);
+ __DECL_REG_LO8(c);
+ __DECL_REG_LO8(d);
+ __DECL_REG_LO16(si);
+ __DECL_REG_LO16(di);
+ __DECL_REG_LO16(bp);
+ __DECL_REG_LO8(a);
+ uint16_t error_code; /* private */
+ uint16_t entry_vector; /* private */
+ __DECL_REG_LO16(ip);
+ uint16_t cs;
+ uint8_t saved_upcall_mask;
+ uint8_t _pad0;
+ __DECL_REG_LO16(flags); /* eflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO16(sp);
+ uint16_t ss, _pad1;
+ uint16_t es, _pad2;
+ uint16_t ds, _pad3;
+ uint16_t fs, _pad4;
+ uint16_t gs, _pad5;
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
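A quick round-trip shows how the rotation packs the high PFN bits into the low bits of the %cr3 value (a sketch; the concrete numbers are only an example):

    #include <assert.h>

    static void cr3_pack_demo(void)
    {
        unsigned long pfn = 0x123456;             /* page directory above 4GB */
        unsigned long cr3 = xen_pfn_to_cr3(pfn);  /* 0x23456001: pfn bits 31:20
                                                     end up in cr3 bits 11:0  */
        assert(xen_cr3_to_pfn(cr3) == pfn);
    }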
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct xen_callback {
+ unsigned long cs;
+ unsigned long eip;
+};
+typedef struct xen_callback xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/arch-x86/xen-x86_64.h b/include/hw/xen/interface/arch-x86/xen-x86_64.h
new file mode 100644
index 0000000000..5d9035ed22
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/xen-x86_64.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen-x86_64.h
+ *
+ * Guest OS interface to x86 64-bit Xen.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
+
+/*
+ * Hypercall interface:
+ * Input: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6)
+ * Output: %rax
+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
+ * call hypercall_page + hypercall-number * 32
+ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
+ */
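For comparison with the register list above, a two-argument call through the hypercall page might be wrapped as in the sketch below. 'hypercall_page' is assumed to be set up by the guest loader, and %rcx/%r11 are listed as clobbers to stay safe with SYSCALL-based PV stubs; this is a conservative illustration, not a definitive implementation:

    extern char hypercall_page[];

    static inline long xen_hypercall2(unsigned int nr,
                                      unsigned long arg1, unsigned long arg2)
    {
        long ret;
        register unsigned long a1 asm("rdi") = arg1;
        register unsigned long a2 asm("rsi") = arg2;

        asm volatile ("call *%[entry]"
                      : "=a" (ret), "+r" (a1), "+r" (a2)
                      : [entry] "r" (hypercall_page + nr * 32)
                      : "rcx", "r11", "memory");
        return ret;
    }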
+
+/*
+ * 64-bit segment selectors
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+
+#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
+#define FLAT_RING3_CS64 0xe033 /* GDT index 262 */
+#define FLAT_RING3_DS32 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS64 0x0000 /* NULL selector */
+#define FLAT_RING3_SS32 0xe02b /* GDT index 261 */
+#define FLAT_RING3_SS64 0xe02b /* GDT index 261 */
+
+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
+
+#define FLAT_USER_DS64 FLAT_RING3_DS64
+#define FLAT_USER_DS32 FLAT_RING3_DS32
+#define FLAT_USER_DS FLAT_USER_DS64
+#define FLAT_USER_CS64 FLAT_RING3_CS64
+#define FLAT_USER_CS32 FLAT_RING3_CS32
+#define FLAT_USER_CS FLAT_USER_CS64
+#define FLAT_USER_SS64 FLAT_RING3_SS64
+#define FLAT_USER_SS32 FLAT_RING3_SS32
+#define FLAT_USER_SS FLAT_USER_SS64
+
+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
+#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
+#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
+#define HYPERVISOR_VIRT_END xen_mk_ulong(__HYPERVISOR_VIRT_END)
+#endif
+
+#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+/*
+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
+ * @which == SEGBASE_* ; @base == 64-bit base address
+ * Returns 0 on success.
+ */
+#define SEGBASE_FS 0
+#define SEGBASE_GS_USER 1
+#define SEGBASE_GS_KERNEL 2
+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
+
+/*
+ * int HYPERVISOR_iret(void)
+ * All arguments are on the kernel stack, in the following format.
+ * Never returns if successful. Current kernel context is lost.
+ * The saved CS is mapped as follows:
+ * RING0 -> RING3 kernel mode.
+ * RING1 -> RING3 kernel mode.
+ * RING2 -> RING3 kernel mode.
+ * RING3 -> RING3 user mode.
+ * However, RING0 indicates that the guest kernel should return to itself
+ * directly with
+ * orb $3,1*8(%rsp)
+ * iretq
+ * If flags contains VGCF_in_syscall:
+ * Restore RAX, RIP, RFLAGS, RSP.
+ * Discard R11, RCX, CS, SS.
+ * Otherwise:
+ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
+ * All other registers are saved on hypercall entry and restored to user.
+ */
+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL VGCF_in_syscall
+
+#ifndef __ASSEMBLY__
+
+struct iret_context {
+ /* Top of stack (%rsp at point of hypercall). */
+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+ /* Bottom of iret stack frame. */
+};
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax/rax). */
+#define __DECL_REG_LOHI(which) union { \
+ uint64_t r ## which ## x; \
+ uint32_t e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO8(name) union { \
+ uint64_t r ## name; \
+ uint32_t e ## name; \
+ uint16_t name; \
+ uint8_t name ## l; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint64_t r ## name; \
+ uint32_t e ## name; \
+ uint16_t name; \
+}
+#define __DECL_REG_HI(num) union { \
+ uint64_t r ## num; \
+ uint32_t r ## num ## d; \
+ uint16_t r ## num ## w; \
+ uint8_t r ## num ## b; \
+}
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
+#define __DECL_REG(name) union { \
+ uint64_t r ## name, e ## name; \
+ uint32_t _e ## name; \
+}
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
+#define __DECL_REG(name) uint64_t r ## name
+#endif
+
+#ifndef __DECL_REG_LOHI
+#define __DECL_REG_LOHI(name) __DECL_REG(name ## x)
+#define __DECL_REG_LO8 __DECL_REG
+#define __DECL_REG_LO16 __DECL_REG
+#define __DECL_REG_HI(num) uint64_t r ## num
+#endif
+
+struct cpu_user_regs {
+ __DECL_REG_HI(15);
+ __DECL_REG_HI(14);
+ __DECL_REG_HI(13);
+ __DECL_REG_HI(12);
+ __DECL_REG_LO8(bp);
+ __DECL_REG_LOHI(b);
+ __DECL_REG_HI(11);
+ __DECL_REG_HI(10);
+ __DECL_REG_HI(9);
+ __DECL_REG_HI(8);
+ __DECL_REG_LOHI(a);
+ __DECL_REG_LOHI(c);
+ __DECL_REG_LOHI(d);
+ __DECL_REG_LO8(si);
+ __DECL_REG_LO8(di);
+ uint32_t error_code; /* private */
+ uint32_t entry_vector; /* private */
+ __DECL_REG_LO16(ip);
+ uint16_t cs, _pad0[1];
+ uint8_t saved_upcall_mask;
+ uint8_t _pad1[3];
+ __DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO8(sp);
+ uint16_t ss, _pad2[3];
+ uint16_t es, _pad3[3];
+ uint16_t ds, _pad4[3];
+ uint16_t fs, _pad5[3];
+ uint16_t gs, _pad6[3];
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+#undef __DECL_REG
+#undef __DECL_REG_LOHI
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+#undef __DECL_REG_HI
+
+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+typedef unsigned long xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/arch-x86/xen.h b/include/hw/xen/interface/arch-x86/xen.h
new file mode 100644
index 0000000000..c0f4551247
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/xen.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * arch-x86/xen.h
+ *
+ * Guest OS interface to x86 Xen.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#include "../xen.h"
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_H__
+
+/* Structural guest handles introduced in 0x00030201. */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument.
+ * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but
+ * they might not be on other architectures.
+ */
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
+#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
+
+#if defined(__i386__)
+# ifdef __XEN__
+__DeFiNe__ __DECL_REG_LO8(which) uint32_t e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) union { uint32_t e ## name; }
+# endif
+#include "xen-x86_32.h"
+# ifdef __XEN__
+__UnDeF__ __DECL_REG_LO8
+__UnDeF__ __DECL_REG_LO16
+__DeFiNe__ __DECL_REG_LO8(which) e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) e ## name
+# endif
+#elif defined(__x86_64__)
+#include "xen-x86_64.h"
+#endif
+
+#ifndef __ASSEMBLY__
+typedef unsigned long xen_pfn_t;
+#define PRI_xen_pfn "lx"
+#define PRIu_xen_pfn "lu"
+#endif
+
+#define XEN_HAVE_PV_GUEST_ENTRY 1
+
+#define XEN_HAVE_PV_UPCALL_MASK 1
+
+/*
+ * `incontents 200 segdesc Segment Descriptor Tables
+ */
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_gdt(const xen_pfn_t frames[], unsigned int entries);
+ * `
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
+ *
+ * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op
+ */
+#define FIRST_RESERVED_GDT_PAGE 14
+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_descriptor(u64 pa, u64 desc);
+ * `
+ * ` @pa The machine physical address of the descriptor to
+ * ` update. Must be either a descriptor page or writable.
+ * ` @desc The descriptor value to update, in the same format as a
+ * ` native descriptor table entry.
+ */
+
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+#define XEN_LEGACY_MAX_VCPUS 32
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long xen_ulong_t;
+#define PRI_xen_ulong "lx"
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp);
+ * `
+ * Sets the stack segment and pointer for the current vcpu.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_trap_table(const struct trap_info traps[]);
+ * `
+ */
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table().
+ * Terminate the array with a sentinel entry, with traps[].address==0.
+ * The privilege level specifies which modes may enter a trap via a software
+ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
+ * privilege levels as follows:
+ * Level == 0: No one may enter
+ * Level == 1: Kernel may enter
+ * Level == 2: Kernel may enter
+ * Level == 3: Everyone may enter
+ *
+ * Note: For compatibility with kernels not setting up exception handlers
+ * early enough, Xen will avoid trying to inject #GP (and hence crash
+ * the domain) when an RDMSR would require this, but no handler was
+ * set yet. The precise conditions are implementation specific, and
+ * new code may not rely on such behavior anyway.
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
+struct trap_info {
+ uint8_t vector; /* exception vector */
+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
+ uint16_t cs; /* code selector */
+ unsigned long address; /* code offset */
+};
+typedef struct trap_info trap_info_t;
+DEFINE_XEN_GUEST_HANDLE(trap_info_t);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ *
+ * Also note that when calling DOMCTL_setvcpucontext for HVM guests, not all
+ * information in this structure is updated; the fields read include: fpu_ctxt
+ * (if VGCF_I387_VALID is set), flags, user_regs and debugreg[*].
+ *
+ * Note: VCPUOP_initialise for HVM guests is non-symmetric with
+ * DOMCTL_setvcpucontext, and uses struct vcpu_hvm_context from hvm/hvm_vcpu.h
+ */
+struct vcpu_guest_context {
+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
+#define VGCF_I387_VALID (1<<0)
+#define VGCF_IN_KERNEL (1<<2)
+#define _VGCF_i387_valid 0
+#define VGCF_i387_valid (1<<_VGCF_i387_valid)
+#define _VGCF_in_kernel 2
+#define VGCF_in_kernel (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
+#define _VGCF_syscall_disables_events 4
+#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
+#define _VGCF_online 5
+#define VGCF_online (1<<_VGCF_online)
+ unsigned long flags; /* VGCF_* flags */
+ struct cpu_user_regs user_regs; /* User-level CPU registers */
+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+#ifdef __i386__
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+#else
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_eip;
+#ifdef __XEN__
+ union {
+ unsigned long syscall_callback_eip;
+ struct {
+ unsigned int event_callback_cs; /* compat CS of event cb */
+ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
+ };
+ };
+#else
+ unsigned long syscall_callback_eip;
+#endif
+#endif
+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+#ifdef __x86_64__
+ /* Segment base addresses. */
+ uint64_t fs_base;
+ uint64_t gs_base_kernel;
+ uint64_t gs_base_user;
+#endif
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+struct arch_shared_info {
+ /*
+ * Number of valid entries in the p2m table(s) anchored at
+ * pfn_to_mfn_frame_list_list and/or p2m_vaddr.
+ */
+ unsigned long max_pfn;
+ /*
+ * Frame containing list of mfns containing list of mfns containing p2m.
+ * A value of 0 indicates it has not yet been set up, ~0 indicates it has
+ * been set to invalid e.g. due to the p2m being too large for the 3-level
+ * p2m tree. In this case the linear mapper p2m list anchored at p2m_vaddr
+ * is to be used.
+ */
+ xen_pfn_t pfn_to_mfn_frame_list_list;
+ unsigned long nmi_reason;
+ /*
+ * Following three fields are valid if p2m_cr3 contains a value different
+ * from 0.
+ * p2m_cr3 is the root of the address space where p2m_vaddr is valid.
+ * p2m_cr3 is in the same format as a cr3 value in the vcpu register state
+ * and holds the folded machine frame number (via xen_pfn_to_cr3) of a
+ * L3 or L4 page table.
+ * p2m_vaddr holds the virtual address of the linear p2m list. All entries
+ * in the range [0...max_pfn[ are accessible via this pointer.
+ * p2m_generation will be incremented by the guest before and after each
+ * change of the mappings of the p2m list. p2m_generation starts at 0 and
+ * a value with the least significant bit set indicates that a mapping
+ * update is in progress. This allows software outside the guest (e.g. in Dom0)
+ * to verify that the mappings it reads are consistent and to detect whether
+ * they have changed since the last check.
+ * Modifying a p2m element in the linear p2m list is allowed via an atomic
+ * write only.
+ */
+ unsigned long p2m_cr3; /* cr3 value of the p2m address space */
+ unsigned long p2m_vaddr; /* virtual address of the p2m list */
+ unsigned long p2m_generation; /* generation count of p2m mapping */
+#ifdef __i386__
+ /* There's no room for this field in the generic structure. */
+ uint32_t wc_sec_hi;
+#endif
+};
+typedef struct arch_shared_info arch_shared_info_t;
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+struct xen_arch_domainconfig {
+#define _XEN_X86_EMU_LAPIC 0
+#define XEN_X86_EMU_LAPIC (1U<<_XEN_X86_EMU_LAPIC)
+#define _XEN_X86_EMU_HPET 1
+#define XEN_X86_EMU_HPET (1U<<_XEN_X86_EMU_HPET)
+#define _XEN_X86_EMU_PM 2
+#define XEN_X86_EMU_PM (1U<<_XEN_X86_EMU_PM)
+#define _XEN_X86_EMU_RTC 3
+#define XEN_X86_EMU_RTC (1U<<_XEN_X86_EMU_RTC)
+#define _XEN_X86_EMU_IOAPIC 4
+#define XEN_X86_EMU_IOAPIC (1U<<_XEN_X86_EMU_IOAPIC)
+#define _XEN_X86_EMU_PIC 5
+#define XEN_X86_EMU_PIC (1U<<_XEN_X86_EMU_PIC)
+#define _XEN_X86_EMU_VGA 6
+#define XEN_X86_EMU_VGA (1U<<_XEN_X86_EMU_VGA)
+#define _XEN_X86_EMU_IOMMU 7
+#define XEN_X86_EMU_IOMMU (1U<<_XEN_X86_EMU_IOMMU)
+#define _XEN_X86_EMU_PIT 8
+#define XEN_X86_EMU_PIT (1U<<_XEN_X86_EMU_PIT)
+#define _XEN_X86_EMU_USE_PIRQ 9
+#define XEN_X86_EMU_USE_PIRQ (1U<<_XEN_X86_EMU_USE_PIRQ)
+#define _XEN_X86_EMU_VPCI 10
+#define XEN_X86_EMU_VPCI (1U<<_XEN_X86_EMU_VPCI)
+
+#define XEN_X86_EMU_ALL (XEN_X86_EMU_LAPIC | XEN_X86_EMU_HPET | \
+ XEN_X86_EMU_PM | XEN_X86_EMU_RTC | \
+ XEN_X86_EMU_IOAPIC | XEN_X86_EMU_PIC | \
+ XEN_X86_EMU_VGA | XEN_X86_EMU_IOMMU | \
+ XEN_X86_EMU_PIT | XEN_X86_EMU_USE_PIRQ |\
+ XEN_X86_EMU_VPCI)
+ uint32_t emulation_flags;
+
+/*
+ * Select whether to use a relaxed behavior for accesses to MSRs not explicitly
+ * handled by Xen instead of injecting a #GP to the guest. Note this option
+ * doesn't allow the guest to read or write to the underlying MSR.
+ */
+#define XEN_X86_MSR_RELAXED (1u << 0)
+ uint32_t misc_flags;
+};
+
+/* Max XEN_X86_* constant. Used for ABI checking. */
+#define XEN_X86_MISC_FLAGS_MAX XEN_X86_MSR_RELAXED
+
+#endif
+
+/*
+ * Representations of architectural CPUID and MSR information. Used as the
+ * serialised version of Xen's internal representation.
+ */
+typedef struct xen_cpuid_leaf {
+#define XEN_CPUID_NO_SUBLEAF 0xffffffffu
+ uint32_t leaf, subleaf;
+ uint32_t a, b, c, d;
+} xen_cpuid_leaf_t;
+DEFINE_XEN_GUEST_HANDLE(xen_cpuid_leaf_t);
+
+typedef struct xen_msr_entry {
+ uint32_t idx;
+ uint32_t flags; /* Reserved MBZ. */
+ uint64_t val;
+} xen_msr_entry_t;
+DEFINE_XEN_GUEST_HANDLE(xen_msr_entry_t);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_fpu_taskswitch(int set);
+ * `
+ * Sets (if set!=0) or clears (if set==0) CR0.TS.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_debugreg(int regno, unsigned long value);
+ *
+ * ` unsigned long
+ * ` HYPERVISOR_get_debugreg(int regno);
+ * For 0<=reg<=7, returns the debug register value.
+ * For other values of reg, returns ((unsigned long)-EINVAL).
+ * (Unfortunately, this interface is defective.)
+ */
+
+/*
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+#ifdef __ASSEMBLY__
+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
+#else
+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
+#endif
+
+/*
+ * Debug console IO port, also called "port E9 hack". Each character written
+ * to this IO port will be printed on the hypervisor console, subject to log
+ * level restrictions.
+ */
+#define XEN_HVM_DEBUGCONS_IOPORT 0xe9
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/event_channel.h b/include/hw/xen/interface/event_channel.h
new file mode 100644
index 0000000000..0d91a1c4af
--- /dev/null
+++ b/include/hw/xen/interface/event_channel.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * event_channel.h
+ *
+ * Event channels between domains.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
+
+#include "xen.h"
+
+/*
+ * `incontents 150 evtchn Event Channels
+ *
+ * Event channels are the basic primitive provided by Xen for event
+ * notifications. An event is the Xen equivalent of a hardware
+ * interrupt. They essentially store one bit of information; the event
+ * of interest is signalled by transitioning this bit from 0 to 1.
+ *
+ * Notifications are received by a guest via an upcall from Xen,
+ * indicating when an event arrives (setting the bit). Further
+ * notifications are masked until the bit is cleared again (therefore,
+ * guests must check the value of the bit after re-enabling event
+ * delivery to ensure no missed notifications).
+ *
+ * Event notifications can be masked by setting a flag; this is
+ * equivalent to disabling interrupts and can be used to ensure
+ * atomicity of certain operations in the guest kernel.
+ *
+ * Event channels are represented by the evtchn_* fields in
+ * struct shared_info and struct vcpu_info.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_event_channel_op(enum event_channel_op cmd, void *args)
+ * `
+ * @cmd == EVTCHNOP_* (event-channel operation).
+ * @args == struct evtchn_* Operation-specific extra arguments (NULL if none).
+ */
+
+/* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */
+#define EVTCHNOP_bind_interdomain 0
+#define EVTCHNOP_bind_virq 1
+#define EVTCHNOP_bind_pirq 2
+#define EVTCHNOP_close 3
+#define EVTCHNOP_send 4
+#define EVTCHNOP_status 5
+#define EVTCHNOP_alloc_unbound 6
+#define EVTCHNOP_bind_ipi 7
+#define EVTCHNOP_bind_vcpu 8
+#define EVTCHNOP_unmask 9
+#define EVTCHNOP_reset 10
+#define EVTCHNOP_init_control 11
+#define EVTCHNOP_expand_array 12
+#define EVTCHNOP_set_priority 13
+#ifdef __XEN__
+#define EVTCHNOP_reset_cont 14
+#endif
+/* ` } */
+
+typedef uint32_t evtchn_port_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
+
+/*
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
+ * NOTES:
+ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+struct evtchn_alloc_unbound {
+ /* IN parameters */
+ domid_t dom, remote_dom;
+ /* OUT parameters */
+ evtchn_port_t port;
+};
+typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
+
+/*
+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
+ * a port that is unbound and marked as accepting bindings from the calling
+ * domain. A fresh port is allocated in the calling domain and returned as
+ * <local_port>.
+ *
+ * In case the peer domain has already tried to set our event channel
+ * pending, before it was bound, EVTCHNOP_bind_interdomain always sets
+ * the local event channel pending.
+ *
+ * The usual pattern of use, in the guest's upcall (or subsequent
+ * handler) is as follows: (Re-enable the event channel for subsequent
+ * signalling and then) check for the existence of whatever condition
+ * is being waited for by other means, and take whatever action is
+ * needed (if any).
+ *
+ * NOTES:
+ * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+struct evtchn_bind_interdomain {
+ /* IN parameters. */
+ domid_t remote_dom;
+ evtchn_port_t remote_port;
+ /* OUT parameters. */
+ evtchn_port_t local_port;
+};
+typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
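Taken together, EVTCHNOP_alloc_unbound and EVTCHNOP_bind_interdomain form the two halves of a connection. A sketch of both sides, assuming a HYPERVISOR_event_channel_op() wrapper for the hypercall documented above and DOMID_SELF from xen.h:

    /* Side A: offer a port that domain <peer> may bind to. */
    static int listen_for_peer(domid_t peer, evtchn_port_t *out)
    {
        struct evtchn_alloc_unbound op = {
            .dom        = DOMID_SELF,
            .remote_dom = peer,
        };
        int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);

        if (rc == 0) {
            *out = op.port;  /* advertise this port to the peer, e.g. via xenstore */
        }
        return rc;
    }

    /* Side B: bind to the port advertised by domain <peer>. */
    static int connect_to_peer(domid_t peer, evtchn_port_t remote_port,
                               evtchn_port_t *local)
    {
        struct evtchn_bind_interdomain op = {
            .remote_dom  = peer,
            .remote_port = remote_port,
        };
        int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &op);

        if (rc == 0) {
            *local = op.local_port;
        }
        return rc;
    }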
+
+/*
+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
+ * vcpu.
+ * NOTES:
+ * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
+ * in xen.h for the classification of each VIRQ.
+ * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
+ * re-bound via EVTCHNOP_bind_vcpu.
+ * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
+ * The allocated event channel is bound to the specified vcpu and the
+ * binding cannot be changed.
+ */
+struct evtchn_bind_virq {
+ /* IN parameters. */
+ uint32_t virq; /* enum virq */
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_virq evtchn_bind_virq_t;
+
+/*
+ * EVTCHNOP_bind_pirq: Bind a local event channel to a real IRQ (PIRQ <irq>).
+ * NOTES:
+ * 1. A physical IRQ may be bound to at most one event channel per domain.
+ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
+ */
+struct evtchn_bind_pirq {
+ /* IN parameters. */
+ uint32_t pirq;
+#define BIND_PIRQ__WILL_SHARE 1
+ uint32_t flags; /* BIND_PIRQ__* */
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
+
+/*
+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
+ * NOTES:
+ * 1. The allocated event channel is bound to the specified vcpu. The binding
+ * may not be changed.
+ */
+struct evtchn_bind_ipi {
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
+
+/*
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
+ */
+struct evtchn_close {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_close evtchn_close_t;
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <port>.
+ */
+struct evtchn_send {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_send evtchn_send_t;
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <dom, port>.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may obtain the status of an event
+ * channel for which <dom> is not DOMID_SELF.
+ */
+struct evtchn_status {
+ /* IN parameters */
+ domid_t dom;
+ evtchn_port_t port;
+ /* OUT parameters */
+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
+#define EVTCHNSTAT_unbound 1 /* Channel is waiting for interdom connection. */
+#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
+#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
+#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
+#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
+ uint32_t status;
+ uint32_t vcpu; /* VCPU to which this channel is bound. */
+ union {
+ struct {
+ domid_t dom;
+ } unbound; /* EVTCHNSTAT_unbound */
+ struct {
+ domid_t dom;
+ evtchn_port_t port;
+ } interdomain; /* EVTCHNSTAT_interdomain */
+ uint32_t pirq; /* EVTCHNSTAT_pirq */
+ uint32_t virq; /* EVTCHNSTAT_virq */
+ } u;
+};
+typedef struct evtchn_status evtchn_status_t;
+
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ * 1. IPI-bound channels always notify the vcpu specified at bind time.
+ * This binding cannot be changed.
+ * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
+ * This binding cannot be changed.
+ * 3. All other channels notify vcpu0 by default. This default is set when
+ * the channel is allocated (a port that is freed and subsequently reused
+ * has its binding reset to vcpu0).
+ */
+struct evtchn_bind_vcpu {
+ /* IN parameters. */
+ evtchn_port_t port;
+ uint32_t vcpu;
+};
+typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
+
+/*
+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
+ * a notification to the appropriate VCPU if an event is pending.
+ */
+struct evtchn_unmask {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_unmask evtchn_unmask_t;
+
+/*
+ * EVTCHNOP_reset: Close all event channels associated with specified domain.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
+ * 3. Destroys all control blocks and event array, resets event channel
+ * operations to 2-level ABI if called with <dom> == DOMID_SELF and FIFO
+ * ABI was used. Guests should not bind events during EVTCHNOP_reset call
+ * as these events are likely to be lost.
+ */
+struct evtchn_reset {
+ /* IN parameters. */
+ domid_t dom;
+};
+typedef struct evtchn_reset evtchn_reset_t;
+
+/*
+ * EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
+ *
+ * Note: any events that are currently pending will not be resent and
+ * will be lost. Guests should call this before binding any event to
+ * avoid losing any events.
+ */
+struct evtchn_init_control {
+ /* IN parameters. */
+ uint64_t control_gfn;
+ uint32_t offset;
+ uint32_t vcpu;
+ /* OUT parameters. */
+ uint8_t link_bits;
+ uint8_t _pad[7];
+};
+typedef struct evtchn_init_control evtchn_init_control_t;
+
+/*
+ * EVTCHNOP_expand_array: add an additional page to the event array.
+ */
+struct evtchn_expand_array {
+ /* IN parameters. */
+ uint64_t array_gfn;
+};
+typedef struct evtchn_expand_array evtchn_expand_array_t;
+
+/*
+ * EVTCHNOP_set_priority: set the priority for an event channel.
+ */
+struct evtchn_set_priority {
+ /* IN parameters. */
+ evtchn_port_t port;
+ uint32_t priority;
+};
+typedef struct evtchn_set_priority evtchn_set_priority_t;
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op)
+ * `
+ * Superseded by the new event_channel_op() hypercall since 0x00030202.
+ */
+struct evtchn_op {
+ uint32_t cmd; /* enum event_channel_op */
+ union {
+ evtchn_alloc_unbound_t alloc_unbound;
+ evtchn_bind_interdomain_t bind_interdomain;
+ evtchn_bind_virq_t bind_virq;
+ evtchn_bind_pirq_t bind_pirq;
+ evtchn_bind_ipi_t bind_ipi;
+ evtchn_close_t close;
+ evtchn_send_t send;
+ evtchn_status_t status;
+ evtchn_bind_vcpu_t bind_vcpu;
+ evtchn_unmask_t unmask;
+ } u;
+};
+typedef struct evtchn_op evtchn_op_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
+
+/*
+ * 2-level ABI
+ */
+
+#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
+
+/*
+ * FIFO ABI
+ */
+
+/* Events may have priorities from 0 (highest) to 15 (lowest). */
+#define EVTCHN_FIFO_PRIORITY_MAX 0
+#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
+#define EVTCHN_FIFO_PRIORITY_MIN 15
+
+#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
+
+typedef uint32_t event_word_t;
+
+#define EVTCHN_FIFO_PENDING 31
+#define EVTCHN_FIFO_MASKED 30
+#define EVTCHN_FIFO_LINKED 29
+#define EVTCHN_FIFO_BUSY 28
+
+#define EVTCHN_FIFO_LINK_BITS 17
+#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
+
+#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
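The bit positions above define the layout of each event word. Accessors along the lines of the following sketch are how a guest typically reads them (illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    static inline bool evtchn_fifo_pending(event_word_t w)
    {
        return (w >> EVTCHN_FIFO_PENDING) & 1;
    }

    static inline bool evtchn_fifo_masked(event_word_t w)
    {
        return (w >> EVTCHN_FIFO_MASKED) & 1;
    }

    static inline uint32_t evtchn_fifo_link(event_word_t w)
    {
        return w & EVTCHN_FIFO_LINK_MASK;  /* next port in the queue, 17 bits */
    }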
+
+struct evtchn_fifo_control_block {
+ uint32_t ready;
+ uint32_t _rsvd;
+ uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t;
+
+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/features.h b/include/hw/xen/interface/features.h
new file mode 100644
index 0000000000..d2a9175aae
--- /dev/null
+++ b/include/hw/xen/interface/features.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * features.h
+ *
+ * Feature flags, reported by XENVER_get_features.
+ *
+ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_FEATURES_H__
+#define __XEN_PUBLIC_FEATURES_H__
+
+/*
+ * `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES
+ *
+ * The list of all the features the guest supports. They are set by
+ * parsing the XEN_ELFNOTE_FEATURES and XEN_ELFNOTE_SUPPORTED_FEATURES
+ * string. The format is the feature names (as given here without the
+ * "XENFEAT_" prefix) separated by '|' characters.
+ * If a feature is required for the kernel to function then the feature name
+ * must be preceded by a '!' character.
+ *
+ * Note that if XEN_ELFNOTE_SUPPORTED_FEATURES is used, then XENFEAT_dom0
+ * MUST be set in it if the guest is to be booted as dom0.
+ */
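+
+/*
+ * Illustrative example of such a note string (feature choice is for the
+ * example only): a guest that requires writable page tables and also
+ * supports the auto-translated physmap would advertise
+ *
+ *     "!writable_page_tables|auto_translated_physmap"
+ */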
+
+/*
+ * If set, the guest does not need to write-protect its pagetables, and can
+ * update them via direct writes.
+ */
+#define XENFEAT_writable_page_tables 0
+
+/*
+ * If set, the guest does not need to write-protect its segment descriptor
+ * tables, and can update them via direct writes.
+ */
+#define XENFEAT_writable_descriptor_tables 1
+
+/*
+ * If set, translation between the guest's 'pseudo-physical' address space
+ * and the host's machine address space are handled by the hypervisor. In this
+ * mode the guest does not need to perform phys-to/from-machine translations
+ * when performing page table operations.
+ */
+#define XENFEAT_auto_translated_physmap 2
+
+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
+#define XENFEAT_supervisor_mode_kernel 3
+
+/*
+ * If set, the guest does not need to allocate x86 PAE page directories
+ * below 4GB. This flag is usually implied by auto_translated_physmap.
+ */
+#define XENFEAT_pae_pgdir_above_4gb 4
+
+/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
+#define XENFEAT_mmu_pt_update_preserve_ad 5
+
+/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
+#define XENFEAT_highmem_assist 6
+
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits 7
+
+/* x86: Does this Xen host support the HVM callback vector type? */
+#define XENFEAT_hvm_callback_vector 8
+
+/* x86: pvclock algorithm is safe to use on HVM */
+#define XENFEAT_hvm_safe_pvclock 9
+
+/* x86: pirq can be used by HVM guests */
+#define XENFEAT_hvm_pirqs 10
+
+/* operation as Dom0 is supported */
+#define XENFEAT_dom0 11
+
+/* Xen also maps grant references at pfn = mfn.
+ * This feature flag is deprecated and should not be used.
+#define XENFEAT_grant_map_identity 12
+ */
+
+/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
+#define XENFEAT_memory_op_vnode_supported 13
+
+/* arm: Hypervisor supports ARM SMC calling convention. */
+#define XENFEAT_ARM_SMCCC_supported 14
+
+/*
+ * x86/PVH: If set, ACPI RSDP can be placed at any address. Otherwise RSDP
+ * must be located in lower 1MB, as required by ACPI Specification for IA-PC
+ * systems.
+ * This feature flag is only consulted if XEN_ELFNOTE_GUEST_OS contains
+ * the "linux" string.
+ */
+#define XENFEAT_linux_rsdp_unrestricted 15
+
+/*
+ * A direct-mapped (or 1:1 mapped) domain is a domain for which its
+ * local pages have gfn == mfn. If a domain is direct-mapped,
+ * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped
+ * is set.
+ *
+ * If neither flag is set (e.g., older Xen releases) the assumptions are:
+ * - non-auto_translated domains (x86 only) are always direct-mapped
+ * - on x86, auto_translated domains are not direct-mapped
+ * - on ARM, Dom0 is direct-mapped, DomUs are not
+ */
+#define XENFEAT_not_direct_mapped 16
+#define XENFEAT_direct_mapped 17
+
+#define XENFEAT_NR_SUBMAPS 1
+
+#endif /* __XEN_PUBLIC_FEATURES_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/grant_table.h b/include/hw/xen/interface/grant_table.h
new file mode 100644
index 0000000000..1dfa17a6d0
--- /dev/null
+++ b/include/hw/xen/interface/grant_table.h
@@ -0,0 +1,669 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * grant_table.h
+ *
+ * Interface for granting foreign access to page frames, and receiving
+ * page-ownership transfers.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
+#define __XEN_PUBLIC_GRANT_TABLE_H__
+
+#include "xen.h"
+
+/*
+ * `incontents 150 gnttab Grant Tables
+ *
+ * Xen's grant tables provide a generic mechanism for memory sharing
+ * between domains. This shared memory interface underpins the split
+ * device drivers for block and network IO.
+ *
+ * Each domain has its own grant table. This is a data structure that
+ * is shared with Xen; it allows the domain to tell Xen what kind of
+ * permissions other domains have on its pages. Entries in the grant
+ * table are identified by grant references. A grant reference is an
+ * integer, which indexes into the grant table. It acts as a
+ * capability which the grantee can use to perform operations on the
+ * granter's memory.
+ *
+ * This capability-based system allows shared-memory communications
+ * between unprivileged domains. A grant reference also encapsulates
+ * the details of a shared page, removing the need for a domain to
+ * know the real machine address of a page it is sharing. This makes
+ * it possible to share memory correctly with domains running in
+ * fully virtualised memory.
+ */
+
+/***********************************
+ * GRANT TABLE REPRESENTATION
+ */
+
+/* Some rough guidelines on accessing and updating grant-table entries
+ * in a concurrency-safe manner. For more information, Linux contains a
+ * reference implementation for guest OSes (drivers/xen/grant-table.c; see
+ * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD).
+ *
+ * NB. WMB is a no-op on current-generation x86 processors. However, a
+ * compiler barrier will still be required.
+ *
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ *
+ * Invalidating an unused GTF_permit_access entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ *
+ * Invalidating an in-use GTF_permit_access entry:
+ * This cannot be done directly. Request assistance from the domain controller
+ * which can set a timeout on the use of a grant entry and take necessary
+ * action. (NB. This is not yet implemented!).
+ *
+ * Invalidating an unused GTF_accept_transfer entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & GTF_transfer_committed). [*]
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
+ * The guest must /not/ modify the grant entry until the address of the
+ * transferred frame is written. It is safe for the guest to spin waiting
+ * for this to occur (detect by observing GTF_transfer_completed in
+ * ent->flags).
+ *
+ * Invalidating a committed GTF_accept_transfer entry:
+ * 1. Wait for (ent->flags & GTF_transfer_completed).
+ *
+ * Changing a GTF_permit_access from writable to read-only:
+ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
+ *
+ * Changing a GTF_permit_access from read-only to writable:
+ * Use SMP-safe bit-setting instruction.
+ */
+
+/*
+ * Reference to a grant entry in a specified domain's grant table.
+ */
+typedef uint32_t grant_ref_t;
+
+/*
+ * A grant table comprises a packed array of grant entries in one or more
+ * page frames shared between Xen and a guest.
+ * [XEN]: This field is written by Xen and read by the sharing guest.
+ * [GST]: This field is written by the guest and read by Xen.
+ */
+
+/*
+ * Version 1 of the grant table entry structure is maintained largely for
+ * backwards compatibility. New guests are recommended to support using
+ * version 2 to overcome version 1 limitations, but to default to version 1.
+ */
+#if __XEN_INTERFACE_VERSION__ < 0x0003020a
+#define grant_entry_v1 grant_entry
+#define grant_entry_v1_t grant_entry_t
+#endif
+struct grant_entry_v1 {
+ /* GTF_xxx: various type and flag information. [XEN,GST] */
+ uint16_t flags;
+ /* The domain being granted foreign privileges. [GST] */
+ domid_t domid;
+ /*
+ * GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
+ * GTF_transfer_completed: MFN whose ownership transferred by @domid
+ * (non-translated guests only). [XEN]
+ */
+ uint32_t frame;
+};
+typedef struct grant_entry_v1 grant_entry_v1_t;
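+
+/*
+ * Usage sketch (informative only): introducing a valid GTF_permit_access
+ * entry following the steps listed under "GRANT TABLE REPRESENTATION"
+ * above. xen_wmb() stands in for whatever write-memory-barrier primitive
+ * the guest environment provides; it is not defined by this header.
+ *
+ *     grant_entry_v1_t *ent = &gnttab[ref];
+ *     ent->domid = remote_domid;                 // 1. write domid
+ *     ent->frame = gfn_to_share;                 // 2. write frame
+ *     xen_wmb();                                 // 3. write memory barrier
+ *     ent->flags = GTF_permit_access |           // 4. write flags, inc. type
+ *                  (readonly ? GTF_readonly : 0);
+ */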
+
+/* The first few grant table entries will be preserved across grant table
+ * version changes and may be pre-populated at domain creation by tools.
+ */
+#define GNTTAB_NR_RESERVED_ENTRIES 8
+#define GNTTAB_RESERVED_CONSOLE 0
+#define GNTTAB_RESERVED_XENSTORE 1
+
+/*
+ * Type of grant entry.
+ * GTF_invalid: This grant entry grants no privileges.
+ * GTF_permit_access: Allow @domid to map/access @frame.
+ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
+ * to this guest. Xen writes the page number to @frame.
+ * GTF_transitive: Allow @domid to transitively access a subrange of
+ * @trans_grant in @trans_domid. No mappings are allowed.
+ */
+#define GTF_invalid (0U<<0)
+#define GTF_permit_access (1U<<0)
+#define GTF_accept_transfer (2U<<0)
+#define GTF_transitive (3U<<0)
+#define GTF_type_mask (3U<<0)
+
+/*
+ * Subflags for GTF_permit_access and GTF_transitive.
+ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
+ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
+ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ * Further subflags for GTF_permit_access only.
+ * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags to be used for
+ * mappings of the grant [GST]
+ * GTF_sub_page: Grant access to only a subrange of the page. @domid
+ * will only be allowed to copy from the grant, and not
+ * map it. [GST]
+ */
+#define _GTF_readonly (2)
+#define GTF_readonly (1U<<_GTF_readonly)
+#define _GTF_reading (3)
+#define GTF_reading (1U<<_GTF_reading)
+#define _GTF_writing (4)
+#define GTF_writing (1U<<_GTF_writing)
+#define _GTF_PWT (5)
+#define GTF_PWT (1U<<_GTF_PWT)
+#define _GTF_PCD (6)
+#define GTF_PCD (1U<<_GTF_PCD)
+#define _GTF_PAT (7)
+#define GTF_PAT (1U<<_GTF_PAT)
+#define _GTF_sub_page (8)
+#define GTF_sub_page (1U<<_GTF_sub_page)
+
+/*
+ * Subflags for GTF_accept_transfer:
+ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
+ * to transferring ownership of a page frame. When a guest sees this flag
+ * it must /not/ modify the grant entry until GTF_transfer_completed is
+ * set by Xen.
+ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
+ * after reading GTF_transfer_committed. Xen will always write the frame
+ * address, followed by ORing this flag, in a timely manner.
+ */
+#define _GTF_transfer_committed (2)
+#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
+#define _GTF_transfer_completed (3)
+#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
+
+/*
+ * Version 2 grant table entries. These fulfil the same role as
+ * version 1 entries, but can represent more complicated operations.
+ * Any given domain will have either a version 1 or a version 2 table,
+ * and every entry in the table will be the same version.
+ *
+ * The interface by which domains use grant references does not depend
+ * on the grant table version in use by the other domain.
+ */
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+/*
+ * Version 1 and version 2 grant entries share a common prefix. The
+ * fields of the prefix are documented as part of struct
+ * grant_entry_v1.
+ */
+struct grant_entry_header {
+ uint16_t flags;
+ domid_t domid;
+};
+typedef struct grant_entry_header grant_entry_header_t;
+
+/*
+ * Version 2 of the grant entry structure.
+ */
+union grant_entry_v2 {
+ grant_entry_header_t hdr;
+
+ /*
+ * This member is used for V1-style full page grants, where either:
+ *
+ * -- hdr.type is GTF_accept_transfer, or
+ * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
+ *
+ * In that case, the frame field has the same semantics as the
+ * field of the same name in the V1 entry structure.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ uint32_t pad0;
+ uint64_t frame;
+ } full_page;
+
+ /*
+ * If the grant type is GTF_permit_access and GTF_sub_page is set,
+ * @domid is allowed to access bytes [@page_off,@page_off+@length)
+ * in frame @frame.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ uint16_t page_off;
+ uint16_t length;
+ uint64_t frame;
+ } sub_page;
+
+ /*
+ * If the grant is GTF_transitive, @domid is allowed to use the
+ * grant @gref in domain @trans_domid, as if it was the local
+ * domain. Obviously, the transitive access must be compatible
+ * with the original grant.
+ *
+ * The current version of Xen does not allow transitive grants
+ * to be mapped.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ domid_t trans_domid;
+ uint16_t pad0;
+ grant_ref_t gref;
+ } transitive;
+
+ uint32_t __spacer[4]; /* Pad to a power of two */
+};
+typedef union grant_entry_v2 grant_entry_v2_t;
+
+typedef uint16_t grant_status_t;
+
+#endif /* __XEN_INTERFACE_VERSION__ */
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
+ */
+
+/* ` enum neg_errnoval
+ * ` HYPERVISOR_grant_table_op(enum grant_table_op cmd,
+ * ` void *args,
+ * ` unsigned int count)
+ * `
+ *
+ * @args points to an array of per-command data structures. The array
+ * has @count members.
+ */
+
+/* ` enum grant_table_op { // GNTTABOP_* => struct gnttab_* */
+#define GNTTABOP_map_grant_ref 0
+#define GNTTABOP_unmap_grant_ref 1
+#define GNTTABOP_setup_table 2
+#define GNTTABOP_dump_table 3
+#define GNTTABOP_transfer 4
+#define GNTTABOP_copy 5
+#define GNTTABOP_query_size 6
+#define GNTTABOP_unmap_and_replace 7
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+#define GNTTABOP_set_version 8
+#define GNTTABOP_get_status_frames 9
+#define GNTTABOP_get_version 10
+#define GNTTABOP_swap_grant_ref 11
+#define GNTTABOP_cache_flush 12
+#endif /* __XEN_INTERFACE_VERSION__ */
+/* ` } */
+
+/*
+ * Handle to track a mapping created via a grant reference.
+ */
+typedef uint32_t grant_handle_t;
+
+/*
+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
+ * that must be presented later to destroy the mapping(s). On error, <status>
+ * is a negative status code.
+ * NOTES:
+ * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
+ * via which I/O devices may access the granted frame.
+ * 2. If GNTMAP_host_map is specified then a mapping will be added at
+ * either a host virtual address in the current address space, or at
+ * a PTE at the specified machine address. The type of mapping to
+ * perform is selected through the GNTMAP_contains_pte flag, and the
+ * address is specified in <host_addr>.
+ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
+ * host mapping is destroyed by other means then it is *NOT* guaranteed
+ * to be accounted to the correct grant reference!
+ */
+struct gnttab_map_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint32_t flags; /* GNTMAP_* */
+ grant_ref_t ref;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+ grant_handle_t handle;
+ uint64_t dev_bus_addr;
+};
+typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
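+
+/*
+ * Usage sketch (informative only): mapping a foreign grant into the
+ * caller's address space. HYPERVISOR_grant_table_op() is assumed to be
+ * supplied by the guest's hypercall layer; only the argument layout and
+ * the GNTMAP_* and GNTST_* values are defined by this header.
+ *
+ *     struct gnttab_map_grant_ref map = {
+ *         .host_addr = vaddr,            // where the mapping should appear
+ *         .flags     = GNTMAP_host_map,
+ *         .ref       = gref,             // grant ref published by the granter
+ *         .dom       = granter_domid,
+ *     };
+ *     HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1);
+ *     if (map.status == GNTST_okay)
+ *         // keep map.handle for the eventual GNTTABOP_unmap_grant_ref
+ */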
+
+/*
+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
+ * field is ignored. If non-zero, they must refer to a device/host mapping
+ * that is tracked by <handle>.
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+struct gnttab_unmap_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t dev_bus_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
+
+/*
+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
+ * Only <nr_frames> addresses are written, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ * 3. Xen may not support more than a single grant-table page per domain.
+ */
+struct gnttab_setup_table {
+ /* IN parameters. */
+ domid_t dom;
+ uint32_t nr_frames;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
+ XEN_GUEST_HANDLE(ulong) frame_list;
+#else
+ XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+#endif
+};
+typedef struct gnttab_setup_table gnttab_setup_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
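+
+/*
+ * Usage sketch (informative only): asking Xen for the frames backing the
+ * caller's own grant table. NR_GRANT_FRAMES is an example constant, and
+ * set_xen_guest_handle() / HYPERVISOR_grant_table_op() are assumed to be
+ * provided by the guest environment rather than by this header.
+ *
+ *     xen_pfn_t frames[NR_GRANT_FRAMES];
+ *     struct gnttab_setup_table setup = {
+ *         .dom       = DOMID_SELF,
+ *         .nr_frames = NR_GRANT_FRAMES,
+ *     };
+ *     set_xen_guest_handle(setup.frame_list, frames);
+ *     HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
+ *     // on success setup.status is GNTST_okay and frames[] holds the MFNs
+ */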
+
+/*
+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
+ * xen console. Debugging use only.
+ */
+struct gnttab_dump_table {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_dump_table gnttab_dump_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
+
+/*
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain
+ * has previously registered its interest in the transfer via <domid, ref>.
+ *
+ * Note that, even if the transfer fails, the specified page no longer belongs
+ * to the calling domain *unless* the error is GNTST_bad_page.
+ *
+ * Note further that only PV guests can use this operation.
+ */
+struct gnttab_transfer {
+ /* IN parameters. */
+ xen_pfn_t mfn;
+ domid_t domid;
+ grant_ref_t ref;
+ /* OUT parameters. */
+ int16_t status;
+};
+typedef struct gnttab_transfer gnttab_transfer_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
+
+
+/*
+ * GNTTABOP_copy: Hypervisor-based copy.
+ * Source and destination can be either MFNs or, for foreign domains,
+ * grant references. The foreign domain has to grant read/write access
+ * in its grant table.
+ *
+ * The flags specify what type source and destinations are (either MFN
+ * or grant reference).
+ *
+ * Note that this can also be used to copy data between two domains
+ * via a third party if the source and destination domains have previously
+ * granted appropriate access to their pages to the third party.
+ *
+ * source_offset specifies an offset in the source frame, dest_offset
+ * the offset in the target frame and len specifies the number of
+ * bytes to be copied.
+ */
+
+#define _GNTCOPY_source_gref (0)
+#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
+#define _GNTCOPY_dest_gref (1)
+#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
+
+struct gnttab_copy {
+ /* IN parameters. */
+ struct gnttab_copy_ptr {
+ union {
+ grant_ref_t ref;
+ xen_pfn_t gmfn;
+ } u;
+ domid_t domid;
+ uint16_t offset;
+ } source, dest;
+ uint16_t len;
+ uint16_t flags; /* GNTCOPY_* */
+ /* OUT parameters. */
+ int16_t status;
+};
+typedef struct gnttab_copy gnttab_copy_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
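+
+/*
+ * Usage sketch (informative only): copying bytes from a local frame into a
+ * page that a foreign domain has granted to the caller for writing.
+ * HYPERVISOR_grant_table_op() is assumed to come from the guest's
+ * hypercall layer.
+ *
+ *     struct gnttab_copy copy = {
+ *         .source.u.gmfn = local_gmfn,
+ *         .source.domid  = DOMID_SELF,
+ *         .dest.u.ref    = remote_gref,
+ *         .dest.domid    = remote_domid,
+ *         .len           = len,          // offset + len must not cross a page
+ *         .flags         = GNTCOPY_dest_gref,
+ *     };
+ *     HYPERVISOR_grant_table_op(GNTTABOP_copy, &copy, 1);
+ *     // copy.status is GNTST_okay on success
+ */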
+
+/*
+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
+ * grant table.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+struct gnttab_query_size {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ uint32_t nr_frames;
+ uint32_t max_nr_frames;
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_query_size gnttab_query_size_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
+
+/*
+ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
+ * tracked by <handle> but atomically replace the page table entry with one
+ * pointing to the machine address under <new_addr>. <new_addr> will be
+ * redirected to the null entry.
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+struct gnttab_unmap_and_replace {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t new_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
+
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+/*
+ * GNTTABOP_set_version: Request a particular version of the grant
+ * table shared table structure. This operation may be used to toggle
+ * between different versions, but must be performed while no grants
+ * are active. The only defined versions are 1 and 2.
+ */
+struct gnttab_set_version {
+ /* IN/OUT parameters */
+ uint32_t version;
+};
+typedef struct gnttab_set_version gnttab_set_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t);
+
+
+/*
+ * GNTTABOP_get_status_frames: Get the list of frames used to store grant
+ * status for <dom>. In grant format version 2, the status is separated
+ * from the other shared grant fields to allow more efficient synchronization
+ * using barriers instead of atomic cmpexch operations.
+ * <nr_frames> specifies the size of the <frame_list> vector.
+ * The frame addresses are returned in the <frame_list>.
+ * Only <nr_frames> addresses are returned, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+struct gnttab_get_status_frames {
+ /* IN parameters. */
+ uint32_t nr_frames;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+ XEN_GUEST_HANDLE(uint64_t) frame_list;
+};
+typedef struct gnttab_get_status_frames gnttab_get_status_frames_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t);
+
+/*
+ * GNTTABOP_get_version: Get the grant table version which is in
+ * effect for domain <dom>.
+ */
+struct gnttab_get_version {
+ /* IN parameters */
+ domid_t dom;
+ uint16_t pad;
+ /* OUT parameters */
+ uint32_t version;
+};
+typedef struct gnttab_get_version gnttab_get_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t);
+
+/*
+ * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries.
+ */
+struct gnttab_swap_grant_ref {
+ /* IN parameters */
+ grant_ref_t ref_a;
+ grant_ref_t ref_b;
+ /* OUT parameters */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
+
+/*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+struct gnttab_cache_flush {
+ union {
+ uint64_t dev_bus_addr;
+ grant_ref_t ref;
+ } a;
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1u<<0)
+#define GNTTAB_CACHE_INVAL (1u<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1u<<31)
+ uint32_t op;
+};
+typedef struct gnttab_cache_flush gnttab_cache_flush_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_cache_flush_t);
+
+#endif /* __XEN_INTERFACE_VERSION__ */
+
+/*
+ * Bitfield values for gnttab_map_grant_ref.flags.
+ */
+ /* Map the grant entry for access by I/O devices. */
+#define _GNTMAP_device_map (0)
+#define GNTMAP_device_map (1<<_GNTMAP_device_map)
+ /* Map the grant entry for access by host CPUs. */
+#define _GNTMAP_host_map (1)
+#define GNTMAP_host_map (1<<_GNTMAP_host_map)
+ /* Accesses to the granted frame will be restricted to read-only access. */
+#define _GNTMAP_readonly (2)
+#define GNTMAP_readonly (1<<_GNTMAP_readonly)
+ /*
+ * GNTMAP_host_map subflag:
+ * 0 => The host mapping is usable only by the guest OS.
+ * 1 => The host mapping is usable by guest OS + current application.
+ */
+#define _GNTMAP_application_map (3)
+#define GNTMAP_application_map (1<<_GNTMAP_application_map)
+
+ /*
+ * GNTMAP_contains_pte subflag:
+ * 0 => This map request contains a host virtual address.
+ * 1 => This map request contains the machine address of the PTE to update.
+ */
+#define _GNTMAP_contains_pte (4)
+#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
+
+/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0 (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
+/*
+ * Values for error status returns. All errors are -ve.
+ */
+/* ` enum grant_status { */
+#define GNTST_okay (0) /* Normal return. */
+#define GNTST_general_error (-1) /* General undefined error. */
+#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
+#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
+#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
+#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
+#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
+#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
+#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
+#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
+#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
+#define GNTST_address_too_big (-11) /* transfer page address too large. */
+#define GNTST_eagain (-12) /* Operation not done; try again. */
+#define GNTST_no_space (-13) /* Out of space (handles etc). */
+/* ` } */
+
+#define GNTTABOP_error_msgs { \
+ "okay", \
+ "undefined error", \
+ "unrecognised domain id", \
+ "invalid grant reference", \
+ "invalid mapping handle", \
+ "invalid virtual address", \
+ "invalid device address", \
+ "no spare translation slot in the I/O MMU", \
+ "permission denied", \
+ "bad page", \
+ "copy arguments cross page boundary", \
+ "page address size too large", \
+ "operation not done; try again", \
+ "out of space", \
+}
+
+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/hvm/hvm_op.h b/include/hw/xen/interface/hvm/hvm_op.h
new file mode 100644
index 0000000000..e22adf0319
--- /dev/null
+++ b/include/hw/xen/interface/hvm/hvm_op.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2007, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
+#define __XEN_PUBLIC_HVM_HVM_OP_H__
+
+#include "../xen.h"
+#include "../trace.h"
+#include "../event_channel.h"
+
+/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
+#define HVMOP_set_param 0
+#define HVMOP_get_param 1
+struct xen_hvm_param {
+ domid_t domid; /* IN */
+ uint16_t pad;
+ uint32_t index; /* IN */
+ uint64_t value; /* IN/OUT */
+};
+typedef struct xen_hvm_param xen_hvm_param_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
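+
+/*
+ * Usage sketch (informative only): reading an HVM parameter. The
+ * HYPERVISOR_hvm_op() wrapper is assumed to come from the guest's
+ * hypercall layer, and the HVM_PARAM_* indices are defined in
+ * hvm/params.h.
+ *
+ *     struct xen_hvm_param p = {
+ *         .domid = DOMID_SELF,
+ *         .index = HVM_PARAM_STORE_EVTCHN,
+ *     };
+ *     if (HYPERVISOR_hvm_op(HVMOP_get_param, &p) == 0)
+ *         xenstore_port = p.value;       // xenstore_port: example variable
+ */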
+
+struct xen_hvm_altp2m_suppress_ve {
+ uint16_t view;
+ uint8_t suppress_ve; /* Boolean type. */
+ uint8_t pad1;
+ uint32_t pad2;
+ uint64_t gfn;
+};
+
+struct xen_hvm_altp2m_suppress_ve_multi {
+ uint16_t view;
+ uint8_t suppress_ve; /* Boolean type. */
+ uint8_t pad1;
+ int32_t first_error; /* Should be set to 0. */
+ uint64_t first_gfn; /* Value may be updated. */
+ uint64_t last_gfn;
+ uint64_t first_error_gfn; /* Gfn of the first error. */
+};
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040900
+
+/* Set the logical level of one of a domain's PCI INTx wires. */
+#define HVMOP_set_pci_intx_level 2
+struct xen_hvm_set_pci_intx_level {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
+ uint8_t domain, bus, device, intx;
+ /* Assertion level (0 = unasserted, 1 = asserted). */
+ uint8_t level;
+};
+typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
+
+/* Set the logical level of one of a domain's ISA IRQ wires. */
+#define HVMOP_set_isa_irq_level 3
+struct xen_hvm_set_isa_irq_level {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* ISA device identification, by ISA IRQ (0-15). */
+ uint8_t isa_irq;
+ /* Assertion level (0 = unasserted, 1 = asserted). */
+ uint8_t level;
+};
+typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
+
+#define HVMOP_set_pci_link_route 4
+struct xen_hvm_set_pci_link_route {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* PCI link identifier (0-3). */
+ uint8_t link;
+ /* ISA IRQ (1-15), or 0 (disable link). */
+ uint8_t isa_irq;
+};
+typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
+
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
+/* Flushes all VCPU TLBs: @arg must be NULL. */
+#define HVMOP_flush_tlbs 5
+
+/*
+ * hvmmem_type_t should not be defined when generating the corresponding
+ * compat header. This will ensure that the improperly named HVMMEM_(*)
+ * values are defined only once.
+ */
+#ifndef XEN_GENERATING_COMPAT_HEADERS
+
+typedef enum {
+ HVMMEM_ram_rw, /* Normal read/write guest RAM */
+ HVMMEM_ram_ro, /* Read-only; writes are discarded */
+ HVMMEM_mmio_dm, /* Reads and writes go to the device model */
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+ HVMMEM_mmio_write_dm, /* Read-only; writes go to the device model */
+#else
+ HVMMEM_unused, /* Placeholder; setting memory to this type
+ will fail for code after 4.7.0 */
+#endif
+ HVMMEM_ioreq_server /* Memory type claimed by an ioreq server; type
+ changes to this value are only allowed after
+ an ioreq server has claimed its ownership.
+ Only pages with HVMMEM_ram_rw are allowed to
+ change to this type; conversely, pages with
+ this type are only allowed to be changed back
+ to HVMMEM_ram_rw. */
+} hvmmem_type_t;
+
+#endif /* XEN_GENERATING_COMPAT_HEADERS */
+
+/* Hint from PV drivers for pagetable destruction. */
+#define HVMOP_pagetable_dying 9
+struct xen_hvm_pagetable_dying {
+ /* Domain with a pagetable about to be destroyed. */
+ domid_t domid;
+ uint16_t pad[3]; /* align next field on 8-byte boundary */
+ /* guest physical address of the toplevel pagetable dying */
+ uint64_t gpa;
+};
+typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);
+
+/* Get the current Xen time, in nanoseconds since system boot. */
+#define HVMOP_get_time 10
+struct xen_hvm_get_time {
+ uint64_t now; /* OUT */
+};
+typedef struct xen_hvm_get_time xen_hvm_get_time_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);
+
+#define HVMOP_xentrace 11
+struct xen_hvm_xentrace {
+ uint16_t event, extra_bytes;
+ uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
+};
+typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/* Deprecated by XENMEM_access_op_set_access */
+#define HVMOP_set_mem_access 12
+
+/* Deprecated by XENMEM_access_op_get_access */
+#define HVMOP_get_mem_access 13
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#define HVMOP_get_mem_type 15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* OUT variable. */
+ uint16_t mem_type;
+ uint16_t pad[2]; /* align next field on 8-byte boundary */
+ /* IN variable. */
+ uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/*
+ * Definitions relating to DMOP_create_ioreq_server. (Defined here for
+ * backwards compatibility).
+ */
+
+#define HVM_IOREQSRV_BUFIOREQ_OFF 0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for
+ * event channel upcalls on the specified <vcpu>. If set, this vector is
+ * used in preference to the domain-global callback (see
+ * HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+ uint32_t vcpu;
+ uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#define HVMOP_guest_request_vm_event 24
+
+/* HVMOP_altp2m: perform altp2m state operations */
+#define HVMOP_altp2m 25
+
+#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001
+
+struct xen_hvm_altp2m_domain_state {
+ /* IN or OUT variable on/off */
+ uint8_t state;
+};
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+struct xen_hvm_altp2m_vcpu_enable_notify {
+ uint32_t vcpu_id;
+ uint32_t pad;
+ /* #VE info area gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
+
+struct xen_hvm_altp2m_vcpu_disable_notify {
+ uint32_t vcpu_id;
+};
+typedef struct xen_hvm_altp2m_vcpu_disable_notify xen_hvm_altp2m_vcpu_disable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_disable_notify_t);
+
+struct xen_hvm_altp2m_view {
+ /* IN/OUT variable */
+ uint16_t view;
+ uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+struct xen_hvm_altp2m_set_mem_access {
+ /* view */
+ uint16_t view;
+ /* Memory type */
+ uint16_t access; /* xenmem_access_t */
+ uint32_t pad;
+ /* gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
+
+struct xen_hvm_altp2m_mem_access {
+ /* view */
+ uint16_t view;
+ /* Memory type */
+ uint16_t access; /* xenmem_access_t */
+ uint32_t pad;
+ /* gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_mem_access xen_hvm_altp2m_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_mem_access_t);
+
+struct xen_hvm_altp2m_set_mem_access_multi {
+ /* view */
+ uint16_t view;
+ uint16_t pad;
+ /* Number of pages */
+ uint32_t nr;
+ /*
+ * Used for continuation purposes.
+ * Must be set to zero upon initial invocation.
+ */
+ uint64_t opaque;
+ /* List of pfns to set access for */
+ XEN_GUEST_HANDLE(const_uint64) pfn_list;
+ /* Corresponding list of access settings for pfn_list */
+ XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+
+struct xen_hvm_altp2m_change_gfn {
+ /* view */
+ uint16_t view;
+ uint16_t pad1;
+ uint32_t pad2;
+ /* old gfn */
+ uint64_t old_gfn;
+ /* new gfn, INVALID_GFN (~0UL) means revert */
+ uint64_t new_gfn;
+};
+typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
+
+struct xen_hvm_altp2m_get_vcpu_p2m_idx {
+ uint32_t vcpu_id;
+ uint16_t altp2m_idx;
+};
+
+struct xen_hvm_altp2m_set_visibility {
+ uint16_t altp2m_idx;
+ uint8_t visible;
+ uint8_t pad;
+};
+
+struct xen_hvm_altp2m_op {
+ uint32_t version; /* HVMOP_ALTP2M_INTERFACE_VERSION */
+ uint32_t cmd;
+/* Get/set the altp2m state for a domain */
+#define HVMOP_altp2m_get_domain_state 1
+#define HVMOP_altp2m_set_domain_state 2
+/* Set a given VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify 3
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m 4
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m 5
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m 6
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access 7
+/* Change a p2m entry to have a different gfn->mfn mapping */
+#define HVMOP_altp2m_change_gfn 8
+/* Set access for an array of pages */
+#define HVMOP_altp2m_set_mem_access_multi 9
+/* Set the "Suppress #VE" bit on a page */
+#define HVMOP_altp2m_set_suppress_ve 10
+/* Get the "Suppress #VE" bit of a page */
+#define HVMOP_altp2m_get_suppress_ve 11
+/* Get the access of a page of memory from a certain view */
+#define HVMOP_altp2m_get_mem_access 12
+/* Disable altp2m event notifications for a given VCPU */
+#define HVMOP_altp2m_vcpu_disable_notify 13
+/* Get the active vcpu p2m index */
+#define HVMOP_altp2m_get_p2m_idx 14
+/* Set the "Supress #VE" bit for a range of pages */
+#define HVMOP_altp2m_set_suppress_ve_multi 15
+/* Set visibility for a given altp2m view */
+#define HVMOP_altp2m_set_visibility 16
+ domid_t domain;
+ uint16_t pad1;
+ uint32_t pad2;
+ union {
+ struct xen_hvm_altp2m_domain_state domain_state;
+ struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
+ struct xen_hvm_altp2m_view view;
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+ struct xen_hvm_altp2m_set_mem_access set_mem_access;
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
+ struct xen_hvm_altp2m_mem_access mem_access;
+ struct xen_hvm_altp2m_change_gfn change_gfn;
+ struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
+ struct xen_hvm_altp2m_suppress_ve suppress_ve;
+ struct xen_hvm_altp2m_suppress_ve_multi suppress_ve_multi;
+ struct xen_hvm_altp2m_vcpu_disable_notify disable_notify;
+ struct xen_hvm_altp2m_get_vcpu_p2m_idx get_vcpu_p2m_idx;
+ struct xen_hvm_altp2m_set_visibility set_visibility;
+ uint8_t pad[64];
+ } u;
+};
+typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
+
+#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/hvm/params.h b/include/hw/xen/interface/hvm/params.h
new file mode 100644
index 0000000000..a22b4ed45d
--- /dev/null
+++ b/include/hw/xen/interface/hvm/params.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2007, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
+#define __XEN_PUBLIC_HVM_PARAMS_H__
+
+#include "hvm_op.h"
+
+/* These parameters are deprecated and their meaning is undefined. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#define HVM_PARAM_PAE_ENABLED 4
+#define HVM_PARAM_DM_DOMAIN 13
+#define HVM_PARAM_MEMORY_EVENT_CR0 20
+#define HVM_PARAM_MEMORY_EVENT_CR3 21
+#define HVM_PARAM_MEMORY_EVENT_CR4 22
+#define HVM_PARAM_MEMORY_EVENT_INT3 23
+#define HVM_PARAM_NESTEDHVM 24
+#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
+#define HVM_PARAM_MEMORY_EVENT_MSR 30
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+/*
+ * Parameter space for HVMOP_{set,get}_param.
+ */
+
+#define HVM_PARAM_CALLBACK_IRQ 0
+#define HVM_PARAM_CALLBACK_IRQ_TYPE_MASK xen_mk_ullong(0xFF00000000000000)
+/*
+ * How should CPU0 event-channel notifications be delivered?
+ *
+ * If val == 0 then CPU0 event-channel notifications are not delivered.
+ * If val != 0, val[63:56] encodes the type, as follows:
+ */
+
+#define HVM_PARAM_CALLBACK_TYPE_GSI 0
+/*
+ * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
+ * and disables all notifications.
+ */
+
+#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
+/*
+ * val[55:0] is a delivery PCI INTx line:
+ * Domain = val[47:32], Bus = val[31:16], DevFn = val[15:8], IntX = val[1:0]
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
+/*
+ * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
+ * if this delivery method is available.
+ */
+#elif defined(__arm__) || defined(__aarch64__)
+#define HVM_PARAM_CALLBACK_TYPE_PPI 2
+/*
+ * val[55:16] needs to be zero.
+ * val[15:8] is the interrupt flag of the PPI used by the event channel:
+ * bit 8: the PPI is edge(1) or level(0) triggered
+ * bit 9: the PPI is active low(1) or high(0)
+ * val[7:0] is the PPI number used by the event channel.
+ * This is only used by ARM/ARM64; masking/EOI of the interrupt associated
+ * with the notification is handled by the interrupt controller.
+ */
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK 0xFF00
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL 2
+#endif
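+
+/*
+ * Worked example (informative only): on x86, to have event-channel upcalls
+ * delivered as interrupt vector 0xf3 the guest would set
+ * HVM_PARAM_CALLBACK_IRQ to
+ *
+ *     ((uint64_t)HVM_PARAM_CALLBACK_TYPE_VECTOR << 56) | 0xf3
+ *
+ * i.e. the type in val[63:56] and the vector number in val[7:0].
+ */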
+
+/*
+ * These are not used by Xen. They are here for convenience of HVM-guest
+ * xenbus implementations.
+ */
+#define HVM_PARAM_STORE_PFN 1
+#define HVM_PARAM_STORE_EVTCHN 2
+
+#define HVM_PARAM_IOREQ_PFN 5
+
+#define HVM_PARAM_BUFIOREQ_PFN 6
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * Viridian enlightenments
+ *
+ * (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx)
+ *
+ * To expose viridian enlightenments to the guest set this parameter
+ * to the desired feature mask. The base feature set must be present
+ * in any valid feature mask.
+ */
+#define HVM_PARAM_VIRIDIAN 9
+
+/* Base+Freq viridian feature sets:
+ *
+ * - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL)
+ * - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
+ * - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX)
+ * - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ * HV_X64_MSR_APIC_FREQUENCY)
+ */
+#define _HVMPV_base_freq 0
+#define HVMPV_base_freq (1 << _HVMPV_base_freq)
+
+/* Feature set modifications */
+
+/* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ * HV_X64_MSR_APIC_FREQUENCY).
+ * This modification restores the viridian feature set to the
+ * original 'base' set exposed in releases prior to Xen 4.4.
+ */
+#define _HVMPV_no_freq 1
+#define HVMPV_no_freq (1 << _HVMPV_no_freq)
+
+/* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */
+#define _HVMPV_time_ref_count 2
+#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count)
+
+/* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */
+#define _HVMPV_reference_tsc 3
+#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
+
+/* Use Hypercall for remote TLB flush */
+#define _HVMPV_hcall_remote_tlb_flush 4
+#define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush)
+
+/* Use APIC assist */
+#define _HVMPV_apic_assist 5
+#define HVMPV_apic_assist (1 << _HVMPV_apic_assist)
+
+/* Enable crash MSRs */
+#define _HVMPV_crash_ctl 6
+#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
+
+/* Enable SYNIC MSRs */
+#define _HVMPV_synic 7
+#define HVMPV_synic (1 << _HVMPV_synic)
+
+/* Enable STIMER MSRs */
+#define _HVMPV_stimer 8
+#define HVMPV_stimer (1 << _HVMPV_stimer)
+
+/* Use Synthetic Cluster IPI Hypercall */
+#define _HVMPV_hcall_ipi 9
+#define HVMPV_hcall_ipi (1 << _HVMPV_hcall_ipi)
+
+/* Enable ExProcessorMasks */
+#define _HVMPV_ex_processor_masks 10
+#define HVMPV_ex_processor_masks (1 << _HVMPV_ex_processor_masks)
+
+/* Allow more than 64 VPs */
+#define _HVMPV_no_vp_limit 11
+#define HVMPV_no_vp_limit (1 << _HVMPV_no_vp_limit)
+
+/* Enable vCPU hotplug */
+#define _HVMPV_cpu_hotplug 12
+#define HVMPV_cpu_hotplug (1 << _HVMPV_cpu_hotplug)
+
+#define HVMPV_feature_mask \
+ (HVMPV_base_freq | \
+ HVMPV_no_freq | \
+ HVMPV_time_ref_count | \
+ HVMPV_reference_tsc | \
+ HVMPV_hcall_remote_tlb_flush | \
+ HVMPV_apic_assist | \
+ HVMPV_crash_ctl | \
+ HVMPV_synic | \
+ HVMPV_stimer | \
+ HVMPV_hcall_ipi | \
+ HVMPV_ex_processor_masks | \
+ HVMPV_no_vp_limit | \
+ HVMPV_cpu_hotplug)
+
+#endif
+
+/*
+ * Set mode for virtual timers (currently x86 only):
+ * delay_for_missed_ticks (default):
+ * Do not advance a vcpu's time beyond the correct delivery time for
+ * interrupts that have been missed due to preemption. Deliver missed
+ * interrupts when the vcpu is rescheduled and advance the vcpu's virtual
+ * time stepwise for each one.
+ * no_delay_for_missed_ticks:
+ * As above, missed interrupts are delivered, but guest time always tracks
+ * wallclock (i.e., real) time while doing so.
+ * no_missed_ticks_pending:
+ * No missed interrupts are held pending. Instead, to ensure ticks are
+ * delivered at some non-zero rate, if we detect missed ticks then the
+ * internal tick alarm is not disabled if the VCPU is preempted during the
+ * next tick period.
+ * one_missed_tick_pending:
+ * Missed interrupts are collapsed together and delivered as one 'late tick'.
+ * Guest time always tracks wallclock (i.e., real) time.
+ */
+#define HVM_PARAM_TIMER_MODE 10
+#define HVMPTM_delay_for_missed_ticks 0
+#define HVMPTM_no_delay_for_missed_ticks 1
+#define HVMPTM_no_missed_ticks_pending 2
+#define HVMPTM_one_missed_tick_pending 3
+
+/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
+#define HVM_PARAM_HPET_ENABLED 11
+
+/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
+#define HVM_PARAM_IDENT_PT 12
+
+/* ACPI S state: currently support S0 and S3 on x86. */
+#define HVM_PARAM_ACPI_S_STATE 14
+
+/* TSS used on Intel when CR0.PE=0. */
+#define HVM_PARAM_VM86_TSS 15
+
+/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
+#define HVM_PARAM_VPT_ALIGN 16
+
+/* Console debug shared memory ring and event channel */
+#define HVM_PARAM_CONSOLE_PFN 17
+#define HVM_PARAM_CONSOLE_EVTCHN 18
+
+/*
+ * Select location of ACPI PM1a and TMR control blocks. Currently two locations
+ * are supported, specified by version 0 or 1 in this parameter:
+ * - 0: default, use the old addresses
+ * PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48
+ * - 1: use the new default qemu addresses
+ * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008
+ * You can find these address definitions in <hvm/ioreq.h>
+ */
+#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
+
+/* Params for the mem event rings */
+#define HVM_PARAM_PAGING_RING_PFN 27
+#define HVM_PARAM_MONITOR_RING_PFN 28
+#define HVM_PARAM_SHARING_RING_PFN 29
+
+/* SHUTDOWN_* action in case of a triple fault */
+#define HVM_PARAM_TRIPLE_FAULT_REASON 31
+
+#define HVM_PARAM_IOREQ_SERVER_PFN 32
+#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33
+
+/* Location of the VM Generation ID in guest physical address space. */
+#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
+
+/*
+ * Set mode for altp2m:
+ * disabled: don't activate altp2m (default)
+ * mixed: allow access to all altp2m ops for both in-guest and external tools
+ * external: allow access to external privileged tools only
+ * limited: guest only has limited access (i.e., control of VMFUNC and #VE)
+ *
+ * Note that 'mixed' mode has not been evaluated for safety from a
+ * security perspective. Before using this mode in a
+ * security-critical environment, each subop should be evaluated for
+ * safety, with unsafe subops blacklisted in XSM.
+ */
+#define HVM_PARAM_ALTP2M 35
+#define XEN_ALTP2M_disabled 0
+#define XEN_ALTP2M_mixed 1
+#define XEN_ALTP2M_external 2
+#define XEN_ALTP2M_limited 3
+
+/*
+ * Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
+ * save/restore. This is a workaround for a hardware limitation that
+ * does not allow the full FIP/FDP and FCS/FDS to be restored.
+ *
+ * Valid values are:
+ *
+ * 8: save/restore 64-bit FIP/FDP and clear FCS/FDS (default if CPU
+ * has FPCSDS feature).
+ *
+ * 4: save/restore 32-bit FIP/FDP, FCS/FDS, and clear upper 32-bits of
+ * FIP/FDP.
+ *
+ * 0: allow hypervisor to choose based on the value of FIP/FDP
+ * (default if CPU does not have FPCSDS).
+ *
+ * If FPCSDS (bit 13 in CPUID leaf 0x7, subleaf 0x0) is set, the CPU
+ * never saves FCS/FDS and this parameter should be left at the
+ * default of 8.
+ */
+#define HVM_PARAM_X87_FIP_WIDTH 36
+
+/*
+ * TSS (and its size) used on Intel when CR0.PE=0. The address occupies
+ * the low 32 bits, while the size is in the high 32 ones.
+ */
+#define HVM_PARAM_VM86_TSS_SIZED 37
+
+/* Enable MCA capabilities. */
+#define HVM_PARAM_MCA_CAP 38
+#define XEN_HVM_MCA_CAP_LMCE (xen_mk_ullong(1) << 0)
+#define XEN_HVM_MCA_CAP_MASK XEN_HVM_MCA_CAP_LMCE
+
+#define HVM_NR_PARAMS 39
+
+#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff --git a/include/hw/xen/interface/io/blkif.h b/include/hw/xen/interface/io/blkif.h
new file mode 100644
index 0000000000..22f1eef0c0
--- /dev/null
+++ b/include/hw/xen/interface/io/blkif.h
@@ -0,0 +1,713 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * blkif.h
+ *
+ * Unified block-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ * Copyright (c) 2012, Spectra Logic Corporation
+ */
+
+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
+#define __XEN_PUBLIC_IO_BLKIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Front->back notifications: When enqueuing a new request, sending a
+ * notification can be made conditional on req_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: When enqueuing a new response, sending a
+ * notification can be made conditional on rsp_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ */
+
+#ifndef blkif_vdev_t
+#define blkif_vdev_t uint16_t
+#endif
+#define blkif_sector_t uint64_t
+
+/*
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen block driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * All data in the XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed sized integer types capable of storing the conversion
+ * of a properly formated node string, without loss of information.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ * XenStore nodes marked "DEPRECATED" in their notes section should only be
+ * used to provide interoperability with legacy implementations.
+ *
+ * See the XenBus state transition diagram below for details on when XenBus
+ * nodes must be published and when they can be queried.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * mode
+ * Values: "r" (read only), "w" (writable)
+ *
+ * The read or write access permissions to the backing store to be
+ * granted to the frontend.
+ *
+ * params
+ * Values: string
+ *
+ * A free-form string providing sufficient information for the
+ * hotplug script to attach the device and provide a suitable
+ * handler (i.e., a block device) for blkback to use.
+ *
+ * physical-device
+ * Values: "MAJOR:MINOR"
+ * Notes: 11
+ *
+ * MAJOR and MINOR are the major number and minor number of the
+ * backing device respectively.
+ *
+ * physical-device-path
+ * Values: path string
+ *
+ * A string that contains the absolute path to the disk image. On
+ * NetBSD and Linux this is always a block device, while on FreeBSD
+ * it can be either a block device or a regular file.
+ *
+ * type
+ * Values: "file", "phy", "tap"
+ *
+ * The type of the backing device/object.
+ *
+ *
+ * direct-io-safe
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ *
+ * The underlying storage is not affected by the direct IO memory
+ * lifetime bug. See:
+ * https://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
+ *
+ * Therefore this option gives the backend permission to use
+ * O_DIRECT, notwithstanding that bug.
+ *
+ * That is, if this option is enabled, use of O_DIRECT is safe,
+ * in circumstances where we would normally have avoided it as a
+ * workaround for that bug. This option is not relevant for all
+ * backends, and even not necessarily supported for those for
+ * which it is relevant. A backend which knows that it is not
+ * affected by the bug can ignore this option.
+ *
+ * This option doesn't require a backend to use O_DIRECT, so it
+ * should not be used to try to control the caching behaviour.
+ *
+ *--------------------------------- Features ---------------------------------
+ *
+ * feature-barrier
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ *
+ * A value of "1" indicates that the backend can process requests
+ * containing the BLKIF_OP_WRITE_BARRIER request opcode. Requests
+ * of this type may still be returned at any time with the
+ * BLKIF_RSP_EOPNOTSUPP result code.
+ *
+ * feature-flush-cache
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ *
+ * A value of "1" indicates that the backend can process requests
+ * containing the BLKIF_OP_FLUSH_DISKCACHE request opcode. Requests
+ * of this type may still be returned at any time with the
+ * BLKIF_RSP_EOPNOTSUPP result code.
+ *
+ * feature-discard
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ *
+ * A value of "1" indicates that the backend can process requests
+ * containing the BLKIF_OP_DISCARD request opcode. Requests
+ * of this type may still be returned at any time with the
+ * BLKIF_RSP_EOPNOTSUPP result code.
+ *
+ * feature-persistent
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ * Notes: 7
+ *
+ * A value of "1" indicates that the backend can keep the grants used
+ * by the frontend driver mapped, so the same set of grants should be
+ * used in all transactions. The maximum number of grants the backend
+ * can map persistently depends on the implementation, but ideally it
+ * should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this
+ * feature the backend doesn't need to unmap each grant, preventing
+ * costly TLB flushes. The backend driver should only map grants
+ * persistently if the frontend supports it. If a backend driver chooses
+ * to use the persistent protocol when the frontend doesn't support it,
+ * it will probably hit the maximum number of persistently mapped grants
+ * (due to the fact that the frontend won't be reusing the same grants),
+ * and fall back to non-persistent mode. Backend implementations may
+ * shrink or expand the number of persistently mapped grants without
+ * notifying the frontend depending on memory constraints (this might
+ * cause a performance degradation).
+ *
+ * If a backend driver wants to limit the maximum number of persistently
+ * mapped grants to a value less than RING_SIZE *
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
+ * discard the grants that are less commonly used. Using an LRU in the
+ * backend driver paired with a LIFO queue in the frontend will
+ * allow us to have better performance in this scenario.
+ *
+ *----------------------- Request Transport Parameters ------------------------
+ *
+ * max-ring-page-order
+ * Values: <uint32_t>
+ * Default Value: 0
+ * Notes: 1, 3
+ *
+ * The maximum supported size of the request ring buffer in units of
+ * lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
+ * etc.).
+ *
+ * max-ring-pages
+ * Values: <uint32_t>
+ * Default Value: 1
+ * Notes: DEPRECATED, 2, 3
+ *
+ * The maximum supported size of the request ring buffer in units of
+ * machine pages. The value must be a power of 2.
+ *
+ *------------------------- Backend Device Properties -------------------------
+ *
+ * discard-enable
+ * Values: 0/1 (boolean)
+ * Default Value: 1
+ *
+ * This optional property, set by the toolstack, instructs the backend
+ * to offer (or not to offer) discard to the frontend. If the property
+ * is missing the backend should offer discard if the backing storage
+ * actually supports it.
+ *
+ * discard-alignment
+ * Values: <uint32_t>
+ * Default Value: 0
+ * Notes: 4, 5
+ *
+ * The offset, in bytes from the beginning of the virtual block device,
+ * to the first addressable discard extent on the underlying device.
+ *
+ * discard-granularity
+ * Values: <uint32_t>
+ * Default Value: <"sector-size">
+ * Notes: 4
+ *
+ * The size, in bytes, of the individually addressable discard extents
+ * of the underlying device.
+ *
+ * discard-secure
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ * Notes: 10
+ *
+ * A value of "1" indicates that the backend can process BLKIF_OP_DISCARD
+ * requests with the BLKIF_DISCARD_SECURE flag set.
+ *
+ * info
+ * Values: <uint32_t> (bitmap)
+ *
+ * A collection of bit flags describing attributes of the backing
+ * device. The VDISK_* macros define the meaning of each bit
+ * location.
+ *
+ * sector-size
+ * Values: <uint32_t>
+ *
+ * The logical block size, in bytes, of the underlying storage. This
+ * must be a power of two with a minimum value of 512.
+ *
+ * NOTE: Because of implementation bugs in some frontends this must be
+ * set to 512, unless the frontend advertises a non-zero value
+ * in its "feature-large-sector-size" xenbus node. (See below).
+ *
+ * physical-sector-size
+ * Values: <uint32_t>
+ * Default Value: <"sector-size">
+ *
+ * The physical block size, in bytes, of the backend storage. This
+ * must be an integer multiple of "sector-size".
+ *
+ * sectors
+ * Values: <uint64_t>
+ *
+ * The size of the backend device, expressed in units of "sector-size".
+ * The product of "sector-size" and "sectors" must also be an integer
+ * multiple of "physical-sector-size", if that node is present.
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * ring-ref
+ * Values: <uint32_t>
+ * Notes: 6
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer.
+ *
+ * ring-ref%u
+ * Values: <uint32_t>
+ * Notes: 6
+ *
+ * For a frontend providing a multi-page ring, a "number of ring pages"
+ * sized list of nodes, each containing a Xen grant reference granting
+ * permission for the backend to map the page of the ring located
+ * at page index "%u". Page indexes are zero based.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ *
+ * ring-page-order
+ * Values: <uint32_t>
+ * Default Value: 0
+ * Maximum Value: MAX(ffs(max-ring-pages) - 1, max-ring-page-order)
+ * Notes: 1, 3
+ *
+ * The size of the frontend allocated request ring buffer in units
+ * of lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
+ * etc.).
+ *
+ * num-ring-pages
+ * Values: <uint32_t>
+ * Default Value: 1
+ * Maximum Value: MAX(max-ring-pages,(0x1 << max-ring-page-order))
+ * Notes: DEPRECATED, 2, 3
+ *
+ * The size of the frontend allocated request ring buffer in units of
+ * machine pages. The value must be a power of 2.
+ *
+ *--------------------------------- Features ---------------------------------
+ *
+ * feature-persistent
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ * Notes: 7, 8, 9
+ *
+ * A value of "1" indicates that the frontend will reuse the same grants
+ * for all transactions, allowing the backend to map them with write
+ * access (even when it should be read-only). If the frontend hits the
+ * maximum number of allowed persistently mapped grants, it can fall back
+ * to non-persistent mode. This will cause a performance degradation,
+ * since the backend driver will still try to map those grants
+ * persistently. Since the persistent grants protocol is compatible with
+ * the previous protocol, a frontend driver can choose to work in
+ * persistent mode even when the backend doesn't support it.
+ *
+ * It is recommended that the frontend driver stores the persistently
+ * mapped grants in a LIFO queue, so a subset of all persistently mapped
+ * grants gets used commonly. This is done in case the backend driver
+ * decides to limit the maximum number of persistently mapped grants
+ * to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
+ *
+ * feature-large-sector-size
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ *
+ * A value of "1" indicates that the frontend will correctly supply and
+ * interpret all sector-based quantities in terms of the "sector-size"
+ * value supplied in the backend info, whatever that may be set to.
+ * If this node is not present or its value is "0" then it is assumed
+ * that the frontend requires that the logical block size is 512 as it
+ * is hardcoded (which is the case in some frontend implementations).
+ *
+ * trusted
+ * Values: 0/1 (boolean)
+ * Default value: 1
+ *
+ * A value of "0" indicates that the frontend should not trust the
+ * backend, and should deploy whatever measures available to protect from
+ * a malicious backend on the other end.
+ *
+ *------------------------- Virtual Device Properties -------------------------
+ *
+ * device-type
+ * Values: "disk", "cdrom", "floppy", etc.
+ *
+ * virtual-device
+ * Values: <uint32_t>
+ *
+ * A value indicating the physical device to virtualize within the
+ * frontend's domain. (e.g. "The first ATA disk", "The third SCSI
+ * disk", etc.)
+ *
+ * See docs/misc/vbd-interface.txt for details on the format of this
+ * value.
+ *
+ * Notes
+ * -----
+ * (1) Multi-page ring buffer scheme first developed in the Citrix XenServer
+ * PV drivers.
+ * (2) Multi-page ring buffer scheme first used in some RedHat distributions
+ * including a distribution deployed on certain nodes of the Amazon
+ * EC2 cluster.
+ * (3) Support for multi-page ring buffers was implemented independently,
+ * in slightly different forms, by both Citrix and RedHat/Amazon.
+ * For full interoperability, block front and backends should publish
+ * identical ring parameters, adjusted for unit differences, to the
+ * XenStore nodes used in both schemes.
+ * (4) Devices that support discard functionality may internally allocate space
+ * (discardable extents) in units that are larger than the exported logical
+ * block size. If the backing device has such discardable extents the
+ * backend should provide both discard-granularity and discard-alignment.
+ * Providing just one of the two may be considered an error by the frontend.
+ * Backends supporting discard should include discard-granularity and
+ * discard-alignment even if they support discarding individual sectors.
+ * Frontends should assume discard-alignment == 0 and discard-granularity
+ * == sector size if these keys are missing.
+ * (5) The discard-alignment parameter allows a physical device to be
+ * partitioned into virtual devices that do not necessarily begin or
+ * end on a discardable extent boundary.
+ * (6) When there is only a single page allocated to the request ring,
+ * 'ring-ref' is used to communicate the grant reference for this
+ * page to the backend. When using a multi-page ring, the 'ring-ref'
+ * node is not created. Instead 'ring-ref0' - 'ring-refN' are used.
+ * (7) When using persistent grants, data has to be copied from/to the page
+ * where the grant is currently mapped. The overhead of doing this copy,
+ * however, does not outweigh the speed improvement of not having to unmap
+ * the grants.
+ * (8) The frontend driver has to allow the backend driver to map all grants
+ * with write access, even when they should be mapped read-only, since
+ * further requests may reuse these grants and require write permissions.
+ * (9) Linux implementation doesn't have a limit on the maximum number of
+ * grants that can be persistently mapped in the frontend driver, but
+ * due to the frontend driver implementation it should never be bigger
+ * than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
+ *(10) The discard-secure property may be present and will be set to 1 if the
+ * backing device supports secure discard.
+ *(11) Only used by Linux and NetBSD.
+ */
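+
+/*
+ * Illustrative sketch, not part of the protocol definition: one way a
+ * frontend might derive the "ring-page-order" value it publishes from the
+ * backend's advertised "max-ring-page-order" and its own preferred number
+ * of ring pages. The helper name, guard macro and the notion of a preferred
+ * page count are assumptions made for this example only.
+ */
+#ifdef BLKIF_EXAMPLE_RING_ORDER
+static inline unsigned int blkif_example_ring_order(unsigned int max_order,
+                                                    unsigned int want_pages)
+{
+    unsigned int order = 0;
+
+    /* Round the preferred page count up to the next power of two. */
+    while ((1u << order) < want_pages)
+        order++;
+
+    /* Never exceed what the backend advertised in "max-ring-page-order". */
+    if (order > max_order)
+        order = max_order;
+
+    return order;   /* published by the frontend as "ring-page-order" */
+}
+#endif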
+
+/*
+ * Multiple hardware queues/rings:
+ * If supported, the backend will write the key "multi-queue-max-queues" to
+ * the directory for that vbd, and set its value to the maximum supported
+ * number of queues.
+ * Frontends that are aware of this feature and wish to use it can write the
+ * key "multi-queue-num-queues" with the number they wish to use, which must be
+ * greater than zero, and no more than the value reported by the backend in
+ * "multi-queue-max-queues".
+ *
+ * For frontends requesting just one queue, the usual event-channel and
+ * ring-ref keys are written as before, simplifying the backend processing
+ * to avoid distinguishing between a frontend that doesn't understand the
+ * multi-queue feature, and one that does, but requested only one queue.
+ *
+ * Frontends requesting two or more queues must not write the toplevel
+ * event-channel and ring-ref keys, instead writing those keys under sub-keys
+ * having the name "queue-N" where N is the integer ID of the queue/ring for
+ * which those keys belong. Queues are indexed from zero.
+ * For example, a frontend with two queues must write the following set of
+ * queue-related keys:
+ *
+ * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
+ * /local/domain/1/device/vbd/0/queue-0 = ""
+ * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
+ * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
+ * /local/domain/1/device/vbd/0/queue-1 = ""
+ * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
+ * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
+ *
+ * It is also possible to use multiple queues/rings together with
+ * the multi-page ring buffer feature.
+ * For example, a frontend that requests two queues/rings, each with a ring
+ * buffer of two pages, must write the following set of related keys:
+ *
+ * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
+ * /local/domain/1/device/vbd/0/ring-page-order = "1"
+ * /local/domain/1/device/vbd/0/queue-0 = ""
+ * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
+ * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
+ * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
+ * /local/domain/1/device/vbd/0/queue-1 = ""
+ * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
+ * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
+ * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
+ *
+ */
+
+/*
+ * STATE DIAGRAMS
+ *
+ *****************************************************************************
+ * Startup *
+ *****************************************************************************
+ *
+ * Tool stack creates front and back nodes with state XenbusStateInitialising.
+ *
+ * Front Back
+ * ================================= =====================================
+ * XenbusStateInitialising XenbusStateInitialising
+ * o Query virtual device o Query backend device identification
+ * properties. data.
+ * o Setup OS device instance. o Open and validate backend device.
+ * o Publish backend features and
+ * transport parameters.
+ * |
+ * |
+ * V
+ * XenbusStateInitWait
+ *
+ * o Query backend features and
+ * transport parameters.
+ * o Allocate and initialize the
+ * request ring.
+ * o Publish transport parameters
+ * that will be in effect during
+ * this connection.
+ * |
+ * |
+ * V
+ * XenbusStateInitialised
+ *
+ * o Query frontend transport parameters.
+ * o Connect to the request ring and
+ * event channel.
+ * o Publish backend device properties.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * o Query backend device properties.
+ * o Finalize OS virtual device
+ * instance.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * Note: Drivers that do not support any optional features, or the negotiation
+ * of transport parameters, can skip certain states in the state machine:
+ *
+ * o A frontend may transition to XenbusStateInitialised without
+ * waiting for the backend to enter XenbusStateInitWait. In this
+ * case, default transport parameters are in effect and any
+ * transport parameters published by the frontend must contain
+ * their default values.
+ *
+ * o A backend may transition to XenbusStateInitialised, bypassing
+ * XenbusStateInitWait, without waiting for the frontend to first
+ * enter the XenbusStateInitialised state. In this case, default
+ * transport parameters are in effect and any transport parameters
+ * published by the backend must contain their default values.
+ *
+ * Drivers that support optional features and/or transport parameter
+ * negotiation must tolerate these additional state transition paths.
+ * In general this means performing the work of any skipped state
+ * transition, if it has not already been performed, in addition to the
+ * work associated with entry into the current state.
+ */
+
+/*
+ * REQUEST CODES.
+ */
+#define BLKIF_OP_READ 0
+#define BLKIF_OP_WRITE 1
+/*
+ * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
+ * operation code ("barrier request") must be completed prior to the
+ * execution of the barrier request. All writes issued after the barrier
+ * request must not execute until after the completion of the barrier request.
+ *
+ * Optional. See "feature-barrier" XenBus node documentation above.
+ */
+#define BLKIF_OP_WRITE_BARRIER 2
+/*
+ * Commit any uncommitted contents of the backing device's volatile cache
+ * to stable storage.
+ *
+ * Optional. See "feature-flush-cache" XenBus node documentation above.
+ */
+#define BLKIF_OP_FLUSH_DISKCACHE 3
+/*
+ * Used in SLES sources for device specific command packet
+ * contained within the request. Reserved for that purpose.
+ */
+#define BLKIF_OP_RESERVED_1 4
+/*
+ * Indicate to the backend device that a region of storage is no longer in
+ * use, and may be discarded at any time without impact to the client. If
+ * the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
+ * discarded region on the device must be rendered unrecoverable before the
+ * command returns.
+ *
+ * This operation is analogous to performing a trim (ATA) or unmap (SCSI)
+ * command on a native device.
+ *
+ * More information about trim/unmap operations can be found at:
+ * http://t13.org/Documents/UploadedDocuments/docs2008/
+ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
+ * http://www.seagate.com/staticfiles/support/disc/manuals/
+ * Interface%20manuals/100293068c.pdf
+ *
+ * Optional. See "feature-discard", "discard-alignment",
+ * "discard-granularity", and "discard-secure" in the XenBus node
+ * documentation above.
+ */
+#define BLKIF_OP_DISCARD 5
+
+/*
+ * Recognized if "feature-max-indirect-segments" in present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT 6
+
+/*
+ * Maximum scatter/gather segments per request.
+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
+ */
+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+
+/*
+ * Maximum number of indirect pages to use per request.
+ */
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+/*
+ * NB. 'first_sect' and 'last_sect' in blkif_request_segment, as well as
+ * 'sector_number' in blkif_request, blkif_request_discard and
+ * blkif_request_indirect are sector-based quantities. See the description
+ * of the "feature-large-sector-size" frontend xenbus node above for
+ * more information.
+ */
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
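+
+/*
+ * Illustrative sketch of the indirect-page calculation described in the
+ * BLKIF_OP_INDIRECT comment above. The helper name and guard macro are
+ * assumptions, and PAGE_SIZE is assumed to be provided by the including OS.
+ */
+#ifdef BLKIF_EXAMPLE_INDIRECT_PAGES
+static inline unsigned int
+blkif_example_indirect_pages(unsigned int indirect_segments)
+{
+    unsigned int per_page = PAGE_SIZE / sizeof(struct blkif_request_segment);
+
+    /* ceil(indirect_segments / per_page), computed with integer math. */
+    return (indirect_segments + per_page - 1) / per_page;
+}
+#endif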
+
+/*
+ * Starting ring element for any I/O request.
+ */
+struct blkif_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+typedef struct blkif_request blkif_request_t;
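+
+/*
+ * Illustrative sketch, not part of this interface: filling in a
+ * single-segment read request. The helper name and guard macro are
+ * assumptions, and the caller is assumed to have already granted 'gref'
+ * for the data frame. Placing the request on the ring and notifying the
+ * backend are omitted.
+ */
+#ifdef BLKIF_EXAMPLE_USAGE
+static inline void blkif_example_fill_read(struct blkif_request *req,
+                                           blkif_vdev_t handle, uint64_t id,
+                                           blkif_sector_t sector_number,
+                                           grant_ref_t gref)
+{
+    req->operation = BLKIF_OP_READ;
+    req->nr_segments = 1;
+    req->handle = handle;
+    req->id = id;                       /* echoed back in the response */
+    req->sector_number = sector_number; /* in units of "sector-size" */
+    req->seg[0].gref = gref;            /* grant for the I/O buffer frame */
+    req->seg[0].first_sect = 0;
+    req->seg[0].last_sect = 7;          /* 8 x 512-byte sectors == one 4KiB frame */
+}
+#endif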
+
+/*
+ * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD
+ * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
+ */
+struct blkif_request_discard {
+ uint8_t operation; /* BLKIF_OP_DISCARD */
+ uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
+#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
+ blkif_vdev_t handle; /* same as for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ blkif_sector_t sector_number;/* start sector idx on disk */
+ uint64_t nr_sectors; /* number of contiguous sectors to discard*/
+};
+typedef struct blkif_request_discard blkif_request_discard_t;
+
+struct blkif_request_indirect {
+ uint8_t operation; /* BLKIF_OP_INDIRECT */
+ uint8_t indirect_op; /* BLKIF_OP_{READ/WRITE} */
+ uint16_t nr_segments; /* number of segments */
+ uint64_t id; /* private guest value, echoed in resp */
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ blkif_vdev_t handle; /* same as for read/write requests */
+ grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef __i386__
+ uint64_t pad; /* Make it 64 byte aligned on i386 */
+#endif
+};
+typedef struct blkif_request_indirect blkif_request_indirect_t;
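+
+/*
+ * Illustrative sketch, not part of this interface: filling in the header
+ * of an indirect read request. The helper name and guard macro are
+ * assumptions; the caller is assumed to have granted 'nr_pages' pages
+ * already populated with struct blkif_request_segment entries describing
+ * 'nr_segments' segments in total.
+ */
+#ifdef BLKIF_EXAMPLE_INDIRECT_USAGE
+static inline void
+blkif_example_fill_indirect(struct blkif_request_indirect *req,
+                            blkif_vdev_t handle, uint64_t id,
+                            blkif_sector_t sector_number,
+                            const grant_ref_t *grefs, unsigned int nr_pages,
+                            uint16_t nr_segments)
+{
+    unsigned int i;
+
+    req->operation = BLKIF_OP_INDIRECT;
+    req->indirect_op = BLKIF_OP_READ;   /* direction of the actual transfer */
+    req->nr_segments = nr_segments;     /* total segments across all pages */
+    req->id = id;
+    req->sector_number = sector_number;
+    req->handle = handle;
+
+    for (i = 0; i < nr_pages && i < BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST; i++)
+        req->indirect_grefs[i] = grefs[i];
+}
+#endif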
+
+struct blkif_response {
+ uint64_t id; /* copied from request */
+ uint8_t operation; /* copied from request */
+ int16_t status; /* BLKIF_RSP_??? */
+};
+typedef struct blkif_response blkif_response_t;
+
+/*
+ * STATUS RETURN CODES.
+ */
+ /* Operation not supported (only happens on barrier writes). */
+#define BLKIF_RSP_EOPNOTSUPP -2
+ /* Operation failed for some unspecified reason (-EIO). */
+#define BLKIF_RSP_ERROR -1
+ /* Operation completed successfully. */
+#define BLKIF_RSP_OKAY 0
+
+/*
+ * Generate blkif ring structures and types.
+ */
+DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
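+
+/*
+ * Illustrative sketch, not part of this interface: queueing a prepared
+ * request on the frontend ring generated above, using the generic macros
+ * from ring.h. The helper name and guard macro are assumptions; sending
+ * the event-channel notification when 'notify' is returned non-zero is
+ * left to the caller.
+ */
+#ifdef BLKIF_EXAMPLE_RING_USAGE
+static inline int blkif_example_submit(blkif_front_ring_t *ring,
+                                       const blkif_request_t *src)
+{
+    blkif_request_t *req;
+    int notify;
+
+    if (RING_FULL(ring))
+        return -1;                      /* no free slot, try again later */
+
+    req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
+    *req = *src;                        /* copy the prepared request in */
+    ring->req_prod_pvt++;
+
+    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+    return notify;                      /* non-zero: kick the event channel */
+}
+#endif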
+
+#define VDISK_CDROM 0x1
+#define VDISK_REMOVABLE 0x2
+#define VDISK_READONLY 0x4
+
+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/console.h b/include/hw/xen/interface/io/console.h
new file mode 100644
index 0000000000..4509b4b689
--- /dev/null
+++ b/include/hw/xen/interface/io/console.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * console.h
+ *
+ * Console I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2005, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
+#define __XEN_PUBLIC_IO_CONSOLE_H__
+
+typedef uint32_t XENCONS_RING_IDX;
+
+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
+
+struct xencons_interface {
+ char in[1024];
+ char out[2048];
+ XENCONS_RING_IDX in_cons, in_prod;
+ XENCONS_RING_IDX out_cons, out_prod;
+};
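+
+/*
+ * Illustrative sketch, not part of this interface: placing one character
+ * into the output ring using MASK_XENCONS_IDX. The helper name and guard
+ * macro are assumptions; the required write barrier and the event-channel
+ * notification are only indicated by comments.
+ */
+#ifdef XENCONS_EXAMPLE_USAGE
+static inline int xencons_example_put_char(struct xencons_interface *intf,
+                                           char c)
+{
+    XENCONS_RING_IDX prod = intf->out_prod;
+
+    /* The ring is full once the producer is sizeof(out) ahead of the consumer. */
+    if (prod - intf->out_cons >= sizeof(intf->out))
+        return 0;
+
+    intf->out[MASK_XENCONS_IDX(prod, intf->out)] = c;
+    /* A write barrier belongs here: data must be visible before the index. */
+    intf->out_prod = prod + 1;
+    /* The caller should now notify the backend via the event channel. */
+    return 1;
+}
+#endif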
+
+#ifdef XEN_WANT_FLEX_CONSOLE_RING
+#include "ring.h"
+DEFINE_XEN_FLEX_RING(xencons);
+#endif
+
+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/fbif.h b/include/hw/xen/interface/io/fbif.h
new file mode 100644
index 0000000000..93c73195d8
--- /dev/null
+++ b/include/hw/xen/interface/io/fbif.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * fbif.h -- Xen virtual frame buffer device
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_FBIF_H__
+#define __XEN_PUBLIC_IO_FBIF_H__
+
+/* Out events (frontend -> backend) */
+
+/*
+ * Out events may be sent only when requested by backend, and receipt
+ * of an unknown out event is an error.
+ */
+
+/* Event type 1 currently not used */
+/*
+ * Framebuffer update notification event
+ * Capable frontend sets feature-update in xenstore.
+ * Backend requests it by setting request-update in xenstore.
+ */
+#define XENFB_TYPE_UPDATE 2
+
+struct xenfb_update
+{
+ uint8_t type; /* XENFB_TYPE_UPDATE */
+ int32_t x; /* source x */
+ int32_t y; /* source y */
+ int32_t width; /* rect width */
+ int32_t height; /* rect height */
+};
+
+/*
+ * Framebuffer resize notification event
+ * Capable backend sets feature-resize in xenstore.
+ */
+#define XENFB_TYPE_RESIZE 3
+
+struct xenfb_resize
+{
+ uint8_t type; /* XENFB_TYPE_RESIZE */
+ int32_t width; /* width in pixels */
+ int32_t height; /* height in pixels */
+ int32_t stride; /* stride in bytes */
+ int32_t depth; /* depth in bits */
+ int32_t offset; /* offset of the framebuffer in bytes */
+};
+
+#define XENFB_OUT_EVENT_SIZE 40
+
+union xenfb_out_event
+{
+ uint8_t type;
+ struct xenfb_update update;
+ struct xenfb_resize resize;
+ char pad[XENFB_OUT_EVENT_SIZE];
+};
+
+/* In events (backend -> frontend) */
+
+/*
+ * Frontends should ignore unknown in events.
+ */
+
+/*
+ * Framebuffer refresh period advice
+ * Backend sends it to advise the frontend their preferred period of
+ * refresh. Frontends that keep the framebuffer constantly up-to-date
+ * just ignore it. Frontends that use the advice should immediately
+ * refresh the framebuffer (and send an update notification event if
+ * those have been requested), then use the update frequency to guide
+ * their periodic refreshes.
+ */
+#define XENFB_TYPE_REFRESH_PERIOD 1
+#define XENFB_NO_REFRESH 0
+
+struct xenfb_refresh_period
+{
+ uint8_t type; /* XENFB_TYPE_REFRESH_PERIOD */
+ uint32_t period; /* period of refresh, in ms,
+ * XENFB_NO_REFRESH if no refresh is needed */
+};
+
+#define XENFB_IN_EVENT_SIZE 40
+
+union xenfb_in_event
+{
+ uint8_t type;
+ struct xenfb_refresh_period refresh_period;
+ char pad[XENFB_IN_EVENT_SIZE];
+};
+
+/* shared page */
+
+#define XENFB_IN_RING_SIZE 1024
+#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
+#define XENFB_IN_RING_OFFS 1024
+#define XENFB_IN_RING(page) \
+ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
+#define XENFB_IN_RING_REF(page, idx) \
+ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
+
+#define XENFB_OUT_RING_SIZE 2048
+#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
+#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
+#define XENFB_OUT_RING(page) \
+ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
+#define XENFB_OUT_RING_REF(page, idx) \
+ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
+
+struct xenfb_page
+{
+ uint32_t in_cons, in_prod;
+ uint32_t out_cons, out_prod;
+
+ int32_t width; /* the width of the framebuffer (in pixels) */
+ int32_t height; /* the height of the framebuffer (in pixels) */
+ uint32_t line_length; /* the length of a row of pixels (in bytes) */
+ uint32_t mem_length; /* the length of the framebuffer (in bytes) */
+ uint8_t depth; /* the depth of a pixel (in bits) */
+
+ /*
+ * Framebuffer page directory
+ *
+ * Each directory page holds PAGE_SIZE / sizeof(*pd)
+ * framebuffer pages, and can thus map up to PAGE_SIZE *
+ * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
+ * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs
+ * 64 bit. 256 directories give enough room for a 512 Meg
+ * framebuffer with a max resolution of 12,800x10,240. Should
+ * be enough for a while with room leftover for expansion.
+ */
+ unsigned long pd[256];
+};
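+
+/*
+ * Illustrative sketch, not part of this interface: queueing an
+ * XENFB_TYPE_UPDATE out event on the shared page using the ring macros
+ * above. The helper name and guard macro are assumptions; the caller is
+ * assumed to have checked that the backend requested updates
+ * ("request-update"), and the write barrier and event-channel kick are
+ * only indicated by comments.
+ */
+#ifdef XENFB_EXAMPLE_USAGE
+static inline int xenfb_example_send_update(struct xenfb_page *page,
+                                            int32_t x, int32_t y,
+                                            int32_t width, int32_t height)
+{
+    uint32_t prod = page->out_prod;
+    union xenfb_out_event *event;
+
+    if (prod - page->out_cons == XENFB_OUT_RING_LEN)
+        return 0;                       /* out ring is full */
+
+    event = &XENFB_OUT_RING_REF(page, prod);
+    event->update.type = XENFB_TYPE_UPDATE;
+    event->update.x = x;
+    event->update.y = y;
+    event->update.width = width;
+    event->update.height = height;
+
+    /* A write barrier belongs here: the event must be visible first. */
+    page->out_prod = prod + 1;
+    /* The caller should now notify the backend via the event channel. */
+    return 1;
+}
+#endif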
+
+/*
+ * Wart: xenkbd needs to know default resolution. Put it here until a
+ * better solution is found, but don't leak it to the backend.
+ */
+#ifdef __KERNEL__
+#define XENFB_WIDTH 800
+#define XENFB_HEIGHT 600
+#define XENFB_DEPTH 32
+#endif
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/kbdif.h b/include/hw/xen/interface/io/kbdif.h
new file mode 100644
index 0000000000..4bde6b3821
--- /dev/null
+++ b/include/hw/xen/interface/io/kbdif.h
@@ -0,0 +1,559 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * kbdif.h -- Xen virtual keyboard/mouse
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_KBDIF_H__
+#define __XEN_PUBLIC_IO_KBDIF_H__
+
+/*
+ *****************************************************************************
+ * Feature and Parameter Negotiation
+ *****************************************************************************
+ *
+ * The two halves of a para-virtual driver utilize nodes within
+ * XenStore to communicate capabilities and to negotiate operating parameters.
+ * This section enumerates these nodes which reside in the respective front and
+ * backend portions of XenStore, following XenBus convention.
+ *
+ * All data in XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed-size integer types capable of storing the conversion
+ * of a properly formatted node string, without loss of information.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *---------------------------- Features supported ----------------------------
+ *
+ * A capable backend advertises supported features by publishing
+ * corresponding entries in XenStore and setting each entry's value to 1.
+ * If a feature is not supported then the value must be set to 0, or the
+ * entry omitted.
+ *
+ * feature-disable-keyboard
+ * Values: <uint>
+ *
+ * If there is no need for the frontend to expose a virtual keyboard
+ * device then this must be set to 1.
+ *
+ * feature-disable-pointer
+ * Values: <uint>
+ *
+ * If there is no need for the frontend to expose a virtual pointer
+ * device then this must be set to 1.
+ *
+ * feature-abs-pointer
+ * Values: <uint>
+ *
+ * Backends which support reporting of absolute coordinates for the
+ * pointer device should set this to 1.
+ *
+ * feature-multi-touch
+ * Values: <uint>
+ *
+ * Backends which support reporting of multi-touch events
+ * should set this to 1.
+ *
+ * feature-raw-pointer
+ * Values: <uint>
+ *
+ * Backends which support reporting raw (unscaled) absolute coordinates
+ * for pointer devices should set this to 1. Raw (unscaled) values have
+ * a range of [0, 0x7fff].
+ *
+ *----------------------- Device Instance Parameters ------------------------
+ *
+ * unique-id
+ * Values: <string>
+ *
+ * After device instance initialization it is assigned a unique ID,
+ * so every instance of the frontend can be identified by the backend
+ * by this ID. This can be a UUID or similar identifier.
+ *
+ *------------------------- Pointer Device Parameters ------------------------
+ *
+ * width
+ * Values: <uint>
+ *
+ * Maximum X coordinate (width) to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ * height
+ * Values: <uint>
+ *
+ * Maximum Y coordinate (height) to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ *----------------------- Multi-touch Device Parameters ----------------------
+ *
+ * multi-touch-num-contacts
+ * Values: <uint>
+ *
+ * Number of simultaneous touches reported.
+ *
+ * multi-touch-width
+ * Values: <uint>
+ *
+ * Width of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ * multi-touch-height
+ * Values: <uint>
+ *
+ * Height of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------------------ Feature request -----------------------------
+ *
+ * A capable frontend requests features from the backend by setting the
+ * corresponding entries to 1 in XenStore. Requests for features not
+ * advertised as supported by the backend have no effect.
+ *
+ * request-abs-pointer
+ * Values: <uint>
+ *
+ * Request backend to report absolute pointer coordinates
+ * (XENKBD_TYPE_POS) instead of relative ones (XENKBD_TYPE_MOTION).
+ *
+ * request-multi-touch
+ * Values: <uint>
+ *
+ * Request backend to report multi-touch events.
+ *
+ * request-raw-pointer
+ * Values: <uint>
+ *
+ * Request backend to report raw unscaled absolute pointer coordinates.
+ * This option is only valid if request-abs-pointer is also set.
+ * Raw unscaled coordinates have the range [0, 0x7fff]
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: <uint>
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * page-gref
+ * Values: <uint>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page in a single page sized event ring buffer.
+ *
+ * page-ref
+ * Values: <uint>
+ *
+ * OBSOLETE, not recommended for use.
+ * PFN of the shared page.
+ */
+
+/*
+ * EVENT CODES.
+ */
+
+#define XENKBD_TYPE_MOTION 1
+#define XENKBD_TYPE_RESERVED 2
+#define XENKBD_TYPE_KEY 3
+#define XENKBD_TYPE_POS 4
+#define XENKBD_TYPE_MTOUCH 5
+
+/* Multi-touch event sub-codes */
+
+#define XENKBD_MT_EV_DOWN 0
+#define XENKBD_MT_EV_UP 1
+#define XENKBD_MT_EV_MOTION 2
+#define XENKBD_MT_EV_SYN 3
+#define XENKBD_MT_EV_SHAPE 4
+#define XENKBD_MT_EV_ORIENT 5
+
+/*
+ * CONSTANTS, XENSTORE FIELD AND PATH NAME STRINGS, HELPERS.
+ */
+
+#define XENKBD_DRIVER_NAME "vkbd"
+
+#define XENKBD_FIELD_FEAT_DSBL_KEYBRD "feature-disable-keyboard"
+#define XENKBD_FIELD_FEAT_DSBL_POINTER "feature-disable-pointer"
+#define XENKBD_FIELD_FEAT_ABS_POINTER "feature-abs-pointer"
+#define XENKBD_FIELD_FEAT_RAW_POINTER "feature-raw-pointer"
+#define XENKBD_FIELD_FEAT_MTOUCH "feature-multi-touch"
+#define XENKBD_FIELD_REQ_ABS_POINTER "request-abs-pointer"
+#define XENKBD_FIELD_REQ_RAW_POINTER "request-raw-pointer"
+#define XENKBD_FIELD_REQ_MTOUCH "request-multi-touch"
+#define XENKBD_FIELD_RING_GREF "page-gref"
+#define XENKBD_FIELD_EVT_CHANNEL "event-channel"
+#define XENKBD_FIELD_WIDTH "width"
+#define XENKBD_FIELD_HEIGHT "height"
+#define XENKBD_FIELD_MT_WIDTH "multi-touch-width"
+#define XENKBD_FIELD_MT_HEIGHT "multi-touch-height"
+#define XENKBD_FIELD_MT_NUM_CONTACTS "multi-touch-num-contacts"
+#define XENKBD_FIELD_UNIQUE_ID "unique-id"
+
+/* OBSOLETE, not recommended for use */
+#define XENKBD_FIELD_RING_REF "page-ref"
+
+/*
+ *****************************************************************************
+ * Description of the protocol between frontend and backend driver.
+ *****************************************************************************
+ *
+ * The two halves of a Para-virtual driver communicate with
+ * each other using a shared page and an event channel.
+ * The shared page contains a ring of event structures.
+ *
+ * All reserved fields in the structures below must be 0.
+ *
+ *****************************************************************************
+ * Backend to frontend events
+ *****************************************************************************
+ *
+ * Frontends should ignore unknown in events.
+ * All event packets have the same length (40 octets).
+ * All event packets have a common header:
+ *
+ * 0 octet
+ * +-----------------+
+ * | type |
+ * +-----------------+
+ * type - uint8_t, event code, XENKBD_TYPE_???
+ *
+ *
+ * Pointer relative movement event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MOTION | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | rel_x | 8
+ * +----------------+----------------+----------------+----------------+
+ * | rel_y | 12
+ * +----------------+----------------+----------------+----------------+
+ * | rel_z | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * rel_x - int32_t, relative X motion
+ * rel_y - int32_t, relative Y motion
+ * rel_z - int32_t, relative Z motion (wheel)
+ */
+
+struct xenkbd_motion
+{
+ uint8_t type;
+ int32_t rel_x;
+ int32_t rel_y;
+ int32_t rel_z;
+};
+
+/*
+ * Key event (includes pointer buttons)
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_KEY | pressed | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | keycode | 8
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * pressed - uint8_t, 1 if pressed; 0 otherwise
+ * keycode - uint32_t, KEY_* from linux/input.h
+ */
+
+struct xenkbd_key
+{
+ uint8_t type;
+ uint8_t pressed;
+ uint32_t keycode;
+};
+
+/*
+ * Pointer absolute position event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_POS | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 12
+ * +----------------+----------------+----------------+----------------+
+ * | rel_z | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position (in FB pixels)
+ * abs_y - int32_t, absolute Y position (in FB pixels)
+ * rel_z - int32_t, relative Z motion (wheel)
+ */
+
+struct xenkbd_position
+{
+ uint8_t type;
+ int32_t abs_x;
+ int32_t abs_y;
+ int32_t rel_z;
+};
+
+/*
+ * Multi-touch event and its sub-types
+ *
+ * All multi-touch event packets have a common header:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | event_type | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * event_type - uint8_t, multi-touch event sub-type, XENKBD_MT_EV_???
+ * contact_id - uint8_t, ID of the contact
+ *
+ * Touch interactions can consist of one or more contacts.
+ * For each contact, a series of events is generated, starting
+ * with a down event, followed by zero or more motion events,
+ * and ending with an up event. Events relating to the same
+ * contact point can be identified by the ID of the sequence: contact ID.
+ * Contact ID may be reused after XENKBD_MT_EV_UP event and
+ * is in the [0; XENKBD_FIELD_NUM_CONTACTS - 1] range.
+ *
+ * For further information please refer to documentation on Wayland [1],
+ * Linux [2] and Windows [3] multi-touch support.
+ *
+ * [1] https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml
+ * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt
+ * [3] https://msdn.microsoft.com/en-us/library/jj151564(v=vs.85).aspx
+ *
+ *
+ * Multi-touch down event - sent when a new touch is made: touch is assigned
+ * a unique contact ID, sent with this and subsequent events related
+ * to this touch.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_DOWN | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 12
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position, in pixels
+ * abs_y - int32_t, absolute Y position, in pixels
+ *
+ * Multi-touch contact release event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_UP | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Multi-touch motion event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_MOTION | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 12
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position, in pixels,
+ * abs_y - int32_t, absolute Y position, in pixels,
+ *
+ * Multi-touch input synchronization event - marks the end of a set of events
+ * which logically belong together.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_SYN | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Multi-touch shape event - the touch point has changed its shape.
+ * Shape is approximated by an ellipse through the major and minor axis
+ * lengths: major is the longer diameter of the ellipse and minor is the
+ * shorter one. Center of the ellipse is reported via
+ * XENKBD_MT_EV_DOWN/XENKBD_MT_EV_MOTION events.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_SHAPE | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | major | 12
+ * +----------------+----------------+----------------+----------------+
+ * | minor | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * major - uint32_t, length of the major axis, pixels
+ * minor - uint32_t, length of the minor axis, pixels
+ *
+ * Multi-touch orientation event - touch point's shape has changed
+ * its orientation: calculated as a clockwise angle between the major axis
+ * of the ellipse and the positive Y axis in degrees, [-180; +180].
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_ORIENT | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | orientation | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * orientation - int16_t, clockwise angle of the major axis
+ */
+
+struct xenkbd_mtouch {
+ uint8_t type; /* XENKBD_TYPE_MTOUCH */
+ uint8_t event_type; /* XENKBD_MT_EV_??? */
+ uint8_t contact_id;
+ uint8_t reserved[5]; /* reserved for the future use */
+ union {
+ struct {
+ int32_t abs_x; /* absolute X position, pixels */
+ int32_t abs_y; /* absolute Y position, pixels */
+ } pos;
+ struct {
+ uint32_t major; /* length of the major axis, pixels */
+ uint32_t minor; /* length of the minor axis, pixels */
+ } shape;
+ int16_t orientation; /* clockwise angle of the major axis */
+ } u;
+};
+
+#define XENKBD_IN_EVENT_SIZE 40
+
+union xenkbd_in_event
+{
+ uint8_t type;
+ struct xenkbd_motion motion;
+ struct xenkbd_key key;
+ struct xenkbd_position pos;
+ struct xenkbd_mtouch mtouch;
+ char pad[XENKBD_IN_EVENT_SIZE];
+};
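+
+/*
+ * Illustrative sketch, not part of this interface: dispatching a received
+ * in event on its type field, as a frontend might do while draining the
+ * ring. The helper name and guard macro are assumptions and the per-type
+ * handling is only indicated by comments.
+ */
+#ifdef XENKBD_EXAMPLE_DISPATCH
+static inline void xenkbd_example_dispatch(const union xenkbd_in_event *event)
+{
+    switch (event->type) {
+    case XENKBD_TYPE_MOTION:
+        /* event->motion.rel_x / rel_y / rel_z */
+        break;
+    case XENKBD_TYPE_KEY:
+        /* event->key.keycode (KEY_* value), event->key.pressed */
+        break;
+    case XENKBD_TYPE_POS:
+        /* event->pos.abs_x / abs_y / rel_z */
+        break;
+    case XENKBD_TYPE_MTOUCH:
+        /* event->mtouch.event_type selects the pos/shape/orientation payload */
+        break;
+    default:
+        /* Frontends must ignore unknown in events. */
+        break;
+    }
+}
+#endif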
+
+/*
+ *****************************************************************************
+ * Frontend to backend events
+ *****************************************************************************
+ *
+ * Out events may be sent only when requested by backend, and receipt
+ * of an unknown out event is an error.
+ * No out events currently defined.
+ *
+ * All event packets have the same length (40 octets).
+ * All event packets have a common header:
+ * 0 octet
+ * +-----------------+
+ * | type |
+ * +-----------------+
+ * type - uint8_t, event code
+ */
+
+#define XENKBD_OUT_EVENT_SIZE 40
+
+union xenkbd_out_event
+{
+ uint8_t type;
+ char pad[XENKBD_OUT_EVENT_SIZE];
+};
+
+/*
+ *****************************************************************************
+ * Shared page
+ *****************************************************************************
+ */
+
+#define XENKBD_IN_RING_SIZE 2048
+#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
+#define XENKBD_IN_RING_OFFS 1024
+#define XENKBD_IN_RING(page) \
+ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
+#define XENKBD_IN_RING_REF(page, idx) \
+ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
+
+#define XENKBD_OUT_RING_SIZE 1024
+#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
+#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
+#define XENKBD_OUT_RING(page) \
+ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
+#define XENKBD_OUT_RING_REF(page, idx) \
+ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
+
+struct xenkbd_page
+{
+ uint32_t in_cons, in_prod;
+ uint32_t out_cons, out_prod;
+};
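+
+/*
+ * Illustrative sketch, not part of this interface: draining the
+ * backend->frontend event ring using the macros above. The helper name
+ * and guard macro are assumptions; the required memory barriers and the
+ * event-channel notification are only indicated by comments.
+ */
+#ifdef XENKBD_EXAMPLE_RING_USAGE
+static inline void
+xenkbd_example_drain_in(struct xenkbd_page *page,
+                        void (*handle)(const union xenkbd_in_event *))
+{
+    uint32_t cons = page->in_cons;
+    uint32_t prod = page->in_prod;
+
+    /* A read barrier belongs here: read 'prod' before the event payloads. */
+    while (cons != prod) {
+        handle(&XENKBD_IN_RING_REF(page, cons));
+        cons++;
+    }
+    page->in_cons = cons;
+    /* A full barrier, then notify the backend so it can reuse the slots. */
+}
+#endif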
+
+#endif /* __XEN_PUBLIC_IO_KBDIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/netif.h b/include/hw/xen/interface/io/netif.h
new file mode 100644
index 0000000000..c13b85061d
--- /dev/null
+++ b/include/hw/xen/interface/io/netif.h
@@ -0,0 +1,1091 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * netif.h
+ *
+ * Unified network-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_NETIF_H__
+#define __XEN_PUBLIC_IO_NETIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Older implementations of the Xen network frontend / backend have an
+ * implicit dependency on MAX_SKB_FRAGS as the maximum number of
+ * ring slots an skb can use. Netfront / netback may not work as
+ * expected when frontend and backend have different MAX_SKB_FRAGS.
+ *
+ * A better approach is to add a mechanism for netfront / netback to
+ * negotiate this value. However, we cannot fix all possible
+ * frontends, so we need to define a value which states the minimum
+ * number of slots a backend must support.
+ *
+ * The minimum value derives from the older Linux kernel's MAX_SKB_FRAGS
+ * (18), which has proven to work with most frontends. Any new backend
+ * which doesn't negotiate with the frontend should expect the frontend to
+ * send a valid packet using slots up to this value.
+ */
+#define XEN_NETIF_NR_SLOTS_MIN 18
+
+/*
+ * Notifications after enqueuing any type of message should be conditional on
+ * the appropriate req_event or rsp_event field in the shared ring.
+ * If the client sends notification for rx requests then it should specify
+ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
+ * that it cannot safely queue packets (as it may not be kicked to send them).
+ */
+
+/*
+ * "feature-split-event-channels" is introduced to separate guest TX
+ * and RX notification. Backend either doesn't support this feature or
+ * advertises it via xenstore as 0 (disabled) or 1 (enabled).
+ *
+ * To make use of this feature, frontend should allocate two event
+ * channels for TX and RX, advertise them to backend as
+ * "event-channel-tx" and "event-channel-rx" respectively. If frontend
+ * doesn't want to use this feature, it just writes "event-channel"
+ * node as before.
+ */
+
+/*
+ * Multiple transmit and receive queues:
+ * If supported, the backend will write the key "multi-queue-max-queues" to
+ * the directory for that vif, and set its value to the maximum supported
+ * number of queues.
+ * Frontends that are aware of this feature and wish to use it can write the
+ * key "multi-queue-num-queues", set to the number they wish to use, which
+ * must be greater than zero, and no more than the value reported by the backend
+ * in "multi-queue-max-queues".
+ *
+ * Queues replicate the shared rings and event channels.
+ * "feature-split-event-channels" may optionally be used when using
+ * multiple queues, but is not mandatory.
+ *
+ * Each queue consists of one shared ring pair, i.e. there must be the same
+ * number of tx and rx rings.
+ *
+ * For frontends requesting just one queue, the usual event-channel and
+ * ring-ref keys are written as before, simplifying the backend processing
+ * to avoid distinguishing between a frontend that doesn't understand the
+ * multi-queue feature, and one that does, but requested only one queue.
+ *
+ * Frontends requesting two or more queues must not write the toplevel
+ * event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys,
+ * instead writing those keys under sub-keys having the name "queue-N" where
+ * N is the integer ID of the queue for which those keys belong. Queues
+ * are indexed from zero. For example, a frontend with two queues and split
+ * event channels must write the following set of queue-related keys:
+ *
+ * /local/domain/1/device/vif/0/multi-queue-num-queues = "2"
+ * /local/domain/1/device/vif/0/queue-0 = ""
+ * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
+ * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
+ * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>"
+ * /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>"
+ * /local/domain/1/device/vif/0/queue-1 = ""
+ * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
+ * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1>"
+ * /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
+ * /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>"
+ *
+ * If there is any inconsistency in the XenStore data, the backend may
+ * choose not to connect any queues, instead treating the request as an
+ * error. This includes scenarios where more (or fewer) queues were
+ * requested than the frontend provided details for.
+ *
+ * Mapping of packets to queues is considered to be a function of the
+ * transmitting system (backend or frontend) and is not negotiated
+ * between the two. Guests are free to transmit packets on any queue
+ * they choose, provided it has been set up correctly. Guests must be
+ * prepared to receive packets on any queue they have requested be set up.
+ */
+
+/*
+ * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
+ * offload off or on. If it is missing then the feature is assumed to be on.
+ * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
+ * offload on or off. If it is missing then the feature is assumed to be off.
+ */
+
+/*
+ * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
+ * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
+ * frontends nor backends are assumed to be capable unless the flags are
+ * present.
+ */
+
+/*
+ * "feature-multicast-control" and "feature-dynamic-multicast-control"
+ * advertise the capability to filter ethernet multicast packets in the
+ * backend. If the frontend wishes to take advantage of this feature then
+ * it may set "request-multicast-control". If the backend only advertises
+ * "feature-multicast-control" then "request-multicast-control" must be set
+ * before the frontend moves into the connected state. The backend will
+ * sample the value on this state transition and any subsequent change in
+ * value will have no effect. However, if the backend also advertises
+ * "feature-dynamic-multicast-control" then "request-multicast-control"
+ * may be set by the frontend at any time. In this case, the backend will
+ * watch the value and re-sample on watch events.
+ *
+ * If the sampled value of "request-multicast-control" is set then the
+ * backend transmit side should no longer flood multicast packets to the
+ * frontend, it should instead drop any multicast packet that does not
+ * match in a filter list.
+ * The list is amended by the frontend by sending dummy transmit requests
+ * containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as
+ * specified below.
+ * Note that the filter list may be amended even if the sampled value of
+ * "request-multicast-control" is not set, however the filter should only
+ * be applied if it is set.
+ */
+
+/*
+ * The setting of "trusted" node to "0" in the frontend path signals that the
+ * frontend should not trust the backend, and should deploy whatever measures
+ * available to protect from a malicious backend on the other end.
+ */
+
+/*
+ * Control ring
+ * ============
+ *
+ * Some features, such as hashing (detailed below), require a
+ * significant amount of out-of-band data to be passed from frontend to
+ * backend. Use of xenstore is not suitable for large quantities of data
+ * because of quota limitations and so a dedicated 'control ring' is used.
+ * The ability of the backend to use a control ring is advertised by
+ * setting:
+ *
+ * /local/domain/X/backend/vif/<domid>/<vif>/feature-ctrl-ring = "1"
+ *
+ * The frontend provides a control ring to the backend by setting:
+ *
+ * /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref>
+ * /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port>
+ *
+ * where <gref> is the grant reference of the shared page used to
+ * implement the control ring and <port> is an event channel to be used
+ * as a mailbox interrupt. These keys must be set before the frontend
+ * moves into the connected state.
+ *
+ * The control ring uses a fixed request/response message size and is
+ * balanced (i.e. one request to one response), so operationally it is much
+ * the same as a transmit or receive ring.
+ * Note that there is no requirement that responses are issued in the same
+ * order as requests.
+ */
+
+/*
+ * Link state
+ * ==========
+ *
+ * The backend can advertise its current link (carrier) state to the
+ * frontend using the /local/domain/X/backend/vif/<domid>/<vif>/carrier
+ * node. If this node is not present, then the frontend should assume that
+ * the link is up (for compatibility with backends that do not implement
+ * this feature). If this node is present, then a value of "0" should be
+ * interpreted by the frontend as the link being down (no carrier) and a
+ * value of "1" should be interpreted as the link being up (carrier
+ * present).
+ */
+
+/*
+ * MTU
+ * ===
+ *
+ * The toolstack may set a value of MTU for the frontend by setting the
+ * /local/domain/<domid>/device/vif/<vif>/mtu node with the MTU value in
+ * octets. If this node is absent the frontend should assume an MTU value
+ * of 1500 octets. A frontend is also at liberty to ignore this value, so
+ * the node is only suitable for informing the frontend that a packet
+ * payload of more than 1500 octets is permitted.
+ */
+
+/*
+ * Hash types
+ * ==========
+ *
+ * For the purposes of the definitions below, 'Packet[]' is an array of
+ * octets containing an IP packet without options, 'Array[X..Y]' means a
+ * sub-array of 'Array' containing bytes X thru Y inclusive, and '+' is
+ * used to indicate concatenation of arrays.
+ */
+
+/*
+ * A hash calculated over an IP version 4 header as follows:
+ *
+ * Buffer[0..7] = Packet[12..15] (source address) +
+ * Packet[16..19] (destination address)
+ *
+ * Result = Hash(Buffer, 8)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4)
+
+/*
+ * A hash calculated over an IP version 4 header and TCP header as
+ * follows:
+ *
+ * Buffer[0..11] = Packet[12..15] (source address) +
+ * Packet[16..19] (destination address) +
+ * Packet[20..21] (source port) +
+ * Packet[22..23] (destination port)
+ *
+ * Result = Hash(Buffer, 12)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
+
+/*
+ * A hash calculated over an IP version 6 header as follows:
+ *
+ * Buffer[0..31] = Packet[8..23] (source address) +
+ * Packet[24..39] (destination address)
+ *
+ * Result = Hash(Buffer, 32)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6)
+
+/*
+ * A hash calculated over an IP version 6 header and TCP header as
+ * follows:
+ *
+ * Buffer[0..35] = Packet[8..23] (source address) +
+ * Packet[24..39] (destination address) +
+ * Packet[40..41] (source port) +
+ * Packet[42..43] (destination port)
+ *
+ * Result = Hash(Buffer, 36)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
+
+/*
+ * Hash algorithms
+ * ===============
+ */
+
+#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0
+
+/*
+ * Toeplitz hash:
+ */
+
+#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
+
+/*
+ * This algorithm uses a 'key' as well as the data buffer itself.
+ * (Buffer[] and Key[] are treated as shift-registers where the MSB of
+ * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
+ * is the 'right-most').
+ *
+ * Value = 0
+ * For number of bits in Buffer[]
+ * If (left-most bit of Buffer[] is 1)
+ * Value ^= left-most 32 bits of Key[]
+ * Key[] << 1
+ * Buffer[] << 1
+ *
+ * The code below is provided for convenience where an operating system
+ * does not already provide an implementation.
+ */
+#ifdef XEN_NETIF_DEFINE_TOEPLITZ
+static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
+ unsigned int keylen,
+ const uint8_t *buf,
+ unsigned int buflen)
+{
+ unsigned int keyi, bufi;
+ uint64_t prefix = 0;
+ uint64_t hash = 0;
+
+ /* Pre-load prefix with the first 8 bytes of the key */
+ for (keyi = 0; keyi < 8; keyi++) {
+ prefix <<= 8;
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ }
+
+ for (bufi = 0; bufi < buflen; bufi++) {
+ uint8_t byte = buf[bufi];
+ unsigned int bit;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ prefix <<= 1;
+            byte <<= 1;
+ }
+
+ /*
+ * 'prefix' has now been left-shifted by 8, so
+ * OR in the next byte.
+ */
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ keyi++;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return hash >> 32;
+}
+#endif /* XEN_NETIF_DEFINE_TOEPLITZ */
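+
+/*
+ * Illustrative usage sketch (non-normative): with the convenience
+ * function above (available when XEN_NETIF_DEFINE_TOEPLITZ is defined),
+ * a hash of type XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP concatenates the
+ * addresses (Packet[12..19]) and ports (Packet[20..23]) into a 12 octet
+ * buffer and hashes that buffer. The names 'key', 'keylen' and 'pkt'
+ * below are hypothetical:
+ *
+ *     uint8_t buffer[12];
+ *     uint32_t hash;
+ *
+ *     memcpy(buffer, &pkt[12], 8);
+ *     memcpy(buffer + 8, &pkt[20], 4);
+ *     hash = xen_netif_toeplitz_hash(key, keylen, buffer, 12);
+ */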
+
+/*
+ * Control requests (struct xen_netif_ctrl_request)
+ * ================================================
+ *
+ * All requests have the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | type | data[0] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | data[1] | data[2] |
+ * +-----+-----+-----+-----+-----------------------+
+ *
+ * id: the request identifier, echoed in response.
+ * type: the type of request (see below)
+ * data[]: any data associated with the request (determined by type)
+ */
+
+struct xen_netif_ctrl_request {
+ uint16_t id;
+ uint16_t type;
+
+#define XEN_NETIF_CTRL_TYPE_INVALID 0
+#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS 1
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS 2
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY 3
+#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING 6
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM 7
+#define XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE 8
+#define XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING 9
+#define XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING 10
+
+ uint32_t data[3];
+};
+
+/*
+ * Control responses (struct xen_netif_ctrl_response)
+ * ==================================================
+ *
+ * All responses have the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | type | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | data |
+ * +-----+-----+-----+-----+
+ *
+ * id: the corresponding request identifier
+ * type: the type of the corresponding request
+ * status: the status of request processing
+ * data: any data associated with the response (determined by type and
+ * status)
+ */
+
+struct xen_netif_ctrl_response {
+ uint16_t id;
+ uint16_t type;
+ uint32_t status;
+
+#define XEN_NETIF_CTRL_STATUS_SUCCESS 0
+#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED 1
+#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2
+#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW 3
+
+ uint32_t data;
+};
+
+/*
+ * Static Grants (struct xen_netif_gref)
+ * =====================================
+ *
+ * A frontend may provide a fixed set of grant references to be mapped on
+ * the backend. Sending a message of type XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
+ * prior to their usage in the command ring allows these mappings to be
+ * created. The backend will maintain a fixed number of these mappings.
+ *
+ * XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE lets a frontend query how many
+ * of these mappings can be kept.
+ *
+ * Each entry in the XEN_NETIF_CTRL_TYPE_{ADD,DEL}_GREF_MAPPING input table has
+ * the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | grant ref | flags | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * grant ref: grant reference (IN)
+ * flags: flags describing the control operation (IN)
+ * status: XEN_NETIF_CTRL_STATUS_* (OUT)
+ *
+ * 'status' is an output parameter and does not need to be set to zero
+ * prior to its usage in the corresponding control messages.
+ */
+
+struct xen_netif_gref {
+ grant_ref_t ref;
+ uint16_t flags;
+
+#define _XEN_NETIF_CTRLF_GREF_readonly 0
+#define XEN_NETIF_CTRLF_GREF_readonly (1U<<_XEN_NETIF_CTRLF_GREF_readonly)
+
+ uint16_t status;
+};
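+
+/*
+ * Illustrative sketch (non-normative): a single read-only entry in an
+ * XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING input table could be filled in as
+ * below ('gref' is a hypothetical grant reference; the 'status' field is
+ * written by the backend and need not be initialised):
+ *
+ *     struct xen_netif_gref entry = {
+ *         .ref = gref,
+ *         .flags = XEN_NETIF_CTRLF_GREF_readonly,
+ *     };
+ */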
+
+/*
+ * Control messages
+ * ================
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
+ * --------------------------------------
+ *
+ * This is sent by the frontend to set the desired hash algorithm.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
+ * data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ *
+ * NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables
+ * hashing and the backend is free to choose how it steers packets
+ * to queues (which is the default behaviour).
+ *
+ * XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
+ * ----------------------------------
+ *
+ * This is sent by the frontend to query the types of hash supported by
+ * the backend.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
+ * data[0] = 0
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = supported hash types (if operation was successful)
+ *
+ * NOTE: A valid hash algorithm must be selected before this operation can
+ * succeed.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
+ * ----------------------------------
+ *
+ * This is sent by the frontend to set the types of hash that the backend
+ * should calculate. (See above for hash type definitions).
+ * Note that the 'maximal' type of hash should always be chosen. For
+ * example, if the frontend sets both IPV4 and IPV4_TCP hash types then
+ * the latter hash type should be calculated for any TCP packet and the
+ * former only calculated for non-TCP packets.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
+ * data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag
+ * value is invalid or
+ * unsupported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: A valid hash algorithm must be selected before this operation can
+ * succeed.
+ * Also, setting data[0] to zero disables hashing and the backend
+ * is free to choose how it steers packets to queues.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
+ * --------------------------------
+ *
+ * This is sent by the frontend to set the key of the hash if the algorithm
+ * requires it. (See hash algorithms above).
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
+ * data[0] = grant reference of page containing the key (assumed to
+ * start at beginning of grant)
+ * data[1] = size of key in octets
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid
+ * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger
+ * than the backend
+ * supports
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: Any key octets not specified are assumed to be zero (the key
+ * is assumed to be empty by default) and specifying a new key
+ * invalidates any previous key, hence specifying a key size of
+ * zero will clear the key (which ensures that the calculated hash
+ * will always be zero).
+ * The maximum size of key is algorithm and backend specific, but
+ * is also limited by the single grant reference.
+ * The grant reference may be read-only and must remain valid until
+ * the response has been processed.
+ *
+ * XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
+ * -----------------------------------------
+ *
+ * This is sent by the frontend to query the maximum size of mapping
+ * table supported by the backend. The size is specified in terms of
+ * table entries.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
+ * data[0] = 0
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = maximum number of entries allowed in the mapping table
+ * (if operation was successful) or zero if a mapping table is
+ * not supported (i.e. hash mapping is done only by modular
+ * arithmetic).
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * -----------------------------------------
+ *
+ * This is sent by the frontend to set the actual size of the mapping
+ * table to be used by the backend. The size is specified in terms of
+ * table entries.
+ * Any previous table is invalidated by this message and any new table
+ * is assumed to be zero filled.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * data[0] = number of entries in mapping table
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: Setting data[0] to 0 means that hash mapping should be done
+ * using modular arithmetic.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
+ * ------------------------------------
+ *
+ * This is sent by the frontend to set the content of the table mapping
+ * hash value to queue number. The backend should calculate the hash from
+ * the packet header, use it as an index into the table (modulo the size
+ * of the table) and then steer the packet to the queue number found at
+ * that index.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
+ * data[0] = grant reference of page containing the mapping (sub-)table
+ * (assumed to start at beginning of grant)
+ * data[1] = size of (sub-)table in entries
+ * data[2] = offset, in entries, of sub-table within overall table
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content
+ * is invalid
+ * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger
+ * than the backend
+ * supports
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: The overall table has the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | mapping[0] | mapping[1] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | . |
+ * | . |
+ * | . |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | mapping[N-2] | mapping[N-1] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ *       message and each mapping must specify a queue number between 0 and
+ * "multi-queue-num-queues" (see above).
+ * The backend may support a mapping table larger than can be
+ * mapped by a single grant reference. Thus sub-tables within a
+ * larger table can be individually set by sending multiple messages
+ * with differing offset values. Specifying a new sub-table does not
+ * invalidate any table data outside that range.
+ * The grant reference may be read-only and must remain valid until
+ * the response has been processed.
+ *
+ * XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE
+ * -----------------------------------------
+ *
+ * This is sent by the frontend to fetch the number of grefs that can be kept
+ * mapped in the backend.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE
+ * data[0] = queue index (assumed 0 for single queue)
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The queue index is
+ * out of range
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = maximum number of entries allowed in the gref mapping table
+ * (if operation was successful) or zero if it is not supported.
+ *
+ * XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
+ * ------------------------------------
+ *
+ * This is sent by the frontend to request that the backend map a list of
+ * grant references.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
+ * data[0] = queue index
+ * data[1] = grant reference of page containing the mapping list
+ * (r/w and assumed to start at beginning of page)
+ * data[2] = size of list in entries
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Operation failed
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ *
+ * NOTE: Each entry in the input table has the format outlined
+ * in struct xen_netif_gref.
+ * Contrary to XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING, the struct
+ * xen_netif_gref 'status' field is not used and therefore the response
+ * 'status' determines the success of this operation. In case of
+ *       failure none of the grant mappings are added in the backend.
+ *
+ * XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING
+ * ------------------------------------
+ *
+ * This is sent by the frontend to request that the backend unmap a list
+ * of grant references.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING
+ * data[0] = queue index
+ * data[1] = grant reference of page containing the mapping list
+ * (r/w and assumed to start at beginning of page)
+ * data[2] = size of list in entries
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Operation failed
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = number of entries that were unmapped
+ *
+ * NOTE: Each entry in the input table has the format outlined in struct
+ * xen_netif_gref.
+ * The struct xen_netif_gref 'status' field determines if the entry
+ * was successfully removed.
+ * The entries used are only the ones representing grant references that
+ * were previously the subject of a XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
+ * operation. Any other entries will have their status set to
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER upon completion.
+ */
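+
+/*
+ * Illustrative sketch (non-normative): a frontend might populate a
+ * control ring request selecting the Toeplitz algorithm, as described
+ * above, as shown below. Submission onto the ring itself uses the
+ * generic macros from ring.h and is not shown; 'req' is a hypothetical
+ * local variable:
+ *
+ *     struct xen_netif_ctrl_request req = {
+ *         .id = 0,
+ *         .type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM,
+ *         .data = { XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ, 0, 0 },
+ *     };
+ */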
+
+DEFINE_RING_TYPES(xen_netif_ctrl,
+ struct xen_netif_ctrl_request,
+ struct xen_netif_ctrl_response);
+
+/*
+ * Guest transmit
+ * ==============
+ *
+ * This is the 'wire' format for transmit (frontend -> backend) packets:
+ *
+ * Fragment 1: netif_tx_request_t - flags = NETTXF_*
+ * size = total packet size
+ * [Extra 1: netif_extra_info_t] - (only if fragment 1 flags include
+ * NETTXF_extra_info)
+ * ...
+ * [Extra N: netif_extra_info_t] - (only if extra N-1 flags include
+ * XEN_NETIF_EXTRA_MORE)
+ * ...
+ * Fragment N: netif_tx_request_t - (only if fragment N-1 flags include
+ * NETTXF_more_data - flags on preceding
+ * extras are not relevant here)
+ * flags = 0
+ * size = fragment size
+ *
+ * NOTE:
+ *
+ * This format is slightly different from that used for receive
+ * (backend -> frontend) packets. Specifically, in a multi-fragment
+ * packet the actual size of fragment 1 can only be determined by
+ * subtracting the sizes of fragments 2..N from the total packet size.
+ *
+ * Ring slot size is 12 octets; however, not all request/response
+ * structs use the full size.
+ *
+ * tx request data (netif_tx_request_t)
+ * ------------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | grant ref | offset | flags |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | size |
+ * +-----+-----+-----+-----+
+ *
+ * grant ref: Reference to buffer page.
+ * offset: Offset within buffer page.
+ * flags: NETTXF_*.
+ * id: request identifier, echoed in response.
+ * size: packet size in bytes.
+ *
+ * tx response (netif_tx_response_t)
+ * ---------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | status | unused |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | unused |
+ * +-----+-----+-----+-----+
+ *
+ * id: reflects id in transmit request
+ * status: NETIF_RSP_*
+ *
+ * Guest receive
+ * =============
+ *
+ * This is the 'wire' format for receive (backend -> frontend) packets:
+ *
+ * Fragment 1: netif_rx_request_t - flags = NETRXF_*
+ * size = fragment size
+ * [Extra 1: netif_extra_info_t] - (only if fragment 1 flags include
+ * NETRXF_extra_info)
+ * ...
+ * [Extra N: netif_extra_info_t] - (only if extra N-1 flags include
+ * XEN_NETIF_EXTRA_MORE)
+ * ...
+ * Fragment N: netif_rx_request_t - (only if fragment N-1 flags include
+ * NETRXF_more_data - flags on preceding
+ * extras are not relevant here)
+ * flags = 0
+ * size = fragment size
+ *
+ * NOTE:
+ *
+ * This format is slightly different from that used for transmit
+ * (frontend -> backend) packets. Specifically, in a multi-fragment
+ * packet the size of the packet can only be determined by summing the
+ * sizes of fragments 1..N.
+ *
+ * Ring slot size is 8 octets.
+ *
+ * rx request (netif_rx_request_t)
+ * -------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | pad | gref |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: request identifier, echoed in response.
+ * gref: reference to incoming granted frame.
+ *
+ * rx response (netif_rx_response_t)
+ * ---------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | offset | flags | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: reflects id in receive request
+ * offset: offset in page of start of received packet
+ * flags: NETRXF_*
+ * status: -ve: NETIF_RSP_*; +ve: Rx'ed pkt size.
+ *
+ * NOTE: Historically, to support GSO on the frontend receive side, Linux
+ * netfront does not make use of the rx response id (because, as
+ * described below, extra info structures overlay the id field).
+ * Instead it assumes that responses always appear in the same ring
+ * slot as their corresponding request. Thus, to maintain
+ * compatibility, backends must make sure this is the case.
+ *
+ * Extra Info
+ * ==========
+ *
+ * Can be present if initial request or response has NET{T,R}XF_extra_info,
+ * or previous extra request has XEN_NETIF_EXTRA_MORE.
+ *
+ * The struct needs to fit into either a tx or rx slot and is therefore
+ * limited to 8 octets.
+ *
+ * NOTE: Because extra info data overlays the usual request/response
+ * structures, there is no id information in the opposite direction.
+ * So, if an extra info overlays an rx response the frontend can
+ * assume that it is in the same ring slot as the request that was
+ * consumed to make the slot available, and the backend must ensure
+ * this assumption is true.
+ *
+ * extra info (netif_extra_info_t)
+ * -------------------------------
+ *
+ * General format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| type specific data |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | padding for tx |
+ * +-----+-----+-----+-----+
+ *
+ * type: XEN_NETIF_EXTRA_TYPE_*
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ *  padding for tx: present only in the tx case, padding the structure
+ *                  up to the 12 octet tx slot size (the 8 octet limit
+ *                  comes from the rx case). Not shown in type specific
+ *                  entries below.
+ *
+ * XEN_NETIF_EXTRA_TYPE_GSO:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| size |type | pad | features |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_GSO
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * size: Maximum payload size of each segment. For example,
+ * for TCP this is just the path MSS.
+ * type: XEN_NETIF_GSO_TYPE_*: This determines the protocol of
+ * the packet and any extra features required to segment the
+ * packet properly.
+ *  features: XEN_NETIF_GSO_FEAT_*: This specifies any extra GSO
+ * features required to process this packet, such as ECN
+ * support for TCPv4.
+ *
+ * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| addr |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * addr: address to add/remove
+ *
+ * XEN_NETIF_EXTRA_TYPE_HASH:
+ *
+ *  A backend that supports Toeplitz hashing is assumed to accept
+ * this type of extra info in transmit packets.
+ * A frontend that enables hashing is assumed to accept
+ * this type of extra info in receive packets.
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags|htype| alg |LSB ---- value ---- MSB|
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_HASH
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * htype: Hash type (one of _XEN_NETIF_CTRL_HASH_TYPE_* - see above)
+ * alg: The algorithm used to calculate the hash (one of
+ *       XEN_NETIF_CTRL_HASH_ALGORITHM_* - see above)
+ * value: Hash value
+ */
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETTXF_csum_blank (0)
+#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETTXF_data_validated (1)
+#define NETTXF_data_validated (1U<<_NETTXF_data_validated)
+
+/* Packet continues in the next request descriptor. */
+#define _NETTXF_more_data (2)
+#define NETTXF_more_data (1U<<_NETTXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETTXF_extra_info (3)
+#define NETTXF_extra_info (1U<<_NETTXF_extra_info)
+
+#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
+struct netif_tx_request {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t flags;
+ uint16_t id;
+ uint16_t size;
+};
+typedef struct netif_tx_request netif_tx_request_t;
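+
+/*
+ * Illustrative sketch (non-normative): as described in the "Guest
+ * transmit" section above, a packet split across two fragments places
+ * the total packet size in fragment 1 and the fragment size in fragment
+ * 2. Assuming hypothetical grant references 'gref1' and 'gref2' and a
+ * 2000 octet packet split 1000/1000, the two requests could look like:
+ *
+ *     netif_tx_request_t frag1 = {
+ *         .gref = gref1, .offset = 0, .flags = NETTXF_more_data,
+ *         .id = 1, .size = 2000,
+ *     };
+ *     netif_tx_request_t frag2 = {
+ *         .gref = gref2, .offset = 0, .flags = 0,
+ *         .id = 2, .size = 1000,
+ *     };
+ */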
+
+/* Types of netif_extra_info descriptors. */
+#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
+#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
+#define XEN_NETIF_EXTRA_TYPE_MAX (5)
+
+/* netif_extra_info_t flags. */
+#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
+#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
+
+/* GSO types */
+#define XEN_NETIF_GSO_TYPE_NONE (0)
+#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
+#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
+
+/*
+ * This structure needs to fit within both netif_tx_request_t and
+ * netif_rx_response_t for compatibility.
+ */
+struct netif_extra_info {
+ uint8_t type;
+ uint8_t flags;
+ union {
+ struct {
+ uint16_t size;
+ uint8_t type;
+ uint8_t pad;
+ uint16_t features;
+ } gso;
+ struct {
+ uint8_t addr[6];
+ } mcast;
+ struct {
+ uint8_t type;
+ uint8_t algorithm;
+ uint8_t value[4];
+ } hash;
+ uint16_t pad[3];
+ } u;
+};
+typedef struct netif_extra_info netif_extra_info_t;
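+
+/*
+ * Illustrative sketch (non-normative): amending the multicast filter
+ * list described earlier is done with a dummy transmit request whose
+ * NETTXF_extra_info flag is set, followed by an extra-info slot of type
+ * XEN_NETIF_EXTRA_TYPE_MCAST_ADD. Assuming a hypothetical 6 octet
+ * multicast address 'mac[]', that extra-info slot could be filled as:
+ *
+ *     netif_extra_info_t extra = {
+ *         .type = XEN_NETIF_EXTRA_TYPE_MCAST_ADD,
+ *         .flags = 0,
+ *     };
+ *
+ *     memcpy(extra.u.mcast.addr, mac, 6);
+ */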
+
+struct netif_tx_response {
+ uint16_t id;
+ int16_t status;
+};
+typedef struct netif_tx_response netif_tx_response_t;
+
+struct netif_rx_request {
+ uint16_t id; /* Echoed in response message. */
+ uint16_t pad;
+ grant_ref_t gref;
+};
+typedef struct netif_rx_request netif_rx_request_t;
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETRXF_data_validated (0)
+#define NETRXF_data_validated (1U<<_NETRXF_data_validated)
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETRXF_csum_blank (1)
+#define NETRXF_csum_blank (1U<<_NETRXF_csum_blank)
+
+/* Packet continues in the next request descriptor. */
+#define _NETRXF_more_data (2)
+#define NETRXF_more_data (1U<<_NETRXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETRXF_extra_info (3)
+#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
+
+/* Packet has GSO prefix. Deprecated but included for compatibility */
+#define _NETRXF_gso_prefix (4)
+#define NETRXF_gso_prefix (1U<<_NETRXF_gso_prefix)
+
+struct netif_rx_response {
+ uint16_t id;
+ uint16_t offset;
+ uint16_t flags;
+ int16_t status;
+};
+typedef struct netif_rx_response netif_rx_response_t;
+
+/*
+ * Generate netif ring structures and types.
+ */
+
+DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
+DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
+
+#define NETIF_RSP_DROPPED -2
+#define NETIF_RSP_ERROR -1
+#define NETIF_RSP_OKAY 0
+/* No response: used for auxiliary requests (e.g., netif_extra_info_t). */
+#define NETIF_RSP_NULL 1
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/protocols.h b/include/hw/xen/interface/io/protocols.h
new file mode 100644
index 0000000000..7815e1ff0f
--- /dev/null
+++ b/include/hw/xen/interface/io/protocols.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * protocols.h
+ *
+ * Copyright (c) 2008, Keir Fraser
+ */
+
+#ifndef __XEN_PROTOCOLS_H__
+#define __XEN_PROTOCOLS_H__
+
+#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
+#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
+#define XEN_IO_PROTO_ABI_ARM "arm-abi"
+
+#if defined(__i386__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
+#elif defined(__x86_64__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
+#elif defined(__arm__) || defined(__aarch64__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
+#else
+# error arch fixup needed here
+#endif
+
+#endif
diff --git a/include/hw/xen/io/ring.h b/include/hw/xen/interface/io/ring.h
index 1adacf09f9..025939278b 100644
--- a/include/hw/xen/io/ring.h
+++ b/include/hw/xen/interface/io/ring.h
@@ -1,25 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* ring.h
- *
- * Shared producer-consumer ring macros.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * Shared producer-consumer ring macros.
*
* Tim Deegan and Andrew Warfield November 2004.
*/
@@ -42,6 +25,8 @@
* and grant_table.h from the Xen public headers.
*/
+#include "../xen-compat.h"
+
#if __XEN_INTERFACE_VERSION__ < 0x00030208
#define xen_mb() mb()
#define xen_rmb() rmb()
@@ -60,12 +45,12 @@ typedef unsigned int RING_IDX;
/*
* Calculate size of a shared ring, given the total available space for the
* ring and indexes (_sz), and the name tag of the request/response structure.
- * A ring contains as many entries as will fit, rounded down to the nearest
+ * A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
#define __CONST_RING_SIZE(_s, _sz) \
(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
- sizeof_field(struct _s##_sring, ring[0])))
+ sizeof(((struct _s##_sring *)0)->ring[0])))
/*
* The same for passing in an actual pointer instead of a name tag.
*/
@@ -74,7 +59,7 @@ typedef unsigned int RING_IDX;
/*
* Macros to make the correct C datatypes for a new kind of ring.
- *
+ *
* To make a new ring datatype, you need to have two message structures,
* let's say request_t, and response_t already defined.
*
@@ -84,7 +69,7 @@ typedef unsigned int RING_IDX;
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
- *
+ *
* mytag_sring_t - The shared ring.
* mytag_front_ring_t - The 'front' half of the ring.
* mytag_back_ring_t - The 'back' half of the ring.
@@ -93,9 +78,8 @@ typedef unsigned int RING_IDX;
* of the shared memory area (PAGE_SIZE, for instance). To initialise
* the front half:
*
- * mytag_front_ring_t front_ring;
- * SHARED_RING_INIT((mytag_sring_t *)shared_page);
- * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ * mytag_front_ring_t ring;
+ * XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*
* Initializing the back follows similarly (note that only the front
* initializes the shared ring):
@@ -152,15 +136,15 @@ typedef struct __name##_back_ring __name##_back_ring_t
/*
* Macros for manipulating rings.
- *
- * FRONT_RING_whatever works on the "front end" of a ring: here
+ *
+ * FRONT_RING_whatever works on the "front end" of a ring: here
* requests are pushed on to the ring and responses taken off it.
- *
- * BACK_RING_whatever works on the "back end" of a ring: here
+ *
+ * BACK_RING_whatever works on the "back end" of a ring: here
* requests are taken off the ring and responses put on.
- *
- * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
- * This is OK in 1-for-1 request-response situations where the
+ *
+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
+ * This is OK in 1-for-1 request-response situations where the
* requestor (front end) never has more than RING_SIZE()-1
* outstanding requests.
*/
@@ -173,20 +157,29 @@ typedef struct __name##_back_ring __name##_back_ring_t
(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
-#define FRONT_RING_INIT(_r, _s, __size) do { \
- (_r)->req_prod_pvt = 0; \
- (_r)->rsp_cons = 0; \
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->req_prod_pvt = (_i); \
+ (_r)->rsp_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
-#define BACK_RING_INIT(_r, _s, __size) do { \
- (_r)->rsp_prod_pvt = 0; \
- (_r)->req_cons = 0; \
+#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
+
+#define XEN_FRONT_RING_INIT(r, s, size) do { \
+ SHARED_RING_INIT(s); \
+ FRONT_RING_INIT(r, s, size); \
+} while (0)
+
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->rsp_prod_pvt = (_i); \
+ (_r)->req_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
+#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
+
/* How big is this ring? */
#define RING_SIZE(_r) \
((_r)->nr_ents)
@@ -202,11 +195,11 @@ typedef struct __name##_back_ring __name##_back_ring_t
(RING_FREE_REQUESTS(_r) == 0)
/* Test if there are outstanding messages to be processed on a ring. */
-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+#define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \
((_r)->sring->rsp_prod - (_r)->rsp_cons)
#ifdef __GNUC__
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
+#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \
unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
unsigned int rsp = RING_SIZE(_r) - \
((_r)->req_cons - (_r)->rsp_prod_pvt); \
@@ -214,33 +207,50 @@ typedef struct __name##_back_ring __name##_back_ring_t
})
#else
/* Same as above, but without the nice GCC ({ ... }) syntax. */
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) \
((((_r)->sring->req_prod - (_r)->req_cons) < \
(RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
((_r)->sring->req_prod - (_r)->req_cons) : \
(RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
#endif
+#ifdef XEN_RING_HAS_UNCONSUMED_IS_BOOL
+/*
+ * These variants should only be used in case no caller is abusing them for
+ * obtaining the number of unconsumed responses/requests.
+ */
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r))
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r))
+#else
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) XEN_RING_NR_UNCONSUMED_RESPONSES(_r)
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) XEN_RING_NR_UNCONSUMED_REQUESTS(_r)
+#endif
+
/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
/*
- * Get a local copy of a request.
+ * Get a local copy of a request/response.
*
- * Use this in preference to RING_GET_REQUEST() so all processing is
+ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
* done on a local copy that cannot be modified by the other end.
*
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
- * to be ineffective where _req is a struct which consists of only bitfields.
+ * to be ineffective where dest is a struct which consists of only bitfields.
*/
-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
- /* Use volatile to force the copy into _req. */ \
- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+#define RING_COPY_(type, r, idx, dest) do { \
+ /* Use volatile to force the copy into dest. */ \
+ *(dest) = *(volatile __typeof__(dest))RING_GET_##type(r, idx); \
} while (0)
-#define RING_GET_RESPONSE(_r, _idx) \
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
+#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
@@ -250,6 +260,10 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+/* Ill-behaved backend determination: Can there be this many responses? */
+#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
+
#define RING_PUSH_REQUESTS(_r) do { \
xen_wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
@@ -262,26 +276,26 @@ typedef struct __name##_back_ring __name##_back_ring_t
/*
* Notification hold-off (req_event and rsp_event):
- *
+ *
* When queueing requests or responses on a shared ring, it may not always be
* necessary to notify the remote end. For example, if requests are in flight
* in a backend, the front may be able to queue further requests without
* notifying the back (if the back checks for new requests when it queues
* responses).
- *
+ *
* When enqueuing requests or responses:
- *
+ *
* Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
* is a boolean return value. True indicates that the receiver requires an
* asynchronous notification.
- *
+ *
* After dequeuing requests or responses (before sleeping the connection):
- *
+ *
* Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
* The second argument is a boolean return value. True indicates that there
* are pending messages on the ring (i.e., the connection should not be put
* to sleep).
- *
+ *
* These macros will set the req_event/rsp_event field to trigger a
* notification on the very next message that is enqueued. If you want to
* create batches of work (i.e., only receive a notification after several
diff --git a/include/hw/xen/interface/io/usbif.h b/include/hw/xen/interface/io/usbif.h
new file mode 100644
index 0000000000..875af0dc7c
--- /dev/null
+++ b/include/hw/xen/interface/io/usbif.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * usbif.h
+ *
+ * USB I/O interface for Xen guest OSes.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_USBIF_H__
+#define __XEN_PUBLIC_IO_USBIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Detailed Interface Description
+ * ==============================
+ * The pvUSB interface is using a split driver design: a frontend driver in
+ * the guest and a backend driver in a driver domain (normally dom0) having
+ * access to the physical USB device(s) being passed to the guest.
+ *
+ * The frontend and backend drivers use XenStore to initiate the connection
+ * between them, the I/O activity is handled via two shared ring pages and an
+ * event channel. As the interface between frontend and backend is at the USB
+ * host connector level, multiple (up to 31) physical USB devices can be
+ * handled by a single connection.
+ *
+ * The Xen pvUSB device name is "qusb", so the frontend's XenStore entries are
+ * to be found under "device/qusb", while the backend's XenStore entries are
+ * under "backend/<guest-dom-id>/qusb".
+ *
+ * When a new pvUSB connection is established, the frontend needs to setup the
+ * two shared ring pages for communication and the event channel. The ring
+ * pages need to be made available to the backend via the grant table
+ * interface.
+ *
+ * One of the shared ring pages is used by the backend to inform the frontend
+ * about USB device plug events (device to be added or removed). This is the
+ * "conn-ring".
+ *
+ * The other ring page is used for USB I/O communication (requests and
+ * responses). This is the "urb-ring".
+ *
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvUSB driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * num-ports
+ * Values: unsigned [1...31]
+ *
+ * Number of ports for this (virtual) USB host connector.
+ *
+ * usb-ver
+ * Values: unsigned [1...2]
+ *
+ * USB version of this host connector: 1 = USB 1.1, 2 = USB 2.0.
+ *
+ * port/[1...31]
+ * Values: string
+ *
+ * Physical USB device connected to the given port, e.g. "3-1.5".
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * urb-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for urb requests.
+ *
+ * conn-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for connection/disconnection requests.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ *
+ * Protocol Description
+ * ====================
+ *
+ *-------------------------- USB device plug events --------------------------
+ *
+ * USB device plug events are sent via the "conn-ring" shared page. As only
+ * events are being sent, the respective requests from the frontend to the
+ * backend are just dummy ones.
+ * The events sent to the frontend have the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | portnum | speed | 4
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, event id (taken from the actual frontend dummy request)
+ * portnum - uint8_t, port number (1 ... 31)
+ * speed - uint8_t, device USBIF_SPEED_*, USBIF_SPEED_NONE == unplug
+ *
+ * The dummy request:
+ * 0 1 octet
+ * +----------------+----------------+
+ * | id | 2
+ * +----------------+----------------+
+ * id - uint16_t, guest supplied value (no need for being unique)
+ *
+ *-------------------------- USB I/O request ---------------------------------
+ *
+ * A single USB I/O request on the "urb-ring" has the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | nr_buffer_segs | 4
+ * +----------------+----------------+----------------+----------------+
+ * | pipe | 8
+ * +----------------+----------------+----------------+----------------+
+ * | transfer_flags | buffer_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | request type specific | 16
+ * | data | 20
+ * +----------------+----------------+----------------+----------------+
+ * | seg[0] | 24
+ * | data | 28
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | seg[USBIF_MAX_SEGMENTS_PER_REQUEST - 1] | 144
+ * | data | 148
+ * +----------------+----------------+----------------+----------------+
+ * Bit field bit number 0 is always least significant bit, undefined bits must
+ * be zero.
+ * id - uint16_t, guest supplied value
+ * nr_buffer_segs - uint16_t, number of segment entries in seg[] array
+ * pipe - uint32_t, bit field with multiple information:
+ * bits 0-4: port request to send to
+ * bit 5: unlink request with specified id (cancel I/O) if set (see below)
+ * bit 7: direction (1 = read from device)
+ * bits 8-14: device number on port
+ * bits 15-18: endpoint of device
+ * bits 30-31: request type: 00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk
+ * transfer_flags - uint16_t, bit field with processing flags:
+ * bit 0: less data than specified allowed
+ * buffer_length - uint16_t, total length of data
+ * request type specific data - 8 bytes, see below
+ * seg[] - array with 8 byte elements, see below
+ *
+ * Request type specific data for isochronous request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | number_of_packets | nr_frame_desc_segs | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time interval in msecs between frames
+ * start_frame - uint16_t, start frame number
+ * number_of_packets - uint16_t, number of packets to transfer
+ * nr_frame_desc_segs - uint16_t, number of seg[] frame descriptor elements
+ *
+ * Request type specific data for interrupt request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time in msecs until interruption
+ *
+ * Request type specific data for control request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | data of setup packet | 4
+ * | | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for bulk request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 4
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for unlink request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | unlink_id | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * unlink_id - uint16_t, request id of request to terminate
+ *
+ * seg[] array element layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref | 4
+ * +----------------+----------------+----------------+----------------+
+ * | offset | length | 8
+ * +----------------+----------------+----------------+----------------+
+ * gref - uint32_t, grant reference of buffer page
+ * offset - uint16_t, offset of buffer start in page
+ * length - uint16_t, length of buffer in page
+ *
+ *-------------------------- USB I/O response --------------------------------
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | actual_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | error_count | 16
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, id of the request this response belongs to
+ * start_frame - uint16_t, start frame of this response (iso requests only)
+ * status - int32_t, USBIF_STATUS_* (non-iso requests)
+ * actual_length - uint32_t, actual size of data transferred
+ * error_count - uint32_t, number of errors (iso requests)
+ */
+
+enum usb_spec_version {
+ USB_VER_UNKNOWN = 0,
+ USB_VER_USB11,
+ USB_VER_USB20,
+ USB_VER_USB30, /* not supported yet */
+};
+
+/*
+ * USB pipe in usbif_request
+ *
+ * - port number: bits 0-4
+ * (USB_MAXCHILDREN is 31)
+ *
+ * - operation flag: bit 5
+ * (0 = submit urb,
+ * 1 = unlink urb)
+ *
+ * - direction: bit 7
+ * (0 = Host-to-Device [Out]
+ * 1 = Device-to-Host [In])
+ *
+ * - device address: bits 8-14
+ *
+ * - endpoint: bits 15-18
+ *
+ * - pipe type: bits 30-31
+ * (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
+ */
+
+#define USBIF_PIPE_PORT_MASK 0x0000001f
+#define USBIF_PIPE_UNLINK 0x00000020
+#define USBIF_PIPE_DIR 0x00000080
+#define USBIF_PIPE_DEV_MASK 0x0000007f
+#define USBIF_PIPE_DEV_SHIFT 8
+#define USBIF_PIPE_EP_MASK 0x0000000f
+#define USBIF_PIPE_EP_SHIFT 15
+#define USBIF_PIPE_TYPE_MASK 0x00000003
+#define USBIF_PIPE_TYPE_SHIFT 30
+#define USBIF_PIPE_TYPE_ISOC 0
+#define USBIF_PIPE_TYPE_INT 1
+#define USBIF_PIPE_TYPE_CTRL 2
+#define USBIF_PIPE_TYPE_BULK 3
+
+#define usbif_pipeportnum(pipe) ((pipe) & USBIF_PIPE_PORT_MASK)
+#define usbif_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
+
+#define usbif_pipeunlink(pipe) ((pipe) & USBIF_PIPE_UNLINK)
+#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
+#define usbif_setunlink_pipe(pipe) ((pipe) | USBIF_PIPE_UNLINK)
+
+#define usbif_pipein(pipe) ((pipe) & USBIF_PIPE_DIR)
+#define usbif_pipeout(pipe) (!usbif_pipein(pipe))
+
+#define usbif_pipedevice(pipe) \
+ (((pipe) >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK)
+
+#define usbif_pipeendpoint(pipe) \
+ (((pipe) >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK)
+
+#define usbif_pipetype(pipe) \
+ (((pipe) >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK)
+#define usbif_pipeisoc(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_ISOC)
+#define usbif_pipeint(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_INT)
+#define usbif_pipectrl(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_CTRL)
+#define usbif_pipebulk(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_BULK)
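+
+/*
+ * Illustrative sketch (non-normative): using the constants above, a
+ * 'pipe' value for a bulk device-to-host (IN) transfer to endpoint 2 of
+ * device address 3 on port 1 could be composed as follows ('pipe' is a
+ * hypothetical variable):
+ *
+ *     uint32_t pipe = usbif_setportnum_pipe(0, 1);
+ *
+ *     pipe |= USBIF_PIPE_DIR;
+ *     pipe |= 3 << USBIF_PIPE_DEV_SHIFT;
+ *     pipe |= 2 << USBIF_PIPE_EP_SHIFT;
+ *     pipe |= (uint32_t)USBIF_PIPE_TYPE_BULK << USBIF_PIPE_TYPE_SHIFT;
+ */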
+
+#define USBIF_MAX_SEGMENTS_PER_REQUEST (16)
+#define USBIF_MAX_PORTNR 31
+#define USBIF_RING_SIZE 4096
+
+/*
+ * RING for transferring urbs.
+ */
+struct usbif_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+struct usbif_urb_request {
+ uint16_t id; /* request id */
+ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+
+ /* basic urb parameter */
+ uint32_t pipe;
+ uint16_t transfer_flags;
+#define USBIF_SHORT_NOT_OK 0x0001
+ uint16_t buffer_length;
+ union {
+ uint8_t ctrl[8]; /* setup_packet (Ctrl) */
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t start_frame; /* start frame */
+ uint16_t number_of_packets; /* number of ISO packet */
+ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */
+ } isoc;
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t pad[3];
+ } intr;
+
+ struct {
+ uint16_t unlink_id; /* unlink request id */
+ uint16_t pad[3];
+ } unlink;
+
+ } u;
+
+ /* urb data segments */
+ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST];
+};
+typedef struct usbif_urb_request usbif_urb_request_t;
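+
+/*
+ * Illustrative sketch (non-normative): a bulk transfer of up to 512
+ * octets using a single granted buffer page could be described as below
+ * ('pipe' composed as in the earlier sketch, 'gref' a hypothetical grant
+ * reference of the data page; the bulk request type specific data stays
+ * zero):
+ *
+ *     struct usbif_urb_request req = {
+ *         .id = 1,
+ *         .nr_buffer_segs = 1,
+ *         .pipe = pipe,
+ *         .transfer_flags = 0,
+ *         .buffer_length = 512,
+ *     };
+ *
+ *     req.seg[0].gref = gref;
+ *     req.seg[0].offset = 0;
+ *     req.seg[0].length = 512;
+ */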
+
+struct usbif_urb_response {
+ uint16_t id; /* request id */
+ uint16_t start_frame; /* start frame (ISO) */
+ int32_t status; /* status (non-ISO) */
+#define USBIF_STATUS_OK 0
+#define USBIF_STATUS_NODEV (-19)
+#define USBIF_STATUS_INVAL (-22)
+#define USBIF_STATUS_STALL (-32)
+#define USBIF_STATUS_IOERROR (-71)
+#define USBIF_STATUS_BABBLE (-75)
+#define USBIF_STATUS_SHUTDOWN (-108)
+ int32_t actual_length; /* actual transfer length */
+ int32_t error_count; /* number of ISO errors */
+};
+typedef struct usbif_urb_response usbif_urb_response_t;
+
+DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response);
+#define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, USBIF_RING_SIZE)
+
+/*
+ * RING for notifying connect/disconnect events to frontend
+ */
+struct usbif_conn_request {
+ uint16_t id;
+};
+typedef struct usbif_conn_request usbif_conn_request_t;
+
+struct usbif_conn_response {
+ uint16_t id; /* request id */
+ uint8_t portnum; /* port number */
+ uint8_t speed; /* usb_device_speed */
+#define USBIF_SPEED_NONE 0
+#define USBIF_SPEED_LOW 1
+#define USBIF_SPEED_FULL 2
+#define USBIF_SPEED_HIGH 3
+};
+typedef struct usbif_conn_response usbif_conn_response_t;
+
+DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, struct usbif_conn_response);
+#define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, USBIF_RING_SIZE)
+
+#endif /* __XEN_PUBLIC_IO_USBIF_H__ */
diff --git a/include/hw/xen/interface/io/xenbus.h b/include/hw/xen/interface/io/xenbus.h
new file mode 100644
index 0000000000..9cd0cd7c67
--- /dev/null
+++ b/include/hw/xen/interface/io/xenbus.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: MIT */
+/*****************************************************************************
+ * xenbus.h
+ *
+ * Xenbus protocol details.
+ *
+ * Copyright (C) 2005 XenSource Ltd.
+ */
+
+#ifndef _XEN_PUBLIC_IO_XENBUS_H
+#define _XEN_PUBLIC_IO_XENBUS_H
+
+/*
+ * The state of either end of the Xenbus, i.e. the current communication
+ * status of initialisation across the bus. States here imply nothing about
+ * the state of the connection between the driver and the kernel's device
+ * layers.
+ */
+enum xenbus_state {
+ XenbusStateUnknown = 0,
+
+ XenbusStateInitialising = 1,
+
+ /*
+ * InitWait: Finished early initialisation but waiting for information
+ * from the peer or hotplug scripts.
+ */
+ XenbusStateInitWait = 2,
+
+ /*
+ * Initialised: Waiting for a connection from the peer.
+ */
+ XenbusStateInitialised = 3,
+
+ XenbusStateConnected = 4,
+
+ /*
+ * Closing: The device is being closed due to an error or an unplug event.
+ */
+ XenbusStateClosing = 5,
+
+ XenbusStateClosed = 6,
+
+ /*
+ * Reconfiguring: The device is being reconfigured.
+ */
+ XenbusStateReconfiguring = 7,
+
+ XenbusStateReconfigured = 8
+};
+typedef enum xenbus_state XenbusState;
+
+#endif /* _XEN_PUBLIC_IO_XENBUS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/xs_wire.h b/include/hw/xen/interface/io/xs_wire.h
new file mode 100644
index 0000000000..04e6849feb
--- /dev/null
+++ b/include/hw/xen/interface/io/xs_wire.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Details of the "wire" protocol between Xen Store Daemon and client
+ * library or guest kernel.
+ *
+ * Copyright (C) 2005 Rusty Russell IBM Corporation
+ */
+
+#ifndef _XS_WIRE_H
+#define _XS_WIRE_H
+
+enum xsd_sockmsg_type
+{
+ XS_CONTROL,
+#define XS_DEBUG XS_CONTROL
+ XS_DIRECTORY,
+ XS_READ,
+ XS_GET_PERMS,
+ XS_WATCH,
+ XS_UNWATCH,
+ XS_TRANSACTION_START,
+ XS_TRANSACTION_END,
+ XS_INTRODUCE,
+ XS_RELEASE,
+ XS_GET_DOMAIN_PATH,
+ XS_WRITE,
+ XS_MKDIR,
+ XS_RM,
+ XS_SET_PERMS,
+ XS_WATCH_EVENT,
+ XS_ERROR,
+ XS_IS_DOMAIN_INTRODUCED,
+ XS_RESUME,
+ XS_SET_TARGET,
+ /* XS_RESTRICT has been removed */
+ XS_RESET_WATCHES = XS_SET_TARGET + 2,
+ XS_DIRECTORY_PART,
+
+ XS_TYPE_COUNT, /* Number of valid types. */
+
+ XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
+};
+
+#define XS_WRITE_NONE "NONE"
+#define XS_WRITE_CREATE "CREATE"
+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
+
+/* We hand errors as strings, for portability. */
+struct xsd_errors
+{
+ int errnum;
+ const char *errstring;
+};
+#ifdef EINVAL
+#define XSD_ERROR(x) { x, #x }
+/* LINTED: static unused */
+static const struct xsd_errors xsd_errors[]
+#if defined(__GNUC__)
+__attribute__((unused))
+#endif
+ = {
+ /* /!\ New errors should be added at the end of the array. */
+ XSD_ERROR(EINVAL),
+ XSD_ERROR(EACCES),
+ XSD_ERROR(EEXIST),
+ XSD_ERROR(EISDIR),
+ XSD_ERROR(ENOENT),
+ XSD_ERROR(ENOMEM),
+ XSD_ERROR(ENOSPC),
+ XSD_ERROR(EIO),
+ XSD_ERROR(ENOTEMPTY),
+ XSD_ERROR(ENOSYS),
+ XSD_ERROR(EROFS),
+ XSD_ERROR(EBUSY),
+ XSD_ERROR(EAGAIN),
+ XSD_ERROR(EISCONN),
+ XSD_ERROR(E2BIG),
+ XSD_ERROR(EPERM),
+};
+#endif
+
+struct xsd_sockmsg
+{
+ uint32_t type; /* XS_??? */
+ uint32_t req_id;/* Request identifier, echoed in daemon's response. */
+ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
+ uint32_t len; /* Length of data following this. */
+
+ /* Generally followed by nul-terminated string(s). */
+};
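+
+/*
+ * Illustrative sketch, not part of the wire protocol definition: a request
+ * is simply a struct xsd_sockmsg header followed by 'len' bytes of payload,
+ * here the nul-terminated path of an XS_READ.  The helper name and layout
+ * below are examples only, not an established API, and avoid any libc
+ * dependency.
+ */
+static inline unsigned int example_pack_xs_read(char *buf, unsigned int buf_len,
+                                                uint32_t req_id, uint32_t tx_id,
+                                                const char *path)
+{
+    struct xsd_sockmsg hdr;
+    unsigned int payload = 0, i;
+
+    while (path[payload] != '\0')
+        payload++;
+    payload++;                          /* include the trailing NUL */
+
+    if (buf_len < sizeof(hdr) + payload)
+        return 0;                       /* caller's buffer is too small */
+
+    hdr.type = XS_READ;
+    hdr.req_id = req_id;                /* echoed back in the reply */
+    hdr.tx_id = tx_id;                  /* 0 if not part of a transaction */
+    hdr.len = payload;
+
+    for (i = 0; i < sizeof(hdr); i++)
+        buf[i] = ((const char *)&hdr)[i];
+    for (i = 0; i < payload; i++)
+        buf[sizeof(hdr) + i] = path[i];
+    return sizeof(hdr) + payload;
+}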
+
+enum xs_watch_type
+{
+ XS_WATCH_PATH = 0,
+ XS_WATCH_TOKEN
+};
+
+/*
+ * `incontents 150 xenstore_struct XenStore wire protocol.
+ *
+ * Inter-domain shared memory communications. */
+#define XENSTORE_RING_SIZE 1024
+typedef uint32_t XENSTORE_RING_IDX;
+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
+struct xenstore_domain_interface {
+ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
+ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
+ XENSTORE_RING_IDX req_cons, req_prod;
+ XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ uint32_t server_features; /* Bitmap of features supported by the server */
+ uint32_t connection;
+ uint32_t error;
+};
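+
+/*
+ * Illustrative sketch, not part of the interface: queueing request bytes
+ * into the 'req' ring.  The indices are free-running and are only masked
+ * when the array is indexed; production code must additionally use memory
+ * barriers before publishing req_prod and notify the peer via the event
+ * channel, both of which are omitted here.
+ */
+static inline unsigned int example_xs_queue(struct xenstore_domain_interface *intf,
+                                            const char *data, unsigned int len)
+{
+    XENSTORE_RING_IDX prod = intf->req_prod;
+    unsigned int avail = XENSTORE_RING_SIZE - (prod - intf->req_cons);
+    unsigned int i;
+
+    if (len > avail)
+        len = avail;                    /* only queue what currently fits */
+    for (i = 0; i < len; i++) {
+        intf->req[MASK_XENSTORE_IDX(prod)] = data[i];
+        prod++;
+    }
+    /* A write barrier belongs here, before the new producer index is seen. */
+    intf->req_prod = prod;
+    return len;
+}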
+
+/* Violating this is very bad. See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
+/* Violating these just gets you an error back */
+#define XENSTORE_ABS_PATH_MAX 3072
+#define XENSTORE_REL_PATH_MAX 2048
+
+/* The ability to reconnect a ring */
+#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
+/* The presence of the "error" field in the ring page */
+#define XENSTORE_SERVER_FEATURE_ERROR 2
+
+/* Valid values for the connection field */
+#define XENSTORE_CONNECTED 0 /* the steady-state */
+#define XENSTORE_RECONNECT 1 /* reconnect in progress */
+
+/* Valid values for the error field */
+#define XENSTORE_ERROR_NONE 0 /* No error */
+#define XENSTORE_ERROR_COMM 1 /* Communication problem */
+#define XENSTORE_ERROR_RINGIDX 2 /* Invalid ring index */
+#define XENSTORE_ERROR_PROTO 3 /* Protocol violation (payload too long) */
+
+#endif /* _XS_WIRE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/memory.h b/include/hw/xen/interface/memory.h
new file mode 100644
index 0000000000..29cf5c8239
--- /dev/null
+++ b/include/hw/xen/interface/memory.h
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * memory.h
+ *
+ * Memory reservation and information.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_MEMORY_H__
+#define __XEN_PUBLIC_MEMORY_H__
+
+#include "xen.h"
+#include "physdev.h"
+
+/*
+ * Increase or decrease the specified domain's memory reservation. Returns the
+ * number of extents successfully allocated or freed.
+ * arg == addr of struct xen_memory_reservation.
+ */
+#define XENMEM_increase_reservation 0
+#define XENMEM_decrease_reservation 1
+#define XENMEM_populate_physmap 6
+
+#if __XEN_INTERFACE_VERSION__ >= 0x00030209
+/*
+ * Maximum # bits addressable by the user of the allocated region (e.g., I/O
+ * devices often have a 32-bit limitation even in 64-bit systems). If zero
+ * then the user has no addressing restriction. This field is not used by
+ * XENMEM_decrease_reservation.
+ */
+#define XENMEMF_address_bits(x) (x)
+#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
+/* NUMA node to allocate from. */
+#define XENMEMF_node(x) (((x) + 1) << 8)
+#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
+/* Flag to populate physmap with populate-on-demand entries */
+#define XENMEMF_populate_on_demand (1<<16)
+/* Flag to request allocation only from the node specified */
+#define XENMEMF_exact_node_request (1<<17)
+#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
+/* Flag to indicate the node specified is virtual node */
+#define XENMEMF_vnode (1<<18)
+#endif
+
+struct xen_memory_reservation {
+
+ /*
+ * XENMEM_increase_reservation:
+ * OUT: MFN (*not* GMFN) bases of extents that were allocated
+ * XENMEM_decrease_reservation:
+ * IN: GMFN bases of extents to free
+ * XENMEM_populate_physmap:
+ * IN: GPFN bases of extents to populate with memory
+ * OUT: GMFN bases of extents that were allocated
+ * (NB. This command also updates the mach_to_phys translation table)
+ * XENMEM_claim_pages:
+ * IN: must be zero
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
+
+ /* Number of extents, and size/alignment of each (2^extent_order pages). */
+ xen_ulong_t nr_extents;
+ unsigned int extent_order;
+
+#if __XEN_INTERFACE_VERSION__ >= 0x00030209
+ /* XENMEMF flags. */
+ unsigned int mem_flags;
+#else
+ unsigned int address_bits;
+#endif
+
+ /*
+ * Domain whose reservation is being changed.
+ * Unprivileged domains can specify only DOMID_SELF.
+ */
+ domid_t domid;
+};
+typedef struct xen_memory_reservation xen_memory_reservation_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
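+
+/*
+ * Illustrative sketch, not part of the interface: populating a single guest
+ * page frame with XENMEM_populate_physmap.  The hypercall wrapper is passed
+ * in by the caller (for example the guest's HYPERVISOR_memory_op glue); its
+ * exact name and return type are environment specific and only assumed here.
+ * set_xen_guest_handle() is the helper provided via xen.h.
+ */
+static inline long example_populate_one(long (*memory_op)(int cmd, void *arg),
+                                        xen_pfn_t *gpfn)
+{
+    struct xen_memory_reservation rsv = {
+        .nr_extents   = 1,
+        .extent_order = 0,              /* each extent is 2^0 == 1 page */
+        .domid        = DOMID_SELF,
+    };
+
+    /* IN: the GPFN to populate; OUT: the GMFN that was actually allocated. */
+    set_xen_guest_handle(rsv.extent_start, gpfn);
+
+    /* Returns the number of extents successfully populated (1 on success). */
+    return memory_op(XENMEM_populate_physmap, &rsv);
+}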
+
+/*
+ * An atomic exchange of memory pages. If return code is zero then
+ * @out.extent_list provides GMFNs of the newly-allocated memory.
+ * Returns zero on complete success, otherwise a negative error code.
+ * On complete success then always @nr_exchanged == @in.nr_extents.
+ * On partial success @nr_exchanged indicates how much work was done.
+ *
+ * Note that only PV guests can use this operation.
+ */
+#define XENMEM_exchange 11
+struct xen_memory_exchange {
+ /*
+ * [IN] Details of memory extents to be exchanged (GMFN bases).
+ * Note that @in.address_bits is ignored and unused.
+ */
+ struct xen_memory_reservation in;
+
+ /*
+ * [IN/OUT] Details of new memory extents.
+ * We require that:
+ * 1. @in.domid == @out.domid
+ * 2. @in.nr_extents << @in.extent_order ==
+ * @out.nr_extents << @out.extent_order
+ * 3. @in.extent_start and @out.extent_start lists must not overlap
+ * 4. @out.extent_start lists GPFN bases to be populated
+ * 5. @out.extent_start is overwritten with allocated GMFN bases
+ */
+ struct xen_memory_reservation out;
+
+ /*
+ * [OUT] Number of input extents that were successfully exchanged:
+ * 1. The first @nr_exchanged input extents were successfully
+ * deallocated.
+ * 2. The corresponding first entries in the output extent list correctly
+ * indicate the GMFNs that were successfully exchanged.
+ * 3. All other input and output extents are untouched.
+ * 4. If not all input extents are exchanged then the return code of this
+ * command will be non-zero.
+ * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
+ */
+ xen_ulong_t nr_exchanged;
+};
+typedef struct xen_memory_exchange xen_memory_exchange_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
+
+/*
+ * Returns the maximum machine frame number of mapped RAM in this system.
+ * This command always succeeds (it never returns an error code).
+ * arg == NULL.
+ */
+#define XENMEM_maximum_ram_page 2
+
+struct xen_memory_domain {
+ /* [IN] Domain information is being queried for. */
+ domid_t domid;
+};
+
+/*
+ * Returns the current or maximum memory reservation, in pages, of the
+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
+ * arg == addr of struct xen_memory_domain.
+ */
+#define XENMEM_current_reservation 3
+#define XENMEM_maximum_reservation 4
+
+/*
+ * Returns the maximum GFN in use by the specified domain (may be DOMID_SELF).
+ * Returns -ve errcode on failure.
+ * arg == addr of struct xen_memory_domain.
+ */
+#define XENMEM_maximum_gpfn 14
+
+/*
+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
+ * mapping table. Architectures which do not have an m2p table do not implement
+ * this command.
+ * arg == addr of xen_machphys_mfn_list_t.
+ */
+#define XENMEM_machphys_mfn_list 5
+struct xen_machphys_mfn_list {
+ /*
+ * Size of the 'extent_start' array. Fewer entries will be filled if the
+ * machphys table is smaller than max_extents * 2MB.
+ */
+ unsigned int max_extents;
+
+ /*
+ * Pointer to buffer to fill with list of extent starts. If there are
+ * any large discontiguities in the machine address space, 2MB gaps in
+ * the machphys table will be represented by an MFN base of zero.
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
+
+ /*
+ * Number of extents written to the above array. This will be smaller
+ * than 'max_extents' if the machphys table is smaller than max_extents * 2MB.
+ */
+ unsigned int nr_extents;
+};
+typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
+
+/*
+ * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
+ *
+ * For a non-compat caller, this functions similarly to
+ * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
+ * m2p table.
+ */
+#define XENMEM_machphys_compat_mfn_list 25
+
+/*
+ * Returns the location in virtual address space of the machine_to_phys
+ * mapping table. Architectures which do not have an m2p table, or which do not
+ * map it by default into guest address space, do not implement this command.
+ * arg == addr of xen_machphys_mapping_t.
+ */
+#define XENMEM_machphys_mapping 12
+struct xen_machphys_mapping {
+ xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
+ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
+};
+typedef struct xen_machphys_mapping xen_machphys_mapping_t;
+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
+
+/* Source mapping space. */
+/* ` enum phys_map_space { */
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+#define XENMAPSPACE_gmfn 2 /* GMFN */
+#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
+ * XENMEM_add_to_physmap_batch only. */
+#define XENMAPSPACE_dev_mmio 5 /* device mmio region
+ ARM only; the region is mapped in
+ Stage-2 using the Normal Memory
+ Inner/Outer Write-Back Cacheable
+ memory attribute. */
+/* ` } */
+
+/*
+ * Sets the GPFN at which a particular page appears in the specified guest's
+ * physical address space (translated guests only).
+ * arg == addr of xen_add_to_physmap_t.
+ */
+#define XENMEM_add_to_physmap 7
+struct xen_add_to_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* Number of pages to go through for gmfn_range */
+ uint16_t size;
+
+ unsigned int space; /* => enum phys_map_space */
+
+#define XENMAPIDX_grant_table_status 0x80000000
+
+ /* Index into space being mapped. */
+ xen_ulong_t idx;
+
+ /* GPFN in domid where the source mapping page should appear. */
+ xen_pfn_t gpfn;
+};
+typedef struct xen_add_to_physmap xen_add_to_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
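+
+/*
+ * Illustrative sketch, not part of the interface: placing the shared info
+ * page into the guest physmap at 'gpfn' with XENMEM_add_to_physmap.  The
+ * hypercall wrapper is supplied by the caller; its name and return type are
+ * environment specific and only assumed here.
+ */
+static inline long example_map_shared_info(long (*memory_op)(int cmd, void *arg),
+                                           xen_pfn_t gpfn)
+{
+    struct xen_add_to_physmap xatp = {
+        .domid = DOMID_SELF,
+        .space = XENMAPSPACE_shared_info,
+        .idx   = 0,                     /* index into the space being mapped */
+        .gpfn  = gpfn,                  /* where it should appear in the physmap */
+    };
+
+    return memory_op(XENMEM_add_to_physmap, &xatp);
+}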
+
+/* A batched version of add_to_physmap. */
+#define XENMEM_add_to_physmap_batch 23
+struct xen_add_to_physmap_batch {
+ /* IN */
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+ uint16_t space; /* => enum phys_map_space */
+
+ /* Number of pages to go through */
+ uint16_t size;
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+ domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
+#else
+ union xen_add_to_physmap_batch_extra {
+ domid_t foreign_domid; /* gmfn_foreign */
+ uint16_t res0; /* All the other spaces. Should be 0 */
+ } u;
+#endif
+
+ /* Indexes into space being mapped. */
+ XEN_GUEST_HANDLE(xen_ulong_t) idxs;
+
+ /* GPFN in domid where the source mapping page should appear. */
+ XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
+
+ /* OUT */
+
+ /* Per index error code. */
+ XEN_GUEST_HANDLE(int) errs;
+};
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
+#define xen_add_to_physmap_range xen_add_to_physmap_batch
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+#endif
+
+/*
+ * Unmaps the page appearing at a particular GPFN from the specified guest's
+ * physical address space (translated guests only).
+ * arg == addr of xen_remove_from_physmap_t.
+ */
+#define XENMEM_remove_from_physmap 15
+struct xen_remove_from_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* GPFN of the current mapping of the page. */
+ xen_pfn_t gpfn;
+};
+typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
+
+/*** REMOVED ***/
+/*#define XENMEM_translate_gpfn_list 8*/
+
+/*
+ * Returns the pseudo-physical memory map as it was when the domain
+ * was started (specified by XENMEM_set_memory_map).
+ * arg == addr of xen_memory_map_t.
+ */
+#define XENMEM_memory_map 9
+struct xen_memory_map {
+ /*
+ * On call the number of entries which can be stored in buffer. On
+ * return the number of entries which have been stored in
+ * buffer.
+ */
+ unsigned int nr_entries;
+
+ /*
+ * Entries in the buffer are in the same format as returned by the
+ * BIOS INT 0x15 EAX=0xE820 call.
+ */
+ XEN_GUEST_HANDLE(void) buffer;
+};
+typedef struct xen_memory_map xen_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
+
+/*
+ * Returns the real physical memory map. Passes the same structure as
+ * XENMEM_memory_map.
+ * Specifying buffer as NULL will return the number of entries required
+ * to store the complete memory map.
+ * arg == addr of xen_memory_map_t.
+ */
+#define XENMEM_machine_memory_map 10
+
+/*
+ * Set the pseudo-physical memory map of a domain, as returned by
+ * XENMEM_memory_map.
+ * arg == addr of xen_foreign_memory_map_t.
+ */
+#define XENMEM_set_memory_map 13
+struct xen_foreign_memory_map {
+ domid_t domid;
+ struct xen_memory_map map;
+};
+typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
+
+#define XENMEM_set_pod_target 16
+#define XENMEM_get_pod_target 17
+struct xen_pod_target {
+ /* IN */
+ uint64_t target_pages;
+ /* OUT */
+ uint64_t tot_pages;
+ uint64_t pod_cache_pages;
+ uint64_t pod_entries;
+ /* IN */
+ domid_t domid;
+};
+typedef struct xen_pod_target xen_pod_target_t;
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+
+/*
+ * Get the number of MFNs saved through memory sharing.
+ * The call never fails.
+ */
+#define XENMEM_get_sharing_freed_pages 18
+#define XENMEM_get_sharing_shared_pages 19
+
+#define XENMEM_paging_op 20
+#define XENMEM_paging_op_nominate 0
+#define XENMEM_paging_op_evict 1
+#define XENMEM_paging_op_prep 2
+
+struct xen_mem_paging_op {
+ uint8_t op; /* XENMEM_paging_op_* */
+ domid_t domain;
+
+ /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
+ XEN_GUEST_HANDLE_64(const_uint8) buffer;
+ /* IN: gfn of page being operated on */
+ uint64_aligned_t gfn;
+};
+typedef struct xen_mem_paging_op xen_mem_paging_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
+
+#define XENMEM_access_op 21
+#define XENMEM_access_op_set_access 0
+#define XENMEM_access_op_get_access 1
+/*
+ * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
+ * currently unused, but since they were once in use please do not reuse them.
+ *
+ * #define XENMEM_access_op_enable_emulate 2
+ * #define XENMEM_access_op_disable_emulate 3
+ */
+#define XENMEM_access_op_set_access_multi 4
+
+typedef enum {
+ XENMEM_access_n,
+ XENMEM_access_r,
+ XENMEM_access_w,
+ XENMEM_access_rw,
+ XENMEM_access_x,
+ XENMEM_access_rx,
+ XENMEM_access_wx,
+ XENMEM_access_rwx,
+ /*
+ * Page starts off as r-x, but automatically
+ * change to r-w on a write
+ */
+ XENMEM_access_rx2rw,
+ /*
+ * Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu
+ */
+ XENMEM_access_n2rwx,
+ /* Take the domain default */
+ XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+ /* XENMEM_access_op_* */
+ uint8_t op;
+ /* xenmem_access_t */
+ uint8_t access;
+ domid_t domid;
+ /*
+ * Number of pages for set op (or size of pfn_list for
+ * XENMEM_access_op_set_access_multi)
+ * Ignored on setting default access and other ops
+ */
+ uint32_t nr;
+ /*
+ * First pfn for set op
+ * pfn for get op
+ * ~0ull is used to set and get the default access for pages
+ */
+ uint64_aligned_t pfn;
+ /*
+ * List of pfns to set access for
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint64) pfn_list;
+ /*
+ * Corresponding list of access settings for pfn_list
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
+
+#define XENMEM_sharing_op 22
+#define XENMEM_sharing_op_nominate_gfn 0
+#define XENMEM_sharing_op_nominate_gref 1
+#define XENMEM_sharing_op_share 2
+#define XENMEM_sharing_op_debug_gfn 3
+#define XENMEM_sharing_op_debug_mfn 4
+#define XENMEM_sharing_op_debug_gref 5
+#define XENMEM_sharing_op_add_physmap 6
+#define XENMEM_sharing_op_audit 7
+#define XENMEM_sharing_op_range_share 8
+#define XENMEM_sharing_op_fork 9
+#define XENMEM_sharing_op_fork_reset 10
+
+#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
+#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
+
+/* The following allows sharing of grant refs. This is useful
+ * for sharing utilities sitting as "filters" in IO backends
+ * (e.g. memshr + blktap(2)). The IO backend is only exposed
+ * to grant references, and this allows sharing of the grefs */
+#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)
+
+#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
+ (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
+#define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
+ ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
+#define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
+ ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))
+
+struct xen_mem_sharing_op {
+ uint8_t op; /* XENMEM_sharing_op_* */
+ domid_t domain;
+
+ union {
+ struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
+ union {
+ uint64_aligned_t gfn; /* IN: gfn to nominate */
+ uint32_t grant_ref; /* IN: grant ref to nominate */
+ } u;
+ uint64_aligned_t handle; /* OUT: the handle */
+ } nominate;
+ struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */
+ uint64_aligned_t source_gfn; /* IN: the gfn of the source page */
+ uint64_aligned_t source_handle; /* IN: handle to the source page */
+ uint64_aligned_t client_gfn; /* IN: the client gfn */
+ uint64_aligned_t client_handle; /* IN: handle to the client page */
+ domid_t client_domain; /* IN: the client domain id */
+ } share;
+ struct mem_sharing_op_range { /* OP_RANGE_SHARE */
+ uint64_aligned_t first_gfn; /* IN: the first gfn */
+ uint64_aligned_t last_gfn; /* IN: the last gfn */
+ uint64_aligned_t opaque; /* Must be set to 0 */
+ domid_t client_domain; /* IN: the client domain id */
+ uint16_t _pad[3]; /* Must be set to 0 */
+ } range;
+ struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
+ union {
+ uint64_aligned_t gfn; /* IN: gfn to debug */
+ uint64_aligned_t mfn; /* IN: mfn to debug */
+ uint32_t gref; /* IN: gref to debug */
+ } u;
+ } debug;
+ struct mem_sharing_op_fork { /* OP_FORK{,_RESET} */
+ domid_t parent_domain; /* IN: parent's domain id */
+/* Only makes sense for short-lived forks */
+#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
+/* Only makes sense for short-lived forks */
+#define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1)
+#define XENMEM_FORK_RESET_STATE (1u << 2)
+#define XENMEM_FORK_RESET_MEMORY (1u << 3)
+ uint16_t flags; /* IN: optional settings */
+ uint32_t pad; /* Must be set to 0 */
+ } fork;
+ } u;
+};
+typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
+
+/*
+ * Attempt to stake a claim for a domain on a quantity of pages
+ * of system RAM, but _not_ assign specific pageframes. Only
+ * arithmetic is performed so the hypercall is very fast and need
+ * not be preemptible, thus sidestepping time-of-check-time-of-use
+ * races for memory allocation. Returns 0 if the hypervisor page
+ * allocator has atomically and successfully claimed the requested
+ * number of pages, else non-zero.
+ *
+ * Any domain may have only one active claim. When sufficient memory
+ * has been allocated to resolve the claim, the claim silently expires.
+ * Claiming zero pages effectively resets any outstanding claim and
+ * is always successful.
+ *
+ * Note that a valid claim may be staked even after memory has been
+ * allocated for a domain. In this case, the claim is not incremental,
+ * i.e. if the domain's total page count is 3, and a claim is staked
+ * for 10, only 7 additional pages are claimed.
+ *
+ * Caller must be privileged or the hypercall fails.
+ */
+#define XENMEM_claim_pages 24
+
+/*
+ * XENMEM_claim_pages flags - there are no flags at this time.
+ * The zero value is appropriate.
+ */
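+
+/*
+ * Illustrative sketch, not part of the interface: staking a claim reuses
+ * struct xen_memory_reservation with extent_start left at zero, as required
+ * above.  The hypercall wrapper is supplied by the (privileged) caller and
+ * only assumed here.
+ */
+static inline long example_claim_pages(long (*memory_op)(int cmd, void *arg),
+                                       domid_t domid, xen_ulong_t nr_pages)
+{
+    struct xen_memory_reservation rsv = {
+        .nr_extents   = nr_pages,       /* pages to claim; 0 resets the claim */
+        .extent_order = 0,
+        .domid        = domid,
+    };
+
+    /* Returns 0 if the claim was staked, non-zero otherwise. */
+    return memory_op(XENMEM_claim_pages, &rsv);
+}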
+
+/*
+ * With some legacy devices, certain guest-physical addresses cannot safely
+ * be used for other purposes, e.g. to map guest RAM. This hypercall
+ * enumerates those regions so the toolstack can avoid using them.
+ */
+#define XENMEM_reserved_device_memory_map 27
+struct xen_reserved_device_memory {
+ xen_pfn_t start_pfn;
+ xen_ulong_t nr_pages;
+};
+typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
+
+struct xen_reserved_device_memory_map {
+#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
+ /* IN */
+ uint32_t flags;
+ /*
+ * IN/OUT
+ *
+ * Gets set to the required number of entries when too low,
+ * signaled by error code -ERANGE.
+ */
+ unsigned int nr_entries;
+ /* OUT */
+ XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
+ /* IN */
+ union {
+ physdev_pci_device_t pci;
+ } dev;
+};
+typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+/*
+ * Get the pages for a particular guest resource, so that they can be
+ * mapped directly by a tools domain.
+ */
+#define XENMEM_acquire_resource 28
+struct xen_mem_acquire_resource {
+ /* IN - The domain whose resource is to be mapped */
+ domid_t domid;
+ /* IN - the type of resource */
+ uint16_t type;
+
+#define XENMEM_resource_ioreq_server 0
+#define XENMEM_resource_grant_table 1
+#define XENMEM_resource_vmtrace_buf 2
+
+ /*
+ * IN - a type-specific resource identifier, which must be zero
+ * unless stated otherwise.
+ *
+ * type == XENMEM_resource_ioreq_server -> id == ioreq server id
+ * type == XENMEM_resource_grant_table -> id defined below
+ */
+ uint32_t id;
+
+#define XENMEM_resource_grant_table_id_shared 0
+#define XENMEM_resource_grant_table_id_status 1
+
+ /*
+ * IN/OUT
+ *
+ * As an IN parameter number of frames of the resource to be mapped.
+ * This value may be updated over the course of the operation.
+ *
+ * When frame_list is NULL and nr_frames is 0, this is interpreted as a
+ * request for the size of the resource, which shall be returned in the
+ * nr_frames field.
+ *
+ * The size of a resource will never be zero, but a nonzero result doesn't
+ * guarantee that a subsequent mapping request will be successful. There
+ * are further type/id specific constraints which may change between the
+ * two calls.
+ */
+ uint32_t nr_frames;
+ /*
+ * Padding field, must be zero on input.
+ * In a previous version this was an output field with the lowest bit
+ * named XENMEM_rsrc_acq_caller_owned. Future versions of this interface
+ * will not reuse this bit as an output with the field being zero on
+ * input.
+ */
+ uint32_t pad;
+ /*
+ * IN - the index of the initial frame to be mapped. This parameter
+ * is ignored if nr_frames is 0. This value may be updated
+ * over the course of the operation.
+ */
+ uint64_t frame;
+
+#define XENMEM_resource_ioreq_server_frame_bufioreq 0
+#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
+
+ /*
+ * IN/OUT - If the tools domain is PV then, upon return, frame_list
+ * will be populated with the MFNs of the resource.
+ * If the tools domain is HVM then it is expected that, on
+ * entry, frame_list will be populated with a list of GFNs
+ * that will be mapped to the MFNs of the resource.
+ * If -EIO is returned then the frame_list has only been
+ * partially mapped and it is up to the caller to unmap all
+ * the GFNs.
+ * This parameter may be NULL if nr_frames is 0. This
+ * value may be updated over the course of the operation.
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+};
+typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);
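+
+/*
+ * Illustrative sketch, not part of the interface: the size-query half of the
+ * two-step pattern described above - pass nr_frames == 0 and a NULL
+ * frame_list to learn how many frames the resource occupies, then issue a
+ * second call with a populated frame_list to map it.  The hypercall wrapper
+ * is supplied by the caller and only assumed here.
+ */
+static inline long example_resource_size(long (*memory_op)(int cmd, void *arg),
+                                         domid_t domid, uint16_t type,
+                                         uint32_t id, uint32_t *nr_frames)
+{
+    struct xen_mem_acquire_resource xmar = {
+        .domid     = domid,
+        .type      = type,              /* e.g. XENMEM_resource_grant_table */
+        .id        = id,                /* e.g. ..._grant_table_id_shared */
+        .nr_frames = 0,                 /* size query: frame_list stays NULL */
+    };
+    long rc = memory_op(XENMEM_acquire_resource, &xmar);
+
+    if (rc == 0)
+        *nr_frames = xmar.nr_frames;    /* size of the resource, in frames */
+    return rc;
+}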
+
+/*
+ * XENMEM_get_vnumainfo used by guest to get
+ * vNUMA topology from hypervisor.
+ */
+#define XENMEM_get_vnumainfo 26
+
+/* vNUMA node memory ranges */
+struct xen_vmemrange {
+ uint64_t start, end;
+ unsigned int flags;
+ unsigned int nid;
+};
+typedef struct xen_vmemrange xen_vmemrange_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
+
+/*
+ * vNUMA topology specifies vNUMA node number, distance table,
+ * memory ranges and vcpu mapping provided for guests.
+ * The XENMEM_get_vnumainfo hypercall expects the guest to provide
+ * nr_vnodes, nr_vmemranges and nr_vcpus to indicate the sizes of the
+ * buffers it has made available. After the guest structures have been
+ * filled in, nr_vnodes, nr_vmemranges and nr_vcpus are copied back to
+ * the guest. If the supplied values were incorrect, the expected values
+ * of nr_vnodes, nr_vmemranges and nr_vcpus are returned to the guest.
+ */
+struct xen_vnuma_topology_info {
+ /* IN */
+ domid_t domid;
+ uint16_t pad;
+ /* IN/OUT */
+ unsigned int nr_vnodes;
+ unsigned int nr_vcpus;
+ unsigned int nr_vmemranges;
+ /* OUT */
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vdistance;
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vcpu_to_vnode;
+ union {
+ XEN_GUEST_HANDLE(xen_vmemrange_t) h;
+ uint64_t pad;
+ } vmemrange;
+};
+typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
+
+/* Next available subop number is 29 */
+
+#endif /* __XEN_PUBLIC_MEMORY_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/physdev.h b/include/hw/xen/interface/physdev.h
new file mode 100644
index 0000000000..f0c0d4727c
--- /dev/null
+++ b/include/hw/xen/interface/physdev.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2006, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_PHYSDEV_H__
+#define __XEN_PUBLIC_PHYSDEV_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ * int physdev_op(int cmd, void *args)
+ * @cmd == PHYSDEVOP_??? (physdev operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Notify end-of-interrupt (EOI) for the specified IRQ.
+ * @arg == pointer to physdev_eoi structure.
+ */
+#define PHYSDEVOP_eoi 12
+struct physdev_eoi {
+ /* IN */
+ uint32_t irq;
+};
+typedef struct physdev_eoi physdev_eoi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
+
+/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest has used this function, in that the associated event channel
+ * will automatically be unmasked. The page registered is used as a bit
+ * array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v1 17
+/*
+ * Register a shared page for the hypervisor to indicate whether the
+ * guest must issue PHYSDEVOP_eoi. This hypercall is very similar to
+ * PHYSDEVOP_pirq_eoi_gmfn_v1 but it doesn't change the semantics of
+ * PHYSDEVOP_eoi. The page registered is used as a bit array indexed by
+ * Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v2 28
+struct physdev_pirq_eoi_gmfn {
+ /* IN */
+ xen_pfn_t gmfn;
+};
+typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t);
+
+/*
+ * Query the status of an IRQ line.
+ * @arg == pointer to physdev_irq_status_query structure.
+ */
+#define PHYSDEVOP_irq_status_query 5
+struct physdev_irq_status_query {
+ /* IN */
+ uint32_t irq;
+ /* OUT */
+ uint32_t flags; /* XENIRQSTAT_* */
+};
+typedef struct physdev_irq_status_query physdev_irq_status_query_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
+
+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
+#define _XENIRQSTAT_needs_eoi (0)
+#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
+
+/* IRQ shared by multiple guests? */
+#define _XENIRQSTAT_shared (1)
+#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
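+
+/*
+ * Illustrative sketch, not part of the interface: query whether an IRQ
+ * needs an explicit EOI and issue one if so, using the physdev_op()
+ * prototype documented at the top of this file.  The wrapper is passed in
+ * by the caller; its exact name and return type are environment specific.
+ */
+static inline int example_ack_irq(int (*physdev_op)(int cmd, void *args),
+                                  uint32_t irq)
+{
+    struct physdev_irq_status_query query = { .irq = irq };
+    int rc = physdev_op(PHYSDEVOP_irq_status_query, &query);
+
+    if (rc == 0 && (query.flags & XENIRQSTAT_needs_eoi)) {
+        struct physdev_eoi eoi = { .irq = irq };
+
+        rc = physdev_op(PHYSDEVOP_eoi, &eoi);
+    }
+    return rc;
+}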
+
+/*
+ * Set the current VCPU's I/O privilege level.
+ * @arg == pointer to physdev_set_iopl structure.
+ */
+#define PHYSDEVOP_set_iopl 6
+struct physdev_set_iopl {
+ /* IN */
+ uint32_t iopl;
+};
+typedef struct physdev_set_iopl physdev_set_iopl_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
+
+/*
+ * Set the current VCPU's I/O-port permissions bitmap.
+ * @arg == pointer to physdev_set_iobitmap structure.
+ */
+#define PHYSDEVOP_set_iobitmap 7
+struct physdev_set_iobitmap {
+ /* IN */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
+ XEN_GUEST_HANDLE(uint8) bitmap;
+#else
+ uint8_t *bitmap;
+#endif
+ uint32_t nr_ports;
+};
+typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
+
+/*
+ * Read or write an IO-APIC register.
+ * @arg == pointer to physdev_apic structure.
+ */
+#define PHYSDEVOP_apic_read 8
+#define PHYSDEVOP_apic_write 9
+struct physdev_apic {
+ /* IN */
+ unsigned long apic_physbase;
+ uint32_t reg;
+ /* IN or OUT */
+ uint32_t value;
+};
+typedef struct physdev_apic physdev_apic_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
+
+/*
+ * Allocate or free a physical upcall vector for the specified IRQ line.
+ * @arg == pointer to physdev_irq structure.
+ */
+#define PHYSDEVOP_alloc_irq_vector 10
+#define PHYSDEVOP_free_irq_vector 11
+struct physdev_irq {
+ /* IN */
+ uint32_t irq;
+ /* IN or OUT */
+ uint32_t vector;
+};
+typedef struct physdev_irq physdev_irq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
+
+#define MAP_PIRQ_TYPE_MSI 0x0
+#define MAP_PIRQ_TYPE_GSI 0x1
+#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
+#define MAP_PIRQ_TYPE_MULTI_MSI 0x4
+
+#define PHYSDEVOP_map_pirq 13
+struct physdev_map_pirq {
+ domid_t domid;
+ /* IN */
+ int type;
+ /* IN (ignored for ..._MULTI_MSI) */
+ int index;
+ /* IN or OUT */
+ int pirq;
+ /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */
+ int bus;
+ /* IN */
+ int devfn;
+ /* IN (also OUT for ..._MULTI_MSI) */
+ int entry_nr;
+ /* IN */
+ uint64_t table_base;
+};
+typedef struct physdev_map_pirq physdev_map_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
+
+#define PHYSDEVOP_unmap_pirq 14
+struct physdev_unmap_pirq {
+ domid_t domid;
+ /* IN */
+ int pirq;
+};
+
+typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
+
+#define PHYSDEVOP_manage_pci_add 15
+#define PHYSDEVOP_manage_pci_remove 16
+struct physdev_manage_pci {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+
+typedef struct physdev_manage_pci physdev_manage_pci_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
+
+#define PHYSDEVOP_restore_msi 19
+struct physdev_restore_msi {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_restore_msi physdev_restore_msi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t);
+
+#define PHYSDEVOP_manage_pci_add_ext 20
+struct physdev_manage_pci_ext {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t is_extfn;
+ uint32_t is_virtfn;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+};
+
+typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t);
+
+/*
+ * Argument to the physdev_op_compat() hypercall. Superseded by the new physdev_op()
+ * hypercall since 0x00030202.
+ */
+struct physdev_op {
+ uint32_t cmd;
+ union {
+ physdev_irq_status_query_t irq_status_query;
+ physdev_set_iopl_t set_iopl;
+ physdev_set_iobitmap_t set_iobitmap;
+ physdev_apic_t apic_op;
+ physdev_irq_t irq_op;
+ } u;
+};
+typedef struct physdev_op physdev_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
+
+#define PHYSDEVOP_setup_gsi 21
+struct physdev_setup_gsi {
+ int gsi;
+ /* IN */
+ uint8_t triggering;
+ /* IN */
+ uint8_t polarity;
+ /* IN */
+};
+
+typedef struct physdev_setup_gsi physdev_setup_gsi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t);
+
+/* leave PHYSDEVOP 22 free */
+
+/* Type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI;
+ * the hypercall returns a free pirq. */
+#define PHYSDEVOP_get_free_pirq 23
+struct physdev_get_free_pirq {
+ /* IN */
+ int type;
+ /* OUT */
+ uint32_t pirq;
+};
+
+typedef struct physdev_get_free_pirq physdev_get_free_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_get_free_pirq_t);
+
+#define XEN_PCI_MMCFG_RESERVED 0x1
+
+#define PHYSDEVOP_pci_mmcfg_reserved 24
+struct physdev_pci_mmcfg_reserved {
+ uint64_t address;
+ uint16_t segment;
+ uint8_t start_bus;
+ uint8_t end_bus;
+ uint32_t flags;
+};
+typedef struct physdev_pci_mmcfg_reserved physdev_pci_mmcfg_reserved_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_mmcfg_reserved_t);
+
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+ /*
+ * Optional parameters array.
+ * First element ([0]) is PXM domain associated with the device (if
+ * XEN_PCI_DEV_PXM is set)
+ */
+ uint32_t optarr[XEN_FLEX_ARRAY_DIM];
+};
+typedef struct physdev_pci_device_add physdev_pci_device_add_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t);
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+/*
+ * Dom0 should use these two to announce that MMIO resources assigned to
+ * MSI-X capable devices won't (prepare) or may (release) change.
+ */
+#define PHYSDEVOP_prepare_msix 30
+#define PHYSDEVOP_release_msix 31
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_pci_device physdev_pci_device_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
+
+#define PHYSDEVOP_DBGP_RESET_PREPARE 1
+#define PHYSDEVOP_DBGP_RESET_DONE 2
+
+#define PHYSDEVOP_DBGP_BUS_UNKNOWN 0
+#define PHYSDEVOP_DBGP_BUS_PCI 1
+
+#define PHYSDEVOP_dbgp_op 29
+struct physdev_dbgp_op {
+ /* IN */
+ uint8_t op;
+ uint8_t bus;
+ union {
+ physdev_pci_device_t pci;
+ } u;
+};
+typedef struct physdev_dbgp_op physdev_dbgp_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_t);
+
+/*
+ * Notify that some PIRQ-bound event channels have been unmasked.
+ * ** This command is obsolete since interface version 0x00030202 and is **
+ * ** unsupported by newer versions of Xen. **
+ */
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+/*
+ * These all-capitals physdev operation names are superseded by the new names
+ * (defined above) since interface version 0x00030202. The guard above was
+ * only added post-4.5, however, and hence should not check for 0x00030202.
+ */
+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
+#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
+#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+#endif
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040200
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1
+#else
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v2
+#endif
+
+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/sched.h b/include/hw/xen/interface/sched.h
new file mode 100644
index 0000000000..b4362c6a1d
--- /dev/null
+++ b/include/hw/xen/interface/sched.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * sched.h
+ *
+ * Scheduler state interactions
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_SCHED_H__
+#define __XEN_PUBLIC_SCHED_H__
+
+#include "event_channel.h"
+
+/*
+ * `incontents 150 sched Guest Scheduler Operations
+ *
+ * The SCHEDOP interface provides mechanisms for a guest to interact
+ * with the scheduler, including yield, blocking and shutting itself
+ * down.
+ */
+
+/*
+ * The prototype for this hypercall is:
+ * ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
+ *
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == Operation-specific extra argument(s), as described below.
+ * ... == Additional Operation-specific extra arguments, described below.
+ *
+ * Versions of Xen prior to 3.0.2 provided only the following legacy version
+ * of this hypercall, supporting only the commands yield, block and shutdown:
+ * long sched_op(int cmd, unsigned long arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
+ * == SHUTDOWN_* code (SCHEDOP_shutdown)
+ *
+ * This legacy version is available to new guests as:
+ * ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
+ */
+
+/* ` enum sched_op { // SCHEDOP_* => struct sched_* */
+/*
+ * Voluntarily yield the CPU.
+ * @arg == NULL.
+ */
+#define SCHEDOP_yield 0
+
+/*
+ * Block execution of this VCPU until an event is received for processing.
+ * If called with event upcalls masked, this operation will atomically
+ * reenable event delivery and check for pending events before blocking the
+ * VCPU. This avoids a "wakeup waiting" race.
+ * @arg == NULL.
+ */
+#define SCHEDOP_block 1
+
+/*
+ * Halt execution of this domain (all VCPUs) and notify the system controller.
+ * @arg == pointer to sched_shutdown_t structure.
+ *
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then
+ * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
+ * of the guest's start info page. RDX/EDX is the third hypercall
+ * argument.
+ *
+ * In addition, when the reason is SHUTDOWN_suspend, this hypercall
+ * returns 1 if suspend was cancelled or the domain was merely
+ * checkpointed, and 0 if it is resuming in a new domain.
+ */
+#define SCHEDOP_shutdown 2
+
+/*
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll_t structure.
+ */
+#define SCHEDOP_poll 3
+
+/*
+ * Declare a shutdown for another domain. The main use of this function is
+ * in interpreting shutdown requests and reasons for fully-virtualized
+ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
+ * @arg == pointer to sched_remote_shutdown_t structure.
+ */
+#define SCHEDOP_remote_shutdown 4
+
+/*
+ * Latch a shutdown code, so that when the domain later shuts down it
+ * reports this code to the control tools.
+ * @arg == sched_shutdown_t, as for SCHEDOP_shutdown.
+ */
+#define SCHEDOP_shutdown_code 5
+
+/*
+ * Set up, poke and destroy a domain watchdog timer.
+ * @arg == pointer to sched_watchdog_t structure.
+ * With id == 0, set up a domain watchdog timer to cause domain shutdown
+ * after the timeout; returns the watchdog id.
+ * With id != 0 and timeout == 0, destroy domain watchdog timer.
+ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
+ */
+#define SCHEDOP_watchdog 6
+
+/*
+ * Override the current vcpu affinity by pinning it to one physical cpu or
+ * undo this override restoring the previous affinity.
+ * @arg == pointer to sched_pin_override_t structure.
+ *
+ * A negative pcpu value will undo a previous pin override and restore the
+ * previous cpu affinity.
+ * This call is allowed for the hardware domain only and requires the cpu
+ * to be part of the domain's cpupool.
+ */
+#define SCHEDOP_pin_override 7
+/* ` } */
+
+struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_shutdown sched_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
+
+struct sched_poll {
+ XEN_GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+};
+typedef struct sched_poll sched_poll_t;
+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
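+
+/*
+ * Illustrative sketch, not part of the interface: block until a single
+ * event channel port is pending or the timeout expires, using the
+ * HYPERVISOR_sched_op() prototype documented above.  The wrapper is passed
+ * in by the caller and only its rough shape is assumed here;
+ * set_xen_guest_handle() comes from xen.h.
+ */
+static inline long example_poll_port(long (*sched_op)(int cmd, void *arg),
+                                     evtchn_port_t port, uint64_t timeout)
+{
+    struct sched_poll poll = {
+        .nr_ports = 1,
+        .timeout  = timeout,            /* absolute system time in ns; 0 == none */
+    };
+
+    set_xen_guest_handle(poll.ports, &port);
+    return sched_op(SCHEDOP_poll, &poll);
+}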
+
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
+
+struct sched_watchdog {
+ uint32_t id; /* watchdog ID */
+ uint32_t timeout; /* timeout */
+};
+typedef struct sched_watchdog sched_watchdog_t;
+DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
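+
+/*
+ * Illustrative sketch, not part of the interface: the three watchdog
+ * operations described above share this one call.  The wrapper is passed
+ * in by the caller and only assumed here.
+ */
+static inline long example_watchdog(long (*sched_op)(int cmd, void *arg),
+                                    uint32_t id, uint32_t timeout)
+{
+    struct sched_watchdog wd = { .id = id, .timeout = timeout };
+
+    /*
+     * id == 0                : set up a new watchdog; returns its id.
+     * id != 0, timeout == 0  : destroy watchdog 'id'.
+     * id != 0, timeout != 0  : poke watchdog 'id' and set a new timeout.
+     */
+    return sched_op(SCHEDOP_watchdog, &wd);
+}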
+
+struct sched_pin_override {
+ int32_t pcpu;
+};
+typedef struct sched_pin_override sched_pin_override_t;
+DEFINE_XEN_GUEST_HANDLE(sched_pin_override_t);
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
+ * software to determine the appropriate action. For the most part, Xen does
+ * not care about the shutdown code.
+ */
+/* ` enum sched_shutdown_reason { */
+#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
+#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
+#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
+
+/*
+ * The domain has asked to perform a 'soft reset'. The expected behavior is to
+ * reset the domain's internal Xen state, returning it to the point where it
+ * was created, but leaving the domain's memory contents and vCPU contexts
+ * intact. This allows the domain to start over and set up all Xen-specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
+#define SHUTDOWN_MAX 5 /* Maximum valid shutdown reason. */
+/* ` } */
+
+#endif /* __XEN_PUBLIC_SCHED_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/trace.h b/include/hw/xen/interface/trace.h
new file mode 100644
index 0000000000..62a179971d
--- /dev/null
+++ b/include/hw/xen/interface/trace.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * include/public/trace.h
+ *
+ * Mark Williamson, (C) 2004 Intel Research Cambridge
+ * Copyright (C) 2005 Bin Ren
+ */
+
+#ifndef __XEN_PUBLIC_TRACE_H__
+#define __XEN_PUBLIC_TRACE_H__
+
+#define TRACE_EXTRA_MAX 7
+#define TRACE_EXTRA_SHIFT 28
+
+/* Trace classes */
+#define TRC_CLS_SHIFT 16
+#define TRC_GEN 0x0001f000 /* General trace */
+#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
+#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
+#define TRC_HVM 0x0008f000 /* Xen HVM trace */
+#define TRC_MEM 0x0010f000 /* Xen memory trace */
+#define TRC_PV 0x0020f000 /* Xen PV traces */
+#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */
+#define TRC_HW 0x0080f000 /* Xen hardware-related traces */
+#define TRC_GUEST 0x0800f000 /* Guest-generated traces */
+#define TRC_ALL 0x0ffff000
+#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
+#define TRC_HD_CYCLE_FLAG (1UL<<31)
+#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) )
+#define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX)
+
+/* Trace subclasses */
+#define TRC_SUBCLS_SHIFT 12
+
+/* Trace subclasses for HVM */
+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+#define TRC_HVM_EMUL 0x00084000 /* emulated devices */
+
+#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
+#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */
+#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
+
+/*
+ * The highest 3 bits of the last 12 bits of TRC_SCHED_CLASS above are
+ * reserved for encoding what scheduler produced the information. The
+ * actual event is encoded in the last 9 bits.
+ *
+ * This means we have 8 scheduling IDs available (which means at most 8
+ * schedulers generating events) and, in each scheduler, up to 512
+ * different events.
+ */
+#define TRC_SCHED_ID_BITS 3
+#define TRC_SCHED_ID_SHIFT (TRC_SUBCLS_SHIFT - TRC_SCHED_ID_BITS)
+#define TRC_SCHED_ID_MASK (((1UL<<TRC_SCHED_ID_BITS) - 1) << TRC_SCHED_ID_SHIFT)
+#define TRC_SCHED_EVT_MASK (~(TRC_SCHED_ID_MASK))
+
+/* Per-scheduler IDs, to identify scheduler specific events */
+#define TRC_SCHED_CSCHED 0
+#define TRC_SCHED_CSCHED2 1
+/* #define XEN_SCHEDULER_SEDF 2 (Removed) */
+#define TRC_SCHED_ARINC653 3
+#define TRC_SCHED_RTDS 4
+#define TRC_SCHED_SNULL 5
+
+/* Per-scheduler tracing */
+#define TRC_SCHED_CLASS_EVT(_c, _e) \
+ ( ( TRC_SCHED_CLASS | \
+ ((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
+ (_e & TRC_SCHED_EVT_MASK) )
+
+/* Trace classes for DOM0 operations */
+#define TRC_DOM0_DOMOPS 0x00041000 /* Domains manipulations */
+
+/* Trace classes for Hardware */
+#define TRC_HW_PM 0x00801000 /* Power management traces */
+#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
+
+/* Trace events per class */
+#define TRC_LOST_RECORDS (TRC_GEN + 1)
+#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
+#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
+
+#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2)
+#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1)
+#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2)
+#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3)
+#define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4)
+#define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5)
+#define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6)
+#define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7)
+#define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8)
+#define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9)
+#define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10)
+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11)
+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12)
+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13)
+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
+#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16)
+#define TRC_SCHED_SWITCH_INFCONT (TRC_SCHED_VERBOSE + 17)
+
+#define TRC_DOM0_DOM_ADD (TRC_DOM0_DOMOPS + 1)
+#define TRC_DOM0_DOM_REM (TRC_DOM0_DOMOPS + 2)
+
+#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
+#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+#define TRC_MEM_SET_P2M_ENTRY (TRC_MEM + 4)
+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
+#define TRC_MEM_POD_POPULATE (TRC_MEM + 16)
+#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17)
+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+
+#define TRC_PV_ENTRY 0x00201000 /* Hypervisor entry points for PV guests. */
+#define TRC_PV_SUBCALL 0x00202000 /* Sub-call in a multicall hypercall */
+
+#define TRC_PV_HYPERCALL (TRC_PV_ENTRY + 1)
+#define TRC_PV_TRAP (TRC_PV_ENTRY + 3)
+#define TRC_PV_PAGE_FAULT (TRC_PV_ENTRY + 4)
+#define TRC_PV_FORCED_INVALID_OP (TRC_PV_ENTRY + 5)
+#define TRC_PV_EMULATE_PRIVOP (TRC_PV_ENTRY + 6)
+#define TRC_PV_EMULATE_4GB (TRC_PV_ENTRY + 7)
+#define TRC_PV_MATH_STATE_RESTORE (TRC_PV_ENTRY + 8)
+#define TRC_PV_PAGING_FIXUP (TRC_PV_ENTRY + 9)
+#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV_ENTRY + 10)
+#define TRC_PV_PTWR_EMULATION (TRC_PV_ENTRY + 11)
+#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV_ENTRY + 12)
+#define TRC_PV_HYPERCALL_V2 (TRC_PV_ENTRY + 13)
+#define TRC_PV_HYPERCALL_SUBCALL (TRC_PV_SUBCALL + 14)
+
+/*
+ * TRC_PV_HYPERCALL_V2 format
+ *
+ * Only some of the hypercall arguments are recorded. Bit fields A0 to
+ * A5 in the first extra word are set if the argument is present and
+ * the arguments themselves are packed sequentially in the following
+ * words.
+ *
+ * The TRC_64_FLAG bit is not set for these events (even if there are
+ * 64-bit arguments in the record).
+ *
+ * Word
+ * 0 bit 31 30|29 28|27 26|25 24|23 22|21 20|19 ... 0
+ * A5 |A4 |A3 |A2 |A1 |A0 |Hypercall op
+ * 1 First 32 bit (or low word of first 64 bit) arg in record
+ * 2 Second 32 bit (or high word of first 64 bit) arg in record
+ * ...
+ *
+ * A0-A5 bitfield values:
+ *
+ * 00b Argument not present
+ * 01b 32-bit argument present
+ * 10b 64-bit argument present
+ * 11b Reserved
+ */
+#define TRC_PV_HYPERCALL_V2_ARG_32(i) (0x1 << (20 + 2*(i)))
+#define TRC_PV_HYPERCALL_V2_ARG_64(i) (0x2 << (20 + 2*(i)))
+#define TRC_PV_HYPERCALL_V2_ARG_MASK (0xfff00000)
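+
+/*
+ * Illustrative sketch, not part of the interface: decoding the A0-A5 bit
+ * fields of the first extra word of a TRC_PV_HYPERCALL_V2 record with the
+ * macros above.  Returns the number of 32-bit data words occupied by
+ * argument 'i' (0 if that argument was not recorded).
+ */
+static inline unsigned int example_hcall_v2_arg_words(uint32_t first_word,
+                                                      unsigned int i)
+{
+    if (first_word & TRC_PV_HYPERCALL_V2_ARG_64(i))
+        return 2;                       /* 64-bit argument: two data words */
+    if (first_word & TRC_PV_HYPERCALL_V2_ARG_32(i))
+        return 1;                       /* 32-bit argument: one data word */
+    return 0;                           /* argument not present in record */
+}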
+
+#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1)
+#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2)
+#define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3)
+#define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4)
+#define TRC_SHADOW_MMIO (TRC_SHADOW + 5)
+#define TRC_SHADOW_FIXUP (TRC_SHADOW + 6)
+#define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7)
+#define TRC_SHADOW_EMULATE (TRC_SHADOW + 8)
+#define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9)
+#define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10)
+#define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11)
+#define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12)
+#define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13)
+#define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14)
+#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15)
+
+/* trace events per subclass */
+#define TRC_HVM_NESTEDFLAG (0x400)
+#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
+#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
+#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
+#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
+#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
+#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
+#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
+#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
+#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
+#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
+#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
+#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
+#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
+#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
+#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
+#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
+#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
+#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
+#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
+#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
+#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
+#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
+#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
+#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
+#define TRC_HVM_IOPORT_READ (TRC_HVM_HANDLER + 0x16)
+#define TRC_HVM_IOMEM_READ (TRC_HVM_HANDLER + 0x17)
+#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
+#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
+#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
+#define TRC_HVM_RDTSC (TRC_HVM_HANDLER + 0x1a)
+#define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21)
+#define TRC_HVM_REALMODE_EMULATE (TRC_HVM_HANDLER + 0x22)
+#define TRC_HVM_TRAP (TRC_HVM_HANDLER + 0x23)
+#define TRC_HVM_TRAP_DEBUG (TRC_HVM_HANDLER + 0x24)
+#define TRC_HVM_VLAPIC (TRC_HVM_HANDLER + 0x25)
+#define TRC_HVM_XCR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x26)
+#define TRC_HVM_XCR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x27)
+
+#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
+#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
+
+/* Trace events for emulated devices */
+#define TRC_HVM_EMUL_HPET_START_TIMER (TRC_HVM_EMUL + 0x1)
+#define TRC_HVM_EMUL_PIT_START_TIMER (TRC_HVM_EMUL + 0x2)
+#define TRC_HVM_EMUL_RTC_START_TIMER (TRC_HVM_EMUL + 0x3)
+#define TRC_HVM_EMUL_LAPIC_START_TIMER (TRC_HVM_EMUL + 0x4)
+#define TRC_HVM_EMUL_HPET_STOP_TIMER (TRC_HVM_EMUL + 0x5)
+#define TRC_HVM_EMUL_PIT_STOP_TIMER (TRC_HVM_EMUL + 0x6)
+#define TRC_HVM_EMUL_RTC_STOP_TIMER (TRC_HVM_EMUL + 0x7)
+#define TRC_HVM_EMUL_LAPIC_STOP_TIMER (TRC_HVM_EMUL + 0x8)
+#define TRC_HVM_EMUL_PIT_TIMER_CB (TRC_HVM_EMUL + 0x9)
+#define TRC_HVM_EMUL_LAPIC_TIMER_CB (TRC_HVM_EMUL + 0xA)
+#define TRC_HVM_EMUL_PIC_INT_OUTPUT (TRC_HVM_EMUL + 0xB)
+#define TRC_HVM_EMUL_PIC_KICK (TRC_HVM_EMUL + 0xC)
+#define TRC_HVM_EMUL_PIC_INTACK (TRC_HVM_EMUL + 0xD)
+#define TRC_HVM_EMUL_PIC_POSEDGE (TRC_HVM_EMUL + 0xE)
+#define TRC_HVM_EMUL_PIC_NEGEDGE (TRC_HVM_EMUL + 0xF)
+#define TRC_HVM_EMUL_PIC_PEND_IRQ_CALL (TRC_HVM_EMUL + 0x10)
+#define TRC_HVM_EMUL_LAPIC_PIC_INTR (TRC_HVM_EMUL + 0x11)
+
+/* Trace events for the power management (TRC_HW_PM) class */
+#define TRC_PM_FREQ_CHANGE (TRC_HW_PM + 0x01)
+#define TRC_PM_IDLE_ENTRY (TRC_HW_PM + 0x02)
+#define TRC_PM_IDLE_EXIT (TRC_HW_PM + 0x03)
+
+/* Trace events for IRQs */
+#define TRC_HW_IRQ_MOVE_CLEANUP_DELAY (TRC_HW_IRQ + 0x1)
+#define TRC_HW_IRQ_MOVE_CLEANUP (TRC_HW_IRQ + 0x2)
+#define TRC_HW_IRQ_BIND_VECTOR (TRC_HW_IRQ + 0x3)
+#define TRC_HW_IRQ_CLEAR_VECTOR (TRC_HW_IRQ + 0x4)
+#define TRC_HW_IRQ_MOVE_FINISH (TRC_HW_IRQ + 0x5)
+#define TRC_HW_IRQ_ASSIGN_VECTOR (TRC_HW_IRQ + 0x6)
+#define TRC_HW_IRQ_UNMAPPED_VECTOR (TRC_HW_IRQ + 0x7)
+#define TRC_HW_IRQ_HANDLED (TRC_HW_IRQ + 0x8)
+
+/*
+ * Event Flags
+ *
+ * Some events (e.g., TRC_PV_TRAP and TRC_HVM_IOMEM_READ) have multiple
+ * record formats. These event flags distinguish between the
+ * different formats.
+ */
+#define TRC_64_FLAG 0x100 /* Addresses are 64 bits (instead of 32 bits) */
+
+/* This structure represents a single trace buffer record. */
+struct t_rec {
+ uint32_t event:28;
+ uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */
+ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */
+ union {
+ struct {
+ uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */
+ uint32_t extra_u32[7]; /* event data items */
+ } cycles;
+ struct {
+ uint32_t extra_u32[7]; /* event data items */
+ } nocycles;
+ } u;
+};
+
+/*
+ * This structure contains the metadata for a single trace buffer. The cons
+ * and prod fields index into the array of struct t_rec records that follows
+ * this header.
+ */
+struct t_buf {
+ /* Assume the data buffer size is X. X is generally not a power of 2.
+ * CONS and PROD are incremented modulo (2*X):
+ * 0 <= cons < 2*X
+ * 0 <= prod < 2*X
+ * This is done because addition modulo X breaks at 2^32 when X is not a
+ * power of 2:
+ * (((2^32 - 1) % X) + 1) % X != (2^32) % X
+ */
+ uint32_t cons; /* Offset of next item to be consumed by control tools. */
+ uint32_t prod; /* Offset of next item to be produced by Xen. */
+ /* Records follow immediately after the meta-data header. */
+};
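+
+/*
+ * Illustrative sketch, not part of the interface: converting a free-running
+ * 'cons' or 'prod' counter, which runs modulo 2*X as explained above, into
+ * a byte offset within a data area of X bytes.
+ */
+static inline uint32_t example_tbuf_offset(uint32_t idx, uint32_t data_size)
+{
+    /* Equivalent to idx % data_size, given 0 <= idx < 2 * data_size. */
+    return (idx >= data_size) ? idx - data_size : idx;
+}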
+
+/* Structure used to pass the MFNs of the trace buffers back to trace consumers.
+ * Each offset is an offset into the mapped structure where the MFN list is held.
+ * MFNs will be at ((unsigned long *)(t_info)) + t_info->mfn_offset[cpu].
+ */
+struct t_info {
+ uint16_t tbuf_size; /* Size in pages of each trace buffer */
+ uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */
+ /* MFN lists immediately after the header */
+};
+
+#endif /* __XEN_PUBLIC_TRACE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/vcpu.h b/include/hw/xen/interface/vcpu.h
new file mode 100644
index 0000000000..81a3b3a743
--- /dev/null
+++ b/include/hw/xen/interface/vcpu.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU initialisation, query, and hotplug.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ * long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
+ * @cmd == VCPUOP_??? (VCPU operation).
+ * @vcpuid == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Initialise a VCPU. Each VCPU can be initialised only once. A
+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
+ * structure containing the initial state for the VCPU. For x86
+ * HVM based guests this is a pointer to a vcpu_hvm_context
+ * structure.
+ */
+#define VCPUOP_initialise 0
+
+/*
+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
+ * if the VCPU has not been initialised (VCPUOP_initialise).
+ */
+#define VCPUOP_up 1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ * 1. This operation may return, and VCPUOP_is_up may return false, before the
+ * VCPU stops running (i.e., the command is asynchronous). It is a good
+ * idea to ensure that the VCPU has entered a non-critical loop before
+ * bringing it down. Alternatively, this operation is guaranteed
+ * synchronous if invoked by the VCPU itself.
+ * 2. After a VCPU is initialised, there is currently no way to drop all its
+ * references to domain memory. Even a VCPU that is down still holds
+ * memory references via its pagetable base pointer and GDT. It is good
+ * practice to move a VCPU onto an 'idle' or default page table, LDT and
+ * GDT before bringing it down.
+ */
+#define VCPUOP_down 2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up 3
+
+/*
+ * Return information about the state and running time of a VCPU.
+ * @extra_arg == pointer to vcpu_runstate_info structure.
+ */
+#define VCPUOP_get_runstate_info 4
+struct vcpu_runstate_info {
+ /* VCPU's current state (RUNSTATE_*). */
+ int state;
+ /* When was current state entered (system time, ns)? */
+ uint64_t state_entry_time;
+ /*
+ * Update indicator set in state_entry_time:
+ * When activated via VMASST_TYPE_runstate_update_flag, set during
+ * updates in guest memory mapped copy of vcpu_runstate_info.
+ */
+#define XEN_RUNSTATE_UPDATE (xen_mk_ullong(1) << 63)
+ /*
+ * Time spent in each RUNSTATE_* (ns). The sum of these times is
+ * guaranteed not to drift from system time.
+ */
+ uint64_t time[4];
+};
+typedef struct vcpu_runstate_info vcpu_runstate_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
+
+/* VCPU is currently running on a physical CPU. */
+#define RUNSTATE_running 0
+
+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
+#define RUNSTATE_runnable 1
+
+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
+#define RUNSTATE_blocked 2
+
+/*
+ * VCPU is not runnable, but it is not blocked.
+ * This is a 'catch all' state for things like hotplug and pauses by the
+ * system administrator (or for critical sections in the hypervisor).
+ * RUNSTATE_blocked dominates this state (it is the preferred state).
+ */
+#define RUNSTATE_offline 3
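+
+/*
+ * Illustrative sketch, assuming a guest-provided HYPERVISOR_vcpu_op()
+ * wrapper matching the prototype at the top of this file ('vcpu_id' is the
+ * caller's choice):
+ *
+ *     struct vcpu_runstate_info ri;
+ *     bool runnable = false;
+ *     if (HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, vcpu_id, &ri) == 0)
+ *         runnable = (ri.state == RUNSTATE_runnable);
+ */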
+
+/*
+ * Register a shared memory area from which the guest may obtain its own
+ * runstate information without needing to execute a hypercall.
+ * Notes:
+ * 1. The registered address may be a virtual address, a physical address or
+ * a guest handle, depending on the platform. A virtual address or guest
+ * handle should be registered on x86 systems.
+ * 2. Only one shared area may be registered per VCPU. The shared area is
+ * updated by the hypervisor each time the VCPU is scheduled. Thus
+ * runstate.state will always be RUNSTATE_running and
+ * runstate.state_entry_time will indicate the system time at which the
+ * VCPU was last scheduled to run.
+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
+ */
+#define VCPUOP_register_runstate_memory_area 5
+struct vcpu_register_runstate_memory_area {
+ union {
+ XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
+ struct vcpu_runstate_info *v;
+ uint64_t p;
+ } addr;
+};
+typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
+
+/*
+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
+ * which can be set via these commands. Periods smaller than one millisecond
+ * may not be supported.
+ */
+#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
+#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
+struct vcpu_set_periodic_timer {
+ uint64_t period_ns;
+};
+typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
+
+/*
+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
+ * timer which can be set via these commands.
+ */
+#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
+struct vcpu_set_singleshot_timer {
+ uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
+ uint32_t flags; /* VCPU_SSHOTTMR_??? */
+};
+typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
+
+/* Flags to VCPUOP_set_singleshot_timer. */
+ /* Require the timeout to be in the future (return -ETIME if it's passed). */
+#define _VCPU_SSHOTTMR_future (0)
+#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
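+
+/*
+ * Illustrative sketch: arm a one-shot timer one millisecond after 'now_ns'
+ * (an assumed system-time value in nanoseconds), letting the hypercall fail
+ * with -ETIME if that deadline has already passed:
+ *
+ *     struct vcpu_set_singleshot_timer ss = {
+ *         .timeout_abs_ns = now_ns + 1000000ULL,
+ *         .flags          = VCPU_SSHOTTMR_future,
+ *     };
+ *     rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu_id, &ss);
+ */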
+
+/*
+ * Register a memory location in the guest address space for the
+ * vcpu_info structure. This allows the guest to place the vcpu_info
+ * structure in a convenient place, such as in a per-cpu data area.
+ * The pointer need not be page aligned, but the structure must not
+ * cross a page boundary.
+ *
+ * This may be called only once per vcpu.
+ */
+#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
+struct vcpu_register_vcpu_info {
+ uint64_t mfn; /* mfn of page to place vcpu_info */
+ uint32_t offset; /* offset within page */
+ uint32_t rsvd; /* unused */
+};
+typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
+
+/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
+#define VCPUOP_send_nmi 11
+
+/*
+ * Get the physical ID information for a pinned vcpu's underlying physical
+ * processor. The physical ID information is architecture-specific.
+ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
+ * This command returns -EINVAL if it is not a valid operation for this VCPU.
+ */
+#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
+struct vcpu_get_physid {
+ uint64_t phys_id;
+};
+typedef struct vcpu_get_physid vcpu_get_physid_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
+#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
+#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
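+
+/*
+ * Illustrative sketch of decoding the result on x86 (HYPERVISOR_vcpu_op()
+ * wrapper assumed, as above):
+ *
+ *     struct vcpu_get_physid pid;
+ *     if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, vcpu_id, &pid) == 0) {
+ *         uint32_t apic_id = xen_vcpu_physid_to_x86_apicid(pid.phys_id);
+ *         uint32_t acpi_id = xen_vcpu_physid_to_x86_acpiid(pid.phys_id);
+ *     }
+ */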
+
+/*
+ * Register a memory location to get a secondary copy of the vcpu time
+ * parameters. The master copy still exists as part of the vcpu shared
+ * memory area, and this secondary copy is updated whenever the master copy
+ * is updated (and using the same versioning scheme for synchronisation).
+ *
+ * The intent is that this copy may be mapped (RO) into userspace so
+ * that usermode can compute system time using the time info and the
+ * tsc. Usermode will see an array of vcpu_time_info structures, one
+ * for each vcpu, and choose the right one by an existing mechanism
+ * which allows it to get the current vcpu number (such as via a
+ * segment limit). It can then apply the normal algorithm to compute
+ * system time from the tsc.
+ *
+ * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
+ */
+#define VCPUOP_register_vcpu_time_memory_area 13
+DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t);
+struct vcpu_register_time_memory_area {
+ union {
+ XEN_GUEST_HANDLE(vcpu_time_info_t) h;
+ struct vcpu_time_info *v;
+ uint64_t p;
+ } addr;
+};
+typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/version.h b/include/hw/xen/interface/version.h
new file mode 100644
index 0000000000..9c78b4f3b6
--- /dev/null
+++ b/include/hw/xen/interface/version.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * version.h
+ *
+ * Xen version, type, and compile information.
+ *
+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VERSION_H__
+#define __XEN_PUBLIC_VERSION_H__
+
+#include "xen.h"
+
+/* NB. All ops return zero on success,
+ * except XENVER_{version,pagesize,build_id}. */
+
+/* arg == NULL; returns major:minor (16:16). */
+#define XENVER_version 0
+
+/* arg == xen_extraversion_t. */
+#define XENVER_extraversion 1
+typedef char xen_extraversion_t[16];
+#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
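+
+/*
+ * Illustrative sketch, assuming a guest-provided HYPERVISOR_xen_version()
+ * wrapper for the xen_version hypercall:
+ *
+ *     long ver = HYPERVISOR_xen_version(XENVER_version, NULL);
+ *     unsigned int major = (ver >> 16) & 0xFFFF;
+ *     unsigned int minor = ver & 0xFFFF;
+ *     xen_extraversion_t extra;
+ *     HYPERVISOR_xen_version(XENVER_extraversion, extra);
+ */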
+
+/* arg == xen_compile_info_t. */
+#define XENVER_compile_info 2
+struct xen_compile_info {
+ char compiler[64];
+ char compile_by[16];
+ char compile_domain[32];
+ char compile_date[32];
+};
+typedef struct xen_compile_info xen_compile_info_t;
+
+#define XENVER_capabilities 3
+typedef char xen_capabilities_info_t[1024];
+#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
+
+#define XENVER_changeset 4
+typedef char xen_changeset_info_t[64];
+#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
+
+#define XENVER_platform_parameters 5
+struct xen_platform_parameters {
+ xen_ulong_t virt_start;
+};
+typedef struct xen_platform_parameters xen_platform_parameters_t;
+
+#define XENVER_get_features 6
+struct xen_feature_info {
+ unsigned int submap_idx; /* IN: which 32-bit submap to return */
+ uint32_t submap; /* OUT: 32-bit submap */
+};
+typedef struct xen_feature_info xen_feature_info_t;
+
+/* Declares the features reported by XENVER_get_features. */
+#include "features.h"
+
+/* arg == NULL; returns host memory page size. */
+#define XENVER_pagesize 7
+
+/* arg == xen_domain_handle_t.
+ *
+ * The toolstack fills it out for guest consumption. It is intended to hold
+ * the UUID of the guest.
+ */
+#define XENVER_guest_handle 8
+
+#define XENVER_commandline 9
+typedef char xen_commandline_t[1024];
+
+/*
+ * Return value is the number of bytes written, or XEN_Exx on error.
+ * Calling with an empty parameter (len == 0) returns the size of build_id.
+ */
+#define XENVER_build_id 10
+struct xen_build_id {
+ uint32_t len; /* IN: size of buf[]. */
+ unsigned char buf[XEN_FLEX_ARRAY_DIM];
+ /* OUT: Variable length buffer with build_id. */
+};
+typedef struct xen_build_id xen_build_id_t;
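+
+/*
+ * Illustrative sketch of one plausible two-step usage, where the wrapper is
+ * assumed as above, 'empty' is taken to mean a zero-length buffer, and the
+ * second call returns the number of bytes written:
+ *
+ *     struct xen_build_id probe = { .len = 0 };
+ *     long sz = HYPERVISOR_xen_version(XENVER_build_id, &probe);
+ *     if (sz > 0) {
+ *         struct xen_build_id *bid = malloc(sizeof(*bid) + sz);
+ *         bid->len = sz;
+ *         sz = HYPERVISOR_xen_version(XENVER_build_id, bid);
+ *     }
+ */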
+
+#endif /* __XEN_PUBLIC_VERSION_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/xen-compat.h b/include/hw/xen/interface/xen-compat.h
new file mode 100644
index 0000000000..97fe698498
--- /dev/null
+++ b/include/hw/xen/interface/xen-compat.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen-compat.h
+ *
+ * Guest OS interface to Xen. Compatibility layer.
+ *
+ * Copyright (c) 2006, Christian Limpach
+ */
+
+#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
+#define __XEN_PUBLIC_XEN_COMPAT_H__
+
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040e00
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Xen is built with matching headers and implements the latest interface. */
+#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
+#elif !defined(__XEN_INTERFACE_VERSION__)
+/* Guests which do not specify a version get the legacy interface. */
+#define __XEN_INTERFACE_VERSION__ 0x00000000
+#endif
+
+#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
+#error "These header files do not support the requested interface version."
+#endif
+
+#define COMPAT_FLEX_ARRAY_DIM XEN_FLEX_ARRAY_DIM
+
+#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
diff --git a/include/hw/xen/interface/xen.h b/include/hw/xen/interface/xen.h
new file mode 100644
index 0000000000..920567e006
--- /dev/null
+++ b/include/hw/xen/interface/xen.h
@@ -0,0 +1,1032 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen.h
+ *
+ * Guest OS interface to Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_XEN_H__
+#define __XEN_PUBLIC_XEN_H__
+
+#include "xen-compat.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+#include "arch-x86/xen.h"
+#elif defined(__arm__) || defined (__aarch64__)
+#include "arch-arm.h"
+#else
+#error "Unsupported architecture"
+#endif
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+DEFINE_XEN_GUEST_HANDLE(char);
+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
+DEFINE_XEN_GUEST_HANDLE(int);
+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
+DEFINE_XEN_GUEST_HANDLE(long);
+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+#endif
+DEFINE_XEN_GUEST_HANDLE(void);
+
+DEFINE_XEN_GUEST_HANDLE(uint64_t);
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
+
+/* Define a variable length array (depends on compiler). */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#define XEN_FLEX_ARRAY_DIM
+#elif defined(__GNUC__)
+#define XEN_FLEX_ARRAY_DIM 0
+#else
+#define XEN_FLEX_ARRAY_DIM 1 /* variable size */
+#endif
+
+/* Turn a plain number into a C unsigned (long (long)) constant. */
+#define __xen_mk_uint(x) x ## U
+#define __xen_mk_ulong(x) x ## UL
+#ifndef __xen_mk_ullong
+# define __xen_mk_ullong(x) x ## ULL
+#endif
+#define xen_mk_uint(x) __xen_mk_uint(x)
+#define xen_mk_ulong(x) __xen_mk_ulong(x)
+#define xen_mk_ullong(x) __xen_mk_ullong(x)
+
+#else
+
+/* In assembly code we cannot use C numeric constant suffixes. */
+#define xen_mk_uint(x) x
+#define xen_mk_ulong(x) x
+#define xen_mk_ullong(x) x
+
+#endif
+
+/*
+ * HYPERCALLS
+ */
+
+/* `incontents 100 hcalls List of hypercalls
+ * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*()
+ */
+
+#define __HYPERVISOR_set_trap_table 0
+#define __HYPERVISOR_mmu_update 1
+#define __HYPERVISOR_set_gdt 2
+#define __HYPERVISOR_stack_switch 3
+#define __HYPERVISOR_set_callbacks 4
+#define __HYPERVISOR_fpu_taskswitch 5
+#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
+#define __HYPERVISOR_platform_op 7
+#define __HYPERVISOR_set_debugreg 8
+#define __HYPERVISOR_get_debugreg 9
+#define __HYPERVISOR_update_descriptor 10
+#define __HYPERVISOR_memory_op 12
+#define __HYPERVISOR_multicall 13
+#define __HYPERVISOR_update_va_mapping 14
+#define __HYPERVISOR_set_timer_op 15
+#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
+#define __HYPERVISOR_xen_version 17
+#define __HYPERVISOR_console_io 18
+#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
+#define __HYPERVISOR_grant_table_op 20
+#define __HYPERVISOR_vm_assist 21
+#define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_iret 23 /* x86 only */
+#define __HYPERVISOR_vcpu_op 24
+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
+#define __HYPERVISOR_mmuext_op 26
+#define __HYPERVISOR_xsm_op 27
+#define __HYPERVISOR_nmi_op 28
+#define __HYPERVISOR_sched_op 29
+#define __HYPERVISOR_callback_op 30
+#define __HYPERVISOR_xenoprof_op 31
+#define __HYPERVISOR_event_channel_op 32
+#define __HYPERVISOR_physdev_op 33
+#define __HYPERVISOR_hvm_op 34
+#define __HYPERVISOR_sysctl 35
+#define __HYPERVISOR_domctl 36
+#define __HYPERVISOR_kexec_op 37
+#define __HYPERVISOR_tmem_op 38
+#define __HYPERVISOR_argo_op 39
+#define __HYPERVISOR_xenpmu_op 40
+#define __HYPERVISOR_dm_op 41
+#define __HYPERVISOR_hypfs_op 42
+
+/* Architecture-specific hypercall definitions. */
+#define __HYPERVISOR_arch_0 48
+#define __HYPERVISOR_arch_1 49
+#define __HYPERVISOR_arch_2 50
+#define __HYPERVISOR_arch_3 51
+#define __HYPERVISOR_arch_4 52
+#define __HYPERVISOR_arch_5 53
+#define __HYPERVISOR_arch_6 54
+#define __HYPERVISOR_arch_7 55
+
+/* ` } */
+
+/*
+ * HYPERCALL COMPATIBILITY.
+ */
+
+/* New sched_op hypercall introduced in 0x00030101. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030101
+#undef __HYPERVISOR_sched_op
+#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
+#endif
+
+/* New event-channel and physdev hypercalls introduced in 0x00030202. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030202
+#undef __HYPERVISOR_event_channel_op
+#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
+#undef __HYPERVISOR_physdev_op
+#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
+#endif
+
+/* New platform_op hypercall introduced in 0x00030204. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030204
+#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
+#endif
+
+/*
+ * VIRTUAL INTERRUPTS
+ *
+ * Virtual interrupts that a guest OS may receive from Xen.
+ *
+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
+ * The latter can be allocated only once per guest: they must initially be
+ * allocated to VCPU0 but can subsequently be re-bound.
+ */
+/* ` enum virq { */
+#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
+#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
+#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
+#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
+#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
+#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
+#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
+#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
+#define VIRQ_ARGO 11 /* G. Argo interdomain message notification */
+#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
+#define VIRQ_XENPMU 13 /* V. PMC interrupt */
+
+/* Architecture-specific VIRQ definitions. */
+#define VIRQ_ARCH_0 16
+#define VIRQ_ARCH_1 17
+#define VIRQ_ARCH_2 18
+#define VIRQ_ARCH_3 19
+#define VIRQ_ARCH_4 20
+#define VIRQ_ARCH_5 21
+#define VIRQ_ARCH_6 22
+#define VIRQ_ARCH_7 23
+/* ` } */
+
+#define NR_VIRQS 24
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmu_update(const struct mmu_update reqs[],
+ * ` unsigned count, unsigned *done_out,
+ * ` unsigned foreigndom)
+ * `
+ * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
+ * @count is the length of the above array.
+ * @pdone is an output parameter indicating number of completed operations
+ * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
+ * hypercall invocation. Can be DOMID_SELF.
+ * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
+ * in this hypercall invocation. The value of this field
+ * (x) encodes the PFD as follows:
+ * x == 0 => PFD == DOMID_SELF
+ * x != 0 => PFD == x - 1
+ *
+ * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
+ * -------------
+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
+ * Updates an entry in a page table belonging to PFD. If updating an L1 table,
+ * and the new table entry is valid/present, the mapped frame must belong to
+ * FD. If attempting to map an I/O page then the caller assumes the privilege
+ * of the FD.
+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
+ * ptr[:2] -- Machine address of the page-table entry to modify.
+ * val -- Value to write.
+ *
+ * There are also certain implicit requirements when using this hypercall. The
+ * pages that make up a pagetable must be mapped read-only in the guest.
+ * This prevents uncontrolled guest updates to the pagetable. Xen strictly
+ * enforces this, and will disallow any pagetable update which will end up
+ * mapping a pagetable page RW, and will disallow using any writable page as a
+ * pagetable. In practice it means that when constructing a page table for a
+ * process, thread, etc, we MUST be very diligent in following these rules:
+ * 1). Start with top-level page (PGD or in Xen language: L4). Fill out
+ * the entries.
+ * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
+ * or L2).
+ * 3). Start filling out the PTE table (L1) with the PTE entries. Once
+ * done, make sure to set each of those entries to RO (so writeable bit
+ * is unset). Once that has been completed, set the PMD (L2) for this
+ * PTE table as RO.
+ * 4). When completed with all of the PMD (L2) entries, and all of them have
+ * been set to RO, make sure to set RO the PUD (L3). Do the same
+ * operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
+ * 5). Now before you can use those pages (so setting the cr3), you MUST also
+ * pin them so that the hypervisor can verify the entries. This is done
+ * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
+ * number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
+ * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
+ * issued.
+ * For 32-bit guests, the L4 is not used (as there are fewer pagetable
+ * levels), so instead use L3.
+ * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
+ * hypercall. Also if so desired the OS can also try to write to the PTE
+ * and be trapped by the hypervisor (as the PTE entry is RO).
+ *
+ * To deallocate the pages, the operations are the reverse of the steps
+ * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
+ * pagetable MUST not be in use (meaning that the cr3 is not set to it).
+ *
+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
+ * Updates an entry in the machine->pseudo-physical mapping table.
+ * ptr[:2] -- Machine address within the frame whose mapping to modify.
+ * The frame must belong to the FD, if one is specified.
+ * val -- Value to write into the mapping entry.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
+ * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
+ * with those in @val.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_NO_TRANSLATE:
+ * As MMU_NORMAL_PT_UPDATE above, but @val is not translated through FD
+ * page tables.
+ *
+ * @val is usually the machine frame number along with some attributes.
+ * The attributes by default follow the architecture-defined bits. This means
+ * that if this is an x86-64 machine and the four-level page table layout is
+ * used, the layout of val is:
+ * - 63 if set means No execute (NX)
+ * - 46-13 the machine frame number
+ * - 12 available for guest
+ * - 11 available for guest
+ * - 10 available for guest
+ * - 9 available for guest
+ * - 8 global
+ * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
+ * - 6 dirty
+ * - 5 accessed
+ * - 4 page cached disabled
+ * - 3 page write through
+ * - 2 userspace accessible
+ * - 1 writeable
+ * - 0 present
+ *
+ * The one bit that does not fit with the default layout is PAGE_PSE
+ * (also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to
+ * HYPERVISOR_mmuext_op serve as the mechanism to set a pagetable entry to be
+ * 4MB (or 2MB) instead of using the PAGE_PSE bit.
+ *
+ * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen
+ * using it as the Page Attribute Table (PAT) bit - for details on it please
+ * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
+ * pages instead of using MTRRs.
+ *
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ * PAT4 PAT0
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
+ * +-----+-----+----+----+----+-----+----+----+
+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
+ * +-----+-----+----+----+----+-----+----+----+
+ *
+ * The lookup of this index table translates to looking up
+ * Bit 7, Bit 4, and Bit 3 of val entry:
+ *
+ * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
+ *
+ * If all bits are off, then we are using PAT0. If bit 3 is turned on,
+ * then we are using PAT1; if bit 3 and bit 4 are on, then PAT2, and so on.
+ *
+ * As you can see, the Linux PAT1 translates to PAT4 under Xen. This means
+ * that a guest which follows Linux's PAT setup and would like to set Write
+ * Combined on pages MUST use the PAT4 entry, i.e. Bit 7 (PAGE_PAT) must be
+ * set. For example, Linux only uses PAT0, PAT1, and PAT2 for caching, as:
+ *
+ * WB = none (so PAT0)
+ * WC = PWT (bit 3 on)
+ * UC = PWT | PCD (bit 3 and 4 are on).
+ *
+ * To make it work with Xen, it needs to translate the WC bit as follows:
+ *
+ * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
+ *
+ * And to translate back:
+ *
+ * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
+ */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA. */
+ /* val never translated. */
+
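+/*
+ * Illustrative sketch of the Write Combined translation described above: a
+ * Linux-style WC entry (PWT only, bit 3) is rewritten to use Xen's PAT4
+ * encoding (PAT, bit 7) before being handed to the hypercall (bit numbers
+ * are taken from the @val layout above):
+ *
+ *     if ((val & ((1ULL << 7) | (1ULL << 4) | (1ULL << 3))) == (1ULL << 3))
+ *         val = (val & ~(1ULL << 3)) | (1ULL << 7);
+ */
+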
+/*
+ * MMU EXTENDED OPERATIONS
+ *
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * ` unsigned int count,
+ * ` unsigned int *pdone,
+ * ` unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ *
+ * cmd: MMUEXT_(UN)PIN_*_TABLE
+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
+ * The frame must belong to the FD, if one is specified.
+ *
+ * cmd: MMUEXT_NEW_BASEPTR
+ * mfn: Machine frame number of new page-table base to install in MMU.
+ *
+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
+ * mfn: Machine frame number of new page-table base to install in MMU
+ * when in user space.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
+ * No additional arguments. Flushes local TLB.
+ *
+ * cmd: MMUEXT_INVLPG_LOCAL
+ * linear_addr: Linear address to be flushed from the local TLB.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_MULTI
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_INVLPG_MULTI
+ * linear_addr: Linear address to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_ALL
+ * No additional arguments. Flushes all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_INVLPG_ALL
+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE
+ * No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
+ *
+ * cmd: MMUEXT_SET_LDT
+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
+ * nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ *
+ * cmd: MMUEXT_[UN]MARK_SUPER
+ * mfn: Machine frame number of head of superpage to be [un]marked.
+ */
+/* ` enum mmuext_cmd { */
+#define MMUEXT_PIN_L1_TABLE 0
+#define MMUEXT_PIN_L2_TABLE 1
+#define MMUEXT_PIN_L3_TABLE 2
+#define MMUEXT_PIN_L4_TABLE 3
+#define MMUEXT_UNPIN_TABLE 4
+#define MMUEXT_NEW_BASEPTR 5
+#define MMUEXT_TLB_FLUSH_LOCAL 6
+#define MMUEXT_INVLPG_LOCAL 7
+#define MMUEXT_TLB_FLUSH_MULTI 8
+#define MMUEXT_INVLPG_MULTI 9
+#define MMUEXT_TLB_FLUSH_ALL 10
+#define MMUEXT_INVLPG_ALL 11
+#define MMUEXT_FLUSH_CACHE 12
+#define MMUEXT_SET_LDT 13
+#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
+#define MMUEXT_MARK_SUPER 19
+#define MMUEXT_UNMARK_SUPER 20
+/* ` } */
+
+#ifndef __ASSEMBLY__
+struct mmuext_op {
+ unsigned int cmd; /* => enum mmuext_cmd */
+ union {
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
+ xen_pfn_t mfn;
+ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
+ unsigned long linear_addr;
+ } arg1;
+ union {
+ /* SET_LDT */
+ unsigned int nr_ents;
+ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
+ XEN_GUEST_HANDLE(const_void) vcpumask;
+#else
+ const void *vcpumask;
+#endif
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
+ } arg2;
+};
+typedef struct mmuext_op mmuext_op_t;
+DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
+#endif
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val,
+ * ` enum uvm_flags flags)
+ * `
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val,
+ * ` enum uvm_flags flags,
+ * ` domid_t domid)
+ * `
+ * ` @va: The virtual address whose mapping we want to change
+ * ` @val: The new page table entry, must contain a machine address
+ * ` @flags: Control TLB flushes
+ */
+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
+/* ` enum uvm_flags { */
+#define UVMF_NONE (xen_mk_ulong(0)<<0) /* No flushing at all. */
+#define UVMF_TLB_FLUSH (xen_mk_ulong(1)<<0) /* Flush entire TLB(s). */
+#define UVMF_INVLPG (xen_mk_ulong(2)<<0) /* Flush only one entry. */
+#define UVMF_FLUSHTYPE_MASK (xen_mk_ulong(3)<<0)
+#define UVMF_MULTI (xen_mk_ulong(0)<<2) /* Flush subset of TLBs. */
+#define UVMF_LOCAL (xen_mk_ulong(0)<<2) /* Flush local TLB. */
+#define UVMF_ALL (xen_mk_ulong(1)<<2) /* Flush all TLBs. */
+/* ` } */
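+
+/*
+ * Illustrative sketch, assuming a guest-provided HYPERVISOR_update_va_mapping()
+ * wrapper matching the prototype above: map machine frame 'mfn' at 'va' as
+ * present and writeable (bits 0 and 1 of the PTE), flushing only that entry
+ * on the local TLB:
+ *
+ *     rc = HYPERVISOR_update_va_mapping(va, ((uint64_t)mfn << 12) | 0x3,
+ *                                       UVMF_INVLPG | UVMF_LOCAL);
+ */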
+
+/*
+ * ` int
+ * ` HYPERVISOR_console_io(unsigned int cmd,
+ * ` unsigned int count,
+ * ` char buffer[]);
+ *
+ * @cmd: Command (see below)
+ * @count: Size of the buffer to read/write
+ * @buffer: Pointer in the guest memory
+ *
+ * List of commands:
+ *
+ * * CONSOLEIO_write: Write the buffer to Xen console.
+ * For the hardware domain, all the characters in the buffer will
+ * be written. Characters will be printed directly to the console.
+ * For all the other domains, only the printable characters will be
+ * written. Characters may be buffered until a newline (i.e. '\n') is
+ * found.
+ * @return 0 on success, otherwise return an error code.
+ * * CONSOLEIO_read: Attempts to read up to @count characters from Xen
+ * console. The maximum buffer size (i.e. @count) supported is 2GB.
+ * @return the number of characters read on success, otherwise return
+ * an error code.
+ */
+#define CONSOLEIO_write 0
+#define CONSOLEIO_read 1
+
+/*
+ * Commands to HYPERVISOR_vm_assist().
+ */
+#define VMASST_CMD_enable 0
+#define VMASST_CMD_disable 1
+
+/* x86/32 guests: simulate full 4GB segment limits. */
+#define VMASST_TYPE_4gb_segments 0
+
+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
+#define VMASST_TYPE_4gb_segments_notify 1
+
+/*
+ * x86 guests: support writes to bottom-level PTEs.
+ * NB1. Page-directory entries cannot be written.
+ * NB2. Guest must continue to remove all writable mappings of PTEs.
+ */
+#define VMASST_TYPE_writable_pagetables 2
+
+/* x86/PAE guests: support PDPTs above 4GB. */
+#define VMASST_TYPE_pae_extended_cr3 3
+
+/*
+ * x86 guests: Sane behaviour for virtual iopl
+ * - virtual iopl updated from do_iret() hypercalls.
+ * - virtual iopl reported in bounce frames.
+ * - guest kernels assumed to be level 0 for the purpose of iopl checks.
+ */
+#define VMASST_TYPE_architectural_iopl 4
+
+/*
+ * All guests: activate update indicator in vcpu_runstate_info
+ * Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped
+ * vcpu_runstate_info during updates of the runstate information.
+ */
+#define VMASST_TYPE_runstate_update_flag 5
+
+/*
+ * x86/64 guests: strictly hide M2P from user mode.
+ * This allows the guest to control respective hypervisor behavior:
+ * - when not set, L4 tables get created with the respective slot blank,
+ * and whenever the L4 table gets used as a kernel one the missing
+ * mapping gets inserted,
+ * - when set, L4 tables get created with the respective slot initialized
+ * as before, and whenever the L4 table gets used as a user one the
+ * mapping gets zapped.
+ */
+#define VMASST_TYPE_m2p_strict 32
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+#define MAX_VMASST_TYPE 3
+#endif
+
+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
+#define DOMID_FIRST_RESERVED xen_mk_uint(0x7FF0)
+
+/* DOMID_SELF is used in certain contexts to refer to oneself. */
+#define DOMID_SELF xen_mk_uint(0x7FF0)
+
+/*
+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
+ * is useful to ensure that no mappings to the OS's own heap are accidentally
+ * installed. (e.g., in Linux this could cause havoc as reference counts
+ * aren't adjusted on the I/O-mapping code path).
+ * This only makes sense as HYPERVISOR_mmu_update()'s and
+ * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument. For
+ * HYPERVISOR_mmu_update() context it can be specified by any calling domain,
+ * otherwise it's only permitted if the caller is privileged.
+ */
+#define DOMID_IO xen_mk_uint(0x7FF1)
+
+/*
+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
+ * Xen's heap space (e.g., the machine_to_phys table).
+ * This only makes sense as
+ * - HYPERVISOR_mmu_update()'s, HYPERVISOR_mmuext_op()'s, or
+ * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument,
+ * - with XENMAPSPACE_gmfn_foreign,
+ * and is only permitted if the caller is privileged.
+ */
+#define DOMID_XEN xen_mk_uint(0x7FF2)
+
+/* DOMID_COW is used as the owner of sharable pages. */
+#define DOMID_COW xen_mk_uint(0x7FF3)
+
+/* DOMID_INVALID is used to identify pages with unknown owner. */
+#define DOMID_INVALID xen_mk_uint(0x7FF4)
+
+/* Idle domain. */
+#define DOMID_IDLE xen_mk_uint(0x7FFF)
+
+/* Mask for valid domain id values */
+#define DOMID_MASK xen_mk_uint(0x7FFF)
+
+#ifndef __ASSEMBLY__
+
+typedef uint16_t domid_t;
+
+/*
+ * Send an array of these to HYPERVISOR_mmu_update().
+ * NB. The fields are natural pointer/address size for this architecture.
+ */
+struct mmu_update {
+ uint64_t ptr; /* Machine address of PTE. */
+ uint64_t val; /* New contents of PTE. */
+};
+typedef struct mmu_update mmu_update_t;
+DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
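+
+/*
+ * Illustrative sketch, assuming a guest-provided HYPERVISOR_mmu_update()
+ * wrapper matching the prototype above: a single checked PTE write, where
+ * 'pte_machine_addr' is the machine address of the PTE to modify and
+ * 'new_pte' is the value to install:
+ *
+ *     struct mmu_update req = {
+ *         .ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE,
+ *         .val = new_pte,
+ *     };
+ *     unsigned int done;
+ *     rc = HYPERVISOR_mmu_update(&req, 1, &done, DOMID_SELF);
+ */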
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_multicall(multicall_entry_t call_list[],
+ * ` uint32_t nr_calls);
+ *
+ * NB. The fields are logically the natural register size for this
+ * architecture. In cases where xen_ulong_t is larger than this then
+ * any unused bits in the upper portion must be zero.
+ */
+struct multicall_entry {
+ xen_ulong_t op, result;
+ xen_ulong_t args[6];
+};
+typedef struct multicall_entry multicall_entry_t;
+DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+/*
+ * Event channel endpoints per domain (when using the 2-level ABI):
+ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
+ */
+#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
+#endif
+
+struct vcpu_time_info {
+ /*
+ * Updates to the following values are preceded and followed by an
+ * increment of 'version'. The guest can therefore detect updates by
+ * looking for changes to 'version'. If the least-significant bit of
+ * the version number is set then an update is in progress and the guest
+ * must wait to read a consistent set of values.
+ * The correct way to interact with the version number is similar to
+ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
+ */
+ uint32_t version;
+ uint32_t pad0;
+ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
+ uint64_t system_time; /* Time, in nanosecs, since boot. */
+ /*
+ * Current system time:
+ * system_time +
+ * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
+ * CPU frequency (Hz):
+ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
+ */
+ uint32_t tsc_to_system_mul;
+ int8_t tsc_shift;
+#if __XEN_INTERFACE_VERSION__ > 0x040600
+ uint8_t flags;
+ uint8_t pad1[2];
+#else
+ int8_t pad1[3];
+#endif
+}; /* 32 bytes */
+typedef struct vcpu_time_info vcpu_time_info_t;
+
+#define XEN_PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define XEN_PVCLOCK_GUEST_STOPPED (1 << 1)
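+
+/*
+ * Illustrative sketch of the version/seqlock protocol and the system-time
+ * formula above, where 'rdtsc()' and 'rmb()' are assumed guest primitives
+ * and 't' points at this VCPU's vcpu_time_info. A real implementation must
+ * use a widening 64x32-bit multiply so the scaling cannot overflow:
+ *
+ *     uint32_t ver;
+ *     uint64_t delta, now;
+ *     do {
+ *         ver = t->version;
+ *         rmb();
+ *         delta = rdtsc() - t->tsc_timestamp;
+ *         delta = (t->tsc_shift < 0) ? delta >> -t->tsc_shift
+ *                                    : delta << t->tsc_shift;
+ *         now = t->system_time + ((delta * t->tsc_to_system_mul) >> 32);
+ *         rmb();
+ *     } while ((ver & 1) || ver != t->version);
+ */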
+
+struct vcpu_info {
+ /*
+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+ * a pending notification for a particular VCPU. It is then cleared
+ * by the guest OS /before/ checking for pending work, thus avoiding
+ * a set-and-check race. Note that the mask is only accessed by Xen
+ * on the CPU that is currently hosting the VCPU. This means that the
+ * pending and mask flags can be updated by the guest without special
+ * synchronisation (i.e., no need for the x86 LOCK prefix).
+ * This may seem suboptimal because if the pending flag is set by
+ * a different CPU then an IPI may be scheduled even when the mask
+ * is set. However, note:
+ * 1. The task of 'interrupt holdoff' is covered by the per-event-
+ * channel mask bits. A 'noisy' event that is continually being
+ * triggered can be masked at source at this very precise
+ * granularity.
+ * 2. The main purpose of the per-VCPU mask is therefore to restrict
+ * reentrant execution: whether for concurrency control, or to
+ * prevent unbounded stack usage. Whatever the purpose, we expect
+ * that the mask will be asserted only for short periods at a time,
+ * and so the likelihood of a 'spurious' IPI is suitably small.
+ * The mask is read before making an event upcall to the guest: a
+ * non-zero mask therefore guarantees that the VCPU will not receive
+ * an upcall activation. The mask is cleared when the VCPU requests
+ * to block: this avoids wakeup-waiting races.
+ */
+ uint8_t evtchn_upcall_pending;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+ uint8_t evtchn_upcall_mask;
+#else /* XEN_HAVE_PV_UPCALL_MASK */
+ uint8_t pad0;
+#endif /* XEN_HAVE_PV_UPCALL_MASK */
+ xen_ulong_t evtchn_pending_sel;
+ struct arch_vcpu_info arch;
+ vcpu_time_info_t time;
+}; /* 64 bytes (x86) */
+#ifndef __XEN__
+typedef struct vcpu_info vcpu_info_t;
+#endif
+
+/*
+ * `incontents 200 startofday_shared Start-of-day shared data structure
+ * Xen/kernel shared data -- pointer provided in start_info.
+ *
+ * This structure is defined to be both smaller than a page, and the
+ * only data on the shared page, but may vary in actual size even within
+ * compatible Xen versions; guests should not rely on the size
+ * of this structure remaining constant.
+ */
+struct shared_info {
+ struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
+
+ /*
+ * A domain can create "event channels" on which it can send and receive
+ * asynchronous event notifications. There are three classes of event that
+ * are delivered by this mechanism:
+ * 1. Bi-directional inter- and intra-domain connections. Domains must
+ * arrange out-of-band to set up a connection (usually by allocating
+ * an unbound 'listener' port and advertising that via a storage service
+ * such as xenstore).
+ * 2. Physical interrupts. A domain with suitable hardware-access
+ * privileges can bind an event-channel port to a physical interrupt
+ * source.
+ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
+ * port to a virtual interrupt source, such as the virtual-timer
+ * device or the emergency console.
+ *
+ * Event channels are addressed by a "port index". Each channel is
+ * associated with two bits of information:
+ * 1. PENDING -- notifies the domain that there is a pending notification
+ * to be processed. This bit is cleared by the guest.
+ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
+ * will cause an asynchronous upcall to be scheduled. This bit is only
+ * updated by the guest. It is read-only within Xen. If a channel
+ * becomes pending while the channel is masked then the 'edge' is lost
+ * (i.e., when the channel is unmasked, the guest must manually handle
+ * pending notifications as no upcall will be scheduled by Xen).
+ *
+ * To expedite scanning of pending notifications, any 0->1 pending
+ * transition on an unmasked channel causes a corresponding bit in a
+ * per-vcpu selector word to be set. Each bit in the selector covers a
+ * 'C long' in the PENDING bitfield array.
+ */
+ xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
+ xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
+
+ /*
+ * Wallclock time: updated by control software or RTC emulation.
+ * Guests should base their gettimeofday() syscall on this
+ * wallclock-base value.
+ * The values of wc_sec and wc_nsec are offsets from the Unix epoch
+ * adjusted by the domain's 'time offset' (in seconds) as set either
+ * by XEN_DOMCTL_settimeoffset, or adjusted via a guest write to the
+ * emulated RTC.
+ */
+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
+ uint32_t wc_sec;
+ uint32_t wc_nsec;
+#if !defined(__i386__)
+ uint32_t wc_sec_hi;
+# define xen_wc_sec_hi wc_sec_hi
+#elif !defined(__XEN__) && !defined(__XEN_TOOLS__)
+# define xen_wc_sec_hi arch.wc_sec_hi
+#endif
+
+ struct arch_shared_info arch;
+
+};
+#ifndef __XEN__
+typedef struct shared_info shared_info_t;
+#endif
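+
+/*
+ * Illustrative sketch of the two-level scan described above, where 'v' is
+ * the current VCPU's vcpu_info, 's' is the shared_info, and xchg(),
+ * first_bit() and handle_port() are assumed guest helpers (handle_port() is
+ * expected to clear the PENDING bit for the port it services):
+ *
+ *     xen_ulong_t sel = xchg(&v->evtchn_pending_sel, 0);
+ *     while (sel) {
+ *         unsigned int i = first_bit(sel);
+ *         sel &= ~((xen_ulong_t)1 << i);
+ *         xen_ulong_t pend = s->evtchn_pending[i] & ~s->evtchn_mask[i];
+ *         while (pend) {
+ *             unsigned int j = first_bit(pend);
+ *             pend &= ~((xen_ulong_t)1 << j);
+ *             handle_port(i * sizeof(xen_ulong_t) * 8 + j);
+ *         }
+ *     }
+ */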
+
+/*
+ * `incontents 200 startofday Start-of-day memory layout
+ *
+ * 1. The domain is started within a contiguous virtual-memory region.
+ * 2. The contiguous region ends on an aligned 4MB boundary.
+ * 3. This is the order of bootstrap elements in the initial virtual region:
+ * a. relocated kernel image
+ * b. initial ram disk [mod_start, mod_len]
+ * (may be omitted)
+ * c. list of allocated page frames [mfn_list, nr_pages]
+ * (unless relocated due to XEN_ELFNOTE_INIT_P2M)
+ * d. start_info_t structure [register rSI (x86)]
+ * in case of dom0 this page contains the console info, too
+ * e. unless dom0: xenstore ring page
+ * f. unless dom0: console ring page
+ * g. bootstrap page tables [pt_base and CR3 (x86)]
+ * h. bootstrap stack [register ESP (x86)]
+ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
+ * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * layout for the domain. In particular, the bootstrap virtual-memory
+ * region is a 1:1 mapping to the first section of the pseudo-physical map.
+ * 6. All bootstrap elements are mapped read-writable for the guest OS. The
+ * only exception is the bootstrap page table, which is mapped read-only.
+ * 7. There is guaranteed to be at least 512kB padding after the final
+ * bootstrap element. If necessary, the bootstrap virtual region is
+ * extended by an extra 4MB to ensure this.
+ *
+ * Note: Prior to 25833:bb85bbccb1c9 ("x86/32-on-64 adjust Dom0 initial page
+ * table layout") a bug caused the pt_base (3.g above) and cr3 to not point
+ * to the start of the guest page tables (it was offset by two pages).
+ * This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU
+ * or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got
+ * allocated in the order: 'first L1', 'first L2', 'first L3', so the page
+ * table base is offset back by two pages. The initial domain, if it is
+ * 32-bit and runs under a 64-bit hypervisor, should _NOT_ use two of the
+ * pages preceding pt_base and mark them as reserved/unused.
+ */
+#ifdef XEN_HAVE_PV_GUEST_ENTRY
+struct start_info {
+ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
+ char magic[32]; /* "xen-<version>-<platform>". */
+ unsigned long nr_pages; /* Total pages allocated to this domain. */
+ unsigned long shared_info; /* MACHINE address of shared info struct. */
+ uint32_t flags; /* SIF_xxx flags. */
+ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
+ uint32_t store_evtchn; /* Event channel for store communication. */
+ union {
+ struct {
+ xen_pfn_t mfn; /* MACHINE page number of console page. */
+ uint32_t evtchn; /* Event channel for console page. */
+ } domU;
+ struct {
+ uint32_t info_off; /* Offset of console_info struct. */
+ uint32_t info_size; /* Size of console_info struct from start.*/
+ } dom0;
+ } console;
+ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
+ unsigned long pt_base; /* VIRTUAL address of page directory. */
+ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
+ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module */
+ /* (PFN of pre-loaded module if */
+ /* SIF_MOD_START_PFN set in flags). */
+ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
+#define MAX_GUEST_CMDLINE 1024
+ int8_t cmd_line[MAX_GUEST_CMDLINE];
+ /* The pfn range here covers both page table and p->m table frames. */
+ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
+ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */
+};
+typedef struct start_info start_info_t;
+
+/* New console union for dom0 introduced in 0x00030203. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030203
+#define console_mfn console.domU.mfn
+#define console_evtchn console.domU.evtchn
+#endif
+#endif /* XEN_HAVE_PV_GUEST_ENTRY */
+
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
+ /* P->M making the 3 level tree obsolete? */
+#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
+
+/*
+ * A multiboot module is a package containing modules very similar to a
+ * multiboot module array. The only differences are:
+ * - the array of module descriptors is by convention simply at the beginning
+ * of the multiboot module,
+ * - addresses in the module descriptors are based on the beginning of the
+ * multiboot module,
+ * - the number of modules is determined by a termination descriptor that has
+ * mod_start == 0.
+ *
+ * This permits both building it statically and referencing it in a
+ * configuration file, and lets the PV guest easily rebase the addresses to
+ * virtual addresses while at the same time counting the number of modules.
+ */
+struct xen_multiboot_mod_list
+{
+ /* Address of first byte of the module */
+ uint32_t mod_start;
+ /* Address of last byte of the module (inclusive) */
+ uint32_t mod_end;
+ /* Address of zero-terminated command line */
+ uint32_t cmdline;
+ /* Unused, must be zero */
+ uint32_t pad;
+};
+/*
+ * `incontents 200 startofday_dom0_console Dom0_console
+ *
+ * The console structure in start_info.console.dom0
+ *
+ * This structure includes a variety of information required to
+ * have a working VGA/VESA console.
+ */
+typedef struct dom0_vga_console_info {
+ uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
+#define XEN_VGATYPE_TEXT_MODE_3 0x03
+#define XEN_VGATYPE_VESA_LFB 0x23
+#define XEN_VGATYPE_EFI_LFB 0x70
+
+ union {
+ struct {
+ /* Font height, in pixels. */
+ uint16_t font_height;
+ /* Cursor location (column, row). */
+ uint16_t cursor_x, cursor_y;
+ /* Number of rows and columns (dimensions in characters). */
+ uint16_t rows, columns;
+ } text_mode_3;
+
+ struct {
+ /* Width and height, in pixels. */
+ uint16_t width, height;
+ /* Bytes per scan line. */
+ uint16_t bytes_per_line;
+ /* Bits per pixel. */
+ uint16_t bits_per_pixel;
+ /* LFB physical address, and size (in units of 64kB). */
+ uint32_t lfb_base;
+ uint32_t lfb_size;
+ /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
+ uint8_t red_pos, red_size;
+ uint8_t green_pos, green_size;
+ uint8_t blue_pos, blue_size;
+ uint8_t rsvd_pos, rsvd_size;
+#if __XEN_INTERFACE_VERSION__ >= 0x00030206
+ /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
+ uint32_t gbl_caps;
+ /* Mode attributes (offset 0x0, VESA command 0x4f01). */
+ uint16_t mode_attrs;
+ uint16_t pad;
+#endif
+#if __XEN_INTERFACE_VERSION__ >= 0x00040d00
+ /* high 32 bits of lfb_base */
+ uint32_t ext_lfb_base;
+#endif
+ } vesa_lfb;
+ } u;
+} dom0_vga_console_info_t;
+#define xen_vga_console_info dom0_vga_console_info
+#define xen_vga_console_info_t dom0_vga_console_info_t
+
+typedef uint8_t xen_domain_handle_t[16];
+
+__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
+__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
+__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
+__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
+
+typedef struct {
+ uint8_t a[16];
+} xen_uuid_t;
+
+/*
+ * XEN_DEFINE_UUID(0x00112233, 0x4455, 0x6677, 0x8899,
+ * 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)
+ * will construct UUID 00112233-4455-6677-8899-aabbccddeeff presented as
+ * {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ * 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
+ *
+ * NB: This is compatible with Linux kernel and with libuuid, but it is not
+ * compatible with Microsoft, as they use mixed-endian encoding (some
+ * components are little-endian, some are big-endian).
+ */
+#define XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ {{((a) >> 24) & 0xFF, ((a) >> 16) & 0xFF, \
+ ((a) >> 8) & 0xFF, ((a) >> 0) & 0xFF, \
+ ((b) >> 8) & 0xFF, ((b) >> 0) & 0xFF, \
+ ((c) >> 8) & 0xFF, ((c) >> 0) & 0xFF, \
+ ((d) >> 8) & 0xFF, ((d) >> 0) & 0xFF, \
+ e1, e2, e3, e4, e5, e6}}
+
+#if defined(__STDC_VERSION__) ? __STDC_VERSION__ >= 199901L : defined(__GNUC__)
+#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ ((xen_uuid_t)XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6))
+#else
+#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6)
+#endif /* __STDC_VERSION__ / __GNUC__ */
+
+#endif /* !__ASSEMBLY__ */
+
+/* Default definitions for macros used by domctl/sysctl. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#ifndef int64_aligned_t
+#define int64_aligned_t int64_t
+#endif
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+#ifndef XEN_GUEST_HANDLE_64
+#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
+#endif
+
+#ifndef __ASSEMBLY__
+struct xenctl_bitmap {
+ XEN_GUEST_HANDLE_64(uint8) bitmap;
+ uint32_t nr_bits;
+};
+typedef struct xenctl_bitmap xenctl_bitmap_t;
+#endif
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#endif /* __XEN_PUBLIC_XEN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/start_info.h b/include/hw/xen/start_info.h
new file mode 100644
index 0000000000..6ed4877794
--- /dev/null
+++ b/include/hw/xen/start_info.h
@@ -0,0 +1,146 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2016, Citrix Systems, Inc.
+ */
+
+#ifndef XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H
+#define XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H
+
+/*
+ * Start of day structure passed to PVH guests and to HVM guests in %ebx.
+ *
+ * NOTE: nothing will be loaded at physical address 0, so a 0 value in any
+ * of the address fields should be treated as not present.
+ *
+ * 0 +----------------+
+ * | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE
+ * | | ("xEn3" with the 0x80 bit of the "E" set).
+ * 4 +----------------+
+ * | version | Version of this structure. Current version is 1. New
+ * | | versions are guaranteed to be backwards-compatible.
+ * 8 +----------------+
+ * | flags | SIF_xxx flags.
+ * 12 +----------------+
+ * | nr_modules | Number of modules passed to the kernel.
+ * 16 +----------------+
+ * | modlist_paddr | Physical address of an array of modules
+ * | | (layout of the structure below).
+ * 24 +----------------+
+ * | cmdline_paddr | Physical address of the command line,
+ * | | a zero-terminated ASCII string.
+ * 32 +----------------+
+ * | rsdp_paddr | Physical address of the RSDP ACPI data structure.
+ * 40 +----------------+
+ * | memmap_paddr | Physical address of the (optional) memory map. Only
+ * | | present in version 1 and newer of the structure.
+ * 48 +----------------+
+ * | memmap_entries | Number of entries in the memory map table. Only
+ * | | present in version 1 and newer of the structure.
+ * | | Zero if there is no memory map being provided.
+ * 52 +----------------+
+ * | reserved | Version 1 and newer only.
+ * 56 +----------------+
+ *
+ * The layout of each entry in the module structure is the following:
+ *
+ * 0 +----------------+
+ * | paddr | Physical address of the module.
+ * 8 +----------------+
+ * | size | Size of the module in bytes.
+ * 16 +----------------+
+ * | cmdline_paddr | Physical address of the command line,
+ * | | a zero-terminated ASCII string.
+ * 24 +----------------+
+ * | reserved |
+ * 32 +----------------+
+ *
+ * The layout of each entry in the memory map table is as follows:
+ *
+ * 0 +----------------+
+ * | addr | Base address
+ * 8 +----------------+
+ * | size | Size of mapping in bytes
+ * 16 +----------------+
+ * | type | Type of mapping as defined between the hypervisor
+ * | | and guest it's starting. E820_TYPE_xxx, for example.
+ * 20 +----------------|
+ * | reserved |
+ * 24 +----------------+
+ *
+ * Addresses and sizes are always 64-bit little-endian unsigned integers.
+ *
+ * NB: Xen on x86 will always try to place all the data below the 4GiB
+ * boundary.
+ *
+ * Version numbers of the hvm_start_info structure have evolved like this:
+ *
+ * Version 0:
+ *
+ * Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of
+ * padding) to the end of the hvm_start_info struct. These new
+ * fields can be used to pass a memory map to the guest. The
+ * memory map is optional and so guests that understand version 1
+ * of the structure must check that memmap_entries is non-zero
+ * before trying to read the memory map.
+ */
+#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
+
+/*
+ * C representation of the x86/HVM start info layout.
+ *
+ * The canonical definition of this layout is above, this is just a way to
+ * represent the layout described there using C types.
+ */
+struct hvm_start_info {
+ uint32_t magic; /* Contains the magic value 0x336ec578 */
+ /* ("xEn3" with the 0x80 bit of the "E" set).*/
+ uint32_t version; /* Version of this structure. */
+ uint32_t flags; /* SIF_xxx flags. */
+ uint32_t nr_modules; /* Number of modules passed to the kernel. */
+ uint64_t modlist_paddr; /* Physical address of an array of */
+ /* hvm_modlist_entry. */
+ uint64_t cmdline_paddr; /* Physical address of the command line. */
+ uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */
+ /* structure. */
+ uint64_t memmap_paddr; /* Physical address of an array of */
+ /* hvm_memmap_table_entry. Only present in */
+ /* version 1 and newer of the structure */
+ uint32_t memmap_entries; /* Number of entries in the memmap table. */
+ /* Only present in version 1 and newer of */
+ /* the structure. Value will be zero if */
+ /* there is no memory map being provided. */
+ uint32_t reserved;
+};
+
+struct hvm_modlist_entry {
+ uint64_t paddr; /* Physical address of the module. */
+ uint64_t size; /* Size of the module in bytes. */
+ uint64_t cmdline_paddr; /* Physical address of the command line. */
+ uint64_t reserved;
+};
+
+struct hvm_memmap_table_entry {
+ uint64_t addr; /* Base address of the memory region */
+ uint64_t size; /* Size of the memory region in bytes */
+ uint32_t type; /* Mapping type */
+ uint32_t reserved;
+};
+
+#endif /* XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H */
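The layout above maps directly onto the C structs that follow. As a minimal illustration of how a consumer might walk it, the sketch below checks the magic and version fields and only touches the memory map when the structure says it is there; parse_start_info() and the mapping steps are hypothetical, only the types and XEN_HVM_START_MAGIC_VALUE come from this header.

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical consumer of the start-of-day structure defined above. */
    static bool parse_start_info(const struct hvm_start_info *si)
    {
        if (si->magic != XEN_HVM_START_MAGIC_VALUE) {
            return false;               /* not started via the PVH/HVM path */
        }
        if (si->cmdline_paddr != 0) {
            /* map and read the zero-terminated command line here */
        }
        /* The memory map exists only in version 1+, and only if non-empty. */
        if (si->version >= 1 && si->memmap_entries != 0) {
            /* map memmap_entries * sizeof(struct hvm_memmap_table_entry)
             * bytes at memmap_paddr and walk the entries */
        }
        return true;
    }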
diff --git a/include/hw/xen/xen-backend.h b/include/hw/xen/xen-backend.h
index 010d712638..0f01631ae7 100644
--- a/include/hw/xen/xen-backend.h
+++ b/include/hw/xen/xen-backend.h
@@ -31,7 +31,9 @@ void xen_backend_set_device(XenBackendInstance *backend,
XenDevice *xen_backend_get_device(XenBackendInstance *backend);
void xen_backend_register(const XenBackendInfo *info);
+const char **xen_backend_get_types(unsigned int *nr);
+bool xen_backend_exists(const char *type, const char *name);
void xen_backend_device_create(XenBus *xenbus, const char *type,
const char *name, QDict *opts, Error **errp);
bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp);
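The two declarations added here let callers enumerate the registered backend types and probe whether a named device already exists. A hedged usage sketch, with a made-up vdev name and the assumption that the returned array is owned by the caller:

    #include "qemu/osdep.h"
    #include "hw/xen/xen-backend.h"

    /* Illustrative only: scan every registered type for a "768" device. */
    static bool any_backend_for_vdev_768(void)
    {
        unsigned int nr, i;
        const char **types = xen_backend_get_types(&nr);
        bool found = false;

        for (i = 0; i < nr; i++) {
            if (xen_backend_exists(types[i], "768")) {  /* "768" is made up */
                found = true;
            }
        }
        g_free(types);  /* assumption: the array is allocated for the caller */
        return found;
    }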
diff --git a/include/hw/xen/xen-block.h b/include/hw/xen/xen-block.h
index 11d351b4b3..d692ea7580 100644
--- a/include/hw/xen/xen-block.h
+++ b/include/hw/xen/xen-block.h
@@ -12,6 +12,7 @@
#include "hw/block/block.h"
#include "hw/block/dataplane/xen-block.h"
#include "sysemu/iothread.h"
+#include "qom/object.h"
typedef enum XenBlockVdevType {
XEN_BLOCK_VDEV_TYPE_INVALID,
@@ -46,7 +47,7 @@ typedef struct XenBlockIOThread {
char *id;
} XenBlockIOThread;
-typedef struct XenBlockDevice {
+struct XenBlockDevice {
XenDevice xendev;
XenBlockProperties props;
const char *device_type;
@@ -54,41 +55,35 @@ typedef struct XenBlockDevice {
XenBlockDataPlane *dataplane;
XenBlockDrive *drive;
XenBlockIOThread *iothread;
-} XenBlockDevice;
+};
+typedef struct XenBlockDevice XenBlockDevice;
typedef void (*XenBlockDeviceRealize)(XenBlockDevice *blockdev, Error **errp);
-typedef void (*XenBlockDeviceUnrealize)(XenBlockDevice *blockdev, Error **errp);
+typedef void (*XenBlockDeviceUnrealize)(XenBlockDevice *blockdev);
-typedef struct XenBlockDeviceClass {
+struct XenBlockDeviceClass {
/*< private >*/
XenDeviceClass parent_class;
/*< public >*/
XenBlockDeviceRealize realize;
XenBlockDeviceUnrealize unrealize;
-} XenBlockDeviceClass;
+};
#define TYPE_XEN_BLOCK_DEVICE "xen-block"
-#define XEN_BLOCK_DEVICE(obj) \
- OBJECT_CHECK(XenBlockDevice, (obj), TYPE_XEN_BLOCK_DEVICE)
-#define XEN_BLOCK_DEVICE_CLASS(class) \
- OBJECT_CLASS_CHECK(XenBlockDeviceClass, (class), TYPE_XEN_BLOCK_DEVICE)
-#define XEN_BLOCK_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XenBlockDeviceClass, (obj), TYPE_XEN_BLOCK_DEVICE)
-
-typedef struct XenDiskDevice {
+OBJECT_DECLARE_TYPE(XenBlockDevice, XenBlockDeviceClass, XEN_BLOCK_DEVICE)
+
+struct XenDiskDevice {
XenBlockDevice blockdev;
-} XenDiskDevice;
+};
#define TYPE_XEN_DISK_DEVICE "xen-disk"
-#define XEN_DISK_DEVICE(obj) \
- OBJECT_CHECK(XenDiskDevice, (obj), TYPE_XEN_DISK_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(XenDiskDevice, XEN_DISK_DEVICE)
-typedef struct XenCDRomDevice {
+struct XenCDRomDevice {
XenBlockDevice blockdev;
-} XenCDRomDevice;
+};
#define TYPE_XEN_CDROM_DEVICE "xen-cdrom"
-#define XEN_CDROM_DEVICE(obj) \
- OBJECT_CHECK(XenCDRomDevice, (obj), TYPE_XEN_CDROM_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(XenCDRomDevice, XEN_CDROM_DEVICE)
#endif /* HW_XEN_BLOCK_H */
diff --git a/include/hw/xen/xen-bus-helper.h b/include/hw/xen/xen-bus-helper.h
index 4c0f747445..d8dcc2f010 100644
--- a/include/hw/xen/xen-bus-helper.h
+++ b/include/hw/xen/xen-bus-helper.h
@@ -8,38 +8,40 @@
#ifndef HW_XEN_BUS_HELPER_H
#define HW_XEN_BUS_HELPER_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
const char *xs_strstate(enum xenbus_state state);
-void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid,
- const char *node, struct xs_permissions perms[],
- unsigned int nr_perms, Error **errp);
-void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_create(struct qemu_xs_handle *h, xs_transaction_t tid,
+ const char *node, unsigned int owner, unsigned int domid,
+ unsigned int perms, Error **errp);
+void xs_node_destroy(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, Error **errp);
/* Write to node/key unless node is empty, in which case write to key */
-void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_vprintf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
- GCC_FMT_ATTR(6, 0);
-void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid,
+ G_GNUC_PRINTF(6, 0);
+void xs_node_printf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
/* Read from node/key unless node is empty, in which case read from key */
-int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid,
+int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
- const char *fmt, va_list ap);
-int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid,
+ const char *fmt, va_list ap)
+ G_GNUC_SCANF(6, 0);
+int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
- const char *fmt, ...);
+ const char *fmt, ...)
+ G_GNUC_SCANF(6, 7);
/* Watch node/key unless node is empty, in which case watch key */
-void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key,
- char *token, Error **errp);
-void xs_node_unwatch(struct xs_handle *xsh, const char *node, const char *key,
- const char *token, Error **errp);
+struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node,
+ const char *key, xs_watch_fn fn,
+ void *opaque, Error **errp);
+void xs_node_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
#endif /* HW_XEN_BUS_HELPER_H */
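The helpers now take a struct qemu_xs_handle instead of a raw xs_handle, and watches return an opaque qemu_xs_watch token. A minimal sketch, assuming a handle obtained elsewhere (e.g. via qemu_xen_xs_open()) and a made-up node path; the scanf-style return value is an assumption based on the format-checking attributes above:

    #include "qemu/osdep.h"
    #include "hw/xen/xen-bus-helper.h"

    static void node_state_example(struct qemu_xs_handle *h, Error **errp)
    {
        int state;

        /* Write backend/qdisk/0/state = 1 (XenbusStateInitialising). */
        xs_node_printf(h, XBT_NULL, "backend/qdisk/0", "state", errp, "%u", 1);

        /* Read it back; assume the return value counts converted fields. */
        if (xs_node_scanf(h, XBT_NULL, "backend/qdisk/0", "state", errp,
                          "%d", &state) != 1) {
            return;
        }
    }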
diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h
index 3183f10e3c..38d40afa37 100644
--- a/include/hw/xen/xen-bus.h
+++ b/include/hw/xen/xen-bus.h
@@ -8,77 +8,72 @@
#ifndef HW_XEN_BUS_H
#define HW_XEN_BUS_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
#include "hw/sysbus.h"
#include "qemu/notify.h"
+#include "qom/object.h"
-typedef void (*XenWatchHandler)(void *opaque);
-
-typedef struct XenWatch XenWatch;
+typedef struct XenEventChannel XenEventChannel;
-typedef struct XenDevice {
+struct XenDevice {
DeviceState qdev;
domid_t frontend_id;
char *name;
+ struct qemu_xs_handle *xsh;
char *backend_path, *frontend_path;
enum xenbus_state backend_state, frontend_state;
Notifier exit;
- XenWatch *backend_state_watch, *frontend_state_watch;
+ struct qemu_xs_watch *backend_state_watch, *frontend_state_watch;
bool backend_online;
- XenWatch *backend_online_watch;
+ struct qemu_xs_watch *backend_online_watch;
xengnttab_handle *xgth;
- bool feature_grant_copy;
- xenevtchn_handle *xeh;
- NotifierList event_notifiers;
-} XenDevice;
+ bool inactive;
+ QLIST_HEAD(, XenEventChannel) event_channels;
+ QLIST_ENTRY(XenDevice) list;
+};
+typedef struct XenDevice XenDevice;
+typedef char *(*XenDeviceGetFrontendPath)(XenDevice *xendev, Error **errp);
typedef char *(*XenDeviceGetName)(XenDevice *xendev, Error **errp);
typedef void (*XenDeviceRealize)(XenDevice *xendev, Error **errp);
typedef void (*XenDeviceFrontendChanged)(XenDevice *xendev,
enum xenbus_state frontend_state,
Error **errp);
-typedef void (*XenDeviceUnrealize)(XenDevice *xendev, Error **errp);
+typedef void (*XenDeviceUnrealize)(XenDevice *xendev);
-typedef struct XenDeviceClass {
+struct XenDeviceClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
const char *backend;
const char *device;
+ XenDeviceGetFrontendPath get_frontend_path;
XenDeviceGetName get_name;
XenDeviceRealize realize;
XenDeviceFrontendChanged frontend_changed;
XenDeviceUnrealize unrealize;
-} XenDeviceClass;
+};
#define TYPE_XEN_DEVICE "xen-device"
-#define XEN_DEVICE(obj) \
- OBJECT_CHECK(XenDevice, (obj), TYPE_XEN_DEVICE)
-#define XEN_DEVICE_CLASS(class) \
- OBJECT_CLASS_CHECK(XenDeviceClass, (class), TYPE_XEN_DEVICE)
-#define XEN_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XenDeviceClass, (obj), TYPE_XEN_DEVICE)
-
-typedef struct XenBus {
+OBJECT_DECLARE_TYPE(XenDevice, XenDeviceClass, XEN_DEVICE)
+
+struct XenBus {
BusState qbus;
domid_t backend_id;
- struct xs_handle *xsh;
- NotifierList watch_notifiers;
- XenWatch *backend_watch;
-} XenBus;
+ struct qemu_xs_handle *xsh;
+ unsigned int backend_types;
+ struct qemu_xs_watch **backend_watch;
+ QLIST_HEAD(, XenDevice) inactive_devices;
+};
-typedef struct XenBusClass {
+struct XenBusClass {
/*< private >*/
BusClass parent_class;
-} XenBusClass;
+};
#define TYPE_XEN_BUS "xen-bus"
-#define XEN_BUS(obj) \
- OBJECT_CHECK(XenBus, (obj), TYPE_XEN_BUS)
-#define XEN_BUS_CLASS(class) \
- OBJECT_CLASS_CHECK(XenBusClass, (class), TYPE_XEN_BUS)
-#define XEN_BUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XenBusClass, (obj), TYPE_XEN_BUS)
+OBJECT_DECLARE_TYPE(XenBus, XenBusClass,
+ XEN_BUS)
void xen_bus_init(void);
@@ -88,20 +83,21 @@ enum xenbus_state xen_device_backend_get_state(XenDevice *xendev);
void xen_device_backend_printf(XenDevice *xendev, const char *key,
const char *fmt, ...)
- GCC_FMT_ATTR(3, 4);
+ G_GNUC_PRINTF(3, 4);
void xen_device_frontend_printf(XenDevice *xendev, const char *key,
const char *fmt, ...)
- GCC_FMT_ATTR(3, 4);
+ G_GNUC_PRINTF(3, 4);
int xen_device_frontend_scanf(XenDevice *xendev, const char *key,
- const char *fmt, ...);
+ const char *fmt, ...)
+ G_GNUC_SCANF(3, 4);
void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
Error **errp);
void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot,
Error **errp);
-void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, uint32_t *refs,
unsigned int nr_refs, Error **errp);
typedef struct XenDeviceGrantCopySegment {
@@ -119,19 +115,22 @@ void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
XenDeviceGrantCopySegment segs[],
unsigned int nr_segs, Error **errp);
-typedef struct XenEventChannel XenEventChannel;
-
-typedef void (*XenEventHandler)(void *opaque);
+typedef bool (*XenEventHandler)(void *opaque);
XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
unsigned int port,
XenEventHandler handler,
void *opaque, Error **errp);
+void xen_device_set_event_channel_context(XenDevice *xendev,
+ XenEventChannel *channel,
+ AioContext *ctx,
+ Error **errp);
void xen_device_notify_event_channel(XenDevice *xendev,
XenEventChannel *channel,
Error **errp);
void xen_device_unbind_event_channel(XenDevice *xendev,
XenEventChannel *channel,
Error **errp);
+unsigned int xen_event_channel_get_local_port(XenEventChannel *channel);
#endif /* HW_XEN_BUS_H */
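Event channels are now tracked per device and the handler returns a bool; a channel can also be moved onto a specific AioContext after binding. A hedged sketch of the new calls, with hypothetical names, where the exact meaning of the handler's return value is left to the xen-bus implementation:

    #include "qemu/osdep.h"
    #include "hw/xen/xen-bus.h"

    static bool my_event_handler(void *opaque)
    {
        /* Do the work triggered by the event; the bool result is consumed
         * by xen-bus (assumption: it signals whether work was done). */
        return false;
    }

    static void bind_example(XenDevice *xendev, unsigned int port,
                             AioContext *ctx, Error **errp)
    {
        XenEventChannel *ch;

        ch = xen_device_bind_event_channel(xendev, port, my_event_handler,
                                           NULL, errp);
        if (!ch) {
            return;
        }
        /* Optionally run the channel's handler in another AioContext. */
        xen_device_set_event_channel_context(xendev, ch, ctx, errp);
    }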
diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
new file mode 100644
index 0000000000..65a51aac2e
--- /dev/null
+++ b/include/hw/xen/xen-hvm-common.h
@@ -0,0 +1,98 @@
+#ifndef HW_XEN_HVM_COMMON_H
+#define HW_XEN_HVM_COMMON_H
+
+#include "qemu/units.h"
+
+#include "cpu.h"
+#include "hw/pci/pci.h"
+#include "hw/hw.h"
+#include "hw/xen/xen_native.h"
+#include "hw/xen/xen-legacy-backend.h"
+#include "sysemu/runstate.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/xen.h"
+#include "sysemu/xen-mapcache.h"
+#include "qemu/error-report.h"
+#include <xen/hvm/ioreq.h>
+
+extern MemoryRegion xen_memory;
+extern MemoryListener xen_io_listener;
+extern DeviceListener xen_device_listener;
+
+//#define DEBUG_XEN_HVM
+
+#ifdef DEBUG_XEN_HVM
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
+{
+ return shared_page->vcpu_ioreq[i].vp_eport;
+}
+static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
+{
+ return &shared_page->vcpu_ioreq[vcpu];
+}
+
+#define BUFFER_IO_MAX_DELAY 100
+
+typedef struct XenPhysmap {
+ hwaddr start_addr;
+ ram_addr_t size;
+ const char *name;
+ hwaddr phys_offset;
+
+ QLIST_ENTRY(XenPhysmap) list;
+} XenPhysmap;
+
+typedef struct XenPciDevice {
+ PCIDevice *pci_dev;
+ uint32_t sbdf;
+ QLIST_ENTRY(XenPciDevice) entry;
+} XenPciDevice;
+
+typedef struct XenIOState {
+ ioservid_t ioservid;
+ shared_iopage_t *shared_page;
+ buffered_iopage_t *buffered_io_page;
+ xenforeignmemory_resource_handle *fres;
+ QEMUTimer *buffered_io_timer;
+ CPUState **cpu_by_vcpu_id;
+ /* the evtchn port for polling the notification */
+ evtchn_port_t *ioreq_local_port;
+ /* evtchn remote and local ports for buffered io */
+ evtchn_port_t bufioreq_remote_port;
+ evtchn_port_t bufioreq_local_port;
+ /* the evtchn fd for polling */
+ xenevtchn_handle *xce_handle;
+ /* which vcpu we are serving */
+ int send_vcpu;
+
+ struct xs_handle *xenstore;
+ MemoryListener memory_listener;
+ MemoryListener io_listener;
+ QLIST_HEAD(, XenPciDevice) dev_list;
+ DeviceListener device_listener;
+
+ Notifier exit;
+} XenIOState;
+
+void xen_exit_notifier(Notifier *n, void *data);
+
+void xen_region_add(MemoryListener *listener, MemoryRegionSection *section);
+void xen_region_del(MemoryListener *listener, MemoryRegionSection *section);
+void xen_io_add(MemoryListener *listener, MemoryRegionSection *section);
+void xen_io_del(MemoryListener *listener, MemoryRegionSection *section);
+void xen_device_realize(DeviceListener *listener, DeviceState *dev);
+void xen_device_unrealize(DeviceListener *listener, DeviceState *dev);
+
+void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate);
+void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+ const MemoryListener *xen_memory_listener);
+
+void cpu_ioreq_pio(ioreq_t *req);
+#endif /* HW_XEN_HVM_COMMON_H */
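The inline accessors above index the shared ioreq page by vCPU. As a sketch of how they are typically combined with libxenevtchn, the loop below binds one local port per vCPU, mirroring in simplified form (no error reporting) what the common ioreq setup code does:

    #include "qemu/osdep.h"
    #include "hw/xen/xen-hvm-common.h"

    static void bind_vcpu_ports(XenIOState *state, unsigned int max_cpus)
    {
        unsigned int i;

        for (i = 0; i < max_cpus; i++) {
            /* vp_eport comes from the shared page; the returned local port
             * is what we poll on later via state->ioreq_local_port[]. */
            int rc = xenevtchn_bind_interdomain(
                state->xce_handle, xen_domid,
                xen_vcpu_eport(state->shared_page, i));
            if (rc == -1) {
                return;   /* simplified: real code reports the error */
            }
            state->ioreq_local_port[i] = rc;
        }
    }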
diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h
index 20cb47b5bf..2cca174778 100644
--- a/include/hw/xen/xen-legacy-backend.h
+++ b/include/hw/xen/xen-legacy-backend.h
@@ -1,20 +1,21 @@
-#ifndef QEMU_HW_XEN_BACKEND_H
-#define QEMU_HW_XEN_BACKEND_H
+#ifndef HW_XEN_LEGACY_BACKEND_H
+#define HW_XEN_LEGACY_BACKEND_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
#include "hw/xen/xen_pvdev.h"
-#include "sysemu/sysemu.h"
#include "net/net.h"
+#include "qom/object.h"
#define TYPE_XENSYSDEV "xen-sysdev"
#define TYPE_XENSYSBUS "xen-sysbus"
#define TYPE_XENBACKEND "xen-backend"
-#define XENBACKEND_DEVICE(obj) \
- OBJECT_CHECK(XenLegacyDevice, (obj), TYPE_XENBACKEND)
+typedef struct XenLegacyDevice XenLegacyDevice;
+DECLARE_INSTANCE_CHECKER(XenLegacyDevice, XENBACKEND,
+ TYPE_XENBACKEND)
/* variables */
-extern struct xs_handle *xenstore;
+extern struct qemu_xs_handle *xenstore;
extern const char *xen_protocol;
extern DeviceState *xen_sysdev;
extern BusState *xen_sysbus;
@@ -29,9 +30,6 @@ int xenstore_write_be_int64(struct XenLegacyDevice *xendev, const char *node,
char *xenstore_read_be_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_be_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
-void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev);
-void xenstore_update_be(char *watch, char *type, int dom,
- struct XenDevOps *ops);
char *xenstore_read_fe_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_fe_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
@@ -41,8 +39,7 @@ int xenstore_read_fe_uint64(struct XenLegacyDevice *xendev, const char *node,
void xen_be_check_state(struct XenLegacyDevice *xendev);
/* xen backend driver bits */
-int xen_be_init(void);
-void xen_be_register_common(void);
+void xen_be_init(void);
int xen_be_register(const char *type, struct XenDevOps *ops);
int xen_be_set_state(struct XenLegacyDevice *xendev, enum xenbus_state state);
int xen_be_bind_evtchn(struct XenLegacyDevice *xendev);
@@ -51,18 +48,7 @@ void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot);
void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
- unsigned int nr_refs);
-
-typedef struct XenGrantCopySegment {
- union {
- void *virt;
- struct {
- uint32_t ref;
- off_t offset;
- } foreign;
- } source, dest;
- size_t len;
-} XenGrantCopySegment;
+ uint32_t *refs, unsigned int nr_refs);
int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
bool to_domain, XenGrantCopySegment segs[],
@@ -75,9 +61,9 @@ static inline void *xen_be_map_grant_ref(struct XenLegacyDevice *xendev,
}
static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev,
- void *ptr)
+ void *ptr, uint32_t ref)
{
- return xen_be_unmap_grant_refs(xendev, ptr, 1);
+ return xen_be_unmap_grant_refs(xendev, ptr, &ref, 1);
}
/* actual backend drivers */
@@ -95,10 +81,8 @@ extern struct XenDevOps xen_usb_ops; /* xen-usb.c */
/* configuration (aka xenbus setup) */
void xen_config_cleanup(void);
-int xen_config_dev_blk(DriveInfo *disk);
-int xen_config_dev_nic(NICInfo *nic);
int xen_config_dev_vfb(int vdev, const char *type);
int xen_config_dev_vkbd(int vdev);
int xen_config_dev_console(int vdev);
-#endif /* QEMU_HW_XEN_BACKEND_H */
+#endif /* HW_XEN_LEGACY_BACKEND_H */
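xen_be_unmap_grant_refs() now needs the same refs that were mapped, and the single-ref wrapper above forwards one ref accordingly. A usage sketch, assuming the map wrapper keeps its (xendev, ref, prot) form:

    #include "qemu/osdep.h"
    #include "hw/xen/xen-legacy-backend.h"
    #include <sys/mman.h>

    static void map_one_page(struct XenLegacyDevice *xendev, uint32_t ref)
    {
        void *ptr = xen_be_map_grant_ref(xendev, ref, PROT_READ | PROT_WRITE);

        if (!ptr) {
            return;
        }
        /* ... use the mapped page ... */
        xen_be_unmap_grant_ref(xendev, ptr, ref);  /* ref must match the map */
    }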
diff --git a/include/hw/xen/xen-x86.h b/include/hw/xen/xen-x86.h
new file mode 100644
index 0000000000..85e3db1b8d
--- /dev/null
+++ b/include/hw/xen/xen-x86.h
@@ -0,0 +1,15 @@
+/*
+ * Xen X86-specific
+ *
+ * Copyright 2020 Red Hat, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef QEMU_HW_XEN_X86_H
+#define QEMU_HW_XEN_X86_H
+
+#include "hw/i386/pc.h"
+
+void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory);
+
+#endif /* QEMU_HW_XEN_X86_H */
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index ba039c146d..37ecc91fc3 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -1,50 +1,50 @@
-#ifndef QEMU_HW_XEN_H
-#define QEMU_HW_XEN_H
-
/*
* public xen header
* stuff needed outside xen-*.c, i.e. interfaces to qemu.
* must not depend on any xen headers being present in
* /usr/include/xen, so it can be included unconditionally.
*/
+#ifndef QEMU_HW_XEN_H
+#define QEMU_HW_XEN_H
+
+/*
+ * C files using Xen toolstack libraries will have included those headers
+ * already via xen_native.h, and having __XEN_TOOLS__ defined will have
+ * automatically set __XEN_INTERFACE_VERSION__ to the latest supported
+ * by the *system* Xen headers which were transitively included.
+ *
+ * C files which are part of the internal emulation, and which did not
+ * include xen_native.h, may need this defined so that the Xen headers
+ * imported to include/hw/xen/interface/ will expose the appropriate API
+ * version.
+ *
+ * This is why there's a rule that xen_native.h must be included first.
+ */
+#ifndef __XEN_INTERFACE_VERSION__
+#define __XEN_INTERFACE_VERSION__ 0x00040e00
+#endif
-#include "qemu-common.h"
#include "exec/cpu-common.h"
-#include "hw/irq.h"
/* xen-machine.c */
enum xen_mode {
- XEN_EMULATE = 0, // xen emulation, using xenner (default)
- XEN_ATTACH // attach to xen domain created by libxl
+ XEN_DISABLED = 0, /* xen support disabled (default) */
+ XEN_ATTACH, /* attach to xen domain created by libxl */
+ XEN_EMULATE, /* emulate Xen within QEMU */
};
extern uint32_t xen_domid;
extern enum xen_mode xen_mode;
extern bool xen_domid_restrict;
-extern bool xen_allowed;
-
-static inline bool xen_enabled(void)
-{
- return xen_allowed;
-}
-
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
-void xen_piix3_set_irq(void *opaque, int irq_num, int level);
-void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
+int xen_set_pci_link_route(uint8_t link, uint8_t irq);
+void xen_intx_set_irq(void *opaque, int irq_num, int level);
void xen_hvm_inject_msi(uint64_t addr, uint32_t data);
int xen_is_pirq_msi(uint32_t msi_data);
qemu_irq *xen_interrupt_controller_init(void);
-void xenstore_store_pv_console_info(int i, struct Chardev *chr);
-
-void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory);
-
-void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
- struct MemoryRegion *mr, Error **errp);
-void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
-
void xen_register_framebuffer(struct MemoryRegion *mr);
#endif /* QEMU_HW_XEN_H */
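The comment above boils down to an include-order rule. Shown schematically (the file pairings are illustrative):

    /* A file linked against the real Xen toolstack libraries: */
    #include "qemu/osdep.h"
    #include "hw/xen/xen_native.h"  /* must come before any other Xen header */
    #include "hw/xen/xen-bus.h"

    /* A file that only uses the built-in Xen emulation: */
    #include "qemu/osdep.h"
    #include "hw/xen/xen.h"         /* picks up the default interface version */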
diff --git a/include/hw/xen/xen_backend_ops.h b/include/hw/xen/xen_backend_ops.h
new file mode 100644
index 0000000000..90cca85f52
--- /dev/null
+++ b/include/hw/xen/xen_backend_ops.h
@@ -0,0 +1,408 @@
+/*
+ * QEMU Xen backend support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_BACKEND_OPS_H
+#define QEMU_XEN_BACKEND_OPS_H
+
+#include "hw/xen/xen.h"
+#include "hw/xen/interface/xen.h"
+#include "hw/xen/interface/io/xenbus.h"
+
+/*
+ * For the time being, these operations map fairly closely to the API of
+ * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
+ * from XenLegacyDevice back ends to the new XenDevice model, they may
+ * evolve to slightly higher-level APIs.
+ *
+ * The internal emulations do not emulate the Xen APIs entirely faithfully;
+ * only enough to be used by the Xen backend devices. For example, only one
+ * event channel can be bound to each handle, since that's sufficient for
+ * the device support (only the true Xen HVM backend uses more). And the
+ * behaviour of unmask() and pending() is different too because the device
+ * backends don't care.
+ */
+
+typedef struct xenevtchn_handle xenevtchn_handle;
+typedef int xenevtchn_port_or_error_t;
+typedef uint32_t evtchn_port_t;
+typedef uint16_t domid_t;
+typedef uint32_t grant_ref_t;
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
+#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1))
+
+#ifndef xen_rmb
+#define xen_rmb() smp_rmb()
+#endif
+#ifndef xen_wmb
+#define xen_wmb() smp_wmb()
+#endif
+#ifndef xen_mb
+#define xen_mb() smp_mb()
+#endif
+
+struct evtchn_backend_ops {
+ xenevtchn_handle *(*open)(void);
+ int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
+ evtchn_port_t guest_port);
+ int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
+ int (*close)(struct xenevtchn_handle *xc);
+ int (*get_fd)(struct xenevtchn_handle *xc);
+ int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
+ int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
+ int (*pending)(struct xenevtchn_handle *xc);
+};
+
+extern struct evtchn_backend_ops *xen_evtchn_ops;
+
+static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
+{
+ if (!xen_evtchn_ops) {
+ return NULL;
+ }
+ return xen_evtchn_ops->open();
+}
+
+static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
+ uint32_t domid,
+ evtchn_port_t guest_port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
+}
+
+static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->unbind(xc, port);
+}
+
+static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->close(xc);
+}
+
+static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->get_fd(xc);
+}
+
+static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->notify(xc, port);
+}
+
+static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->unmask(xc, port);
+}
+
+static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->pending(xc);
+}
+
+typedef struct xengntdev_handle xengnttab_handle;
+
+typedef struct XenGrantCopySegment {
+ union {
+ void *virt;
+ struct {
+ uint32_t ref;
+ off_t offset;
+ } foreign;
+ } source, dest;
+ size_t len;
+} XenGrantCopySegment;
+
+#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE (1U << 0)
+
+struct gnttab_backend_ops {
+ uint32_t features;
+ xengnttab_handle *(*open)(void);
+ int (*close)(xengnttab_handle *xgt);
+ int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
+ XenGrantCopySegment *segs, uint32_t nr_segs,
+ Error **errp);
+ int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
+ void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
+ uint32_t *refs, int prot);
+ int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
+ uint32_t count);
+};
+
+extern struct gnttab_backend_ops *xen_gnttab_ops;
+
+static inline bool qemu_xen_gnttab_can_map_multi(void)
+{
+ return xen_gnttab_ops &&
+ !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
+}
+
+static inline xengnttab_handle *qemu_xen_gnttab_open(void)
+{
+ if (!xen_gnttab_ops) {
+ return NULL;
+ }
+ return xen_gnttab_ops->open();
+}
+
+static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->close(xgt);
+}
+
+static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
+ bool to_domain, uint32_t domid,
+ XenGrantCopySegment *segs,
+ uint32_t nr_segs, Error **errp)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+
+ return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
+ errp);
+}
+
+static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
+ uint32_t nr_grants)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
+}
+
+static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
+ uint32_t count, uint32_t domid,
+ uint32_t *refs, int prot)
+{
+ if (!xen_gnttab_ops) {
+ return NULL;
+ }
+ return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
+}
+
+static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
+ void *start_address, uint32_t *refs,
+ uint32_t count)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->unmap(xgt, start_address, refs, count);
+}
+
+struct foreignmem_backend_ops {
+ void *(*map)(uint32_t dom, void *addr, int prot, size_t pages,
+ xen_pfn_t *pfns, int *errs);
+ int (*unmap)(void *addr, size_t pages);
+};
+
+extern struct foreignmem_backend_ops *xen_foreignmem_ops;
+
+static inline void *qemu_xen_foreignmem_map(uint32_t dom, void *addr, int prot,
+ size_t pages, xen_pfn_t *pfns,
+ int *errs)
+{
+ if (!xen_foreignmem_ops) {
+ return NULL;
+ }
+ return xen_foreignmem_ops->map(dom, addr, prot, pages, pfns, errs);
+}
+
+static inline int qemu_xen_foreignmem_unmap(void *addr, size_t pages)
+{
+ if (!xen_foreignmem_ops) {
+ return -ENOSYS;
+ }
+ return xen_foreignmem_ops->unmap(addr, pages);
+}
+
+typedef void (*xs_watch_fn)(void *opaque, const char *path);
+
+struct qemu_xs_handle;
+struct qemu_xs_watch;
+typedef uint32_t xs_transaction_t;
+
+#define XBT_NULL 0
+
+#define XS_PERM_NONE 0x00
+#define XS_PERM_READ 0x01
+#define XS_PERM_WRITE 0x02
+
+struct xenstore_backend_ops {
+ struct qemu_xs_handle *(*open)(void);
+ void (*close)(struct qemu_xs_handle *h);
+ char *(*get_domain_path)(struct qemu_xs_handle *h, unsigned int domid);
+ char **(*directory)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *num);
+ void *(*read)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *len);
+ bool (*write)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, const void *data, unsigned int len);
+ bool (*create)(struct qemu_xs_handle *h, xs_transaction_t t,
+ unsigned int owner, unsigned int domid,
+ unsigned int perms, const char *path);
+ bool (*destroy)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path);
+ struct qemu_xs_watch *(*watch)(struct qemu_xs_handle *h, const char *path,
+ xs_watch_fn fn, void *opaque);
+ void (*unwatch)(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
+ xs_transaction_t (*transaction_start)(struct qemu_xs_handle *h);
+ bool (*transaction_end)(struct qemu_xs_handle *h, xs_transaction_t t,
+ bool abort);
+};
+
+extern struct xenstore_backend_ops *xen_xenstore_ops;
+
+static inline struct qemu_xs_handle *qemu_xen_xs_open(void)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->open();
+}
+
+static inline void qemu_xen_xs_close(struct qemu_xs_handle *h)
+{
+ if (!xen_xenstore_ops) {
+ return;
+ }
+ xen_xenstore_ops->close(h);
+}
+
+static inline char *qemu_xen_xs_get_domain_path(struct qemu_xs_handle *h,
+ unsigned int domid)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->get_domain_path(h, domid);
+}
+
+static inline char **qemu_xen_xs_directory(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ unsigned int *num)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->directory(h, t, path, num);
+}
+
+static inline void *qemu_xen_xs_read(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ unsigned int *len)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->read(h, t, path, len);
+}
+
+static inline bool qemu_xen_xs_write(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ const void *data, unsigned int len)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->write(h, t, path, data, len);
+}
+
+static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h,
+ xs_transaction_t t, unsigned int owner,
+ unsigned int domid, unsigned int perms,
+ const char *path)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->create(h, t, owner, domid, perms, path);
+}
+
+static inline bool qemu_xen_xs_destroy(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->destroy(h, t, path);
+}
+
+static inline struct qemu_xs_watch *qemu_xen_xs_watch(struct qemu_xs_handle *h,
+ const char *path,
+ xs_watch_fn fn,
+ void *opaque)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->watch(h, path, fn, opaque);
+}
+
+static inline void qemu_xen_xs_unwatch(struct qemu_xs_handle *h,
+ struct qemu_xs_watch *w)
+{
+ if (!xen_xenstore_ops) {
+ return;
+ }
+ xen_xenstore_ops->unwatch(h, w);
+}
+
+static inline xs_transaction_t qemu_xen_xs_transaction_start(struct qemu_xs_handle *h)
+{
+ if (!xen_xenstore_ops) {
+ return XBT_NULL;
+ }
+ return xen_xenstore_ops->transaction_start(h);
+}
+
+static inline bool qemu_xen_xs_transaction_end(struct qemu_xs_handle *h,
+ xs_transaction_t t, bool abort)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->transaction_end(h, t, abort);
+}
+
+void setup_xen_backend_ops(void);
+
+#endif /* QEMU_XEN_BACKEND_OPS_H */
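The pattern throughout this header is a NULL-checked ops table per subsystem. As a hedged sketch, an implementation fills in one of the tables and publishes it through the corresponding pointer (the real wiring lives in setup_xen_backend_ops() and the per-backend code, not shown), while callers stay backend-agnostic:

    #include "qemu/osdep.h"
    #include "hw/xen/xen_backend_ops.h"

    /* Hypothetical provider: function pointers omitted for brevity. */
    static struct evtchn_backend_ops my_evtchn_ops = {
        /* .open, .bind_interdomain, .unbind, .close, .get_fd,
         * .notify, .unmask, .pending = implementation functions */
    };

    static void my_register_evtchn_ops(void)
    {
        xen_evtchn_ops = &my_evtchn_ops;
    }

    /* Callers go through the wrappers and get -ENOSYS with no backend. */
    static int notify_port(xenevtchn_handle *xc, evtchn_port_t port)
    {
        return qemu_xen_evtchn_notify(xc, port);
    }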
diff --git a/include/hw/xen/xen_igd.h b/include/hw/xen/xen_igd.h
new file mode 100644
index 0000000000..7ffca06c10
--- /dev/null
+++ b/include/hw/xen/xen_igd.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2007, Neocleus Corporation.
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Alex Novik <alex@neocleus.com>
+ * Allen Kay <allen.m.kay@intel.com>
+ * Guy Zana <guy@neocleus.com>
+ */
+#ifndef XEN_IGD_H
+#define XEN_IGD_H
+
+#include "hw/xen/xen-host-pci-device.h"
+
+typedef struct XenPCIPassthroughState XenPCIPassthroughState;
+
+bool xen_igd_gfx_pt_enabled(void);
+void xen_igd_gfx_pt_set(bool value, Error **errp);
+
+uint32_t igd_read_opregion(XenPCIPassthroughState *s);
+void xen_igd_reserve_slot(PCIBus *pci_bus);
+void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val);
+void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
+ XenHostPCIDevice *dev);
+
+static inline bool is_igd_vga_passthrough(XenHostPCIDevice *dev)
+{
+ return (xen_igd_gfx_pt_enabled()
+ && ((dev->class_code >> 0x8) == PCI_CLASS_DISPLAY_VGA));
+}
+
+#endif
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_native.h
index 9a8155e172..1a5ad693a4 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_native.h
@@ -1,5 +1,9 @@
-#ifndef QEMU_HW_XEN_COMMON_H
-#define QEMU_HW_XEN_COMMON_H
+#ifndef QEMU_HW_XEN_NATIVE_H
+#define QEMU_HW_XEN_NATIVE_H
+
+#ifdef __XEN_INTERFACE_VERSION__
+#error In Xen native files, include xen_native.h before other Xen headers
+#endif
/*
* If we have new enough libxenctrl then we do not want/need these compat
@@ -12,71 +16,19 @@
#include <xenctrl.h>
#include <xenstore.h>
-#include <xen/io/xenbus.h>
-#include "hw/hw.h"
#include "hw/xen/xen.h"
-#include "hw/pci/pci.h"
-#include "qemu/queue.h"
+#include "hw/pci/pci_device.h"
#include "hw/xen/trace.h"
extern xc_interface *xen_xc;
/*
- * We don't support Xen prior to 4.2.0.
+ * We don't support Xen prior to 4.7.1.
*/
-/* Xen 4.2 through 4.6 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
-
-typedef xc_interface xenforeignmemory_handle;
-typedef xc_evtchn xenevtchn_handle;
-typedef xc_gnttab xengnttab_handle;
-typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
-
-#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
-#define xenevtchn_close(h) xc_evtchn_close(h)
-#define xenevtchn_fd(h) xc_evtchn_fd(h)
-#define xenevtchn_pending(h) xc_evtchn_pending(h)
-#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
-#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
-#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
-#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
-
-#define xengnttab_open(l, f) xc_gnttab_open(l, f)
-#define xengnttab_close(h) xc_gnttab_close(h)
-#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
-#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
-#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
-#define xengnttab_map_grant_refs(h, c, d, r, p) \
- xc_gnttab_map_grant_refs(h, c, d, r, p)
-#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
- xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
-
-#define xenforeignmemory_open(l, f) xen_xc
-#define xenforeignmemory_close(h)
-
-static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
- int prot, size_t pages,
- const xen_pfn_t arr[/*pages*/],
- int err[/*pages*/])
-{
- if (err)
- return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
- else
- return xc_map_foreign_pages(h, dom, prot, arr, pages);
-}
-
-#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
-
-#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
-
-#include <xenevtchn.h>
-#include <xengnttab.h>
#include <xenforeignmemory.h>
-#endif
-
extern xenforeignmemory_handle *xen_fmem;
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
@@ -136,6 +88,12 @@ static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
return NULL;
}
+static inline int xenforeignmemory_unmap_resource(
+ xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
+{
+ return 0;
+}
+
#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
@@ -178,8 +136,6 @@ static inline xendevicemodel_handle *xendevicemodel_open(
return xen_xc;
}
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
-
static inline int xendevicemodel_create_ioreq_server(
xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
ioservid_t *id)
@@ -241,8 +197,6 @@ static inline int xendevicemodel_set_ioreq_server_state(
return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
-#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
-
static inline int xendevicemodel_set_pci_intx_level(
xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
@@ -312,12 +266,6 @@ static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
device, intx, level);
}
-static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
- uint8_t irq)
-{
- return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
-}
-
static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
uint32_t msi_data)
{
@@ -354,7 +302,7 @@ static inline int xen_restrict(domid_t domid)
void destroy_hvm_domain(bool reboot);
/* shutdown/destroy current domain because of an error */
-void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
@@ -376,15 +324,6 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
}
#endif
-/* Xen before 4.6 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
-
-#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
-#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-#endif
-
-#endif
-
static inline int xen_get_default_ioreq_server_info(domid_t dom,
xen_pfn_t *ioreq_pfn,
xen_pfn_t *bufioreq_pfn,
@@ -422,84 +361,6 @@ static inline int xen_get_default_ioreq_server_info(domid_t dom,
return 0;
}
-/* Xen before 4.5 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
-
-#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
-#define HVM_PARAM_BUFIOREQ_EVTCHN 26
-#endif
-
-#define IOREQ_TYPE_PCI_CONFIG 2
-
-typedef uint16_t ioservid_t;
-
-static inline void xen_map_memory_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_unmap_memory_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_map_io_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_unmap_io_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_map_pcidev(domid_t dom,
- ioservid_t ioservid,
- PCIDevice *pci_dev)
-{
-}
-
-static inline void xen_unmap_pcidev(domid_t dom,
- ioservid_t ioservid,
- PCIDevice *pci_dev)
-{
-}
-
-static inline void xen_create_ioreq_server(domid_t dom,
- ioservid_t *ioservid)
-{
-}
-
-static inline void xen_destroy_ioreq_server(domid_t dom,
- ioservid_t ioservid)
-{
-}
-
-static inline int xen_get_ioreq_server_info(domid_t dom,
- ioservid_t ioservid,
- xen_pfn_t *ioreq_pfn,
- xen_pfn_t *bufioreq_pfn,
- evtchn_port_t *bufioreq_evtchn)
-{
- return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
- bufioreq_pfn,
- bufioreq_evtchn);
-}
-
-static inline int xen_set_ioreq_server_state(domid_t dom,
- ioservid_t ioservid,
- bool enable)
-{
- return 0;
-}
-
-/* Xen 4.5 */
-#else
-
static bool use_default_ioreq_server;
static inline void xen_map_memory_section(domid_t dom,
@@ -602,8 +463,8 @@ static inline void xen_unmap_pcidev(domid_t dom,
PCI_FUNC(pci_dev->devfn));
}
-static inline void xen_create_ioreq_server(domid_t dom,
- ioservid_t *ioservid)
+static inline int xen_create_ioreq_server(domid_t dom,
+ ioservid_t *ioservid)
{
int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
HVM_IOREQSRV_BUFIOREQ_ATOMIC,
@@ -611,12 +472,14 @@ static inline void xen_create_ioreq_server(domid_t dom,
if (rc == 0) {
trace_xen_ioreq_server_create(*ioservid);
- return;
+ return rc;
}
*ioservid = 0;
use_default_ioreq_server = true;
trace_xen_default_ioreq_server();
+
+ return rc;
}
static inline void xen_destroy_ioreq_server(domid_t dom,
@@ -660,33 +523,28 @@ static inline int xen_set_ioreq_server_state(domid_t dom,
enable);
}
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41500
+static inline int xendevicemodel_set_irq_level(xendevicemodel_handle *dmod,
+ domid_t domid, uint32_t irq,
+ unsigned int level)
+{
+ return -1;
+}
#endif
-/* Xen before 4.8 */
-
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
-
-struct xengnttab_grant_copy_segment {
- union xengnttab_copy_ptr {
- void *virt;
- struct {
- uint32_t ref;
- uint16_t offset;
- uint16_t domid;
- } foreign;
- } source, dest;
- uint16_t len;
- uint16_t flags;
- int16_t status;
-};
-
-typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41700
+#define GUEST_VIRTIO_MMIO_BASE xen_mk_ullong(0x02000000)
+#define GUEST_VIRTIO_MMIO_SIZE xen_mk_ullong(0x00100000)
+#define GUEST_VIRTIO_MMIO_SPI_FIRST 33
+#define GUEST_VIRTIO_MMIO_SPI_LAST 43
+#endif
-static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
- xengnttab_grant_copy_segment_t *segs)
-{
- return -ENOSYS;
-}
+#if defined(__i386__) || defined(__x86_64__)
+#define GUEST_RAM_BANKS 2
+#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
+#define GUEST_RAM0_SIZE 0xc0000000ULL
+#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
+#define GUEST_RAM1_SIZE 0xfe00000000ULL
#endif
-#endif /* QEMU_HW_XEN_COMMON_H */
+#endif /* QEMU_HW_XEN_NATIVE_H */
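One behavioural change worth noting in this rename is that xen_create_ioreq_server() now returns the underlying result instead of void, while still falling back to the default ioreq server internally. A caller sketch:

    #include "qemu/osdep.h"
    #include "hw/xen/xen_native.h"

    static void create_server_example(domid_t dom)
    {
        ioservid_t ioservid;

        if (xen_create_ioreq_server(dom, &ioservid)) {
            /* Non-zero: no dedicated server; the default one will be used. */
        }
    }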
diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h
index 83e5174d90..ddad4b9f36 100644
--- a/include/hw/xen/xen_pvdev.h
+++ b/include/hw/xen/xen_pvdev.h
@@ -1,7 +1,9 @@
#ifndef QEMU_HW_XEN_PVDEV_H
#define QEMU_HW_XEN_PVDEV_H
-#include "hw/xen/xen_common.h"
+#include "hw/qdev-core.h"
+#include "hw/xen/xen_backend_ops.h"
+
/* ------------------------------------------------------------- */
#define XEN_BUFSIZE 1024
@@ -38,6 +40,7 @@ struct XenLegacyDevice {
char name[64];
int debug;
+ struct qemu_xs_watch *watch;
enum xenbus_state be_state;
enum xenbus_state fe_state;
int online;
@@ -63,7 +66,6 @@ int xenstore_write_int64(const char *base, const char *node, int64_t ival);
char *xenstore_read_str(const char *base, const char *node);
int xenstore_read_int(const char *base, const char *node, int *ival);
int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval);
-void xenstore_update(void *unused);
const char *xenbus_strstate(enum xenbus_state state);
@@ -76,6 +78,6 @@ void xen_pv_unbind_evtchn(struct XenLegacyDevice *xendev);
int xen_pv_send_notify(struct XenLegacyDevice *xendev);
void xen_pv_printf(struct XenLegacyDevice *xendev, int msg_level,
- const char *fmt, ...) GCC_FMT_ATTR(3, 4);
+ const char *fmt, ...) G_GNUC_PRINTF(3, 4);
#endif /* QEMU_HW_XEN_PVDEV_H */
diff --git a/include/hw/xtensa/mx_pic.h b/include/hw/xtensa/mx_pic.h
new file mode 100644
index 0000000000..500424c8d3
--- /dev/null
+++ b/include/hw/xtensa/mx_pic.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef XTENSA_MX_PIC_H
+#define XTENSA_MX_PIC_H
+
+#include "exec/memory.h"
+
+struct XtensaMxPic;
+typedef struct XtensaMxPic XtensaMxPic;
+
+XtensaMxPic *xtensa_mx_pic_init(unsigned n_extint);
+void xtensa_mx_pic_reset(void *opaque);
+MemoryRegion *xtensa_mx_pic_register_cpu(XtensaMxPic *mx,
+ qemu_irq *irq,
+ qemu_irq runstall);
+qemu_irq *xtensa_mx_pic_get_extints(XtensaMxPic *mx);
+
+#endif
diff --git a/include/hw/xtensa/xtensa-isa.h b/include/hw/xtensa/xtensa-isa.h
index bd68ada640..a289531bdc 100644
--- a/include/hw/xtensa/xtensa-isa.h
+++ b/include/hw/xtensa/xtensa-isa.h
@@ -22,8 +22,8 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef XTENSA_LIBISA_H
-#define XTENSA_LIBISA_H
+#ifndef HW_XTENSA_XTENSA_ISA_H
+#define HW_XTENSA_XTENSA_ISA_H
#ifdef __cplusplus
extern "C" {
@@ -833,4 +833,4 @@ int xtensa_funcUnit_num_copies(xtensa_isa isa, xtensa_funcUnit fun);
#ifdef __cplusplus
}
#endif
-#endif /* XTENSA_LIBISA_H */
+#endif /* HW_XTENSA_XTENSA_ISA_H */
diff --git a/include/io/channel-buffer.h b/include/io/channel-buffer.h
index 3f4b3f29e1..c9463ee655 100644
--- a/include/io/channel-buffer.h
+++ b/include/io/channel-buffer.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,11 @@
#define QIO_CHANNEL_BUFFER_H
#include "io/channel.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_BUFFER "qio-channel-buffer"
-#define QIO_CHANNEL_BUFFER(obj) \
- OBJECT_CHECK(QIOChannelBuffer, (obj), TYPE_QIO_CHANNEL_BUFFER)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelBuffer, QIO_CHANNEL_BUFFER)
-typedef struct QIOChannelBuffer QIOChannelBuffer;
/**
* QIOChannelBuffer:
diff --git a/include/io/channel-command.h b/include/io/channel-command.h
index 336d47fa5c..98934e6d9e 100644
--- a/include/io/channel-command.h
+++ b/include/io/channel-command.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,11 @@
#define QIO_CHANNEL_COMMAND_H
#include "io/channel.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_COMMAND "qio-channel-command"
-#define QIO_CHANNEL_COMMAND(obj) \
- OBJECT_CHECK(QIOChannelCommand, (obj), TYPE_QIO_CHANNEL_COMMAND)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelCommand, QIO_CHANNEL_COMMAND)
-typedef struct QIOChannelCommand QIOChannelCommand;
/**
@@ -42,36 +41,14 @@ struct QIOChannelCommand {
QIOChannel parent;
int writefd;
int readfd;
- pid_t pid;
+ GPid pid;
+#ifdef WIN32
+ bool blocking;
+#endif
};
/**
- * qio_channel_command_new_pid:
- * @writefd: the FD connected to the command's stdin
- * @readfd: the FD connected to the command's stdout
- * @pid: the PID of the running child command
- * @errp: pointer to a NULL-initialized error object
- *
- * Create a channel for performing I/O with the
- * previously spawned command identified by @pid.
- * The two file descriptors provide the connection
- * to command's stdio streams, either one or which
- * may be -1 to indicate that stream is not open.
- *
- * The channel will take ownership of the process
- * @pid and will kill it when closing the channel.
- * Similarly it will take responsibility for
- * closing the file descriptors @writefd and @readfd.
- *
- * Returns: the command channel object, or NULL on error
- */
-QIOChannelCommand *
-qio_channel_command_new_pid(int writefd,
- int readfd,
- pid_t pid);
-
-/**
* qio_channel_command_new_spawn:
* @argv: the NULL terminated list of command arguments
* @flags: the I/O mode, one of O_RDONLY, O_WRONLY, O_RDWR
diff --git a/include/io/channel-file.h b/include/io/channel-file.h
index ebfe54ec70..d373a4e44d 100644
--- a/include/io/channel-file.h
+++ b/include/io/channel-file.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,11 @@
#define QIO_CHANNEL_FILE_H
#include "io/channel.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_FILE "qio-channel-file"
-#define QIO_CHANNEL_FILE(obj) \
- OBJECT_CHECK(QIOChannelFile, (obj), TYPE_QIO_CHANNEL_FILE)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelFile, QIO_CHANNEL_FILE)
-typedef struct QIOChannelFile QIOChannelFile;
/**
* QIOChannelFile:
@@ -70,6 +69,24 @@ QIOChannelFile *
qio_channel_file_new_fd(int fd);
/**
+ * qio_channel_file_new_dupfd:
+ * @fd: the file descriptor
+ * @errp: pointer to initialized error object
+ *
+ * Create a new IO channel object for a file represented by the @fd
+ * parameter. Like qio_channel_file_new_fd(), but the @fd is first
+ * duplicated with dup().
+ *
+ * The channel will own the duplicated file descriptor and will take
+ * responsibility for closing it; the original FD is owned by the
+ * caller.
+ *
+ * Returns: the new channel object
+ */
+QIOChannelFile *
+qio_channel_file_new_dupfd(int fd, Error **errp);
+
+/**
* qio_channel_file_new_path:
* @path: the file path
* @flags: the open flags (O_RDONLY|O_WRONLY|O_RDWR, etc)
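A short usage sketch for the new constructor documented above, wrapping an already-open descriptor without giving up ownership of the original (the choice of stdout is arbitrary):

    #include "qemu/osdep.h"
    #include "io/channel-file.h"

    static QIOChannel *wrap_stdout(Error **errp)
    {
        QIOChannelFile *fioc = qio_channel_file_new_dupfd(STDOUT_FILENO, errp);

        if (!fioc) {
            return NULL;
        }
        /* The channel owns the dup()ed fd; STDOUT_FILENO stays with us. */
        return QIO_CHANNEL(fioc);
    }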
diff --git a/include/io/channel-null.h b/include/io/channel-null.h
new file mode 100644
index 0000000000..f6d54e63cf
--- /dev/null
+++ b/include/io/channel-null.h
@@ -0,0 +1,55 @@
+/*
+ * QEMU I/O channels null driver
+ *
+ * Copyright (c) 2022 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QIO_CHANNEL_NULL_H
+#define QIO_CHANNEL_NULL_H
+
+#include "io/channel.h"
+#include "qom/object.h"
+
+#define TYPE_QIO_CHANNEL_NULL "qio-channel-null"
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelNull, QIO_CHANNEL_NULL)
+
+
+/**
+ * QIOChannelNull:
+ *
+ * The QIOChannelNull object provides a channel implementation
+ * that discards all writes and returns EOF for all reads.
+ */
+
+struct QIOChannelNull {
+ QIOChannel parent;
+ bool closed;
+};
+
+
+/**
+ * qio_channel_null_new:
+ *
+ * Create a new IO channel object that discards all writes
+ * and returns EOF for all reads.
+ *
+ * Returns: the new channel object
+ */
+QIOChannelNull *
+qio_channel_null_new(void);
+
+#endif /* QIO_CHANNEL_NULL_H */
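A minimal usage sketch of the null channel: writes succeed and are dropped, reads report EOF, matching the documentation above.

    #include "qemu/osdep.h"
    #include "io/channel-null.h"

    static void null_channel_demo(Error **errp)
    {
        QIOChannelNull *nioc = qio_channel_null_new();
        char buf[16];

        qio_channel_write(QIO_CHANNEL(nioc), "discarded", 9, errp);

        if (qio_channel_read(QIO_CHANNEL(nioc), buf, sizeof(buf), errp) == 0) {
            /* EOF, as documented for the null channel. */
        }

        object_unref(OBJECT(nioc));
    }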
diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
index d7134d2cd6..ab15577d38 100644
--- a/include/io/channel-socket.h
+++ b/include/io/channel-socket.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,12 +24,11 @@
#include "io/channel.h"
#include "io/task.h"
#include "qemu/sockets.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_SOCKET "qio-channel-socket"
-#define QIO_CHANNEL_SOCKET(obj) \
- OBJECT_CHECK(QIOChannelSocket, (obj), TYPE_QIO_CHANNEL_SOCKET)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelSocket, QIO_CHANNEL_SOCKET)
-typedef struct QIOChannelSocket QIOChannelSocket;
/**
* QIOChannelSocket:
@@ -48,6 +47,8 @@ struct QIOChannelSocket {
socklen_t localAddrLen;
struct sockaddr_storage remoteAddr;
socklen_t remoteAddrLen;
+ ssize_t zero_copy_queued;
+ ssize_t zero_copy_sent;
};
@@ -123,6 +124,7 @@ void qio_channel_socket_connect_async(QIOChannelSocket *ioc,
* qio_channel_socket_listen_sync:
* @ioc: the socket channel object
* @addr: the address to listen to
+ * @num: the expected number of connections
* @errp: pointer to a NULL-initialized error object
*
* Attempt to listen to the address @addr. This method
@@ -132,12 +134,14 @@ void qio_channel_socket_connect_async(QIOChannelSocket *ioc,
*/
int qio_channel_socket_listen_sync(QIOChannelSocket *ioc,
SocketAddress *addr,
+ int num,
Error **errp);
/**
* qio_channel_socket_listen_async:
* @ioc: the socket channel object
* @addr: the address to listen to
+ * @num: the expected number of connections
* @callback: the function to invoke on completion
* @opaque: user data to pass to @callback
* @destroy: the function to free @opaque
@@ -153,6 +157,7 @@ int qio_channel_socket_listen_sync(QIOChannelSocket *ioc,
*/
void qio_channel_socket_listen_async(QIOChannelSocket *ioc,
SocketAddress *addr,
+ int num,
QIOTaskFunc callback,
gpointer opaque,
GDestroyNotify destroy,
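A sketch of the listener API with the new @num argument; the documentation above describes it as the expected number of connections (in practice a backlog hint), and the single-client value here is illustrative:

    #include "qemu/osdep.h"
    #include "io/channel-socket.h"

    static QIOChannelSocket *listen_one(SocketAddress *addr, Error **errp)
    {
        QIOChannelSocket *lioc = qio_channel_socket_new();

        if (qio_channel_socket_listen_sync(lioc, addr, 1, errp) < 0) {
            object_unref(OBJECT(lioc));
            return NULL;
        }
        return lioc;
    }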
diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
index fdbdf12feb..26c67f17e2 100644
--- a/include/io/channel-tls.h
+++ b/include/io/channel-tls.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,12 +24,11 @@
#include "io/channel.h"
#include "io/task.h"
#include "crypto/tlssession.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_TLS "qio-channel-tls"
-#define QIO_CHANNEL_TLS(obj) \
- OBJECT_CHECK(QIOChannelTLS, (obj), TYPE_QIO_CHANNEL_TLS)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelTLS, QIO_CHANNEL_TLS)
-typedef struct QIOChannelTLS QIOChannelTLS;
/**
* QIOChannelTLS
@@ -49,6 +48,7 @@ struct QIOChannelTLS {
QIOChannel *master;
QCryptoTLSSession *session;
QIOChannelShutdown shutdown;
+ guint hs_ioc_tag;
};
/**
diff --git a/include/io/channel-util.h b/include/io/channel-util.h
index c0b79cf603..fa18a3756d 100644
--- a/include/io/channel-util.h
+++ b/include/io/channel-util.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -49,4 +49,27 @@
QIOChannel *qio_channel_new_fd(int fd,
Error **errp);
+/**
+ * qio_channel_util_set_aio_fd_handler:
+ * @read_fd: the file descriptor for the read handler
+ * @read_ctx: the AioContext for the read handler
+ * @io_read: the read handler
+ * @write_fd: the file descriptor for the write handler
+ * @write_ctx: the AioContext for the write handler
+ * @io_write: the write handler
+ * @opaque: the opaque argument to the read and write handler
+ *
+ * Set the read and write handlers when @read_ctx and @write_ctx are non-NULL,
+ * respectively. To leave a handler in its current state, pass a NULL
+ * AioContext. To clear a handler, pass a non-NULL AioContext and a NULL
+ * handler.
+ */
+void qio_channel_util_set_aio_fd_handler(int read_fd,
+ AioContext *read_ctx,
+ IOHandler *io_read,
+ int write_fd,
+ AioContext *write_ctx,
+ IOHandler *io_write,
+ void *opaque);
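/*
 * Illustrative sketch, not part of this patch: a file-descriptor based
 * channel implementation could forward its io_set_aio_fd_handler callback
 * to this helper. QIOChannelExample and its ->fd member are hypothetical.
 */
static void qio_channel_example_set_aio_fd_handler(QIOChannel *ioc,
                                                   AioContext *read_ctx,
                                                   IOHandler *io_read,
                                                   AioContext *write_ctx,
                                                   IOHandler *io_write,
                                                   void *opaque)
{
    QIOChannelExample *eioc = (QIOChannelExample *)ioc;

    /* One fd serves both directions; the helper applies the NULL-ctx rules */
    qio_channel_util_set_aio_fd_handler(eioc->fd, read_ctx, io_read,
                                        eioc->fd, write_ctx, io_write,
                                        opaque);
}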
+
#endif /* QIO_CHANNEL_UTIL_H */
diff --git a/include/io/channel-watch.h b/include/io/channel-watch.h
index 63bc4ae2d9..a36aab8f8f 100644
--- a/include/io/channel-watch.h
+++ b/include/io/channel-watch.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/io/channel-websock.h b/include/io/channel-websock.h
index a7e5e92e61..e180827c57 100644
--- a/include/io/channel-websock.h
+++ b/include/io/channel-websock.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,12 +24,11 @@
#include "io/channel.h"
#include "qemu/buffer.h"
#include "io/task.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_WEBSOCK "qio-channel-websock"
-#define QIO_CHANNEL_WEBSOCK(obj) \
- OBJECT_CHECK(QIOChannelWebsock, (obj), TYPE_QIO_CHANNEL_WEBSOCK)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelWebsock, QIO_CHANNEL_WEBSOCK)
-typedef struct QIOChannelWebsock QIOChannelWebsock;
typedef union QIOChannelWebsockMask QIOChannelWebsockMask;
union QIOChannelWebsockMask {
diff --git a/include/io/channel.h b/include/io/channel.h
index da2f138200..7986c49c71 100644
--- a/include/io/channel.h
+++ b/include/io/channel.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,30 +21,30 @@
#ifndef QIO_CHANNEL_H
#define QIO_CHANNEL_H
-#include "qemu-common.h"
#include "qom/object.h"
-#include "qemu/coroutine.h"
+#include "qemu/coroutine-core.h"
#include "block/aio.h"
#define TYPE_QIO_CHANNEL "qio-channel"
-#define QIO_CHANNEL(obj) \
- OBJECT_CHECK(QIOChannel, (obj), TYPE_QIO_CHANNEL)
-#define QIO_CHANNEL_CLASS(klass) \
- OBJECT_CLASS_CHECK(QIOChannelClass, klass, TYPE_QIO_CHANNEL)
-#define QIO_CHANNEL_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QIOChannelClass, obj, TYPE_QIO_CHANNEL)
+OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass,
+ QIO_CHANNEL)
-typedef struct QIOChannel QIOChannel;
-typedef struct QIOChannelClass QIOChannelClass;
#define QIO_CHANNEL_ERR_BLOCK -2
+#define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1
+
+#define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1
+
typedef enum QIOChannelFeature QIOChannelFeature;
enum QIOChannelFeature {
QIO_CHANNEL_FEATURE_FD_PASS,
QIO_CHANNEL_FEATURE_SHUTDOWN,
QIO_CHANNEL_FEATURE_LISTEN,
+ QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY,
+ QIO_CHANNEL_FEATURE_READ_MSG_PEEK,
+ QIO_CHANNEL_FEATURE_SEEKABLE,
};
@@ -82,9 +82,11 @@ struct QIOChannel {
Object parent;
unsigned int features; /* bitmask of QIOChannelFeatures */
char *name;
- AioContext *ctx;
+ AioContext *read_ctx;
Coroutine *read_coroutine;
+ AioContext *write_ctx;
Coroutine *write_coroutine;
+ bool follow_coroutine_ctx;
#ifdef _WIN32
HANDLE event; /* For use with GSource on Win32 */
#endif
@@ -99,7 +101,8 @@ struct QIOChannel {
* provide additional optional features.
*
* Consult the corresponding public API docs for a description
- * of the semantics of each callback
+ * of the semantics of each callback. io_shutdown in particular
+ * must be thread-safe, terminate quickly and must not block.
*/
struct QIOChannelClass {
ObjectClass parent;
@@ -110,12 +113,14 @@ struct QIOChannelClass {
size_t niov,
int *fds,
size_t nfds,
+ int flags,
Error **errp);
ssize_t (*io_readv)(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds,
size_t *nfds,
+ int flags,
Error **errp);
int (*io_close)(QIOChannel *ioc,
Error **errp);
@@ -126,6 +131,16 @@ struct QIOChannelClass {
Error **errp);
/* Optional callbacks */
+ ssize_t (*io_pwritev)(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ off_t offset,
+ Error **errp);
+ ssize_t (*io_preadv)(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ off_t offset,
+ Error **errp);
int (*io_shutdown)(QIOChannel *ioc,
QIOChannelShutdown how,
Error **errp);
@@ -138,10 +153,13 @@ struct QIOChannelClass {
int whence,
Error **errp);
void (*io_set_aio_fd_handler)(QIOChannel *ioc,
- AioContext *ctx,
+ AioContext *read_ctx,
IOHandler *io_read,
+ AioContext *write_ctx,
IOHandler *io_write,
void *opaque);
+ int (*io_flush)(QIOChannel *ioc,
+ Error **errp);
};
/* General I/O handling functions */
@@ -188,6 +206,7 @@ void qio_channel_set_name(QIOChannel *ioc,
* @niov: the length of the @iov array
* @fds: pointer to an array that will received file handles
* @nfds: pointer filled with number of elements in @fds on return
+ * @flags: read flags (QIO_CHANNEL_READ_FLAG_*)
* @errp: pointer to a NULL-initialized error object
*
* Read data from the IO channel, storing it in the
@@ -224,6 +243,7 @@ ssize_t qio_channel_readv_full(QIOChannel *ioc,
size_t niov,
int **fds,
size_t *nfds,
+ int flags,
Error **errp);
@@ -234,6 +254,7 @@ ssize_t qio_channel_readv_full(QIOChannel *ioc,
* @niov: the length of the @iov array
* @fds: an array of file handles to send
* @nfds: number of file handles in @fds
+ * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
* @errp: pointer to a NULL-initialized error object
*
* Write data to the IO channel, reading it from the
@@ -266,6 +287,7 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc,
size_t niov,
int *fds,
size_t nfds,
+ int flags,
Error **errp);
/**
@@ -293,10 +315,10 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc,
* Returns: 1 if all bytes were read, 0 if end-of-file
* occurs without data, or -1 on error
*/
-int qio_channel_readv_all_eof(QIOChannel *ioc,
- const struct iovec *iov,
- size_t niov,
- Error **errp);
+int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp);
/**
* qio_channel_readv_all:
@@ -320,10 +342,10 @@ int qio_channel_readv_all_eof(QIOChannel *ioc,
*
* Returns: 0 if all bytes were read, or -1 on error
*/
-int qio_channel_readv_all(QIOChannel *ioc,
- const struct iovec *iov,
- size_t niov,
- Error **errp);
+int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp);
/**
@@ -345,10 +367,10 @@ int qio_channel_readv_all(QIOChannel *ioc,
*
* Returns: 0 if all bytes were written, or -1 on error
*/
-int qio_channel_writev_all(QIOChannel *ioc,
- const struct iovec *iov,
- size_t niov,
- Error **erp);
+int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp);
/**
* qio_channel_readv:
@@ -429,10 +451,10 @@ ssize_t qio_channel_write(QIOChannel *ioc,
* Returns: 1 if all bytes were read, 0 if end-of-file occurs
* without data, or -1 on error
*/
-int qio_channel_read_all_eof(QIOChannel *ioc,
- char *buf,
- size_t buflen,
- Error **errp);
+int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
+ char *buf,
+ size_t buflen,
+ Error **errp);
/**
* qio_channel_read_all:
@@ -449,10 +471,10 @@ int qio_channel_read_all_eof(QIOChannel *ioc,
*
* Returns: 0 if all bytes were read, or -1 on error
*/
-int qio_channel_read_all(QIOChannel *ioc,
- char *buf,
- size_t buflen,
- Error **errp);
+int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
+ char *buf,
+ size_t buflen,
+ Error **errp);
/**
* qio_channel_write_all:
@@ -468,10 +490,10 @@ int qio_channel_read_all(QIOChannel *ioc,
*
* Returns: 0 if all bytes were written, or -1 on error
*/
-int qio_channel_write_all(QIOChannel *ioc,
- const char *buf,
- size_t buflen,
- Error **errp);
+int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
+ const char *buf,
+ size_t buflen,
+ Error **errp);
/**
* qio_channel_set_blocking:
@@ -491,6 +513,21 @@ int qio_channel_set_blocking(QIOChannel *ioc,
Error **errp);
/**
+ * qio_channel_set_follow_coroutine_ctx:
+ * @ioc: the channel object
+ * @enabled: whether or not to follow the coroutine's AioContext
+ *
+ * If @enabled is true, calls to qio_channel_yield() use the current
+ * coroutine's AioContext. Usually this is desirable.
+ *
+ * If @enabled is false, calls to qio_channel_yield() use the global iohandler
+ * AioContext. This may be used by coroutines that run in the main loop and
+ * do not wish to respond to I/O during nested event loops. This is the
+ * default for compatibility with code that is not aware of AioContexts.
+ */
+void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled);
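/*
 * Illustrative sketch, not part of this patch: a coroutine serving a channel
 * can opt in to following its own AioContext before yielding for I/O. The
 * surrounding function and buffer handling are assumptions.
 */
static void coroutine_fn example_serve_co(QIOChannel *ioc)
{
    char buf[4096];

    qio_channel_set_follow_coroutine_ctx(ioc, true);

    for (;;) {
        ssize_t len = qio_channel_read(ioc, buf, sizeof(buf), NULL);

        if (len == QIO_CHANNEL_ERR_BLOCK) {
            /* Wait in this coroutine's AioContext until readable */
            qio_channel_yield(ioc, G_IO_IN);
            continue;
        }
        if (len <= 0) {
            break;
        }
        /* ...process buf[0..len)... */
    }
}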
+
+/**
* qio_channel_close:
* @ioc: the channel object
* @errp: pointer to a NULL-initialized error object
@@ -503,6 +540,78 @@ int qio_channel_close(QIOChannel *ioc,
Error **errp);
/**
+ * qio_channel_pwritev
+ * @ioc: the channel object
+ * @iov: the array of memory regions to write data from
+ * @niov: the length of the @iov array
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ * Behaves like qio_channel_writev_full, except that it does not
+ * support sending file handles and the write begins at the
+ * given @offset.
+ *
+ */
+ssize_t qio_channel_pwritev(QIOChannel *ioc, const struct iovec *iov,
+ size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_pwrite
+ * @ioc: the channel object
+ * @buf: the memory region to write data from
+ * @buflen: the number of bytes to write from @buf
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ */
+ssize_t qio_channel_pwrite(QIOChannel *ioc, char *buf, size_t buflen,
+ off_t offset, Error **errp);
+
+/**
+ * qio_channel_preadv
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data into
+ * @niov: the length of the @iov array
+ * @offset: offset in the channel where reads should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ * Behaves like qio_channel_readv_full, except that it does not
+ * support receiving file handles and the read begins at the
+ * given @offset.
+ *
+ */
+ssize_t qio_channel_preadv(QIOChannel *ioc, const struct iovec *iov,
+ size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_pread
+ * @ioc: the channel object
+ * @buf: the memory region to read data into
+ * @buflen: the number of bytes to read into @buf
+ * @offset: offset in the channel where reads should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ */
+ssize_t qio_channel_pread(QIOChannel *ioc, char *buf, size_t buflen,
+ off_t offset, Error **errp);
+
+/**
* qio_channel_shutdown:
* @ioc: the channel object
* @how: the direction to shutdown
@@ -517,6 +626,8 @@ int qio_channel_close(QIOChannel *ioc,
* QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
* this method.
*
+ * This function is thread-safe, terminates quickly and does not block.
+ *
* Returns: 0 on success, -1 on error
*/
int qio_channel_shutdown(QIOChannel *ioc,
@@ -694,41 +805,6 @@ GSource *qio_channel_add_watch_source(QIOChannel *ioc,
GMainContext *context);
/**
- * qio_channel_attach_aio_context:
- * @ioc: the channel object
- * @ctx: the #AioContext to set the handlers on
- *
- * Request that qio_channel_yield() sets I/O handlers on
- * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
- * uses QEMU's main thread event loop.
- *
- * You can move a #QIOChannel from one #AioContext to another even if
- * I/O handlers are set for a coroutine. However, #QIOChannel provides
- * no synchronization between the calls to qio_channel_yield() and
- * qio_channel_attach_aio_context().
- *
- * Therefore you should first call qio_channel_detach_aio_context()
- * to ensure that the coroutine is not entered concurrently. Then,
- * while the coroutine has yielded, call qio_channel_attach_aio_context(),
- * and then aio_co_schedule() to place the coroutine on the new
- * #AioContext. The calls to qio_channel_detach_aio_context()
- * and qio_channel_attach_aio_context() should be protected with
- * aio_context_acquire() and aio_context_release().
- */
-void qio_channel_attach_aio_context(QIOChannel *ioc,
- AioContext *ctx);
-
-/**
- * qio_channel_detach_aio_context:
- * @ioc: the channel object
- *
- * Disable any I/O handlers set by qio_channel_yield(). With the
- * help of aio_co_schedule(), this allows moving a coroutine that was
- * paused by qio_channel_yield() to another context.
- */
-void qio_channel_detach_aio_context(QIOChannel *ioc);
-
-/**
* qio_channel_yield:
* @ioc: the channel object
* @condition: the I/O condition to wait for
@@ -739,10 +815,23 @@ void qio_channel_detach_aio_context(QIOChannel *ioc);
 * addition, no two coroutines can be waiting on the same condition
* and channel at the same time.
*
- * This must only be called from coroutine context
+ * This must only be called from coroutine context. It is safe to
+ * reenter the coroutine externally while it is waiting; in this
+ * case the function will return even if @condition is not yet
+ * available.
*/
-void qio_channel_yield(QIOChannel *ioc,
- GIOCondition condition);
+void coroutine_fn qio_channel_yield(QIOChannel *ioc,
+ GIOCondition condition);
+
+/**
+ * qio_channel_wake_read:
+ * @ioc: the channel object
+ *
+ * If qio_channel_yield() is currently waiting for the channel to become
+ * readable, interrupt it and reenter immediately. This function is safe to call
+ * from any thread.
+ */
+void qio_channel_wake_read(QIOChannel *ioc);
/**
* qio_channel_wait:
@@ -762,8 +851,9 @@ void qio_channel_wait(QIOChannel *ioc,
/**
* qio_channel_set_aio_fd_handler:
* @ioc: the channel object
- * @ctx: the AioContext to set the handlers on
+ * @read_ctx: the AioContext to set the read handler on or NULL
* @io_read: the read handler
+ * @write_ctx: the AioContext to set the write handler on or NULL
* @io_write: the write handler
* @opaque: the opaque value passed to the handler
*
@@ -771,11 +861,124 @@ void qio_channel_wait(QIOChannel *ioc,
* be used by channel implementations to forward the handlers
* to another channel (e.g. from #QIOChannelTLS to the
* underlying socket).
+ *
+ * When @read_ctx is NULL, don't touch the read handler. When @write_ctx is
+ * NULL, don't touch the write handler. Note that setting the read handler
+ * clears the write handler, and vice versa, if they share the same AioContext.
+ * Therefore the caller must pass both handlers together when sharing the same
+ * AioContext.
*/
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
- AioContext *ctx,
+ AioContext *read_ctx,
IOHandler *io_read,
+ AioContext *write_ctx,
IOHandler *io_write,
void *opaque);
+/**
+ * qio_channel_readv_full_all_eof:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data to
+ * @niov: the length of the @iov array
+ * @fds: an array of file handles to read
+ * @nfds: number of file handles in @fds
+ * @errp: pointer to a NULL-initialized error object
+ *
+ *
+ * Performs the same function as qio_channel_readv_all_eof.
+ * Additionally, attempts to read file descriptors shared
+ * over the channel. The function will wait for all
+ * requested data to be read, yielding from the current
+ * coroutine if required. Here, "data" refers to both the file
+ * descriptors and the iovs.
+ *
+ * Returns: 1 if all bytes were read, 0 if end-of-file
+ * occurs without data, or -1 on error
+ */
+
+int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ int **fds, size_t *nfds,
+ Error **errp);
+
+/**
+ * qio_channel_readv_full_all:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data to
+ * @niov: the length of the @iov array
+ * @fds: an array of file handles to read
+ * @nfds: number of file handles in @fds
+ * @errp: pointer to a NULL-initialized error object
+ *
+ *
+ * Performs the same function as qio_channel_readv_all.
+ * Additionally, attempts to read file descriptors shared
+ * over the channel. The function will wait for all
+ * requested data to be read, yielding from the current
+ * coroutine if required. Here, "data" refers to both the file
+ * descriptors and the iovs.
+ *
+ * Returns: 0 if all bytes were read, or -1 on error
+ */
+
+int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ int **fds, size_t *nfds,
+ Error **errp);
+
+/**
+ * qio_channel_writev_full_all:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to write data from
+ * @niov: the length of the @iov array
+ * @fds: an array of file handles to send
+ * @nfds: number of file handles in @fds
+ * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
+ * @errp: pointer to a NULL-initialized error object
+ *
+ *
+ * Behaves like qio_channel_writev_full but will attempt
+ * to send all data passed (file handles and memory regions).
+ * The function will wait for all requested data
+ * to be written, yielding from the current coroutine
+ * if required.
+ *
+ * If QIO_CHANNEL_WRITE_FLAG_ZERO_COPY is passed in flags,
+ * instead of waiting for all requested data to be written,
+ * this function will wait until it's all queued for writing.
+ * In this case, if the buffer gets changed between queueing and
+ * sending, the updated buffer will be sent. If this is not the
+ * desired behavior, call qio_channel_flush()
+ * before reusing the buffer.
+ *
+ * Returns: 0 if all bytes were written, or -1 on error
+ */
+
+int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ int *fds, size_t nfds,
+ int flags, Error **errp);
+
+/**
+ * qio_channel_flush:
+ * @ioc: the channel object
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Will block until every packet queued with
+ * qio_channel_writev_full() + QIO_CHANNEL_WRITE_FLAG_ZERO_COPY
+ * is sent, or return in case of any error.
+ *
+ * If not implemented, acts as a no-op, and returns 0.
+ *
+ * Returns -1 if any error is found,
+ * 1 if every send failed to use zero copy.
+ * 0 otherwise.
+ */
+
+int qio_channel_flush(QIOChannel *ioc,
+ Error **errp);
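/*
 * Illustrative sketch, not part of this patch: pairing zero-copy writes with
 * qio_channel_flush(). The feature probe and the fallback to a plain write
 * are assumptions about how a caller might use the flag.
 */
static int example_send_zero_copy(QIOChannel *ioc, const struct iovec *iov,
                                  size_t niov, Error **errp)
{
    int flags = 0;

    if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
        flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
    }
    if (qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, flags, errp) < 0) {
        return -1;
    }
    /* The buffers must not be reused until the queued data has been sent */
    if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
        if (qio_channel_flush(ioc, errp) < 0) {
            return -1;
        }
    }
    return 0;
}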
+
#endif /* QIO_CHANNEL_H */
diff --git a/include/io/dns-resolver.h b/include/io/dns-resolver.h
index 1a162185cc..558ea517de 100644
--- a/include/io/dns-resolver.h
+++ b/include/io/dns-resolver.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,21 +21,14 @@
#ifndef QIO_DNS_RESOLVER_H
#define QIO_DNS_RESOLVER_H
-#include "qemu-common.h"
#include "qapi/qapi-types-sockets.h"
#include "qom/object.h"
#include "io/task.h"
#define TYPE_QIO_DNS_RESOLVER "qio-dns-resolver"
-#define QIO_DNS_RESOLVER(obj) \
- OBJECT_CHECK(QIODNSResolver, (obj), TYPE_QIO_DNS_RESOLVER)
-#define QIO_DNS_RESOLVER_CLASS(klass) \
- OBJECT_CLASS_CHECK(QIODNSResolverClass, klass, TYPE_QIO_DNS_RESOLVER)
-#define QIO_DNS_RESOLVER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QIODNSResolverClass, obj, TYPE_QIO_DNS_RESOLVER)
+OBJECT_DECLARE_SIMPLE_TYPE(QIODNSResolver,
+ QIO_DNS_RESOLVER)
-typedef struct QIODNSResolver QIODNSResolver;
-typedef struct QIODNSResolverClass QIODNSResolverClass;
/**
* QIODNSResolver:
@@ -140,9 +133,6 @@ struct QIODNSResolver {
Object parent;
};
-struct QIODNSResolverClass {
- ObjectClass parent;
-};
/**
diff --git a/include/io/net-listener.h b/include/io/net-listener.h
index 8081ac58a2..ab9f291ed6 100644
--- a/include/io/net-listener.h
+++ b/include/io/net-listener.h
@@ -22,17 +22,12 @@
#define QIO_NET_LISTENER_H
#include "io/channel-socket.h"
+#include "qom/object.h"
#define TYPE_QIO_NET_LISTENER "qio-net-listener"
-#define QIO_NET_LISTENER(obj) \
- OBJECT_CHECK(QIONetListener, (obj), TYPE_QIO_NET_LISTENER)
-#define QIO_NET_LISTENER_CLASS(klass) \
- OBJECT_CLASS_CHECK(QIONetListenerClass, klass, TYPE_QIO_NET_LISTENER)
-#define QIO_NET_LISTENER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QIONetListenerClass, obj, TYPE_QIO_NET_LISTENER)
+OBJECT_DECLARE_SIMPLE_TYPE(QIONetListener,
+ QIO_NET_LISTENER)
-typedef struct QIONetListener QIONetListener;
-typedef struct QIONetListenerClass QIONetListenerClass;
typedef void (*QIONetListenerClientFunc)(QIONetListener *listener,
QIOChannelSocket *sioc,
@@ -63,9 +58,6 @@ struct QIONetListener {
GDestroyNotify io_notify;
};
-struct QIONetListenerClass {
- ObjectClass parent;
-};
/**
@@ -95,6 +87,7 @@ void qio_net_listener_set_name(QIONetListener *listener,
* qio_net_listener_open_sync:
* @listener: the network listener object
* @addr: the address to listen on
+ * @num: the expected number of connections
* @errp: pointer to a NULL initialized error object
*
* Synchronously open a listening connection on all
@@ -104,6 +97,7 @@ void qio_net_listener_set_name(QIONetListener *listener,
*/
int qio_net_listener_open_sync(QIONetListener *listener,
SocketAddress *addr,
+ int num,
Error **errp);
/**
diff --git a/include/io/task.h b/include/io/task.h
index 9e09b95b2e..0b5342ee84 100644
--- a/include/io/task.h
+++ b/include/io/task.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,9 +21,6 @@
#ifndef QIO_TASK_H
#define QIO_TASK_H
-#include "qemu-common.h"
-#include "qom/object.h"
-
typedef struct QIOTask QIOTask;
typedef void (*QIOTaskFunc)(QIOTask *task,
@@ -120,7 +117,7 @@ typedef void (*QIOTaskWorker)(QIOTask *task,
* gboolean myobject_operation_timer(gpointer opaque)
* {
* QIOTask *task = QIO_TASK(opaque);
- * Error *err;*
+ * Error *err = NULL;
*
* ...check something important...
* if (err) {
@@ -148,11 +145,11 @@ typedef void (*QIOTaskWorker)(QIOTask *task,
* The QIOTask module can also be used to perform operations
* in a background thread context, while still reporting the
* results in the main event thread. This allows code which
- * cannot easily be rewritten to be asychronous (such as DNS
+ * cannot easily be rewritten to be asynchronous (such as DNS
* lookups) to be easily run non-blocking. Reporting the
* results in the main thread context means that the caller
* typically does not need to be concerned about thread
- * safety wrt the QEMU global mutex.
+ * safety wrt the BQL.
*
* For example, the socket_listen() method will block the caller
* while DNS lookups take place if given a name, instead of IP
@@ -232,7 +229,8 @@ QIOTask *qio_task_new(Object *source,
*
* Run a task in a background thread. When @worker
* returns it will call qio_task_complete() in
- * the event thread context that provided.
+ * the thread that is running the main loop associated
+ * with @context.
*/
void qio_task_run_in_thread(QIOTask *task,
QIOTaskWorker worker,
@@ -240,6 +238,32 @@ void qio_task_run_in_thread(QIOTask *task,
GDestroyNotify destroy,
GMainContext *context);
+
+/**
+ * qio_task_wait_thread:
+ * @task: the task struct
+ *
+ * Wait for completion of a task that was previously
+ * invoked using qio_task_run_in_thread. This MUST
+ * ONLY be invoked if the task has not already
+ * completed, since after the completion callback
+ * is invoked, @task will have been freed.
+ *
+ * To avoid racing with execution of the completion
+ * callback provided with qio_task_new, this method
+ * MUST ONLY be invoked from the thread that is
+ * running the main loop associated with the @context
+ * parameter passed to qio_task_run_in_thread.
+ *
+ * When the thread has completed, the completion
+ * callback provided to qio_task_new will be invoked.
+ * When that callback returns @task will be freed,
+ * so @task must not be referenced after this
+ * method completes.
+ */
+void qio_task_wait_thread(QIOTask *task);
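/*
 * Illustrative sketch, not part of this patch: the background-worker pattern
 * that qio_task_wait_thread() pairs with. The worker body, completion
 * callback and source object are hypothetical.
 */
static void example_worker(QIOTask *task, gpointer opaque)
{
    Error *err = NULL;

    /* ...blocking work, e.g. a name lookup, possibly setting err... */
    if (err) {
        qio_task_set_error(task, err);
    }
}

static void example_done(QIOTask *task, gpointer opaque)
{
    Error *err = NULL;

    if (qio_task_propagate_error(task, &err)) {
        /* ...report the failure... */
        error_free(err);
    }
}

/*
 * Caller, in the thread running the main loop passed as @context:
 *
 *   QIOTask *task = qio_task_new(OBJECT(src), example_done, NULL, NULL);
 *   qio_task_run_in_thread(task, example_worker, NULL, NULL, NULL);
 *   qio_task_wait_thread(task);   (only valid before the task has completed)
 */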
+
+
/**
* qio_task_complete:
* @task: the task struct
diff --git a/include/libdecnumber/dconfig.h b/include/libdecnumber/dconfig.h
index 0f7dccef1f..2bc0ba7f14 100644
--- a/include/libdecnumber/dconfig.h
+++ b/include/libdecnumber/dconfig.h
@@ -28,7 +28,7 @@
02110-1301, USA. */
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define WORDS_BIGENDIAN 1
#else
#define WORDS_BIGENDIAN 0
diff --git a/include/libdecnumber/decNumber.h b/include/libdecnumber/decNumber.h
index aa115fed07..41bc2a0d36 100644
--- a/include/libdecnumber/decNumber.h
+++ b/include/libdecnumber/decNumber.h
@@ -116,12 +116,16 @@
decNumber * decNumberFromUInt32(decNumber *, uint32_t);
decNumber *decNumberFromInt64(decNumber *, int64_t);
decNumber *decNumberFromUInt64(decNumber *, uint64_t);
+ decNumber *decNumberFromInt128(decNumber *, uint64_t, int64_t);
+ decNumber *decNumberFromUInt128(decNumber *, uint64_t, uint64_t);
decNumber * decNumberFromString(decNumber *, const char *, decContext *);
char * decNumberToString(const decNumber *, char *);
char * decNumberToEngString(const decNumber *, char *);
uint32_t decNumberToUInt32(const decNumber *, decContext *);
int32_t decNumberToInt32(const decNumber *, decContext *);
int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set);
+ void decNumberIntegralToInt128(const decNumber *dn, decContext *set,
+ uint64_t *plow, uint64_t *phigh);
uint8_t * decNumberGetBCD(const decNumber *, uint8_t *);
decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t);
diff --git a/include/libdecnumber/decNumberLocal.h b/include/libdecnumber/decNumberLocal.h
index 12cf1d8b6f..6198ca8593 100644
--- a/include/libdecnumber/decNumberLocal.h
+++ b/include/libdecnumber/decNumberLocal.h
@@ -44,6 +44,7 @@
#define DECNLAUTHOR "Mike Cowlishaw" /* Who to blame */
#include "libdecnumber/dconfig.h"
+ #include "libdecnumber/decContext.h"
/* Conditional code flag -- set this to match hardware platform */
/* 1=little-endian, 0=big-endian */
@@ -97,7 +98,7 @@
/* Shared lookup tables */
extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */
- extern const uLong DECPOWERS[19]; /* powers of ten table */
+ extern const uLong DECPOWERS[20]; /* powers of ten table */
/* The following are included from decDPD.h */
extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */
extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */
diff --git a/include/migration/blocker.h b/include/migration/blocker.h
index acd27018e9..a687ac0efe 100644
--- a/include/migration/blocker.h
+++ b/include/migration/blocker.h
@@ -14,22 +14,84 @@
#ifndef MIGRATION_BLOCKER_H
#define MIGRATION_BLOCKER_H
+#include "qapi/qapi-types-migration.h"
+
+#define MIG_MODE_ALL MIG_MODE__MAX
+
+/**
+ * @migrate_add_blocker - prevent all modes of migration from proceeding
+ *
+ * @reasonp - address of an error to be returned whenever migration is attempted
+ *
+ * @errp - [out] The reason (if any) we cannot block migration right now.
+ *
+ * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
+ */
+int migrate_add_blocker(Error **reasonp, Error **errp);
+
+/**
+ * @migrate_add_blocker_internal - prevent all modes of migration from
+ * proceeding, but ignore -only-migratable
+ *
+ * @reasonp - address of an error to be returned whenever migration is attempted
+ *
+ * @errp - [out] The reason (if any) we cannot block migration right now.
+ *
+ * @returns - 0 on success, -EBUSY on failure, with errp set.
+ *
+ * Some of the migration blockers can be temporary (e.g., for a few seconds),
+ * so it shouldn't need to conflict with "-only-migratable". For those cases,
+ * we can call this function rather than @migrate_add_blocker().
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
+ */
+int migrate_add_blocker_internal(Error **reasonp, Error **errp);
+
+/**
+ * @migrate_del_blocker - remove a migration blocker from all modes and free it.
+ *
+ * @reasonp - address of the error blocking migration
+ *
+ * This function frees *@reasonp and sets it to NULL.
+ */
+void migrate_del_blocker(Error **reasonp);
+
/**
- * @migrate_add_blocker - prevent migration from proceeding
+ * @migrate_add_blocker_normal - prevent normal migration mode from proceeding
*
- * @reason - an error to be returned whenever migration is attempted
+ * @reasonp - address of an error to be returned whenever migration is attempted
*
* @errp - [out] The reason (if any) we cannot block migration right now.
*
* @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
*/
-int migrate_add_blocker(Error *reason, Error **errp);
+int migrate_add_blocker_normal(Error **reasonp, Error **errp);
/**
- * @migrate_del_blocker - remove a blocking error from migration
+ * @migrate_add_blocker_modes - prevent some modes of migration from proceeding
+ *
+ * @reasonp - address of an error to be returned whenever migration is attempted
+ *
+ * @errp - [out] The reason (if any) we cannot block migration right now.
+ *
+ * @mode - one or more migration modes to be blocked. The list is terminated
+ * by -1 or MIG_MODE_ALL. For the latter, all modes are blocked.
+ *
+ * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
*
- * @reason - the error blocking migration
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free *@reasonp before the blocker is removed.
*/
-void migrate_del_blocker(Error *reason);
+int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...);
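/*
 * Illustrative sketch, not part of this patch: the by-reference blocker API.
 * The static blocker variable, the reason text and the choice of
 * MIG_MODE_NORMAL are assumptions.
 */
static Error *example_mig_blocker;

static int example_block_migration(Error **errp)
{
    error_setg(&example_mig_blocker,
               "example device does not support migration");

    /* Block only normal-mode migration; the mode list is terminated by -1 */
    if (migrate_add_blocker_modes(&example_mig_blocker, errp,
                                  MIG_MODE_NORMAL, -1) < 0) {
        /* On failure the blocker has already been freed and set to NULL */
        return -1;
    }
    return 0;
}

static void example_unblock_migration(void)
{
    migrate_del_blocker(&example_mig_blocker);
}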
#endif
diff --git a/include/migration/client-options.h b/include/migration/client-options.h
new file mode 100644
index 0000000000..59f4b55cf4
--- /dev/null
+++ b/include/migration/client-options.h
@@ -0,0 +1,25 @@
+/*
+ * QEMU public migration capabilities
+ *
+ * Copyright (c) 2012-2023 Red Hat Inc
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_MIGRATION_CLIENT_OPTIONS_H
+#define QEMU_MIGRATION_CLIENT_OPTIONS_H
+
+/* capabilities */
+
+bool migrate_background_snapshot(void);
+bool migrate_dirty_limit(void);
+bool migrate_postcopy_ram(void);
+bool migrate_switchover_ack(void);
+
+/* parameters */
+
+MigMode migrate_mode(void);
+uint64_t migrate_vcpu_dirty_limit_period(void);
+
+#endif
diff --git a/include/migration/colo.h b/include/migration/colo.h
index 99ce17aca7..eaac07f26d 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -13,7 +13,6 @@
#ifndef QEMU_COLO_H
#define QEMU_COLO_H
-#include "qemu-common.h"
#include "qapi/qapi-types-migration.h"
enum colo_event {
@@ -22,22 +21,35 @@ enum colo_event {
COLO_EVENT_FAILOVER,
};
-void colo_info_init(void);
-
void migrate_start_colo_process(MigrationState *s);
bool migration_in_colo_state(void);
/* loadvm */
-void migration_incoming_enable_colo(void);
+int migration_incoming_enable_colo(void);
void migration_incoming_disable_colo(void);
bool migration_incoming_colo_enabled(void);
-void *colo_process_incoming_thread(void *opaque);
bool migration_incoming_in_colo_state(void);
COLOMode get_colo_mode(void);
/* failover */
-void colo_do_failover(MigrationState *s);
+void colo_do_failover(void);
+
+/*
+ * colo_checkpoint_delay_set
+ *
+ * Handles change of x-checkpoint-delay migration parameter, called from
+ * migrate_params_apply() to notify COLO module about the change.
+ */
+void colo_checkpoint_delay_set(void);
+
+/*
+ * Starts COLO incoming process. Called from process_incoming_migration_co()
+ * after loading the state.
+ *
+ * Called with BQL locked, may temporary release BQL.
+ */
+int coroutine_fn colo_incoming_co(void);
-void colo_checkpoint_notify(void *opaque);
+void colo_shutdown(void);
#endif
diff --git a/include/migration/cpu.h b/include/migration/cpu.h
index a40bd3549f..65abe3c8cc 100644
--- a/include/migration/cpu.h
+++ b/include/migration/cpu.h
@@ -1,7 +1,12 @@
/* Declarations for use for CPU state serialization. */
+
#ifndef MIGRATION_CPU_H
#define MIGRATION_CPU_H
+#include "exec/cpu-defs.h"
+#include "migration/qemu-file-types.h"
+#include "migration/vmstate.h"
+
#if TARGET_LONG_BITS == 64
#define qemu_put_betl qemu_put_be64
#define qemu_get_betl qemu_get_be64
diff --git a/include/migration/failover.h b/include/migration/failover.h
index 4c37218dcc..475f88f533 100644
--- a/include/migration/failover.h
+++ b/include/migration/failover.h
@@ -13,7 +13,6 @@
#ifndef QEMU_FAILOVER_H
#define QEMU_FAILOVER_H
-#include "qemu-common.h"
#include "qapi/qapi-types-migration.h"
void failover_init_state(void);
diff --git a/include/migration/global_state.h b/include/migration/global_state.h
index fd22dd3034..d7c2cd3216 100644
--- a/include/migration/global_state.h
+++ b/include/migration/global_state.h
@@ -14,10 +14,9 @@
#define QEMU_MIGRATION_GLOBAL_STATE_H
#include "qapi/qapi-types-run-state.h"
-#include "sysemu/sysemu.h"
void register_global_state(void);
-int global_state_store(void);
+void global_state_store(void);
void global_state_store_running(void);
bool global_state_received(void);
RunState global_state_get_runstate(void);
diff --git a/include/migration/misc.h b/include/migration/misc.h
index 4ebf24c6c2..c9e200f4eb 100644
--- a/include/migration/misc.h
+++ b/include/migration/misc.h
@@ -15,10 +15,33 @@
#define MIGRATION_MISC_H
#include "qemu/notify.h"
+#include "qapi/qapi-types-migration.h"
+#include "qapi/qapi-types-net.h"
+#include "migration/client-options.h"
/* migration/ram.c */
+typedef enum PrecopyNotifyReason {
+ PRECOPY_NOTIFY_SETUP = 0,
+ PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC = 1,
+ PRECOPY_NOTIFY_AFTER_BITMAP_SYNC = 2,
+ PRECOPY_NOTIFY_COMPLETE = 3,
+ PRECOPY_NOTIFY_CLEANUP = 4,
+ PRECOPY_NOTIFY_MAX = 5,
+} PrecopyNotifyReason;
+
+typedef struct PrecopyNotifyData {
+ enum PrecopyNotifyReason reason;
+} PrecopyNotifyData;
+
+void precopy_infrastructure_init(void);
+void precopy_add_notifier(NotifierWithReturn *n);
+void precopy_remove_notifier(NotifierWithReturn *n);
+int precopy_notify(PrecopyNotifyReason reason, Error **errp);
+
void ram_mig_init(void);
+void qemu_guest_free_page_hint(void *addr, size_t len);
+bool migrate_ram_is_ignored(RAMBlock *block);
/* migration/block.c */
@@ -28,33 +51,68 @@ void blk_mig_init(void);
static inline void blk_mig_init(void) {}
#endif
-#define SELF_ANNOUNCE_ROUNDS 5
-
-static inline
-int64_t self_announce_delay(int round)
-{
- assert(round < SELF_ANNOUNCE_ROUNDS && round > 0);
- /* delay 50ms, 150ms, 250ms, ... */
- return 50 + (SELF_ANNOUNCE_ROUNDS - round - 1) * 100;
-}
-
+AnnounceParameters *migrate_announce_params(void);
/* migration/savevm.c */
void dump_vmstate_json_to_file(FILE *out_fp);
/* migration/migration.c */
void migration_object_init(void);
-void migration_object_finalize(void);
-void qemu_start_incoming_migration(const char *uri, Error **errp);
+void migration_shutdown(void);
bool migration_is_idle(void);
-void add_migration_state_change_notifier(Notifier *notify);
-void remove_migration_state_change_notifier(Notifier *notify);
-bool migration_in_setup(MigrationState *);
-bool migration_has_finished(MigrationState *);
-bool migration_has_failed(MigrationState *);
-/* ...and after the device transmission */
-bool migration_in_postcopy_after_devices(MigrationState *);
-void migration_global_dump(Monitor *mon);
+bool migration_is_active(void);
+bool migration_is_device(void);
+bool migration_thread_is_self(void);
+bool migration_is_setup_or_active(void);
+
+typedef enum MigrationEventType {
+ MIG_EVENT_PRECOPY_SETUP,
+ MIG_EVENT_PRECOPY_DONE,
+ MIG_EVENT_PRECOPY_FAILED,
+ MIG_EVENT_MAX
+} MigrationEventType;
+
+typedef struct MigrationEvent {
+ MigrationEventType type;
+} MigrationEvent;
+
+/*
+ * A MigrationNotifyFunc may return an error code and an Error object,
+ * but only when @e->type is MIG_EVENT_PRECOPY_SETUP. The code is an int
+ * to allow for different failure modes and recovery actions.
+ */
+typedef int (*MigrationNotifyFunc)(NotifierWithReturn *notify,
+ MigrationEvent *e, Error **errp);
+
+/*
+ * Register the notifier @notify to be called when a migration event occurs
+ * for MIG_MODE_NORMAL, as specified by the MigrationEvent passed to @func.
+ * Notifiers may receive events in any of the following orders:
+ * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_DONE
+ * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_FAILED
+ * - MIG_EVENT_PRECOPY_FAILED
+ */
+void migration_add_notifier(NotifierWithReturn *notify,
+ MigrationNotifyFunc func);
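/*
 * Illustrative sketch, not part of this patch: a MigrationNotifyFunc and its
 * registration. The notifier variable and handler body are hypothetical.
 */
static NotifierWithReturn example_mig_notifier;

static int example_mig_notify(NotifierWithReturn *notify,
                              MigrationEvent *e, Error **errp)
{
    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        /* Only the setup event may fail; report the reason through errp */
        return 0;
    }
    /* MIG_EVENT_PRECOPY_DONE / _FAILED are informational here */
    return 0;
}

/* Registration, e.g. from device setup code:
 *   migration_add_notifier(&example_mig_notifier, example_mig_notify);
 */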
+
+/*
+ * Same as migration_add_notifier, but applies only to the specified @mode.
+ */
+void migration_add_notifier_mode(NotifierWithReturn *notify,
+ MigrationNotifyFunc func, MigMode mode);
+
+void migration_remove_notifier(NotifierWithReturn *notify);
+bool migration_is_running(void);
+void migration_file_set_error(int err);
+
+/* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */
+bool migration_in_incoming_postcopy(void);
+
+/* True if incoming migration entered POSTCOPY_INCOMING_ADVISE */
+bool migration_incoming_postcopy_advised(void);
+
+/* True if background snapshot is active */
+bool migration_in_bg_snapshot(void);
/* migration/block-dirty-bitmap.c */
void dirty_bitmap_mig_init(void);
diff --git a/include/migration/qemu-file-types.h b/include/migration/qemu-file-types.h
index bd6d7dd7f9..adec5abc07 100644
--- a/include/migration/qemu-file-types.h
+++ b/include/migration/qemu-file-types.h
@@ -22,8 +22,10 @@
* THE SOFTWARE.
*/
-#ifndef QEMU_FILE_H
-#define QEMU_FILE_H
+#ifndef MIGRATION_QEMU_FILE_TYPES_H
+#define MIGRATION_QEMU_FILE_TYPES_H
+
+int qemu_file_get_error(QEMUFile *f);
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size);
void qemu_put_byte(QEMUFile *f, int v);
@@ -33,7 +35,7 @@ void qemu_put_byte(QEMUFile *f, int v);
void qemu_put_be16(QEMUFile *f, unsigned int v);
void qemu_put_be32(QEMUFile *f, unsigned int v);
void qemu_put_be64(QEMUFile *f, uint64_t v);
-size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);
+size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);
int qemu_get_byte(QEMUFile *f);
@@ -48,6 +50,8 @@ unsigned int qemu_get_be16(QEMUFile *f);
unsigned int qemu_get_be32(QEMUFile *f);
uint64_t qemu_get_be64(QEMUFile *f);
+bool qemu_file_is_seekable(QEMUFile *f);
+
static inline void qemu_put_be64s(QEMUFile *f, const uint64_t *pv)
{
qemu_put_be64(f, *pv);
@@ -159,6 +163,20 @@ static inline void qemu_get_sbe64s(QEMUFile *f, int64_t *pv)
qemu_get_be64s(f, (uint64_t *)pv);
}
-int qemu_file_rate_limit(QEMUFile *f);
+size_t coroutine_mixed_fn qemu_get_counted_string(QEMUFile *f, char buf[256]);
+
+void qemu_put_counted_string(QEMUFile *f, const char *name);
+
+/**
+ * migration_rate_exceeded: Check if we have exceeded rate for this interval
+ *
+ * Checks if we have already transferred more data than we are allowed
+ * in the current interval.
+ *
+ * @f: QEMUFile used for main migration channel
+ *
+ * Returns true if we should stop sending data for this interval.
+ */
+bool migration_rate_exceeded(QEMUFile *f);
#endif
diff --git a/include/migration/register.h b/include/migration/register.h
index d287f4c317..f60e797894 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -14,67 +14,291 @@
#ifndef MIGRATION_REGISTER_H
#define MIGRATION_REGISTER_H
+#include "hw/vmstate-if.h"
+
+/**
+ * struct SaveVMHandlers: handler structure to finely control
+ * migration of complex subsystems and devices, such as RAM, block and
+ * VFIO.
+ */
typedef struct SaveVMHandlers {
- /* This runs inside the iothread lock. */
- SaveStateHandler *save_state;
+ /* The following handlers run inside the BQL. */
+
+ /**
+ * @save_state
+ *
+ * Saves state section on the source using the latest state format
+ * version.
+ *
+ * Legacy method. Should be deprecated when all users are ported
+ * to VMStateDescription.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ */
+ void (*save_state)(QEMUFile *f, void *opaque);
+
+ /**
+ * @save_prepare
+ *
+ * Called early, even before migration starts, and can be used to
+ * perform early checks.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*save_prepare)(void *opaque, Error **errp);
+
+ /**
+ * @save_setup
+ *
+ * Initializes the data structures on the source and transmits
+ * first section containing information on the device
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*save_setup)(QEMUFile *f, void *opaque, Error **errp);
+
+ /**
+ * @save_cleanup
+ *
+ * Uninitializes the data structures on the source
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ */
void (*save_cleanup)(void *opaque);
+
+ /**
+ * @save_live_complete_postcopy
+ *
+ * Called at the end of postcopy for all postcopyable devices.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
+
+ /**
+ * @save_live_complete_precopy
+ *
+ * Transmits the last section for the device containing any
+ * remaining data at the end of a precopy phase. When postcopy is
+ * enabled, devices that support postcopy will skip this step,
+ * where the final data will be flushed at the end of postcopy via
+ * @save_live_complete_postcopy instead.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
- /* This runs both outside and inside the iothread lock. */
+ /* This runs both outside and inside the BQL. */
+
+ /**
+ * @is_active
+ *
+ * Will skip a state section if not active
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true if state section is active else false
+ */
bool (*is_active)(void *opaque);
+
+ /**
+ * @has_postcopy
+ *
+ * Checks if a device supports postcopy
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true for postcopy support else false
+ */
bool (*has_postcopy)(void *opaque);
- /* is_active_iterate
- * If it is not NULL then qemu_savevm_state_iterate will skip iteration if
- * it returns false. For example, it is needed for only-postcopy-states,
- * which needs to be handled by qemu_savevm_state_setup and
- * qemu_savevm_state_pending, but do not need iterations until not in
- * postcopy stage.
+ /**
+ * @is_active_iterate
+ *
+ * As #SaveVMHandlers.is_active(), will skip an inactive state
+ * section in qemu_savevm_state_iterate.
+ *
+ * For example, it is needed for postcopy-only states, which need
+ * to be handled by qemu_savevm_state_setup() and
+ * qemu_savevm_state_pending(), but do not need iterations until
+ * the postcopy stage begins.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true if state section is active else false
*/
bool (*is_active_iterate)(void *opaque);
- /* This runs outside the iothread lock in the migration case, and
+ /* This runs outside the BQL in the migration case, and
* within the lock in the savevm case. The callback had better only
* use data that is local to the migration thread or protected
* by other locks.
*/
+
+ /**
+ * @save_live_iterate
+ *
+ * Should send a chunk of data until the point that stream
+ * bandwidth limits tell it to stop. Each call generates one
+ * section.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns 0 to indicate that there is still more data to send,
+ * 1 that there is no more data to send and
+ * negative to indicate an error.
+ */
int (*save_live_iterate)(QEMUFile *f, void *opaque);
- /* This runs outside the iothread lock! */
- int (*save_setup)(QEMUFile *f, void *opaque);
- void (*save_live_pending)(QEMUFile *f, void *opaque,
- uint64_t threshold_size,
- uint64_t *res_precopy_only,
- uint64_t *res_compatible,
- uint64_t *res_postcopy_only);
- /* Note for save_live_pending:
- * - res_precopy_only is for data which must be migrated in precopy phase
- * or in stopped state, in other words - before target vm start
- * - res_compatible is for data which may be migrated in any phase
- * - res_postcopy_only is for data which must be migrated in postcopy phase
- * or in stopped state, in other words - after source vm stop
+ /* This runs outside the BQL! */
+
+ /**
+ * @state_pending_estimate
+ *
+ * This estimates the remaining data to transfer
+ *
+ * Sum of @must_precopy and @can_postcopy is the whole amount of
+ * pending data.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ * @must_precopy: amount of data that must be migrated in precopy
+ * or in stopped state, i.e. that must be migrated
+ * before target start.
+ * @can_postcopy: amount of data that can be migrated in postcopy
+ * or in stopped state, i.e. after target start.
+ * Some can also be migrated during precopy (RAM).
+ * Some must be migrated after source stops
+ * (block-dirty-bitmap)
+ */
+ void (*state_pending_estimate)(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy);
+
+ /**
+ * @state_pending_exact
+ *
+ * This calculates the exact remaining data to transfer
*
- * Sum of res_postcopy_only, res_compatible and res_postcopy_only is the
- * whole amount of pending data.
+ * Sum of @must_precopy and @can_postcopy is the whole amount of
+ * pending data.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ * @must_precopy: amount of data that must be migrated in precopy
+ * or in stopped state, i.e. that must be migrated
+ * before target start.
+ * @can_postcopy: amount of data that can be migrated in postcopy
+ * or in stopped state, i.e. after target start.
+ * Some can also be migrated during precopy (RAM).
+ * Some must be migrated after source stops
+ * (block-dirty-bitmap)
*/
+ void (*state_pending_exact)(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy);
+ /**
+ * @load_state
+ *
+ * Load sections generated by any of the save functions that
+ * generate sections.
+ *
+ * Legacy method. Should be deprecated when all users are ported
+ * to VMStateDescription.
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: data pointer passed to register_savevm_live()
+ * @version_id: the maximum version_id supported
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*load_state)(QEMUFile *f, void *opaque, int version_id);
- LoadStateHandler *load_state;
- int (*load_setup)(QEMUFile *f, void *opaque);
+ /**
+ * @load_setup
+ *
+ * Initializes the data structures on the destination.
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: data pointer passed to register_savevm_live()
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*load_setup)(QEMUFile *f, void *opaque, Error **errp);
+
+ /**
+ * @load_cleanup
+ *
+ * Uninitializes the data structures on the destination.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*load_cleanup)(void *opaque);
- /* Called when postcopy migration wants to resume from failure */
+
+ /**
+ * @resume_prepare
+ *
+ * Called when postcopy migration wants to resume from failure
+ *
+ * @s: Current migration state
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*resume_prepare)(MigrationState *s, void *opaque);
+
+ /**
+ * @switchover_ack_needed
+ *
+ * Checks if switchover ack should be used. Called only on
+ * destination.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true if switchover ack should be used and false
+ * otherwise
+ */
+ bool (*switchover_ack_needed)(void *opaque);
} SaveVMHandlers;
-int register_savevm_live(DeviceState *dev,
- const char *idstr,
- int instance_id,
+/**
+ * register_savevm_live: Register a set of custom migration handlers
+ *
+ * @idstr: state section identifier
+ * @instance_id: instance id
+ * @version_id: version id supported
+ * @ops: SaveVMHandlers structure
+ * @opaque: data pointer passed to SaveVMHandlers handlers
+ */
+int register_savevm_live(const char *idstr,
+ uint32_t instance_id,
int version_id,
- SaveVMHandlers *ops,
+ const SaveVMHandlers *ops,
void *opaque);
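/*
 * Illustrative sketch, not part of this patch: registering a minimal
 * iterating handler set with the new signature. The handler bodies, the
 * "example" id string and example_state are hypothetical.
 */
static int example_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    /* ...initialize source-side state, emit the first section... */
    return 0;
}

static int example_save_live_iterate(QEMUFile *f, void *opaque)
{
    /* ...send data until migration_rate_exceeded(f) says to stop... */
    return 1; /* no more data to send */
}

static int example_load_state(QEMUFile *f, void *opaque, int version_id)
{
    /* ...consume a section written by the save handlers... */
    return 0;
}

static const SaveVMHandlers example_handlers = {
    .save_setup        = example_save_setup,
    .save_live_iterate = example_save_live_iterate,
    .load_state        = example_load_state,
};

/* register_savevm_live("example", 0, 1, &example_handlers, &example_state); */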
-void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
+/**
+ * unregister_savevm: Unregister custom migration handlers
+ *
+ * @obj: object associated with state section
+ * @idstr: state section identifier
+ * @opaque: data pointer passed to register_savevm_live()
+ */
+void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque);
#endif
diff --git a/include/migration/snapshot.h b/include/migration/snapshot.h
index c85b6ec75b..9e4dcaaa75 100644
--- a/include/migration/snapshot.h
+++ b/include/migration/snapshot.h
@@ -15,7 +15,57 @@
#ifndef QEMU_MIGRATION_SNAPSHOT_H
#define QEMU_MIGRATION_SNAPSHOT_H
-int save_snapshot(const char *name, Error **errp);
-int load_snapshot(const char *name, Error **errp);
+#include "qapi/qapi-builtin-types.h"
+#include "qapi/qapi-types-run-state.h"
+
+/**
+ * save_snapshot: Save an internal snapshot.
+ * @name: name of internal snapshot
+ * @overwrite: replace existing snapshot with @name
+ * @vmstate: blockdev node name to store VM state in
+ * @has_devices: whether to use explicit device list
+ * @devices: explicit device list to snapshot
+ * @errp: pointer to error object
+ * On success, return %true.
+ * On failure, store an error through @errp and return %false.
+ */
+bool save_snapshot(const char *name, bool overwrite,
+ const char *vmstate,
+ bool has_devices, strList *devices,
+ Error **errp);
+
+/**
+ * load_snapshot: Load an internal snapshot.
+ * @name: name of internal snapshot
+ * @vmstate: blockdev node name to load VM state from
+ * @has_devices: whether to use explicit device list
+ * @devices: explicit device list to snapshot
+ * @errp: pointer to error object
+ * On success, return %true.
+ * On failure, store an error through @errp and return %false.
+ */
+bool load_snapshot(const char *name,
+ const char *vmstate,
+ bool has_devices, strList *devices,
+ Error **errp);
+
+/**
+ * delete_snapshot: Delete a snapshot.
+ * @name: path to snapshot
+ * @has_devices: whether to use explicit device list
+ * @devices: explicit device list to snapshot
+ * @errp: pointer to error object
+ * On success, return %true.
+ * On failure, store an error through @errp and return %false.
+ */
+bool delete_snapshot(const char *name,
+ bool has_devices, strList *devices,
+ Error **errp);
+
+/**
+ * load_snapshot_resume: Restore runstate after loading snapshot.
+ * @state: state to restore
+ */
+void load_snapshot_resume(RunState state);
#endif
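/*
 * Illustrative sketch, not part of this patch: the boolean-returning
 * snapshot helpers with no explicit device list. The snapshot name and the
 * decision to overwrite are assumptions.
 */
static bool example_checkpoint(Error **errp)
{
    /* Overwrite any existing "checkpoint-1", use the default vmstate node */
    return save_snapshot("checkpoint-1", true, NULL, false, NULL, errp);
}

static bool example_rollback(Error **errp)
{
    return load_snapshot("checkpoint-1", NULL, false, NULL, errp);
}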
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 067b126cf1..294d2d8486 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -27,8 +27,9 @@
#ifndef QEMU_VMSTATE_H
#define QEMU_VMSTATE_H
+#include "hw/vmstate-if.h"
+
typedef struct VMStateInfo VMStateInfo;
-typedef struct VMStateDescription VMStateDescription;
typedef struct VMStateField VMStateField;
/* VMStateInfo allows customized migration of objects that don't fit in
@@ -40,9 +41,11 @@ typedef struct VMStateField VMStateField;
*/
struct VMStateInfo {
const char *name;
- int (*get)(QEMUFile *f, void *pv, size_t size, const VMStateField *field);
- int (*put)(QEMUFile *f, void *pv, size_t size, const VMStateField *field,
- QJSON *vmdesc);
+ int coroutine_mixed_fn (*get)(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field);
+ int coroutine_mixed_fn (*put)(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field,
+ JSONWriter *vmdesc);
};
enum VMStateFlags {
@@ -146,12 +149,16 @@ enum VMStateFlags {
* VMStateField.struct_version_id to tell which version of the
* structure we are referencing to use. */
VMS_VSTRUCT = 0x8000,
+
+ /* Marker for end of list */
+ VMS_END = 0x10000
};
typedef enum {
MIG_PRI_DEFAULT = 0,
MIG_PRI_IOMMU, /* Must happen before PCI devices */
MIG_PRI_PCI_BUS, /* Must happen before IOMMU */
+ MIG_PRI_VIRTIO_MEM, /* Must happen before IOMMU */
MIG_PRI_GICV3_ITS, /* Must happen before PCI devices */
MIG_PRI_GICV3, /* Must happen before the ITS */
MIG_PRI_MAX,
@@ -176,23 +183,35 @@ struct VMStateField {
struct VMStateDescription {
const char *name;
- int unmigratable;
+ bool unmigratable;
+ /*
+ * This VMSD describes something that should be sent during setup phase
+ * of migration. It plays similar role as save_setup() for explicitly
+ * registered vmstate entries, so it can be seen as a way to describe
+ * save_setup() in VMSD structures.
+ *
+ * Note that for now, a SaveStateEntry cannot have a VMSD and
+ * operations (e.g., save_setup()) set at the same time. Consequently,
+ * save_setup() and a VMSD with early_setup set to true are mutually
+ * exclusive. For this reason, early_setup VMSDs are also migrated in a
+ * QEMU_VM_SECTION_FULL section, while save_setup() data is migrated in
+ * a QEMU_VM_SECTION_START section.
+ */
+ bool early_setup;
int version_id;
int minimum_version_id;
- int minimum_version_id_old;
MigrationPriority priority;
- LoadStateHandler *load_state_old;
int (*pre_load)(void *opaque);
int (*post_load)(void *opaque, int version_id);
int (*pre_save)(void *opaque);
int (*post_save)(void *opaque);
bool (*needed)(void *opaque);
+ bool (*dev_unplug_pending)(void *opaque);
+
const VMStateField *fields;
- const VMStateDescription **subsections;
+ const VMStateDescription * const *subsections;
};
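/*
 * Illustrative sketch, not part of this patch: a minimal VMStateDescription
 * following the structure above. MyDevState and its fields are hypothetical.
 */
typedef struct MyDevState {
    uint32_t counter;
    bool enabled;
} MyDevState;

static const VMStateDescription vmstate_mydev = {
    .name = "mydev",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(counter, MyDevState),
        VMSTATE_BOOL(enabled, MyDevState),
        VMSTATE_END_OF_LIST()
    },
};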
-extern const VMStateDescription vmstate_dummy;
-
extern const VMStateInfo vmstate_info_bool;
extern const VMStateInfo vmstate_info_int8;
@@ -216,7 +235,6 @@ extern const VMStateInfo vmstate_info_uint64;
#define VMS_NULLPTR_MARKER (0x30U) /* '0' */
extern const VMStateInfo vmstate_info_nullptr;
-extern const VMStateInfo vmstate_info_float64;
extern const VMStateInfo vmstate_info_cpudouble;
extern const VMStateInfo vmstate_info_timer;
@@ -225,10 +243,26 @@ extern const VMStateInfo vmstate_info_unused_buffer;
extern const VMStateInfo vmstate_info_tmp;
extern const VMStateInfo vmstate_info_bitmap;
extern const VMStateInfo vmstate_info_qtailq;
+extern const VMStateInfo vmstate_info_gtree;
+extern const VMStateInfo vmstate_info_qlist;
#define type_check_2darray(t1,t2,n,m) ((t1(*)[n][m])0 - (t2*)0)
+/*
+ * Check that type t2 is an array of type t1 of size n,
+ * e.g. if t1 is 'foo' and n is 32 then t2 must be 'foo[32]'
+ */
#define type_check_array(t1,t2,n) ((t1(*)[n])0 - (t2*)0)
#define type_check_pointer(t1,t2) ((t1**)0 - (t2*)0)
+/*
+ * type of element 0 of the specified (array) field of the type.
+ * Note that if the field is a pointer then this will return the
+ * pointed-to type rather than complaining.
+ */
+#define typeof_elt_of_field(type, field) typeof(((type *)0)->field[0])
+/* Check that field f in struct type t2 is an array of t1, of any size */
+#define type_check_varray(t1, t2, f) \
+ (type_check(t1, typeof_elt_of_field(t2, f)) \
+ + QEMU_BUILD_BUG_ON_ZERO(!QEMU_IS_ARRAY(((t2 *)0)->f)))
#define vmstate_offset_value(_state, _field, _type) \
(offsetof(_state, _field) + \
@@ -253,6 +287,10 @@ extern const VMStateInfo vmstate_info_qtailq;
vmstate_offset_array(_state, _field, uint8_t, \
sizeof(typeof_field(_state, _field)))
+#define vmstate_offset_varray(_state, _field, _type) \
+ (offsetof(_state, _field) + \
+ type_check_varray(_type, _state, _field))
+
/* In the macros below, if there is a _version, that means the macro's
* field will be processed only if the version being received is >=
* the _version specified. In general, if you add a new field, you
@@ -347,7 +385,7 @@ extern const VMStateInfo vmstate_info_qtailq;
.info = &(_info), \
.size = sizeof(_type), \
.flags = VMS_VARRAY_UINT32|VMS_MULTIPLY_ELEMENTS, \
- .offset = offsetof(_state, _field), \
+ .offset = vmstate_offset_varray(_state, _field, _type), \
}
#define VMSTATE_ARRAY_TEST(_field, _state, _num, _test, _info, _type) {\
@@ -376,7 +414,7 @@ extern const VMStateInfo vmstate_info_qtailq;
.info = &(_info), \
.size = sizeof(_type), \
.flags = VMS_VARRAY_INT32, \
- .offset = offsetof(_state, _field), \
+ .offset = vmstate_offset_varray(_state, _field, _type), \
}
#define VMSTATE_VARRAY_INT32(_field, _state, _field_num, _version, _info, _type) {\
@@ -409,6 +447,16 @@ extern const VMStateInfo vmstate_info_qtailq;
.offset = vmstate_offset_pointer(_state, _field, _type), \
}
+#define VMSTATE_VARRAY_UINT16_ALLOC(_field, _state, _field_num, _version, _info, _type) {\
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),\
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_VARRAY_UINT16 | VMS_POINTER | VMS_ALLOC, \
+ .offset = vmstate_offset_pointer(_state, _field, _type), \
+}
+
#define VMSTATE_VARRAY_UINT16_UNSAFE(_field, _state, _field_num, _version, _info, _type) {\
.name = (stringify(_field)), \
.version_id = (_version), \
@@ -416,7 +464,7 @@ extern const VMStateInfo vmstate_info_qtailq;
.info = &(_info), \
.size = sizeof(_type), \
.flags = VMS_VARRAY_UINT16, \
- .offset = offsetof(_state, _field), \
+ .offset = vmstate_offset_varray(_state, _field, _type), \
}
#define VMSTATE_VSTRUCT_TEST(_field, _state, _test, _version, _vmsd, _type, _struct_version) { \
@@ -520,7 +568,7 @@ extern const VMStateInfo vmstate_info_qtailq;
.vmsd = &(_vmsd), \
.size = sizeof(_type), \
.flags = VMS_STRUCT|VMS_VARRAY_UINT8, \
- .offset = offsetof(_state, _field), \
+ .offset = vmstate_offset_varray(_state, _field, _type), \
}
/* a variable length array (i.e. _type *_field) but we know the
@@ -573,7 +621,7 @@ extern const VMStateInfo vmstate_info_qtailq;
.vmsd = &(_vmsd), \
.size = sizeof(_type), \
.flags = VMS_STRUCT|VMS_VARRAY_INT32, \
- .offset = offsetof(_state, _field), \
+ .offset = vmstate_offset_varray(_state, _field, _type), \
}
#define VMSTATE_STRUCT_VARRAY_UINT32(_field, _state, _field_num, _version, _vmsd, _type) { \
@@ -583,7 +631,7 @@ extern const VMStateInfo vmstate_info_qtailq;
.vmsd = &(_vmsd), \
.size = sizeof(_type), \
.flags = VMS_STRUCT|VMS_VARRAY_UINT32, \
- .offset = offsetof(_state, _field), \
+ .offset = vmstate_offset_varray(_state, _field, _type), \
}
#define VMSTATE_STRUCT_VARRAY_ALLOC(_field, _state, _field_num, _version, _vmsd, _type) {\
@@ -676,8 +724,9 @@ extern const VMStateInfo vmstate_info_qtailq;
* '_state' type
* That the pointer is right at the start of _tmp_type.
*/
-#define VMSTATE_WITH_TMP(_state, _tmp_type, _vmsd) { \
+#define VMSTATE_WITH_TMP_TEST(_state, _test, _tmp_type, _vmsd) { \
.name = "tmp", \
+ .field_exists = (_test), \
.size = sizeof(_tmp_type) + \
QEMU_BUILD_BUG_ON_ZERO(offsetof(_tmp_type, parent) != 0) + \
type_check_pointer(_state, \
@@ -686,6 +735,9 @@ extern const VMStateInfo vmstate_info_qtailq;
.info = &vmstate_info_tmp, \
}
+#define VMSTATE_WITH_TMP(_state, _tmp_type, _vmsd) \
+ VMSTATE_WITH_TMP_TEST(_state, NULL, _tmp_type, _vmsd)
+
#define VMSTATE_UNUSED_BUFFER(_test, _version, _size) { \
.name = "unused", \
.field_exists = (_test), \
@@ -709,8 +761,9 @@ extern const VMStateInfo vmstate_info_qtailq;
/* _field_size should be an int32_t field in the _state struct giving the
* size of the bitmap _field in bits.
*/
-#define VMSTATE_BITMAP(_field, _state, _version, _field_size) { \
+#define VMSTATE_BITMAP_TEST(_field, _state, _test, _version, _field_size) { \
.name = (stringify(_field)), \
+ .field_exists = (_test), \
.version_id = (_version), \
.size_offset = vmstate_offset_value(_state, _field_size, int32_t),\
.info = &vmstate_info_bitmap, \
@@ -718,6 +771,9 @@ extern const VMStateInfo vmstate_info_qtailq;
.offset = offsetof(_state, _field), \
}
+#define VMSTATE_BITMAP(_field, _state, _version, _field_size) \
+ VMSTATE_BITMAP_TEST(_field, _state, NULL, _version, _field_size)
+
/* For migrating a QTAILQ.
* Target QTAILQ needs to be properly initialized.
* _type: type of QTAILQ element
@@ -737,6 +793,65 @@ extern const VMStateInfo vmstate_info_qtailq;
.start = offsetof(_type, _next), \
}
+/*
+ * For migrating a GTree whose key is a pointer to _key_type and the
+ * value, a pointer to _val_type
+ * The target tree must have been properly initialized
+ * _vmsd: Start address of the 2 element array containing the data vmsd
+ * and the key vmsd, in that order
+ * _key_type: type of the key
+ * _val_type: type of the value
+ */
+#define VMSTATE_GTREE_V(_field, _state, _version, _vmsd, \
+ _key_type, _val_type) \
+{ \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .vmsd = (_vmsd), \
+ .info = &vmstate_info_gtree, \
+ .start = sizeof(_key_type), \
+ .size = sizeof(_val_type), \
+ .offset = offsetof(_state, _field), \
+}
+
+/*
+ * For migrating a GTree with direct key and the value a pointer
+ * to _val_type
+ * The target tree must have been properly initialized
+ * _vmsd: data vmsd
+ * _val_type: type of the value
+ */
+#define VMSTATE_GTREE_DIRECT_KEY_V(_field, _state, _version, _vmsd, _val_type) \
+{ \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .vmsd = (_vmsd), \
+ .info = &vmstate_info_gtree, \
+ .start = 0, \
+ .size = sizeof(_val_type), \
+ .offset = offsetof(_state, _field), \
+}
+
+/*
+ * For migrating a QLIST
+ * Target QLIST needs to be properly initialized.
+ * _type: type of QLIST element
+ * _next: name of QLIST_ENTRY entry field in QLIST element
+ * _vmsd: VMSD for QLIST element
+ * size: size of QLIST element
+ * start: offset of QLIST_ENTRY in QLIST element
+ */
+#define VMSTATE_QLIST_V(_field, _state, _version, _vmsd, _type, _next) \
+{ \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .info = &vmstate_info_qlist, \
+ .offset = offsetof(_state, _field), \
+ .start = offsetof(_type, _next), \
+}
+
/* _f : field name
_f_n : num of elements field_name
_n : num of elements
@@ -797,6 +912,19 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_UINT64_V(_f, _s, _v) \
VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, uint64_t)
+#ifdef CONFIG_LINUX
+
+#define VMSTATE_U8_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint8, __u8)
+#define VMSTATE_U16_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint16, __u16)
+#define VMSTATE_U32_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint32, __u32)
+#define VMSTATE_U64_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, __u64)
+
+#endif
+
#define VMSTATE_BOOL(_f, _s) \
VMSTATE_BOOL_V(_f, _s, 0)
@@ -818,6 +946,19 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_UINT64(_f, _s) \
VMSTATE_UINT64_V(_f, _s, 0)
+#ifdef CONFIG_LINUX
+
+#define VMSTATE_U8(_f, _s) \
+ VMSTATE_U8_V(_f, _s, 0)
+#define VMSTATE_U16(_f, _s) \
+ VMSTATE_U16_V(_f, _s, 0)
+#define VMSTATE_U32(_f, _s) \
+ VMSTATE_U32_V(_f, _s, 0)
+#define VMSTATE_U64(_f, _s) \
+ VMSTATE_U64_V(_f, _s, 0)
+
+#endif
+
#define VMSTATE_UINT8_EQUAL(_f, _s, _err_hint) \
VMSTATE_SINGLE_FULL(_f, _s, 0, 0, \
vmstate_info_uint8_equal, uint8_t, _err_hint)
@@ -851,6 +992,9 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_INT32_POSITIVE_LE(_f, _s) \
VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_le, int32_t)
+#define VMSTATE_BOOL_TEST(_f, _s, _t) \
+ VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_bool, bool)
+
#define VMSTATE_INT8_TEST(_f, _s, _t) \
VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_int8, int8_t)
@@ -876,12 +1020,6 @@ extern const VMStateInfo vmstate_info_qtailq;
VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint64, uint64_t)
-#define VMSTATE_FLOAT64_V(_f, _s, _v) \
- VMSTATE_SINGLE(_f, _s, _v, vmstate_info_float64, float64)
-
-#define VMSTATE_FLOAT64(_f, _s) \
- VMSTATE_FLOAT64_V(_f, _s, 0)
-
#define VMSTATE_TIMER_PTR_TEST(_f, _s, _test) \
VMSTATE_POINTER_TEST(_f, _s, _test, vmstate_info_timer, QEMUTimer *)
@@ -993,12 +1131,6 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_INT64_ARRAY(_f, _s, _n) \
VMSTATE_INT64_ARRAY_V(_f, _s, _n, 0)
-#define VMSTATE_FLOAT64_ARRAY_V(_f, _s, _n, _v) \
- VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_float64, float64)
-
-#define VMSTATE_FLOAT64_ARRAY(_f, _s, _n) \
- VMSTATE_FLOAT64_ARRAY_V(_f, _s, _n, 0)
-
#define VMSTATE_CPUDOUBLE_ARRAY_V(_f, _s, _n, _v) \
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_cpudouble, CPU_DoubleU)
@@ -1032,6 +1164,20 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_BUFFER_UNSAFE(_field, _state, _version, _size) \
VMSTATE_BUFFER_UNSAFE_INFO(_field, _state, _version, vmstate_info_buffer, _size)
+/*
+ * These VMSTATE_UNUSED*() macros can be used to fill in the holes
+ * when some of the vmstate fields have become obsolete, so that
+ * migration stays compatible between new/old binaries.
+ *
+ * CAUTION: when using any of the VMSTATE_UNUSED*() macros please be
+ * sure that the size passed in is the size that was actually *sent*
+ * rather than the size of the *structure*. One example is the
+ * boolean type - the size of the structure can vary depending on the
+ * definition of boolean; however, the size actually sent is always
+ * 1 byte (please refer to implementation of VMSTATE_BOOL_V and
+ * vmstate_info_bool). So here we should always pass in size==1
+ * rather than size==sizeof(bool).
+ */
#define VMSTATE_UNUSED_V(_v, _size) \
VMSTATE_UNUSED_BUFFER(NULL, _v, _size)
@@ -1042,37 +1188,79 @@ extern const VMStateInfo vmstate_info_qtailq;
VMSTATE_UNUSED_BUFFER(_test, 0, _size)
#define VMSTATE_END_OF_LIST() \
- {}
+ { \
+ .flags = VMS_END, \
+ }
int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, int version_id);
int vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque, QJSON *vmdesc);
+ void *opaque, JSONWriter *vmdesc);
+int vmstate_save_state_with_err(QEMUFile *f, const VMStateDescription *vmsd,
+ void *opaque, JSONWriter *vmdesc, Error **errp);
int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque, QJSON *vmdesc, int version_id);
+ void *opaque, JSONWriter *vmdesc,
+ int version_id, Error **errp);
-bool vmstate_save_needed(const VMStateDescription *vmsd, void *opaque);
+bool vmstate_section_needed(const VMStateDescription *vmsd, void *opaque);
+
+#define VMSTATE_INSTANCE_ID_ANY -1
/* Returns: 0 on success, -1 on failure */
-int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
+int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
const VMStateDescription *vmsd,
void *base, int alias_id,
int required_for_version,
Error **errp);
-/* Returns: 0 on success, -1 on failure */
-static inline int vmstate_register(DeviceState *dev, int instance_id,
+/**
+ * vmstate_register() - legacy function to register state
+ * serialisation description
+ *
+ * New code shouldn't be using this function as QOM-ified devices have
+ * dc->vmsd to store the serialisation description.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+static inline int vmstate_register(VMStateIf *obj, int instance_id,
const VMStateDescription *vmsd,
void *opaque)
{
- return vmstate_register_with_alias_id(dev, instance_id, vmsd,
+ return vmstate_register_with_alias_id(obj, instance_id, vmsd,
+ opaque, -1, 0, NULL);
+}
+
+/**
+ * vmstate_replace_hack_for_ppc() - ppc used to abuse vmstate_register
+ *
+ * Don't even think about using this function in new code.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id,
+ const VMStateDescription *vmsd,
+ void *opaque);
+
+/**
+ * vmstate_register_any() - legacy function to register state
+ * serialisation description and let the function choose the id
+ *
+ * New code shouldn't be using this function as QOM-ified devices have
+ * dc->vmsd to store the serialisation description.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+static inline int vmstate_register_any(VMStateIf *obj,
+ const VMStateDescription *vmsd,
+ void *opaque)
+{
+ return vmstate_register_with_alias_id(obj, VMSTATE_INSTANCE_ID_ANY, vmsd,
opaque, -1, 0, NULL);
}
-void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd,
+void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
void *opaque);
-struct MemoryRegion;
void vmstate_register_ram(struct MemoryRegion *memory, DeviceState *dev);
void vmstate_unregister_ram(struct MemoryRegion *memory, DeviceState *dev);
void vmstate_register_ram_global(struct MemoryRegion *memory);
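
The field macros and registration helpers above are easiest to read next to a concrete user. The following is a minimal sketch, assuming a hypothetical DemoState structure and device; DemoState, vmstate_demo and demo_register are invented for illustration and are not part of this patch.

/* Hypothetical device state; the struct layout is illustrative only. */
typedef struct DemoState {
    uint32_t level;
    bool enabled;
    uint8_t regs[16];
} DemoState;

static const VMStateDescription vmstate_demo = {
    .name = "demo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(level, DemoState),
        VMSTATE_BOOL(enabled, DemoState),
        VMSTATE_UINT8_ARRAY(regs, DemoState, 16),
        VMSTATE_END_OF_LIST()
    },
};

static void demo_register(DeviceState *dev, DemoState *s)
{
    /* Let the core pick the instance id (VMSTATE_INSTANCE_ID_ANY). */
    vmstate_register_any(VMSTATE_IF(dev), &vmstate_demo, s);
}

QOM-ified devices would normally just point dc->vmsd at vmstate_demo; the explicit call is shown only to exercise the new vmstate_register_any() helper.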
diff --git a/include/monitor/hmp-target.h b/include/monitor/hmp-target.h
index 454e8ed155..d78e979f05 100644
--- a/include/monitor/hmp-target.h
+++ b/include/monitor/hmp-target.h
@@ -25,26 +25,37 @@
#ifndef MONITOR_HMP_TARGET_H
#define MONITOR_HMP_TARGET_H
+#include "cpu.h"
+
#define MD_TLONG 0
#define MD_I32 1
struct MonitorDef {
const char *name;
int offset;
- target_long (*get_value)(const struct MonitorDef *md, int val);
+ target_long (*get_value)(Monitor *mon, const struct MonitorDef *md,
+ int val);
int type;
};
const MonitorDef *target_monitor_defs(void);
int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval);
-CPUArchState *mon_get_cpu_env(void);
-CPUState *mon_get_cpu(void);
+CPUArchState *mon_get_cpu_env(Monitor *mon);
+CPUState *mon_get_cpu(Monitor *mon);
void hmp_info_mem(Monitor *mon, const QDict *qdict);
void hmp_info_tlb(Monitor *mon, const QDict *qdict);
void hmp_mce(Monitor *mon, const QDict *qdict);
void hmp_info_local_apic(Monitor *mon, const QDict *qdict);
-void hmp_info_io_apic(Monitor *mon, const QDict *qdict);
+void hmp_info_sev(Monitor *mon, const QDict *qdict);
+void hmp_info_sgx(Monitor *mon, const QDict *qdict);
+void hmp_info_via(Monitor *mon, const QDict *qdict);
+void hmp_memory_dump(Monitor *mon, const QDict *qdict);
+void hmp_physical_memory_dump(Monitor *mon, const QDict *qdict);
+void hmp_info_registers(Monitor *mon, const QDict *qdict);
+void hmp_gva2gpa(Monitor *mon, const QDict *qdict);
+void hmp_gpa2hva(Monitor *mon, const QDict *qdict);
+void hmp_gpa2hpa(Monitor *mon, const QDict *qdict);
#endif /* MONITOR_HMP_TARGET_H */
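
As a rough illustration of the widened get_value() hook, a per-target register table could now look like the sketch below; demo_get_psr, the "psr" name and the placeholder computation are invented for this example.

/* Hypothetical computed register using the new Monitor-aware callback. */
static target_long demo_get_psr(Monitor *mon, const struct MonitorDef *md,
                                int val)
{
    CPUArchState *env = mon_get_cpu_env(mon);   /* per-monitor CPU now */

    return env ? val : 0;                       /* placeholder result */
}

static const MonitorDef demo_monitor_defs[] = {
    { "psr", 0, demo_get_psr, MD_TLONG },       /* computed value */
    { NULL },
};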
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
new file mode 100644
index 0000000000..f4cf8f6717
--- /dev/null
+++ b/include/monitor/hmp.h
@@ -0,0 +1,184 @@
+/*
+ * Human Monitor Interface
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HMP_H
+#define HMP_H
+
+#include "qemu/readline.h"
+#include "qapi/qapi-types-common.h"
+
+bool hmp_handle_error(Monitor *mon, Error *err);
+void hmp_help_cmd(Monitor *mon, const char *name);
+strList *hmp_split_at_comma(const char *str);
+
+void hmp_info_name(Monitor *mon, const QDict *qdict);
+void hmp_info_version(Monitor *mon, const QDict *qdict);
+void hmp_info_kvm(Monitor *mon, const QDict *qdict);
+void hmp_info_status(Monitor *mon, const QDict *qdict);
+void hmp_info_uuid(Monitor *mon, const QDict *qdict);
+void hmp_info_chardev(Monitor *mon, const QDict *qdict);
+void hmp_info_mice(Monitor *mon, const QDict *qdict);
+void hmp_info_migrate(Monitor *mon, const QDict *qdict);
+void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict);
+void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict);
+void hmp_info_cpus(Monitor *mon, const QDict *qdict);
+void hmp_info_vnc(Monitor *mon, const QDict *qdict);
+void hmp_info_spice(Monitor *mon, const QDict *qdict);
+void hmp_info_balloon(Monitor *mon, const QDict *qdict);
+void hmp_info_irq(Monitor *mon, const QDict *qdict);
+void hmp_info_pic(Monitor *mon, const QDict *qdict);
+void hmp_info_pci(Monitor *mon, const QDict *qdict);
+void hmp_info_tpm(Monitor *mon, const QDict *qdict);
+void hmp_info_iothreads(Monitor *mon, const QDict *qdict);
+void hmp_quit(Monitor *mon, const QDict *qdict);
+void hmp_stop(Monitor *mon, const QDict *qdict);
+void hmp_sync_profile(Monitor *mon, const QDict *qdict);
+void hmp_system_reset(Monitor *mon, const QDict *qdict);
+void hmp_system_powerdown(Monitor *mon, const QDict *qdict);
+void hmp_exit_preconfig(Monitor *mon, const QDict *qdict);
+void hmp_announce_self(Monitor *mon, const QDict *qdict);
+void hmp_cpu(Monitor *mon, const QDict *qdict);
+void hmp_memsave(Monitor *mon, const QDict *qdict);
+void hmp_pmemsave(Monitor *mon, const QDict *qdict);
+void hmp_ringbuf_write(Monitor *mon, const QDict *qdict);
+void hmp_ringbuf_read(Monitor *mon, const QDict *qdict);
+void hmp_cont(Monitor *mon, const QDict *qdict);
+void hmp_system_wakeup(Monitor *mon, const QDict *qdict);
+void hmp_nmi(Monitor *mon, const QDict *qdict);
+void hmp_info_network(Monitor *mon, const QDict *qdict);
+void hmp_set_link(Monitor *mon, const QDict *qdict);
+void hmp_balloon(Monitor *mon, const QDict *qdict);
+void hmp_loadvm(Monitor *mon, const QDict *qdict);
+void hmp_savevm(Monitor *mon, const QDict *qdict);
+void hmp_delvm(Monitor *mon, const QDict *qdict);
+void hmp_migrate_cancel(Monitor *mon, const QDict *qdict);
+void hmp_migrate_continue(Monitor *mon, const QDict *qdict);
+void hmp_migrate_incoming(Monitor *mon, const QDict *qdict);
+void hmp_migrate_recover(Monitor *mon, const QDict *qdict);
+void hmp_migrate_pause(Monitor *mon, const QDict *qdict);
+void hmp_migrate_set_capability(Monitor *mon, const QDict *qdict);
+void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict);
+void hmp_client_migrate_info(Monitor *mon, const QDict *qdict);
+void hmp_migrate_start_postcopy(Monitor *mon, const QDict *qdict);
+void hmp_x_colo_lost_heartbeat(Monitor *mon, const QDict *qdict);
+void hmp_set_password(Monitor *mon, const QDict *qdict);
+void hmp_expire_password(Monitor *mon, const QDict *qdict);
+void hmp_change(Monitor *mon, const QDict *qdict);
+#ifdef CONFIG_VNC
+void hmp_change_vnc(Monitor *mon, const char *device, const char *target,
+ const char *arg, const char *read_only, bool force,
+ Error **errp);
+#endif
+void hmp_change_medium(Monitor *mon, const char *device, const char *target,
+ const char *arg, const char *read_only, bool force,
+ Error **errp);
+void hmp_migrate(Monitor *mon, const QDict *qdict);
+void hmp_device_add(Monitor *mon, const QDict *qdict);
+void hmp_device_del(Monitor *mon, const QDict *qdict);
+void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict);
+void hmp_netdev_add(Monitor *mon, const QDict *qdict);
+void hmp_netdev_del(Monitor *mon, const QDict *qdict);
+void hmp_getfd(Monitor *mon, const QDict *qdict);
+void hmp_closefd(Monitor *mon, const QDict *qdict);
+void hmp_mouse_move(Monitor *mon, const QDict *qdict);
+void hmp_mouse_button(Monitor *mon, const QDict *qdict);
+void hmp_mouse_set(Monitor *mon, const QDict *qdict);
+void hmp_sendkey(Monitor *mon, const QDict *qdict);
+void coroutine_fn hmp_screendump(Monitor *mon, const QDict *qdict);
+void hmp_chardev_add(Monitor *mon, const QDict *qdict);
+void hmp_chardev_change(Monitor *mon, const QDict *qdict);
+void hmp_chardev_remove(Monitor *mon, const QDict *qdict);
+void hmp_chardev_send_break(Monitor *mon, const QDict *qdict);
+void hmp_object_add(Monitor *mon, const QDict *qdict);
+void hmp_object_del(Monitor *mon, const QDict *qdict);
+void hmp_info_memdev(Monitor *mon, const QDict *qdict);
+void hmp_info_numa(Monitor *mon, const QDict *qdict);
+void hmp_info_memory_devices(Monitor *mon, const QDict *qdict);
+void hmp_qom_list(Monitor *mon, const QDict *qdict);
+void hmp_qom_get(Monitor *mon, const QDict *qdict);
+void hmp_qom_set(Monitor *mon, const QDict *qdict);
+void hmp_info_qom_tree(Monitor *mon, const QDict *dict);
+void hmp_virtio_query(Monitor *mon, const QDict *qdict);
+void hmp_virtio_status(Monitor *mon, const QDict *qdict);
+void hmp_virtio_queue_status(Monitor *mon, const QDict *qdict);
+void hmp_vhost_queue_status(Monitor *mon, const QDict *qdict);
+void hmp_virtio_queue_element(Monitor *mon, const QDict *qdict);
+void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
+void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
+void object_add_completion(ReadLineState *rs, int nb_args, const char *str);
+void object_del_completion(ReadLineState *rs, int nb_args, const char *str);
+void device_add_completion(ReadLineState *rs, int nb_args, const char *str);
+void device_del_completion(ReadLineState *rs, int nb_args, const char *str);
+void sendkey_completion(ReadLineState *rs, int nb_args, const char *str);
+void chardev_remove_completion(ReadLineState *rs, int nb_args, const char *str);
+void chardev_add_completion(ReadLineState *rs, int nb_args, const char *str);
+void set_link_completion(ReadLineState *rs, int nb_args, const char *str);
+void netdev_add_completion(ReadLineState *rs, int nb_args, const char *str);
+void netdev_del_completion(ReadLineState *rs, int nb_args, const char *str);
+void ringbuf_write_completion(ReadLineState *rs, int nb_args, const char *str);
+void info_trace_events_completion(ReadLineState *rs, int nb_args, const char *str);
+void trace_event_completion(ReadLineState *rs, int nb_args, const char *str);
+void watchdog_action_completion(ReadLineState *rs, int nb_args,
+ const char *str);
+void migrate_set_capability_completion(ReadLineState *rs, int nb_args,
+ const char *str);
+void migrate_set_parameter_completion(ReadLineState *rs, int nb_args,
+ const char *str);
+void delvm_completion(ReadLineState *rs, int nb_args, const char *str);
+void loadvm_completion(ReadLineState *rs, int nb_args, const char *str);
+void hmp_rocker(Monitor *mon, const QDict *qdict);
+void hmp_rocker_ports(Monitor *mon, const QDict *qdict);
+void hmp_rocker_of_dpa_flows(Monitor *mon, const QDict *qdict);
+void hmp_rocker_of_dpa_groups(Monitor *mon, const QDict *qdict);
+void hmp_info_dump(Monitor *mon, const QDict *qdict);
+void hmp_info_ramblock(Monitor *mon, const QDict *qdict);
+void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict);
+void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict);
+void hmp_info_memory_size_summary(Monitor *mon, const QDict *qdict);
+void hmp_info_replay(Monitor *mon, const QDict *qdict);
+void hmp_replay_break(Monitor *mon, const QDict *qdict);
+void hmp_replay_delete_break(Monitor *mon, const QDict *qdict);
+void hmp_replay_seek(Monitor *mon, const QDict *qdict);
+void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict);
+void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict);
+void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_human_readable_text_helper(Monitor *mon,
+ HumanReadableText *(*qmp_handler)(Error **));
+void hmp_info_stats(Monitor *mon, const QDict *qdict);
+void hmp_one_insn_per_tb(Monitor *mon, const QDict *qdict);
+void hmp_watchdog_action(Monitor *mon, const QDict *qdict);
+void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict);
+void hmp_info_capture(Monitor *mon, const QDict *qdict);
+void hmp_stopcapture(Monitor *mon, const QDict *qdict);
+void hmp_wavcapture(Monitor *mon, const QDict *qdict);
+void hmp_trace_event(Monitor *mon, const QDict *qdict);
+void hmp_trace_file(Monitor *mon, const QDict *qdict);
+void hmp_info_trace_events(Monitor *mon, const QDict *qdict);
+void hmp_help(Monitor *mon, const QDict *qdict);
+void hmp_info_help(Monitor *mon, const QDict *qdict);
+void hmp_info_sync_profile(Monitor *mon, const QDict *qdict);
+void hmp_info_history(Monitor *mon, const QDict *qdict);
+void hmp_logfile(Monitor *mon, const QDict *qdict);
+void hmp_log(Monitor *mon, const QDict *qdict);
+void hmp_gdbserver(Monitor *mon, const QDict *qdict);
+void hmp_print(Monitor *mon, const QDict *qdict);
+void hmp_sum(Monitor *mon, const QDict *qdict);
+void hmp_ioport_read(Monitor *mon, const QDict *qdict);
+void hmp_ioport_write(Monitor *mon, const QDict *qdict);
+void hmp_boot_set(Monitor *mon, const QDict *qdict);
+void hmp_info_mtree(Monitor *mon, const QDict *qdict);
+void hmp_info_cryptodev(Monitor *mon, const QDict *qdict);
+
+#endif
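
All of the handlers declared here share the same (Monitor *, const QDict *) calling convention. A minimal sketch of one, with an invented command name and "name" argument key; hmp_handle_error() from this header does the error reporting:

static void hmp_greeting(Monitor *mon, const QDict *qdict)
{
    const char *who = qdict_get_try_str(qdict, "name");
    Error *err = NULL;

    if (!who) {
        error_setg(&err, "missing 'name' argument");
    }
    if (hmp_handle_error(mon, err)) {   /* prints and frees err if set */
        return;
    }
    monitor_printf(mon, "hello, %s\n", who);
}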
diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h
index c1b40a9cac..965f5d5450 100644
--- a/include/monitor/monitor.h
+++ b/include/monitor/monitor.h
@@ -1,25 +1,28 @@
#ifndef MONITOR_H
#define MONITOR_H
-#include "qemu-common.h"
#include "block/block.h"
#include "qapi/qapi-types-misc.h"
#include "qemu/readline.h"
+#include "exec/hwaddr.h"
-extern __thread Monitor *cur_mon;
-
-/* flags for monitor_init */
-/* 0x01 unused */
-#define MONITOR_USE_READLINE 0x02
-#define MONITOR_USE_CONTROL 0x04
-#define MONITOR_USE_PRETTY 0x08
+typedef struct MonitorHMP MonitorHMP;
+typedef struct MonitorOptions MonitorOptions;
#define QMP_REQ_QUEUE_LEN_MAX 8
+extern QemuOptsList qemu_mon_opts;
+
+Monitor *monitor_cur(void);
+Monitor *monitor_set_cur(Coroutine *co, Monitor *mon);
bool monitor_cur_is_qmp(void);
void monitor_init_globals(void);
-void monitor_init(Chardev *chr, int flags);
+void monitor_init_globals_core(void);
+void monitor_init_qmp(Chardev *chr, bool pretty, Error **errp);
+void monitor_init_hmp(Chardev *chr, bool use_readline, Error **errp);
+int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp);
+int monitor_init_opts(QemuOpts *opts, Error **errp);
void monitor_cleanup(void);
int monitor_suspend(Monitor *mon);
@@ -28,27 +31,36 @@ void monitor_resume(Monitor *mon);
int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp);
int monitor_fd_param(Monitor *mon, const char *fdname, Error **errp);
-void monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
- GCC_FMT_ATTR(2, 0);
-void monitor_printf(Monitor *mon, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
-int monitor_fprintf(FILE *stream, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
+int monitor_puts(Monitor *mon, const char *str);
+int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
+ G_GNUC_PRINTF(2, 0);
+int monitor_printf(Monitor *mon, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
+void monitor_printc(Monitor *mon, int ch);
void monitor_flush(Monitor *mon);
-int monitor_set_cpu(int cpu_index);
-int monitor_get_cpu_index(void);
+int monitor_set_cpu(Monitor *mon, int cpu_index);
+int monitor_get_cpu_index(Monitor *mon);
+
+int monitor_puts_locked(Monitor *mon, const char *str);
+void monitor_flush_locked(Monitor *mon);
-void monitor_read_command(Monitor *mon, int show_prompt);
-int monitor_read_password(Monitor *mon, ReadLineFunc *readline_func,
+void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, uint64_t size, Error **errp);
+
+void monitor_read_command(MonitorHMP *mon, int show_prompt);
+int monitor_read_password(MonitorHMP *mon, ReadLineFunc *readline_func,
void *opaque);
AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
- bool has_opaque, const char *opaque,
- Error **errp);
-int monitor_fdset_get_fd(int64_t fdset_id, int flags);
-int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd);
+ const char *opaque, Error **errp);
+int monitor_fdset_dup_fd_add(int64_t fdset_id, int flags);
void monitor_fdset_dup_fd_remove(int dup_fd);
-int monitor_fdset_dup_fd_find(int dup_fd);
+int64_t monitor_fdset_dup_fd_find(int dup_fd);
+
+void monitor_register_hmp(const char *name, bool info,
+ void (*cmd)(Monitor *mon, const QDict *qdict));
+void monitor_register_hmp_info_hrt(const char *name,
+ HumanReadableText *(*handler)(Error **errp));
-void monitor_vfprintf(FILE *stream,
- const char *fmt, va_list ap) GCC_FMT_ATTR(2, 0);
+int error_vprintf_unless_qmp(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+int error_printf_unless_qmp(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#endif /* MONITOR_H */
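
The two registration helpers added above let commands be registered at run time rather than only through the static command tables. A short sketch, assuming hypothetical handlers qmp_x_query_demo() and hmp_info_demo2() that are not part of this patch:

/* Illustrative prototypes only; real handlers would live elsewhere. */
HumanReadableText *qmp_x_query_demo(Error **errp);
void hmp_info_demo2(Monitor *mon, const QDict *qdict);

static void demo_register_hmp_commands(void)
{
    /* "info demo", rendered from a HumanReadableText-returning handler */
    monitor_register_hmp_info_hrt("demo", qmp_x_query_demo);

    /* "info demo2", with a hand-written HMP handler */
    monitor_register_hmp("demo2", true, hmp_info_demo2);
}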
diff --git a/include/monitor/qdev.h b/include/monitor/qdev.h
index 0ff3331284..1d57bf6577 100644
--- a/include/monitor/qdev.h
+++ b/include/monitor/qdev.h
@@ -1,17 +1,39 @@
#ifndef MONITOR_QDEV_H
#define MONITOR_QDEV_H
-#include "hw/qdev-core.h"
-
/*** monitor commands ***/
void hmp_info_qtree(Monitor *mon, const QDict *qdict);
void hmp_info_qdm(Monitor *mon, const QDict *qdict);
-void hmp_info_qom_tree(Monitor *mon, const QDict *dict);
void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp);
int qdev_device_help(QemuOpts *opts);
DeviceState *qdev_device_add(QemuOpts *opts, Error **errp);
-void qdev_set_id(DeviceState *dev, const char *id);
+DeviceState *qdev_device_add_from_qdict(const QDict *opts,
+ bool from_json, Error **errp);
+
+/**
+ * qdev_set_id: parent the device and set its id if provided.
+ * @dev: device to handle
+ * @id: id to be given to the device, or NULL.
+ *
+ * Returns: the id of the device in case of success; otherwise NULL.
+ *
+ * @dev must be unrealized, unparented and must not have an id.
+ *
+ * If @id is non-NULL, this function tries to set up the QOM path of @dev as
+ * "/peripheral/id". If @id is already taken, it fails. If it succeeds,
+ * the id field of @dev is set to @id (@dev now owns the given @id
+ * parameter).
+ *
+ * If @id is NULL, this function generates a unique name and sets up the
+ * QOM path of @dev as "/peripheral-anon/name". This name is not set as the id
+ * of @dev.
+ *
+ * Upon success, it returns the id/name (generated or provided). The
+ * returned string is owned by the corresponding child property and must
+ * not be freed by the caller.
+ */
+const char *qdev_set_id(DeviceState *dev, char *id, Error **errp);
#endif
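
A minimal sketch of the qdev_set_id() contract described above, with an invented caller and id string:

static DeviceState *demo_add_device(DeviceState *dev)
{
    Error *err = NULL;
    const char *name = qdev_set_id(dev, g_strdup("mydev0"), &err);

    if (!name) {
        error_report_err(err);      /* e.g. the id was already taken */
        return NULL;
    }
    /* On success @dev owns the string and sits under "/peripheral/mydev0";
     * the returned name belongs to the child property, do not free it. */
    return dev;
}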
diff --git a/include/monitor/qmp-helpers.h b/include/monitor/qmp-helpers.h
new file mode 100644
index 0000000000..318c3357a2
--- /dev/null
+++ b/include/monitor/qmp-helpers.h
@@ -0,0 +1,29 @@
+/*
+ * QMP command helpers
+ *
+ * Copyright (c) 2022 Red Hat Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef MONITOR_QMP_HELPERS_H
+#define MONITOR_QMP_HELPERS_H
+
+bool qmp_add_client_spice(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, Error **errp);
+#ifdef CONFIG_VNC
+bool qmp_add_client_vnc(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, Error **errp);
+#endif
+#ifdef CONFIG_DBUS_DISPLAY
+bool qmp_add_client_dbus_display(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, Error **errp);
+#endif
+bool qmp_add_client_char(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, const char *protocol,
+ Error **errp);
+
+#endif
diff --git a/include/net/announce.h b/include/net/announce.h
new file mode 100644
index 0000000000..3d90c83c23
--- /dev/null
+++ b/include/net/announce.h
@@ -0,0 +1,44 @@
+/*
+ * Self-announce facility
+ * (c) 2017-2019 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_NET_ANNOUNCE_H
+#define QEMU_NET_ANNOUNCE_H
+
+#include "qapi/qapi-types-net.h"
+#include "qemu/timer.h"
+
+struct AnnounceTimer {
+ QEMUTimer *tm;
+ AnnounceParameters params;
+ QEMUClockType type;
+ int round;
+};
+
+/* Returns: update the timer to the next time point */
+int64_t qemu_announce_timer_step(AnnounceTimer *timer);
+
+/*
+ * Delete the underlying timer and other data.
+ * If 'free_named' is true and the timer is a named timer, then remove
+ * it from the list of named timers and free the AnnounceTimer itself.
+ */
+void qemu_announce_timer_del(AnnounceTimer *timer, bool free_named);
+
+/*
+ * Under BQL/main thread
+ * Reset the timer to the given parameters/type/notifier.
+ */
+void qemu_announce_timer_reset(AnnounceTimer *timer,
+ AnnounceParameters *params,
+ QEMUClockType type,
+ QEMUTimerCB *cb,
+ void *opaque);
+
+void qemu_announce_self(AnnounceTimer *timer, AnnounceParameters *params);
+
+#endif
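
A minimal sketch of driving an AnnounceTimer by hand, assuming an invented callback and a static timer instance; real users normally let qemu_announce_self() manage this:

static AnnounceTimer demo_announce;

static void demo_announce_cb(void *opaque)
{
    AnnounceTimer *timer = opaque;

    /* ...emit the announcement here, then schedule the next round... */
    qemu_announce_timer_step(timer);
}

static void demo_start_announce(AnnounceParameters *params)
{
    /* Must be called under the BQL, as noted above. */
    qemu_announce_timer_reset(&demo_announce, params, QEMU_CLOCK_VIRTUAL,
                              demo_announce_cb, &demo_announce);
}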
diff --git a/include/net/can_emu.h b/include/net/can_emu.h
index 1da4d01b95..6f9b206bb5 100644
--- a/include/net/can_emu.h
+++ b/include/net/can_emu.h
@@ -28,6 +28,7 @@
#ifndef NET_CAN_EMU_H
#define NET_CAN_EMU_H
+#include "qemu/queue.h"
#include "qom/object.h"
/* NOTE: the following two structures are copied from <linux/can.h>. */
@@ -45,7 +46,8 @@ typedef uint32_t qemu_canid_t;
typedef struct qemu_can_frame {
qemu_canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
uint8_t can_dlc; /* data length code: 0 .. 8 */
- uint8_t data[8] QEMU_ALIGNED(8);
+ uint8_t flags;
+ uint8_t data[64] QEMU_ALIGNED(8);
} qemu_can_frame;
/* Keep defines for QEMU separate from Linux ones for now */
@@ -57,6 +59,10 @@ typedef struct qemu_can_frame {
#define QEMU_CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */
#define QEMU_CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */
+#define QEMU_CAN_FRMF_BRS 0x01 /* bit rate switch (2nd bitrate for data) */
+#define QEMU_CAN_FRMF_ESI 0x02 /* error state ind. of transmitting node */
+#define QEMU_CAN_FRMF_TYPE_FD 0x10 /* internal bit ind. of CAN FD frame */
+
/**
* struct qemu_can_filter - CAN ID based filter in can_register().
* @can_id: relevant bits of CAN ID which are not masked out.
@@ -82,7 +88,7 @@ typedef struct CanBusClientState CanBusClientState;
typedef struct CanBusState CanBusState;
typedef struct CanBusClientInfo {
- int (*can_receive)(CanBusClientState *);
+ bool (*can_receive)(CanBusClientState *);
ssize_t (*receive)(CanBusClientState *,
const struct qemu_can_frame *frames, size_t frames_cnt);
} CanBusClientInfo;
@@ -96,15 +102,11 @@ struct CanBusClientState {
char *model;
char *name;
void (*destructor)(CanBusClientState *);
+ bool fd_mode;
};
#define TYPE_CAN_BUS "can-bus"
-#define CAN_BUS_CLASS(klass) \
- OBJECT_CLASS_CHECK(CanBusClass, (klass), TYPE_CAN_BUS)
-#define CAN_BUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(CanBusClass, (obj), TYPE_CAN_BUS)
-#define CAN_BUS(obj) \
- OBJECT_CHECK(CanBusState, (obj), TYPE_CAN_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(CanBusState, CAN_BUS)
int can_bus_filter_match(struct qemu_can_filter *filter, qemu_canid_t can_id);
@@ -120,4 +122,8 @@ int can_bus_client_set_filters(CanBusClientState *,
const struct qemu_can_filter *filters,
size_t filters_cnt);
+uint8_t can_dlc2len(uint8_t can_dlc);
+
+uint8_t can_len2dlc(uint8_t len);
+
#endif
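
A sketch of a bus client adapted to the bool-returning can_receive hook; the demo functions are placeholders and do no real work:

static bool demo_can_receive(CanBusClientState *client)
{
    return true;                        /* always ready in this toy client */
}

static ssize_t demo_receive(CanBusClientState *client,
                            const qemu_can_frame *frames, size_t frames_cnt)
{
    /* A real client would inspect frames[i].flags for QEMU_CAN_FRMF_TYPE_FD
     * and honour the enlarged 64-byte data[] payload. */
    return (ssize_t)frames_cnt;         /* pretend everything was consumed */
}

static const CanBusClientInfo demo_bus_client_info = {
    .can_receive = demo_can_receive,
    .receive = demo_receive,
};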
diff --git a/include/net/can_host.h b/include/net/can_host.h
index d79676746b..caab71bdda 100644
--- a/include/net/can_host.h
+++ b/include/net/can_host.h
@@ -29,27 +29,23 @@
#define NET_CAN_HOST_H
#include "net/can_emu.h"
+#include "qom/object.h"
#define TYPE_CAN_HOST "can-host"
-#define CAN_HOST_CLASS(klass) \
- OBJECT_CLASS_CHECK(CanHostClass, (klass), TYPE_CAN_HOST)
-#define CAN_HOST_GET_CLASS(obj) \
- OBJECT_GET_CLASS(CanHostClass, (obj), TYPE_CAN_HOST)
-#define CAN_HOST(obj) \
- OBJECT_CHECK(CanHostState, (obj), TYPE_CAN_HOST)
-
-typedef struct CanHostState {
- ObjectClass oc;
+OBJECT_DECLARE_TYPE(CanHostState, CanHostClass, CAN_HOST)
+
+struct CanHostState {
+ Object oc;
CanBusState *bus;
CanBusClientState bus_client;
-} CanHostState;
+};
-typedef struct CanHostClass {
+struct CanHostClass {
ObjectClass oc;
void (*connect)(CanHostState *ch, Error **errp);
void (*disconnect)(CanHostState *ch);
-} CanHostClass;
+};
#endif
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 05a0d273fe..7dec37e56c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -21,11 +21,16 @@
#include "qemu/bswap.h"
struct iovec;
+#define CSUM_IP 0x01
+#define CSUM_TCP 0x02
+#define CSUM_UDP 0x04
+#define CSUM_ALL (CSUM_IP | CSUM_TCP | CSUM_UDP)
+
uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq);
uint16_t net_checksum_finish(uint32_t sum);
uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto,
uint8_t *addrs, uint8_t *buf);
-void net_checksum_calculate(uint8_t *data, int length);
+void net_checksum_calculate(uint8_t *data, int length, int csum_flag);
static inline uint32_t
net_checksum_add(int len, uint8_t *buf)
diff --git a/include/net/eth.h b/include/net/eth.h
index e6dc8a7ba0..3b80b6e07f 100644
--- a/include/net/eth.h
+++ b/include/net/eth.h
@@ -31,6 +31,9 @@
#define ETH_ALEN 6
#define ETH_HLEN 14
+#define ETH_ZLEN 60 /* Min. octets in frame without FCS */
+#define ETH_FCS_LEN 4
+#define ETH_MTU 1500
struct eth_header {
uint8_t h_dest[ETH_ALEN]; /* destination eth addr */
@@ -158,7 +161,7 @@ struct tcp_hdr {
u_short th_dport; /* destination port */
uint32_t th_seq; /* sequence number */
uint32_t th_ack; /* acknowledgment number */
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
u_char th_off : 4, /* data offset */
th_x2:4; /* (unused) */
#else
@@ -177,6 +180,8 @@ struct tcp_hdr {
#define TH_PUSH 0x08
#define TH_ACK 0x10
#define TH_URG 0x20
+#define TH_ECE 0x40
+#define TH_CWR 0x80
u_short th_win; /* window */
u_short th_sum; /* checksum */
u_short th_urp; /* urgent pointer */
@@ -184,6 +189,7 @@ struct tcp_hdr {
#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt
#define ip6_ecn_acc ip6_ctlun.ip6_un3.ip6_un3_ecn
+#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen
#define PKT_GET_ETH_HDR(p) \
((struct eth_header *)(p))
@@ -218,6 +224,7 @@ struct tcp_hdr {
#define IP_HEADER_VERSION_6 (6)
#define IP_PROTO_TCP (6)
#define IP_PROTO_UDP (17)
+#define IP_PROTO_SCTP (132)
#define IPTOS_ECN_MASK 0x03
#define IPTOS_ECN(x) ((x) & IPTOS_ECN_MASK)
#define IPTOS_ECN_CE 0x03
@@ -308,10 +315,10 @@ eth_get_l2_hdr_length(const void *p)
}
static inline uint32_t
-eth_get_l2_hdr_length_iov(const struct iovec *iov, int iovcnt)
+eth_get_l2_hdr_length_iov(const struct iovec *iov, size_t iovcnt, size_t iovoff)
{
uint8_t p[sizeof(struct eth_header) + sizeof(struct vlan_header)];
- size_t copied = iov_to_buf(iov, iovcnt, 0, p, ARRAY_SIZE(p));
+ size_t copied = iov_to_buf(iov, iovcnt, iovoff, p, ARRAY_SIZE(p));
if (copied < ARRAY_SIZE(p)) {
return copied;
@@ -336,26 +343,19 @@ eth_get_pkt_tci(const void *p)
size_t
eth_strip_vlan(const struct iovec *iov, int iovcnt, size_t iovoff,
- uint8_t *new_ehdr_buf,
+ void *new_ehdr_buf,
uint16_t *payload_offset, uint16_t *tci);
size_t
-eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff,
- uint16_t vet, uint8_t *new_ehdr_buf,
+eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff, int index,
+ uint16_t vet, uint16_t vet_ext, void *new_ehdr_buf,
uint16_t *payload_offset, uint16_t *tci);
uint16_t
eth_get_l3_proto(const struct iovec *l2hdr_iov, int iovcnt, size_t l2hdr_len);
-void eth_setup_vlan_headers_ex(struct eth_header *ehdr, uint16_t vlan_tag,
- uint16_t vlan_ethtype, bool *is_new);
-
-static inline void
-eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
- bool *is_new)
-{
- eth_setup_vlan_headers_ex(ehdr, vlan_tag, ETH_P_VLAN, is_new);
-}
+void eth_setup_vlan_headers(struct eth_header *ehdr, size_t *ehdr_size,
+ uint16_t vlan_tag, uint16_t vlan_ethtype);
uint8_t eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto);
@@ -377,18 +377,25 @@ typedef struct eth_ip4_hdr_info_st {
bool fragment;
} eth_ip4_hdr_info;
+typedef enum EthL4HdrProto {
+ ETH_L4_HDR_PROTO_INVALID,
+ ETH_L4_HDR_PROTO_TCP,
+ ETH_L4_HDR_PROTO_UDP,
+ ETH_L4_HDR_PROTO_SCTP
+} EthL4HdrProto;
+
typedef struct eth_l4_hdr_info_st {
union {
struct tcp_header tcp;
struct udp_header udp;
} hdr;
+ EthL4HdrProto proto;
bool has_tcp_data;
} eth_l4_hdr_info;
-void eth_get_protocols(const struct iovec *iov, int iovcnt,
- bool *isip4, bool *isip6,
- bool *isudp, bool *istcp,
+void eth_get_protocols(const struct iovec *iov, size_t iovcnt, size_t iovoff,
+ bool *hasip4, bool *hasip6,
size_t *l3hdr_off,
size_t *l4hdr_off,
size_t *l5hdr_off,
@@ -396,11 +403,6 @@ void eth_get_protocols(const struct iovec *iov, int iovcnt,
eth_ip4_hdr_info *ip4hdr_info,
eth_l4_hdr_info *l4hdr_info);
-void eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
- void *l3hdr, size_t l3hdr_len,
- size_t l3payload_len,
- size_t frag_offset, bool more_frags);
-
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len);
@@ -419,4 +421,20 @@ bool
eth_parse_ipv6_hdr(const struct iovec *pkt, int pkt_frags,
size_t ip6hdr_off, eth_ip6_hdr_info *info);
+/**
+ * eth_pad_short_frame - pad a short frame to the minimum Ethernet frame length
+ *
+ * If the Ethernet frame size is shorter than 60 bytes, it will be padded to
+ * 60 bytes at the address @padded_pkt.
+ *
+ * @padded_pkt: buffer address to hold the padded frame
+ * @padded_buflen: pointer holding length of @padded_pkt. If the frame is
+ * padded, the length will be updated to the padded one.
+ * @pkt: address of the original Ethernet frame
+ * @pkt_size: size of the original Ethernet frame
+ * @return true if the frame is padded, otherwise false
+ */
+bool eth_pad_short_frame(uint8_t *padded_pkt, size_t *padded_buflen,
+ const void *pkt, size_t pkt_size);
+
#endif
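
A short sketch of eth_pad_short_frame() as documented above; the wrapper and the caller-provided scratch buffer are invented for illustration:

static const uint8_t *demo_pad_frame(const uint8_t *buf, size_t *size,
                                     uint8_t padded[ETH_ZLEN])
{
    size_t padded_len = ETH_ZLEN;       /* scratch buffer is exactly 60 bytes */

    if (eth_pad_short_frame(padded, &padded_len, buf, *size)) {
        *size = padded_len;
        return padded;                  /* frame was shorter than ETH_ZLEN */
    }
    return buf;                         /* already long enough, use as-is */
}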
diff --git a/include/net/filter.h b/include/net/filter.h
index 49da666ac0..f15f7932b2 100644
--- a/include/net/filter.h
+++ b/include/net/filter.h
@@ -9,18 +9,13 @@
#ifndef QEMU_NET_FILTER_H
#define QEMU_NET_FILTER_H
-#include "qapi/qapi-types-net.h"
+#include "qapi/qapi-types-common.h"
+#include "qemu/queue.h"
#include "qom/object.h"
-#include "qemu-common.h"
#include "net/queue.h"
#define TYPE_NETFILTER "netfilter"
-#define NETFILTER(obj) \
- OBJECT_CHECK(NetFilterState, (obj), TYPE_NETFILTER)
-#define NETFILTER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(NetFilterClass, (obj), TYPE_NETFILTER)
-#define NETFILTER_CLASS(klass) \
- OBJECT_CLASS_CHECK(NetFilterClass, (klass), TYPE_NETFILTER)
+OBJECT_DECLARE_TYPE(NetFilterState, NetFilterClass, NETFILTER)
typedef void (FilterSetup) (NetFilterState *nf, Error **errp);
typedef void (FilterCleanup) (NetFilterState *nf);
@@ -40,7 +35,7 @@ typedef void (FilterStatusChanged) (NetFilterState *nf, Error **errp);
typedef void (FilterHandleEvent) (NetFilterState *nf, int event, Error **errp);
-typedef struct NetFilterClass {
+struct NetFilterClass {
ObjectClass parent_class;
/* optional */
@@ -50,7 +45,7 @@ typedef struct NetFilterClass {
FilterHandleEvent *handle_event;
/* mandatory */
FilterReceiveIOV *receive_iov;
-} NetFilterClass;
+};
struct NetFilterState {
@@ -62,6 +57,8 @@ struct NetFilterState {
NetClientState *netdev;
NetFilterDirection direction;
bool on;
+ char *position;
+ bool insert_before_flag;
QTAILQ_ENTRY(NetFilterState) next;
};
diff --git a/include/net/net.h b/include/net/net.h
index 643295d163..b1f9b35fcc 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -4,7 +4,7 @@
#include "qemu/queue.h"
#include "qapi/qapi-types-net.h"
#include "net/queue.h"
-#include "migration/vmstate.h"
+#include "hw/qdev-properties-system.h"
#define MAC_FMT "%02X:%02X:%02X:%02X:%02X:%02X"
#define MAC_ARG(x) ((uint8_t *)(x))[0], ((uint8_t *)(x))[1], \
@@ -43,7 +43,10 @@ typedef struct NICConf {
/* Net clients */
typedef void (NetPoll)(NetClientState *, bool enable);
-typedef int (NetCanReceive)(NetClientState *);
+typedef bool (NetCanReceive)(NetClientState *);
+typedef int (NetStart)(NetClientState *);
+typedef int (NetLoad)(NetClientState *);
+typedef void (NetStop)(NetClientState *);
typedef ssize_t (NetReceive)(NetClientState *, const uint8_t *, size_t);
typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
typedef void (NetCleanup) (NetClientState *);
@@ -51,15 +54,21 @@ typedef void (LinkStatusChanged)(NetClientState *);
typedef void (NetClientDestructor)(NetClientState *);
typedef RxFilterInfo *(QueryRxFilter)(NetClientState *);
typedef bool (HasUfo)(NetClientState *);
+typedef bool (HasUso)(NetClientState *);
typedef bool (HasVnetHdr)(NetClientState *);
typedef bool (HasVnetHdrLen)(NetClientState *, int);
+typedef bool (GetUsingVnetHdr)(NetClientState *);
typedef void (UsingVnetHdr)(NetClientState *, bool);
-typedef void (SetOffload)(NetClientState *, int, int, int, int, int);
+typedef void (SetOffload)(NetClientState *, int, int, int, int, int, int, int);
+typedef int (GetVnetHdrLen)(NetClientState *);
typedef void (SetVnetHdrLen)(NetClientState *, int);
typedef int (SetVnetLE)(NetClientState *, bool);
typedef int (SetVnetBE)(NetClientState *, bool);
typedef struct SocketReadState SocketReadState;
typedef void (SocketReadStateFinalize)(SocketReadState *rs);
+typedef void (NetAnnounce)(NetClientState *);
+typedef bool (SetSteeringEBPF)(NetClientState *, int);
+typedef bool (NetCheckPeerType)(NetClientState *, ObjectClass *, Error **);
typedef struct NetClientInfo {
NetClientDriver type;
@@ -68,18 +77,27 @@ typedef struct NetClientInfo {
NetReceive *receive_raw;
NetReceiveIOV *receive_iov;
NetCanReceive *can_receive;
+ NetStart *start;
+ NetLoad *load;
+ NetStop *stop;
NetCleanup *cleanup;
LinkStatusChanged *link_status_changed;
QueryRxFilter *query_rx_filter;
NetPoll *poll;
HasUfo *has_ufo;
+ HasUso *has_uso;
HasVnetHdr *has_vnet_hdr;
HasVnetHdrLen *has_vnet_hdr_len;
+ GetUsingVnetHdr *get_using_vnet_hdr;
UsingVnetHdr *using_vnet_hdr;
SetOffload *set_offload;
+ GetVnetHdrLen *get_vnet_hdr_len;
SetVnetHdrLen *set_vnet_hdr_len;
SetVnetLE *set_vnet_le;
SetVnetBE *set_vnet_be;
+ NetAnnounce *announce;
+ SetSteeringEBPF *set_steering_ebpf;
+ NetCheckPeerType *check_peer_type;
} NetClientInfo;
struct NetClientState {
@@ -97,12 +115,18 @@ struct NetClientState {
unsigned rxfilter_notify_enabled:1;
int vring_enable;
int vnet_hdr_len;
+ bool is_netdev;
+ bool do_not_pad; /* do not pad to the minimum ethernet frame length */
+ bool is_datapath;
QTAILQ_HEAD(, NetFilterState) filters;
};
+typedef QTAILQ_HEAD(NetClientStateList, NetClientState) NetClientStateList;
+
typedef struct NICState {
NetClientState *ncs;
NICConf *conf;
+ MemReentrancyGuard *reentrancy_guard;
void *opaque;
bool peer_deleted;
} NICState;
@@ -128,10 +152,15 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
NetClientState *peer,
const char *model,
const char *name);
+NetClientState *qemu_new_net_control_client(NetClientInfo *info,
+ NetClientState *peer,
+ const char *model,
+ const char *name);
NICState *qemu_new_nic(NetClientInfo *info,
NICConf *conf,
const char *model,
const char *name,
+ MemReentrancyGuard *reentrancy_guard,
void *opaque);
void qemu_del_nic(NICState *nic);
NetClientState *qemu_get_subqueue(NICState *nic, int queue_index);
@@ -141,39 +170,121 @@ void *qemu_get_nic_opaque(NetClientState *nc);
void qemu_del_net_client(NetClientState *nc);
typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_receive_packet(NetClientState *nc);
int qemu_can_send_packet(NetClientState *nc);
ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
int iovcnt);
ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov,
int iovcnt, NetPacketSent *sent_cb);
-void qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_receive_packet_iov(NetClientState *nc,
+ const struct iovec *iov,
+ int iovcnt);
ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size);
ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
int size, NetPacketSent *sent_cb);
void qemu_purge_queued_packets(NetClientState *nc);
void qemu_flush_queued_packets(NetClientState *nc);
void qemu_flush_or_purge_queued_packets(NetClientState *nc, bool purge);
+void qemu_set_info_str(NetClientState *nc,
+ const char *fmt, ...) G_GNUC_PRINTF(2, 3);
void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]);
bool qemu_has_ufo(NetClientState *nc);
+bool qemu_has_uso(NetClientState *nc);
bool qemu_has_vnet_hdr(NetClientState *nc);
bool qemu_has_vnet_hdr_len(NetClientState *nc, int len);
+bool qemu_get_using_vnet_hdr(NetClientState *nc);
void qemu_using_vnet_hdr(NetClientState *nc, bool enable);
void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
- int ecn, int ufo);
+ int ecn, int ufo, int uso4, int uso6);
+int qemu_get_vnet_hdr_len(NetClientState *nc);
void qemu_set_vnet_hdr_len(NetClientState *nc, int len);
int qemu_set_vnet_le(NetClientState *nc, bool is_le);
int qemu_set_vnet_be(NetClientState *nc, bool is_be);
void qemu_macaddr_default_if_unset(MACAddr *macaddr);
-int qemu_show_nic_models(const char *arg, const char *const *models);
-void qemu_check_nic_model(NICInfo *nd, const char *model);
-int qemu_find_nic_model(NICInfo *nd, const char * const *models,
- const char *default_model);
+/**
+ * qemu_find_nic_info: Obtain NIC configuration information
+ * @typename: Name of device object type
+ * @match_default: Match NIC configurations with no model specified
+ * @alias: Additional model string to match (for user convenience and
+ * backward compatibility).
+ *
+ * Search for a NIC configuration matching the NIC model constraints.
+ */
+NICInfo *qemu_find_nic_info(const char *typename, bool match_default,
+ const char *alias);
+/**
+ * qemu_configure_nic_device: Apply NIC configuration to a given device
+ * @dev: Network device to be configured
+ * @match_default: Match NIC configurations with no model specified
+ * @alias: Additional model string to match
+ *
+ * Search for a NIC configuration for the provided device, using the
+ * additionally specified matching constraints. If found, apply the
+ * configuration using qdev_set_nic_properties() and return %true.
+ *
+ * This is used by platform code which creates the device anyway,
+ * regardless of whether there is a configuration for it. This tends
+ * to be platforms which ignore `--nodefaults` and create net devices
+ * anyway, for example because the Ethernet device on that board is
+ * always physically present.
+ */
+bool qemu_configure_nic_device(DeviceState *dev, bool match_default,
+ const char *alias);
+/**
+ * qemu_create_nic_device: Create a NIC device if a configuration exists for it
+ * @typename: Object typename of network device
+ * @match_default: Match NIC configurations with no model specified
+ * @alias: Additional model string to match
+ *
+ * Search for a NIC configuration for the provided device type. If found,
+ * create an object of the corresponding type and return it.
+ */
+DeviceState *qemu_create_nic_device(const char *typename, bool match_default,
+ const char *alias);
+
+/*
+ * qemu_create_nic_bus_devices: Create configured NIC devices for a given bus
+ * @bus: Bus on which to create devices
+ * @parent_type: Object type for devices to be created (e.g. TYPE_PCI_DEVICE)
+ * @default_model: Object type name for default NIC model (or %NULL)
+ * @alias: Additional model string to replace, for user convenience
+ * @alias_target: Actual object type name to be used in place of @alias
+ *
+ * Instantiate dynamic NICs on a given bus, typically a PCI bus. This scans
+ * for available NIC configurations which either specify a model which is
+ * a child type of @parent_type, or which do not specify a model when
+ * @default_model is non-NULL. Each device is instantiated on the given @bus.
+ *
+ * A single substitution is supported, e.g. "xen" → "xen-net-device" for the
+ * Xen bus, or "virtio" → "virtio-net-pci" for PCI. This allows the user to
+ * specify a more understandable "model=" parameter on the command line, not
+ * only the real object typename.
+ */
+void qemu_create_nic_bus_devices(BusState *bus, const char *parent_type,
+ const char *default_model,
+ const char *alias, const char *alias_target);
void print_net_client(Monitor *mon, NetClientState *nc);
-void hmp_info_network(Monitor *mon, const QDict *qdict);
void net_socket_rs_init(SocketReadState *rs,
SocketReadStateFinalize *finalize,
bool vnet_hdr);
+NetClientState *qemu_get_peer(NetClientState *nc, int queue_index);
+
+/**
+ * qemu_get_nic_models:
+ * @device_type: Defines which devices should be taken into consideration
+ * (e.g. TYPE_DEVICE for all devices, or TYPE_PCI_DEVICE for PCI)
+ *
+ * Get an array of pointers to names of NIC devices that are available in
+ * the QEMU binary. The array is terminated with a NULL pointer entry.
+ * The caller is responsible for freeing the memory when it is not required
+ * anymore, e.g. with g_ptr_array_free(..., true).
+ *
+ * Returns: Pointer to the array that contains the pointers to the names.
+ */
+GPtrArray *qemu_get_nic_models(const char *device_type);
/* NIC info */
@@ -190,25 +301,24 @@ struct NICInfo {
int nvectors;
};
-extern int nb_nics;
-extern NICInfo nd_table[MAX_NICS];
-extern const char *host_net_devices[];
-
/* from net.c */
-int net_client_parse(QemuOptsList *opts_list, const char *str);
-int net_init_clients(Error **errp);
+extern NetClientStateList net_clients;
+bool netdev_is_modern(const char *optstr);
+void netdev_parse_modern(const char *optstr);
+void net_client_parse(QemuOptsList *opts_list, const char *optstr);
+void show_netdevs(void);
+void net_init_clients(void);
void net_check_clients(void);
void net_cleanup(void);
void hmp_host_net_add(Monitor *mon, const QDict *qdict);
void hmp_host_net_remove(Monitor *mon, const QDict *qdict);
void netdev_add(QemuOpts *opts, Error **errp);
-void qmp_netdev_add(QDict *qdict, QObject **ret, Error **errp);
int net_hub_id_for_client(NetClientState *nc, int *id);
NetClientState *net_hub_port_find(int hub_id);
-#define DEFAULT_NETWORK_SCRIPT "/etc/qemu-ifup"
-#define DEFAULT_NETWORK_DOWN_SCRIPT "/etc/qemu-ifdown"
+#define DEFAULT_NETWORK_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifup"
+#define DEFAULT_NETWORK_DOWN_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifdown"
#define DEFAULT_BRIDGE_HELPER CONFIG_QEMU_HELPERDIR "/qemu-bridge-helper"
#define DEFAULT_BRIDGE_INTERFACE "br0"
@@ -231,4 +341,9 @@ uint32_t net_crc32_le(const uint8_t *p, int len);
.offset = vmstate_offset_macaddr(_state, _field), \
}
+static inline bool net_peer_needs_padding(NetClientState *nc)
+{
+ return nc->peer && !nc->peer->do_not_pad;
+}
+
#endif
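
A sketch of how board code might use the new NIC helpers in place of the removed qemu_check_nic_model()/nd_table[] pattern; the "demo-ethernet" type name, MMIO address and IRQ wiring are invented:

static void demo_board_create_nic(qemu_irq irq)
{
    /* Returns NULL when the user configured no NIC for this model. */
    DeviceState *dev = qemu_create_nic_device("demo-ethernet", true, NULL);

    if (!dev) {
        return;
    }
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x10000000);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
}

Boards whose Ethernet controller is always physically present would instead call qemu_configure_nic_device() on the device they create unconditionally.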
diff --git a/include/net/queue.h b/include/net/queue.h
index 5469fdbeaa..9f2f289d77 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -24,7 +24,6 @@
#ifndef QEMU_NET_QUEUE_H
#define QEMU_NET_QUEUE_H
-#include "qemu-common.h"
typedef struct NetPacket NetPacket;
typedef struct NetQueue NetQueue;
@@ -56,6 +55,14 @@ void qemu_net_queue_append_iov(NetQueue *queue,
void qemu_del_net_queue(NetQueue *queue);
+ssize_t qemu_net_queue_receive(NetQueue *queue,
+ const uint8_t *data,
+ size_t size);
+
+ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
+ const struct iovec *iov,
+ int iovcnt);
+
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,
diff --git a/include/net/tap.h b/include/net/tap.h
index ce6f8418ac..5d585515f9 100644
--- a/include/net/tap.h
+++ b/include/net/tap.h
@@ -26,7 +26,6 @@
#ifndef QEMU_NET_TAP_H
#define QEMU_NET_TAP_H
-#include "qemu-common.h"
#include "standard-headers/linux/virtio_net.h"
int tap_enable(NetClientState *nc);
diff --git a/include/net/vhost-user.h b/include/net/vhost-user.h
index 5bcd8a6285..35bf619709 100644
--- a/include/net/vhost-user.h
+++ b/include/net/vhost-user.h
@@ -14,5 +14,6 @@
struct vhost_net;
struct vhost_net *vhost_user_get_vhost_net(NetClientState *nc);
uint64_t vhost_user_get_acked_features(NetClientState *nc);
+void vhost_user_save_acked_features(NetClientState *nc);
#endif /* VHOST_USER_H */
diff --git a/include/net/vhost-vdpa.h b/include/net/vhost-vdpa.h
new file mode 100644
index 0000000000..b81f9a6f2a
--- /dev/null
+++ b/include/net/vhost-vdpa.h
@@ -0,0 +1,21 @@
+/*
+ * vhost-vdpa.h
+ *
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * Copyright(c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef VHOST_VDPA_H
+#define VHOST_VDPA_H
+
+#define TYPE_VHOST_VDPA "vhost-vdpa"
+
+struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc);
+
+extern const int vdpa_feature_bits[];
+
+#endif /* VHOST_VDPA_H */
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 77e47398c4..c6a5361a2a 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -4,9 +4,6 @@
#include "net/net.h"
#include "hw/virtio/vhost-backend.h"
-#define VHOST_NET_INIT_FAILED \
- "vhost-net requested but could not be initialized"
-
struct vhost_net;
typedef struct vhost_net VHostNetState;
@@ -14,23 +11,33 @@ typedef struct VhostNetOptions {
VhostBackendType backend_type;
NetClientState *net_backend;
uint32_t busyloop_timeout;
+ unsigned int nvqs;
void *opaque;
} VhostNetOptions;
uint64_t vhost_net_get_max_queues(VHostNetState *net);
struct vhost_net *vhost_net_init(VhostNetOptions *options);
-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+ int data_queue_pairs, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+ int data_queue_pairs, int cvq);
void vhost_net_cleanup(VHostNetState *net);
uint64_t vhost_net_get_features(VHostNetState *net, uint64_t features);
void vhost_net_ack_features(VHostNetState *net, uint64_t features);
+int vhost_net_get_config(struct vhost_net *net, uint8_t *config,
+ uint32_t config_len);
+
+int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags);
bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
int idx, bool mask);
+bool vhost_net_config_pending(VHostNetState *net);
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask);
int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
VHostNetState *get_vhost_net(NetClientState *nc);
@@ -40,4 +47,10 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net);
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu);
+void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
+ int vq_index);
+int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
+ int vq_index);
+
+void vhost_net_save_acked_features(NetClientState *nc);
#endif
diff --git a/include/qapi/clone-visitor.h b/include/qapi/clone-visitor.h
index 5b665ee38c..adf9a788e2 100644
--- a/include/qapi/clone-visitor.h
+++ b/include/qapi/clone-visitor.h
@@ -20,10 +20,10 @@
*/
typedef struct QapiCloneVisitor QapiCloneVisitor;
-void *qapi_clone(const void *src, void (*visit_type)(Visitor *, const char *,
+void *qapi_clone(const void *src, bool (*visit_type)(Visitor *, const char *,
void **, Error **));
void qapi_clone_members(void *dst, const void *src, size_t sz,
- void (*visit_type_members)(Visitor *, void *,
+ bool (*visit_type_members)(Visitor *, void *,
Error **));
/*
@@ -34,7 +34,7 @@ void qapi_clone_members(void *dst, const void *src, size_t sz,
*/
#define QAPI_CLONE(type, src) \
((type *)qapi_clone(src, \
- (void (*)(Visitor *, const char *, void**, \
+ (bool (*)(Visitor *, const char *, void **, \
Error **))visit_type_ ## type))
/*
@@ -45,7 +45,7 @@ void qapi_clone_members(void *dst, const void *src, size_t sz,
*/
#define QAPI_CLONE_MEMBERS(type, dst, src) \
qapi_clone_members(dst, src, sizeof(type), \
- (void (*)(Visitor *, void *, \
+ (bool (*)(Visitor *, void *, \
Error **))visit_type_ ## type ## _members)
#endif
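
A hedged sketch of QAPI_CLONE() in use (the hunks above only adjust the function-pointer casts to the new bool-returning visit functions); SocketAddress merely stands in for any generated QAPI struct, and its generated headers are assumed to be available:

#include "qemu/osdep.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-sockets.h"

static SocketAddress *copy_addr(const SocketAddress *src)
{
    /* Deep copy; the result is independent of @src. */
    SocketAddress *dst = QAPI_CLONE(SocketAddress, src);

    return dst;   /* free later with qapi_free_SocketAddress() */
}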
diff --git a/include/qapi/compat-policy.h b/include/qapi/compat-policy.h
new file mode 100644
index 0000000000..8b7b25c0b5
--- /dev/null
+++ b/include/qapi/compat-policy.h
@@ -0,0 +1,45 @@
+/*
+ * Policy for handling "funny" management interfaces
+ *
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef QAPI_COMPAT_POLICY_H
+#define QAPI_COMPAT_POLICY_H
+
+#include "qapi/error.h"
+#include "qapi/qapi-types-compat.h"
+
+extern CompatPolicy compat_policy;
+
+bool compat_policy_input_ok(unsigned special_features,
+ const CompatPolicy *policy,
+ ErrorClass error_class,
+ const char *kind, const char *name,
+ Error **errp);
+
+/*
+ * Create a QObject input visitor for @obj for use with QMP
+ *
+ * This is like qobject_input_visitor_new(), except it obeys the
+ * policy for handling deprecated management interfaces set with
+ * -compat.
+ */
+Visitor *qobject_input_visitor_new_qmp(QObject *obj);
+
+/*
+ * Create a QObject output visitor for @obj for use with QMP
+ *
+ * This is like qobject_output_visitor_new(), except it obeys the
+ * policy for handling deprecated management interfaces set with
+ * -compat.
+ */
+Visitor *qobject_output_visitor_new_qmp(QObject **result);
+
+#endif
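
A sketch of how the QMP-flavoured constructor above might be used to parse a QMP arguments object; SocketAddress again stands in for any generated QAPI type:

#include "qemu/osdep.h"
#include "qapi/compat-policy.h"
#include "qapi/qapi-visit-sockets.h"

static SocketAddress *addr_from_qmp(QObject *args, Error **errp)
{
    /* Like qobject_input_visitor_new(), but honours the -compat policy. */
    Visitor *v = qobject_input_visitor_new_qmp(args);
    SocketAddress *addr = NULL;

    visit_type_SocketAddress(v, NULL, &addr, errp);
    visit_free(v);
    return addr;   /* NULL on failure */
}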
diff --git a/include/qapi/error.h b/include/qapi/error.h
index 51b63dd4b5..71f8fb2c50 100644
--- a/include/qapi/error.h
+++ b/include/qapi/error.h
@@ -15,18 +15,55 @@
/*
* Error reporting system loosely patterned after Glib's GError.
*
+ * = Rules =
+ *
+ * - Functions that use Error to report errors have an Error **errp
+ * parameter. It should be the last parameter, except for functions
+ * taking variable arguments.
+ *
+ * - You may pass NULL to not receive the error, &error_abort to abort
+ * on error, &error_fatal to exit(1) on error, or a pointer to a
+ * variable containing NULL to receive the error.
+ *
+ * - Separation of concerns: the function is responsible for detecting
+ * errors and failing cleanly; handling the error is its caller's
+ * job. Since the value of @errp is about handling the error, the
+ * function should not examine it.
+ *
+ * - The function may pass @errp to functions it calls to pass on
+ * their errors to its caller. If it dereferences @errp to check
+ * for errors, it must use ERRP_GUARD().
+ *
+ * - On success, the function should not touch *errp. On failure, it
+ * should set a new error, e.g. with error_setg(errp, ...), or
+ * propagate an existing one, e.g. with error_propagate(errp, ...).
+ *
+ * - Whenever practical, also return a value that indicates success /
+ * failure. This can make the error checking more concise, and can
+ * avoid useless error object creation and destruction. Note that
+ * we still have many functions returning void. We recommend
+ * • bool-valued functions return true on success / false on failure,
+ * • pointer-valued functions return non-null / null pointer, and
+ * • integer-valued functions return non-negative / negative.
+ *
+ * = Creating errors =
+ *
* Create an error:
- * error_setg(&err, "situation normal, all fouled up");
+ * error_setg(errp, "situation normal, all fouled up");
+ * where @errp points to the location to receive the error.
*
* Create an error and add additional explanation:
- * error_setg(&err, "invalid quark");
- * error_append_hint(&err, "Valid quarks are up, down, strange, "
+ * error_setg(errp, "invalid quark");
+ * error_append_hint(errp, "Valid quarks are up, down, strange, "
* "charm, top, bottom.\n");
+ * This may require use of ERRP_GUARD(); more on that below.
*
* Do *not* contract this to
- * error_setg(&err, "invalid quark\n"
+ * error_setg(errp, "invalid quark\n" // WRONG!
* "Valid quarks are up, down, strange, charm, top, bottom.");
*
+ * = Reporting and destroying errors =
+ *
* Report an error to the current monitor if we have one, else stderr:
* error_report_err(err);
* This frees the error object.
@@ -40,6 +77,30 @@
* error_free(err);
* Note that this loses hints added with error_append_hint().
*
+ * Call a function ignoring errors:
+ * foo(arg, NULL);
+ * This is more concise than
+ * Error *err = NULL;
+ * foo(arg, &err);
+ * error_free(err); // don't do this
+ *
+ * Call a function aborting on errors:
+ * foo(arg, &error_abort);
+ * This is more concise and fails more nicely than
+ * Error *err = NULL;
+ * foo(arg, &err);
+ * assert(!err); // don't do this
+ *
+ * Call a function treating errors as fatal:
+ * foo(arg, &error_fatal);
+ * This is more concise than
+ * Error *err = NULL;
+ * foo(arg, &err);
+ * if (err) { // don't do this
+ * error_report_err(err);
+ * exit(1);
+ * }
+ *
* Handle an error without reporting it (just for completeness):
* error_free(err);
*
@@ -47,57 +108,73 @@
* reporting it (primarily useful in testsuites):
* error_free_or_abort(&err);
*
- * Pass an existing error to the caller:
- * error_propagate(errp, err);
- * where Error **errp is a parameter, by convention the last one.
+ * = Passing errors around =
*
- * Pass an existing error to the caller with the message modified:
- * error_propagate_prepend(errp, err);
- *
- * Avoid
- * error_propagate(errp, err);
- * error_prepend(errp, "Could not frobnicate '%s': ", name);
- * because this fails to prepend when @errp is &error_fatal.
+ * Errors get passed to the caller through the conventional @errp
+ * parameter.
*
* Create a new error and pass it to the caller:
* error_setg(errp, "situation normal, all fouled up");
*
- * Call a function and receive an error from it:
- * Error *err = NULL;
- * foo(arg, &err);
- * if (err) {
+ * Call a function, receive an error from it, and pass it to the caller
+ * - when the function returns a value that indicates failure, say
+ * false:
+ * if (!foo(arg, errp)) {
* handle the error...
* }
+ * - when it does not, say because it is a void function:
+ * ERRP_GUARD();
+ * foo(arg, errp);
+ * if (*errp) {
+ * handle the error...
+ * }
+ * More on ERRP_GUARD() below.
*
- * Call a function ignoring errors:
- * foo(arg, NULL);
- *
- * Call a function aborting on errors:
- * foo(arg, &error_abort);
- *
- * Call a function treating errors as fatal:
- * foo(arg, &error_fatal);
- *
- * Receive an error and pass it on to the caller:
+ * Code predating ERRP_GUARD() still exists, and looks like this:
* Error *err = NULL;
* foo(arg, &err);
* if (err) {
* handle the error...
- * error_propagate(errp, err);
+ * error_propagate(errp, err); // deprecated
* }
- * where Error **errp is a parameter, by convention the last one.
- *
- * Do *not* "optimize" this to
+ * Avoid in new code. Do *not* "optimize" it to
* foo(arg, errp);
* if (*errp) { // WRONG!
* handle the error...
* }
- * because errp may be NULL!
+ * because errp may be NULL without the ERRP_GUARD() guard.
*
* But when all you do with the error is pass it on, please use
* foo(arg, errp);
* for readability.
*
+ * Receive an error, and handle it locally
+ * - when the function returns a value that indicates failure, say
+ * false:
+ * Error *err = NULL;
+ * if (!foo(arg, &err)) {
+ * handle the error...
+ * }
+ * - when it does not, say because it is a void function:
+ * Error *err = NULL;
+ * foo(arg, &err);
+ * if (err) {
+ * handle the error...
+ * }
+ *
+ * Pass an existing error to the caller:
+ * error_propagate(errp, err);
+ * This is rarely needed. When @err is a local variable, use of
+ * ERRP_GUARD() commonly results in more readable code.
+ *
+ * Pass an existing error to the caller with the message modified:
+ * error_propagate_prepend(errp, err,
+ * "Could not frobnicate '%s': ", name);
+ * This is more concise than
+ * error_propagate(errp, err); // don't do this
+ * error_prepend(errp, "Could not frobnicate '%s': ", name);
+ * and works even when @errp is &error_fatal.
+ *
* Receive and accumulate multiple errors (first one wins):
* Error *err = NULL, *local_err = NULL;
* foo(arg, &err);
@@ -108,18 +185,94 @@
* }
*
* Do *not* "optimize" this to
+ * Error *err = NULL;
* foo(arg, &err);
* bar(arg, &err); // WRONG!
* if (err) {
* handle the error...
* }
* because this may pass a non-null err to bar().
+ *
+ * Likewise, do *not*
+ * Error *err = NULL;
+ * if (cond1) {
+ * error_setg(&err, ...);
+ * }
+ * if (cond2) {
+ * error_setg(&err, ...); // WRONG!
+ * }
+ * because this may pass a non-null err to error_setg().
+ *
+ * = Why, when and how to use ERRP_GUARD() =
+ *
+ * Without ERRP_GUARD(), use of the @errp parameter is restricted:
+ * - It must not be dereferenced, because it may be null.
+ * - It should not be passed to error_prepend(), error_vprepend(), or
+ * error_append_hint(), because that doesn't work with &error_fatal.
+ * ERRP_GUARD() lifts these restrictions.
+ *
+ * To use ERRP_GUARD(), add it right at the beginning of the function.
+ * @errp can then be used without worrying about the argument being
+ * NULL or &error_fatal.
+ *
+ * Using it when it's not needed is safe, but please avoid cluttering
+ * the source with useless code.
+ *
+ * = Converting to ERRP_GUARD() =
+ *
+ * To convert a function to use ERRP_GUARD():
+ *
+ * 0. If the Error ** parameter is not named @errp, rename it to
+ * @errp.
+ *
+ * 1. Add an ERRP_GUARD() invocation, by convention right at the
+ * beginning of the function. This makes @errp safe to use.
+ *
+ * 2. Replace &err by errp, and err by *errp. Delete local variable
+ * @err.
+ *
+ * 3. Delete error_propagate(errp, *errp), replace
+ * error_propagate_prepend(errp, *errp, ...) by error_prepend(errp, ...)
+ *
+ * 4. Ensure @errp is valid at return: when you destroy *errp, set
+ * *errp = NULL.
+ *
+ * Example:
+ *
+ * bool fn(..., Error **errp)
+ * {
+ * Error *err = NULL;
+ *
+ * foo(arg, &err);
+ * if (err) {
+ * handle the error...
+ * error_propagate(errp, err);
+ * return false;
+ * }
+ * ...
+ * }
+ *
+ * becomes
+ *
+ * bool fn(..., Error **errp)
+ * {
+ * ERRP_GUARD();
+ *
+ * foo(arg, errp);
+ * if (*errp) {
+ * handle the error...
+ * return false;
+ * }
+ * ...
+ * }
+ *
+ * For mass-conversion, use scripts/coccinelle/errp-guard.cocci.
*/
#ifndef ERROR_H
#define ERROR_H
-#include "qapi/qapi-types-common.h"
+#include "qapi/qapi-types-error.h"
/*
* Overall category of an error.
@@ -167,7 +320,7 @@ ErrorClass error_get_class(const Error *err);
void error_setg_internal(Error **errp,
const char *src, int line, const char *func,
const char *fmt, ...)
- GCC_FMT_ATTR(5, 6);
+ G_GNUC_PRINTF(5, 6);
/*
* Just like error_setg(), with @os_error info added to the message.
@@ -183,7 +336,7 @@ void error_setg_internal(Error **errp,
void error_setg_errno_internal(Error **errp,
const char *fname, int line, const char *func,
int os_error, const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
#ifdef _WIN32
/*
@@ -197,7 +350,7 @@ void error_setg_errno_internal(Error **errp,
void error_setg_win32_internal(Error **errp,
const char *src, int line, const char *func,
int win32_err, const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
#endif
/*
@@ -214,6 +367,7 @@ void error_setg_win32_internal(Error **errp,
* the error object.
* Else, move the error object from @local_err to *@dst_errp.
* On return, @local_err is invalid.
+ * Please use ERRP_GUARD() instead when possible.
* Please don't error_propagate(&error_fatal, ...), use
* error_report_err() and exit(), because that's more obvious.
*/
@@ -225,22 +379,25 @@ void error_propagate(Error **dst_errp, Error *local_err);
* Behaves like
* error_prepend(&local_err, fmt, ...);
* error_propagate(dst_errp, local_err);
+ * Please use ERRP_GUARD() and error_prepend() instead when possible.
*/
void error_propagate_prepend(Error **dst_errp, Error *local_err,
- const char *fmt, ...);
+ const char *fmt, ...)
+ G_GNUC_PRINTF(3, 4);
/*
* Prepend some text to @errp's human-readable error message.
* The text is made by formatting @fmt, @ap like vprintf().
*/
-void error_vprepend(Error **errp, const char *fmt, va_list ap);
+void error_vprepend(Error *const *errp, const char *fmt, va_list ap)
+ G_GNUC_PRINTF(2, 0);
/*
* Prepend some text to @errp's human-readable error message.
* The text is made by formatting @fmt, ... like printf().
*/
-void error_prepend(Error **errp, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+void error_prepend(Error *const *errp, const char *fmt, ...)
+ G_GNUC_PRINTF(2, 3);
/*
* Append a printf-style human-readable explanation to an existing error.
@@ -256,8 +413,8 @@ void error_prepend(Error **errp, const char *fmt, ...)
* May be called multiple times. The resulting hint should end with a
* newline.
*/
-void error_append_hint(Error **errp, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+void error_append_hint(Error *const *errp, const char *fmt, ...)
+ G_GNUC_PRINTF(2, 3);
/*
* Convenience function to report open() failure.
@@ -301,13 +458,13 @@ void error_report_err(Error *err);
* Convenience function to error_prepend(), warn_report() and free @err.
*/
void warn_reportf_err(Error *err, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/*
* Convenience function to error_prepend(), error_report() and free @err.
*/
void error_reportf_err(Error *err, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/*
* Just like error_setg(), except you get to specify the error class.
@@ -320,7 +477,53 @@ void error_reportf_err(Error *err, const char *fmt, ...)
void error_set_internal(Error **errp,
const char *src, int line, const char *func,
ErrorClass err_class, const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
+
+/*
+ * Make @errp parameter easier to use regardless of argument value
+ *
+ * This macro is for use right at the beginning of a function that
+ * takes an Error **errp parameter to pass errors to its caller. The
+ * parameter must be named @errp.
+ *
+ * It must be used when the function dereferences @errp or passes
+ * @errp to error_prepend(), error_vprepend(), or error_append_hint().
+ * It is safe to use even when it's not needed, but please avoid
+ * cluttering the source with useless code.
+ *
+ * If @errp is NULL or &error_fatal, rewrite it to point to a local
+ * Error variable, which will be automatically propagated to the
+ * original @errp on function exit.
+ *
+ * Note: &error_abort is not rewritten, because that would move the
+ * abort from the place where the error is created to the place where
+ * it's propagated.
+ */
+#define ERRP_GUARD() \
+ g_auto(ErrorPropagator) _auto_errp_prop = {.errp = errp}; \
+ do { \
+ if (!errp || errp == &error_fatal) { \
+ errp = &_auto_errp_prop.local_err; \
+ } \
+ } while (0)
+
+typedef struct ErrorPropagator {
+ Error *local_err;
+ Error **errp;
+} ErrorPropagator;
+
+static inline void error_propagator_cleanup(ErrorPropagator *prop)
+{
+ error_propagate(prop->errp, prop->local_err);
+}
+
+G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(ErrorPropagator, error_propagator_cleanup);
+
+/*
+ * Special error destination to warn on error.
+ * See error_setg() and error_propagate() for details.
+ */
+extern Error *error_warn;
/*
* Special error destination to abort on error.
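
To tie the rules above together, a minimal sketch that uses ERRP_GUARD() so error_setg() and error_append_hint() can both target the caller's @errp (the quark wording follows the comment's own example):

#include "qemu/osdep.h"
#include "qapi/error.h"

static bool pick_quark(const char *name, Error **errp)
{
    ERRP_GUARD();

    if (strcmp(name, "up") != 0 && strcmp(name, "down") != 0) {
        error_setg(errp, "invalid quark '%s'", name);
        error_append_hint(errp, "Valid quarks are up and down.\n");
        return false;
    }
    return true;
}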
diff --git a/include/qapi/forward-visitor.h b/include/qapi/forward-visitor.h
new file mode 100644
index 0000000000..50fb3e9d50
--- /dev/null
+++ b/include/qapi/forward-visitor.h
@@ -0,0 +1,27 @@
+/*
+ * Forwarding visitor
+ *
+ * Copyright Red Hat, Inc. 2021
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef FORWARD_VISITOR_H
+#define FORWARD_VISITOR_H
+
+#include "qapi/visitor.h"
+
+typedef struct ForwardFieldVisitor ForwardFieldVisitor;
+
+/*
+ * The forwarding visitor only expects a single name, @from, to be passed for
+ * toplevel fields. It is converted to @to and forwarded to the @target visitor.
+ * Calls within a struct are forwarded without changing the name.
+ */
+Visitor *visitor_forward_field(Visitor *target, const char *from, const char *to);
+
+#endif
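
A hedged sketch of the forwarding visitor; the field names "len" and "size" are purely illustrative. The caller visits the top-level field as "len", and @target sees it as "size":

#include "qemu/osdep.h"
#include "qapi/forward-visitor.h"

static bool visit_len_as_size(Visitor *target, uint64_t *value, Error **errp)
{
    Visitor *v = visitor_forward_field(target, "len", "size");
    bool ok = visit_type_uint64(v, "len", value, errp);

    visit_free(v);
    return ok;
}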
diff --git a/include/qapi/qmp-event.h b/include/qapi/qmp-event.h
index 23e588ccf8..b60f1d3a89 100644
--- a/include/qapi/qmp-event.h
+++ b/include/qapi/qmp-event.h
@@ -14,11 +14,5 @@
#ifndef QMP_EVENT_H
#define QMP_EVENT_H
-typedef void (*QMPEventFuncEmit)(unsigned event, QDict *dict);
-
-void qmp_event_set_func_emit(QMPEventFuncEmit emit);
-
-QMPEventFuncEmit qmp_event_get_func_emit(void);
-
QDict *qmp_event_build_dict(const char *event_name);
#endif
diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h
index 68a528a9aa..f2e956813a 100644
--- a/include/qapi/qmp/dispatch.h
+++ b/include/qapi/qmp/dispatch.h
@@ -14,47 +14,54 @@
#ifndef QAPI_QMP_DISPATCH_H
#define QAPI_QMP_DISPATCH_H
+#include "monitor/monitor.h"
#include "qemu/queue.h"
typedef void (QmpCommandFunc)(QDict *, QObject **, Error **);
typedef enum QmpCommandOptions
{
- QCO_NO_OPTIONS = 0x0,
QCO_NO_SUCCESS_RESP = (1U << 0),
QCO_ALLOW_OOB = (1U << 1),
QCO_ALLOW_PRECONFIG = (1U << 2),
+ QCO_COROUTINE = (1U << 3),
} QmpCommandOptions;
typedef struct QmpCommand
{
const char *name;
+ /* Runs in coroutine context if QCO_COROUTINE is set */
QmpCommandFunc *fn;
QmpCommandOptions options;
+ unsigned special_features;
QTAILQ_ENTRY(QmpCommand) node;
bool enabled;
+ const char *disable_reason;
} QmpCommand;
typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList;
void qmp_register_command(QmpCommandList *cmds, const char *name,
- QmpCommandFunc *fn, QmpCommandOptions options);
-void qmp_unregister_command(QmpCommandList *cmds, const char *name);
-QmpCommand *qmp_find_command(QmpCommandList *cmds, const char *name);
-void qmp_disable_command(QmpCommandList *cmds, const char *name);
+ QmpCommandFunc *fn, QmpCommandOptions options,
+ unsigned special_features);
+const QmpCommand *qmp_find_command(const QmpCommandList *cmds,
+ const char *name);
+void qmp_disable_command(QmpCommandList *cmds, const char *name,
+ const char *err_msg);
void qmp_enable_command(QmpCommandList *cmds, const char *name);
bool qmp_command_is_enabled(const QmpCommand *cmd);
+bool qmp_command_available(const QmpCommand *cmd, Error **errp);
const char *qmp_command_name(const QmpCommand *cmd);
bool qmp_has_success_response(const QmpCommand *cmd);
QDict *qmp_error_response(Error *err);
-QDict *qmp_dispatch(QmpCommandList *cmds, QObject *request,
- bool allow_oob);
+QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request,
+ bool allow_oob, Monitor *cur_mon);
bool qmp_is_oob(const QDict *dict);
-typedef void (*qmp_cmd_callback_fn)(QmpCommand *cmd, void *opaque);
+typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque);
-void qmp_for_each_command(QmpCommandList *cmds, qmp_cmd_callback_fn fn,
+void qmp_for_each_command(const QmpCommandList *cmds, qmp_cmd_callback_fn fn,
void *opaque);
#endif
diff --git a/include/qapi/qmp/json-writer.h b/include/qapi/qmp/json-writer.h
new file mode 100644
index 0000000000..b70ba64077
--- /dev/null
+++ b/include/qapi/qmp/json-writer.h
@@ -0,0 +1,35 @@
+/*
+ * JSON Writer
+ *
+ * Copyright (c) 2020 Red Hat Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef JSON_WRITER_H
+#define JSON_WRITER_H
+
+JSONWriter *json_writer_new(bool pretty);
+const char *json_writer_get(JSONWriter *);
+GString *json_writer_get_and_free(JSONWriter *);
+void json_writer_free(JSONWriter *);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(JSONWriter, json_writer_free)
+
+void json_writer_start_object(JSONWriter *, const char *name);
+void json_writer_end_object(JSONWriter *);
+void json_writer_start_array(JSONWriter *, const char *name);
+void json_writer_end_array(JSONWriter *);
+void json_writer_bool(JSONWriter *, const char *name, bool val);
+void json_writer_null(JSONWriter *, const char *name);
+void json_writer_int64(JSONWriter *, const char *name, int64_t val);
+void json_writer_uint64(JSONWriter *, const char *name, uint64_t val);
+void json_writer_double(JSONWriter *, const char *name, double val);
+void json_writer_str(JSONWriter *, const char *name, const char *str);
+
+#endif
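
A minimal sketch of the writer API declared above, building a small JSON object (the member names are illustrative):

#include "qemu/osdep.h"
#include "qapi/qmp/json-writer.h"

static char *describe_vm(void)
{
    JSONWriter *w = json_writer_new(false);      /* no pretty-printing */

    json_writer_start_object(w, NULL);
    json_writer_str(w, "name", "vm1");
    json_writer_int64(w, "cpus", 4);
    json_writer_end_object(w);

    /* json_writer_get_and_free() hands back the GString and frees @w. */
    return g_string_free(json_writer_get_and_free(w), false);
}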
diff --git a/include/qapi/qmp/qbool.h b/include/qapi/qmp/qbool.h
index 5f61e38e64..0d09726939 100644
--- a/include/qapi/qmp/qbool.h
+++ b/include/qapi/qmp/qbool.h
@@ -21,9 +21,11 @@ struct QBool {
bool value;
};
+void qbool_unref(QBool *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QBool, qbool_unref)
+
QBool *qbool_from_bool(bool value);
bool qbool_get_bool(const QBool *qb);
-bool qbool_is_equal(const QObject *x, const QObject *y);
-void qbool_destroy_obj(QObject *obj);
#endif /* QBOOL_H */
diff --git a/include/qapi/qmp/qdict.h b/include/qapi/qmp/qdict.h
index 7f3ec10a10..82e90fc072 100644
--- a/include/qapi/qmp/qdict.h
+++ b/include/qapi/qmp/qdict.h
@@ -30,6 +30,10 @@ struct QDict {
QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX];
};
+void qdict_unref(QDict *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QDict, qdict_unref)
+
/* Object API */
QDict *qdict_new(void);
const char *qdict_entry_key(const QDictEntry *entry);
@@ -39,13 +43,8 @@ void qdict_put_obj(QDict *qdict, const char *key, QObject *value);
void qdict_del(QDict *qdict, const char *key);
int qdict_haskey(const QDict *qdict, const char *key);
QObject *qdict_get(const QDict *qdict, const char *key);
-bool qdict_is_equal(const QObject *x, const QObject *y);
-void qdict_iter(const QDict *qdict,
- void (*iter)(const char *key, QObject *obj, void *opaque),
- void *opaque);
const QDictEntry *qdict_first(const QDict *qdict);
const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry);
-void qdict_destroy_obj(QObject *obj);
/* Helper to qdict_put_obj(), accepts any object */
#define qdict_put(qdict, key, obj) \
diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h
index 7c76e24aa7..00b18e9082 100644
--- a/include/qapi/qmp/qerror.h
+++ b/include/qapi/qmp/qerror.h
@@ -16,78 +16,22 @@
* These macros will go away, please don't use in new code, and do not
* add new ones!
*/
-#define QERR_BASE_NOT_FOUND \
- "Base '%s' not found"
-
-#define QERR_BUS_NO_HOTPLUG \
- "Bus '%s' does not support hotplugging"
-
-#define QERR_DEVICE_HAS_NO_MEDIUM \
- "Device '%s' has no medium"
-
-#define QERR_DEVICE_INIT_FAILED \
- "Device '%s' could not be initialized"
-
-#define QERR_DEVICE_IN_USE \
- "Device '%s' is in use"
-
-#define QERR_DEVICE_NO_HOTPLUG \
- "Device '%s' does not support hotplugging"
-
-#define QERR_FD_NOT_FOUND \
- "File descriptor named '%s' not found"
-
-#define QERR_FD_NOT_SUPPLIED \
- "No file descriptor supplied via SCM_RIGHTS"
-
-#define QERR_FEATURE_DISABLED \
- "The feature '%s' is not enabled"
-
-#define QERR_INVALID_BLOCK_FORMAT \
- "Invalid block format '%s'"
-
-#define QERR_INVALID_PARAMETER \
- "Invalid parameter '%s'"
-
-#define QERR_INVALID_PARAMETER_TYPE \
- "Invalid parameter type for '%s', expected: %s"
#define QERR_INVALID_PARAMETER_VALUE \
"Parameter '%s' expects %s"
-#define QERR_INVALID_PASSWORD \
- "Password incorrect"
-
#define QERR_IO_ERROR \
"An IO error has occurred"
-#define QERR_MIGRATION_ACTIVE \
- "There's a migration process in progress"
-
#define QERR_MISSING_PARAMETER \
"Parameter '%s' is missing"
-#define QERR_PERMISSION_DENIED \
- "Insufficient permission to perform this operation"
-
-#define QERR_PROPERTY_VALUE_BAD \
- "Property '%s.%s' doesn't take value '%s'"
-
#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \
"Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")"
#define QERR_QGA_COMMAND_FAILED \
"Guest agent command failed, error was '%s'"
-#define QERR_REPLAY_NOT_SUPPORTED \
- "Record/replay feature is not supported for '%s'"
-
-#define QERR_SET_PASSWD_FAILED \
- "Could not set password"
-
-#define QERR_UNDEFINED_ERROR \
- "An undefined error has occurred"
-
#define QERR_UNSUPPORTED \
"this feature or command is not currently supported"
diff --git a/include/qapi/qmp/qjson.h b/include/qapi/qmp/qjson.h
index 5ebbe5a118..7bd8d2de1b 100644
--- a/include/qapi/qmp/qjson.h
+++ b/include/qapi/qmp/qjson.h
@@ -17,15 +17,15 @@
QObject *qobject_from_json(const char *string, Error **errp);
QObject *qobject_from_vjsonf_nofail(const char *string, va_list ap)
- GCC_FMT_ATTR(1, 0);
+ G_GNUC_PRINTF(1, 0);
QObject *qobject_from_jsonf_nofail(const char *string, ...)
- GCC_FMT_ATTR(1, 2);
+ G_GNUC_PRINTF(1, 2);
QDict *qdict_from_vjsonf_nofail(const char *string, va_list ap)
- GCC_FMT_ATTR(1, 0);
+ G_GNUC_PRINTF(1, 0);
QDict *qdict_from_jsonf_nofail(const char *string, ...)
- GCC_FMT_ATTR(1, 2);
+ G_GNUC_PRINTF(1, 2);
-QString *qobject_to_json(const QObject *obj);
-QString *qobject_to_json_pretty(const QObject *obj);
+GString *qobject_to_json(const QObject *obj);
+GString *qobject_to_json_pretty(const QObject *obj, bool pretty);
#endif /* QJSON_H */
diff --git a/include/qapi/qmp/qlist.h b/include/qapi/qmp/qlist.h
index 8d2c32ca28..e4e985d435 100644
--- a/include/qapi/qmp/qlist.h
+++ b/include/qapi/qmp/qlist.h
@@ -26,6 +26,10 @@ struct QList {
QTAILQ_HEAD(,QListEntry) head;
};
+void qlist_unref(QList *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QList, qlist_unref)
+
#define qlist_append(qlist, obj) \
qlist_append_obj(qlist, QOBJECT(obj))
@@ -34,10 +38,10 @@ void qlist_append_int(QList *qlist, int64_t value);
void qlist_append_null(QList *qlist);
void qlist_append_str(QList *qlist, const char *value);
-#define QLIST_FOREACH_ENTRY(qlist, var) \
- for ((var) = ((qlist)->head.tqh_first); \
- (var); \
- (var) = ((var)->next.tqe_next))
+#define QLIST_FOREACH_ENTRY(qlist, var) \
+ for ((var) = QTAILQ_FIRST(&(qlist)->head); \
+ (var); \
+ (var) = QTAILQ_NEXT((var), next))
static inline QObject *qlist_entry_obj(const QListEntry *entry)
{
@@ -47,14 +51,10 @@ static inline QObject *qlist_entry_obj(const QListEntry *entry)
QList *qlist_new(void);
QList *qlist_copy(QList *src);
void qlist_append_obj(QList *qlist, QObject *obj);
-void qlist_iter(const QList *qlist,
- void (*iter)(QObject *obj, void *opaque), void *opaque);
QObject *qlist_pop(QList *qlist);
QObject *qlist_peek(QList *qlist);
int qlist_empty(const QList *qlist);
size_t qlist_size(const QList *qlist);
-bool qlist_is_equal(const QObject *x, const QObject *y);
-void qlist_destroy_obj(QObject *obj);
static inline const QListEntry *qlist_first(const QList *qlist)
{
diff --git a/include/qapi/qmp/qnull.h b/include/qapi/qmp/qnull.h
index c1426882c5..7feb7c7d83 100644
--- a/include/qapi/qmp/qnull.h
+++ b/include/qapi/qmp/qnull.h
@@ -26,6 +26,8 @@ static inline QNull *qnull(void)
return qobject_ref(&qnull_);
}
-bool qnull_is_equal(const QObject *x, const QObject *y);
+void qnull_unref(QNull *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNull, qnull_unref)
#endif /* QNULL_H */
diff --git a/include/qapi/qmp/qnum.h b/include/qapi/qmp/qnum.h
index bbae0a5ec8..e86788dd2e 100644
--- a/include/qapi/qmp/qnum.h
+++ b/include/qapi/qmp/qnum.h
@@ -54,6 +54,10 @@ struct QNum {
} u;
};
+void qnum_unref(QNum *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNum, qnum_unref)
+
QNum *qnum_from_int(int64_t value);
QNum *qnum_from_uint(uint64_t value);
QNum *qnum_from_double(double value);
@@ -68,7 +72,4 @@ double qnum_get_double(QNum *qn);
char *qnum_to_string(QNum *qn);
-bool qnum_is_equal(const QObject *x, const QObject *y);
-void qnum_destroy_obj(QObject *obj);
-
#endif /* QNUM_H */
diff --git a/include/qapi/qmp/qobject.h b/include/qapi/qmp/qobject.h
index fcfd549220..89b97d88bc 100644
--- a/include/qapi/qmp/qobject.h
+++ b/include/qapi/qmp/qobject.h
@@ -45,10 +45,16 @@ struct QObject {
struct QObjectBase_ base;
};
-#define QOBJECT(obj) ({ \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define QOBJECT_INTERNAL(obj, _obj) ({ \
typeof(obj) _obj = (obj); \
- _obj ? container_of(&(_obj)->base, QObject, base) : NULL; \
+ _obj ? container_of(&_obj->base, QObject, base) : NULL; \
})
+#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTFIER(_obj))
/* Required for qobject_to() */
#define QTYPE_CAST_TO_QNull QTYPE_QNULL
@@ -64,14 +70,6 @@ QEMU_BUILD_BUG_MSG(QTYPE__MAX != 7,
#define qobject_to(type, obj) \
((type *)qobject_check_type(obj, glue(QTYPE_CAST_TO_, type)))
-/* Initialize an object to default values */
-static inline void qobject_init(QObject *obj, QType type)
-{
- assert(QTYPE_NONE < type && type < QTYPE__MAX);
- obj->base.refcnt = 1;
- obj->base.type = type;
-}
-
static inline void qobject_ref_impl(QObject *obj)
{
if (obj) {
@@ -90,6 +88,7 @@ bool qobject_is_equal(const QObject *x, const QObject *y);
/**
* qobject_destroy(): Free resources used by the object
+ * For use via qobject_unref() only!
*/
void qobject_destroy(QObject *obj);
diff --git a/include/qapi/qmp/qstring.h b/include/qapi/qmp/qstring.h
index 3e83e3a95d..318d815d6a 100644
--- a/include/qapi/qmp/qstring.h
+++ b/include/qapi/qmp/qstring.h
@@ -17,22 +17,17 @@
struct QString {
struct QObjectBase_ base;
- char *string;
- size_t length;
- size_t capacity;
+ const char *string;
};
+void qstring_unref(QString *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QString, qstring_unref)
+
QString *qstring_new(void);
QString *qstring_from_str(const char *str);
QString *qstring_from_substr(const char *str, size_t start, size_t end);
-size_t qstring_get_length(const QString *qstring);
+QString *qstring_from_gstring(GString *gstr);
const char *qstring_get_str(const QString *qstring);
-const char *qstring_get_try_str(const QString *qstring);
-const char *qobject_get_try_str(const QObject *qstring);
-void qstring_append_int(QString *qstring, int64_t value);
-void qstring_append(QString *qstring, const char *str);
-void qstring_append_chr(QString *qstring, int c);
-bool qstring_is_equal(const QObject *x, const QObject *y);
-void qstring_destroy_obj(QObject *obj);
#endif /* QSTRING_H */
diff --git a/include/qapi/string-output-visitor.h b/include/qapi/string-output-visitor.h
index 268dfe9986..b1ee473b30 100644
--- a/include/qapi/string-output-visitor.h
+++ b/include/qapi/string-output-visitor.h
@@ -26,9 +26,9 @@ typedef struct StringOutputVisitor StringOutputVisitor;
* If everything else succeeds, pass @result to visit_complete() to
* collect the result of the visit.
*
- * The string output visitor does not implement support for visiting
- * QAPI structs, alternates, null, or arbitrary QTypes. It also
- * requires a non-null list argument to visit_start_list().
+ * The string output visitor does not implement support for alternates, null,
+ * or arbitrary QTypes. Struct fields are not shown. It also requires a
+ * non-null list argument to visit_start_list().
*/
Visitor *string_output_visitor_new(bool human, char **result);
diff --git a/include/qapi/type-helpers.h b/include/qapi/type-helpers.h
new file mode 100644
index 0000000000..fc8352cdec
--- /dev/null
+++ b/include/qapi/type-helpers.h
@@ -0,0 +1,22 @@
+/*
+ * QAPI common helper functions
+ *
+ * This file provides helper functions related to types defined
+ * in the QAPI schema.
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include "qapi/qapi-types-common.h"
+
+HumanReadableText *human_readable_text_from_str(GString *str);
+
+/*
+ * Produce and return a NULL-terminated array of strings from @list.
+ * The result is g_malloc()'d and all strings are g_strdup()'d. It
+ * can be freed with g_strfreev(), or by g_auto(GStrv) automatic
+ * cleanup.
+ */
+char **strv_from_str_list(const strList *list);
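
A short sketch of strv_from_str_list() with the g_auto(GStrv) cleanup the comment above suggests:

#include "qemu/osdep.h"
#include "qapi/type-helpers.h"

static void print_names(const strList *names)
{
    g_auto(GStrv) strv = strv_from_str_list(names);
    char **p;

    for (p = strv; p && *p; p++) {
        printf("%s\n", *p);
    }
}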
diff --git a/include/qapi/util.h b/include/qapi/util.h
index a7c3c64148..20dfea8a54 100644
--- a/include/qapi/util.h
+++ b/include/qapi/util.h
@@ -11,15 +11,62 @@
#ifndef QAPI_UTIL_H
#define QAPI_UTIL_H
+typedef enum {
+ QAPI_DEPRECATED,
+ QAPI_UNSTABLE,
+} QapiSpecialFeature;
+
typedef struct QEnumLookup {
const char *const *array;
- int size;
+ const unsigned char *const special_features;
+ const int size;
} QEnumLookup;
const char *qapi_enum_lookup(const QEnumLookup *lookup, int val);
int qapi_enum_parse(const QEnumLookup *lookup, const char *buf,
int def, Error **errp);
+bool qapi_bool_parse(const char *name, const char *value, bool *obj,
+ Error **errp);
int parse_qapi_name(const char *name, bool complete);
+/*
+ * For any GenericList @list, insert @element at the front.
+ *
+ * Note that this macro evaluates @element exactly once, so it is safe
+ * to have side-effects with that argument.
+ */
+#define QAPI_LIST_PREPEND(list, element) do { \
+ typeof(list) _tmp = g_malloc(sizeof(*(list))); \
+ _tmp->value = (element); \
+ _tmp->next = (list); \
+ (list) = _tmp; \
+} while (0)
+
+/*
+ * For any pointer to a GenericList @tail (usually the 'next' member of a
+ * list element), insert @element at the back and update the tail.
+ *
+ * Note that this macro evaluates @element exactly once, so it is safe
+ * to have side-effects with that argument.
+ */
+#define QAPI_LIST_APPEND(tail, element) do { \
+ *(tail) = g_malloc0(sizeof(**(tail))); \
+ (*(tail))->value = (element); \
+ (tail) = &(*(tail))->next; \
+} while (0)
+
+/*
+ * For any GenericList @list, return its length.
+ */
+#define QAPI_LIST_LENGTH(list) \
+ ({ \
+ size_t _len = 0; \
+ typeof(list) _tail; \
+ for (_tail = list; _tail != NULL; _tail = _tail->next) { \
+ _len++; \
+ } \
+ _len; \
+ })
+
#endif
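
A minimal sketch exercising the three list macros above on the builtin strList type (qapi-builtin-types.h assumed available for strList and qapi_free_strList()):

#include "qemu/osdep.h"
#include "qapi/util.h"
#include "qapi/qapi-builtin-types.h"

static void list_demo(void)
{
    strList *list = NULL;
    strList **tail = &list;

    QAPI_LIST_APPEND(tail, g_strdup("one"));
    QAPI_LIST_APPEND(tail, g_strdup("two"));
    QAPI_LIST_PREPEND(list, g_strdup("zero"));   /* now: zero, one, two */

    g_assert(QAPI_LIST_LENGTH(list) == 3);
    qapi_free_strList(list);
}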
diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h
index 8ccb3b6c20..2badec5ba4 100644
--- a/include/qapi/visitor-impl.h
+++ b/include/qapi/visitor-impl.h
@@ -43,76 +43,90 @@ typedef enum VisitorType {
struct Visitor
{
+ /*
+ * Only input visitors may fail!
+ */
+
/* Must be set to visit structs */
- void (*start_struct)(Visitor *v, const char *name, void **obj,
+ bool (*start_struct)(Visitor *v, const char *name, void **obj,
size_t size, Error **errp);
/* Optional; intended for input visitors */
- void (*check_struct)(Visitor *v, Error **errp);
+ bool (*check_struct)(Visitor *v, Error **errp);
/* Must be set to visit structs */
void (*end_struct)(Visitor *v, void **obj);
/* Must be set; implementations may require @list to be non-null,
* but must document it. */
- void (*start_list)(Visitor *v, const char *name, GenericList **list,
+ bool (*start_list)(Visitor *v, const char *name, GenericList **list,
size_t size, Error **errp);
/* Must be set */
GenericList *(*next_list)(Visitor *v, GenericList *tail, size_t size);
/* Optional; intended for input visitors */
- void (*check_list)(Visitor *v, Error **errp);
+ bool (*check_list)(Visitor *v, Error **errp);
/* Must be set */
void (*end_list)(Visitor *v, void **list);
- /* Must be set by input and dealloc visitors to visit alternates;
- * optional for output visitors. */
- void (*start_alternate)(Visitor *v, const char *name,
+ /* Must be set by input and clone visitors to visit alternates */
+ bool (*start_alternate)(Visitor *v, const char *name,
GenericAlternate **obj, size_t size,
Error **errp);
- /* Optional, needed for dealloc visitor */
+ /* Optional */
void (*end_alternate)(Visitor *v, void **obj);
/* Must be set */
- void (*type_int64)(Visitor *v, const char *name, int64_t *obj,
+ bool (*type_int64)(Visitor *v, const char *name, int64_t *obj,
Error **errp);
/* Must be set */
- void (*type_uint64)(Visitor *v, const char *name, uint64_t *obj,
+ bool (*type_uint64)(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
/* Optional; fallback is type_uint64() */
- void (*type_size)(Visitor *v, const char *name, uint64_t *obj,
+ bool (*type_size)(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
/* Must be set */
- void (*type_bool)(Visitor *v, const char *name, bool *obj, Error **errp);
+ bool (*type_bool)(Visitor *v, const char *name, bool *obj, Error **errp);
/* Must be set */
- void (*type_str)(Visitor *v, const char *name, char **obj, Error **errp);
+ bool (*type_str)(Visitor *v, const char *name, char **obj, Error **errp);
/* Must be set to visit numbers */
- void (*type_number)(Visitor *v, const char *name, double *obj,
+ bool (*type_number)(Visitor *v, const char *name, double *obj,
Error **errp);
/* Must be set to visit arbitrary QTypes */
- void (*type_any)(Visitor *v, const char *name, QObject **obj,
+ bool (*type_any)(Visitor *v, const char *name, QObject **obj,
Error **errp);
/* Must be set to visit explicit null values. */
- void (*type_null)(Visitor *v, const char *name, QNull **obj,
+ bool (*type_null)(Visitor *v, const char *name, QNull **obj,
Error **errp);
/* Must be set for input visitors to visit structs, optional otherwise.
The core takes care of the return type in the public interface. */
void (*optional)(Visitor *v, const char *name, bool *present);
+ /* Optional */
+ bool (*policy_reject)(Visitor *v, const char *name,
+ unsigned special_features, Error **errp);
+
+ /* Optional */
+ bool (*policy_skip)(Visitor *v, const char *name,
+ unsigned special_features);
+
/* Must be set */
VisitorType type;
+ /* Optional */
+ struct CompatPolicy compat_policy;
+
/* Must be set for output visitors, optional otherwise. */
void (*complete)(Visitor *v, void *opaque);
diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h
index 5b2ed3f202..27b85d4700 100644
--- a/include/qapi/visitor.h
+++ b/include/qapi/visitor.h
@@ -16,6 +16,7 @@
#define QAPI_VISITOR_H
#include "qapi/qapi-builtin-types.h"
+#include "qapi/qapi-types-compat.h"
/*
* The QAPI schema defines both a set of C data types, and a QMP wire
@@ -25,19 +26,21 @@
* for doing work at each node of a QAPI graph; it can also be used
* for a virtual walk, where there is no actual QAPI C struct.
*
- * There are four kinds of visitor classes: input visitors (QObject,
- * string, and QemuOpts) parse an external representation and build
- * the corresponding QAPI graph, output visitors (QObject and string) take
- * a completed QAPI graph and generate an external representation, the
- * dealloc visitor can take a QAPI graph (possibly partially
- * constructed) and recursively free its resources, and the clone
- * visitor performs a deep clone of one QAPI object to another. While
- * the dealloc and QObject input/output visitors are general, the string,
- * QemuOpts, and clone visitors have some implementation limitations;
- * see the documentation for each visitor for more details on what it
- * supports. Also, see visitor-impl.h for the callback contracts
- * implemented by each visitor, and docs/devel/qapi-code-gen.txt for more
- * about the QAPI code generator.
+ * There are four kinds of visitors: input visitors (QObject, string,
+ * and QemuOpts) parse an external representation and build the
+ * corresponding QAPI object, output visitors (QObject and string)
+ * take a QAPI object and generate an external representation, the
+ * dealloc visitor takes a QAPI object (possibly partially
+ * constructed) and recursively frees it, and the clone visitor
+ * performs a deep clone of a QAPI object.
+ *
+ * While the dealloc and QObject input/output visitors are general,
+ * the string, QemuOpts, and clone visitors have some implementation
+ * limitations; see the documentation for each visitor for more
+ * details on what it supports. Also, see visitor-impl.h for the
+ * callback contracts implemented by each visitor, and
+ * docs/devel/qapi-code-gen.rst for more about the QAPI code
+ * generator.
*
* All of the visitors are created via:
*
@@ -45,20 +48,24 @@
*
* A visitor should be used for exactly one top-level visit_type_FOO()
* or virtual walk; if that is successful, the caller can optionally
- * call visit_complete() (for now, useful only for output visits, but
- * safe to call on all visits). Then, regardless of success or
- * failure, the user should call visit_free() to clean up resources.
- * It is okay to free the visitor without completing the visit, if
- * some other error is detected in the meantime.
+ * call visit_complete() (useful only for output visits, but safe to
+ * call on all visits). Then, regardless of success or failure, the
+ * user should call visit_free() to clean up resources. It is okay to
+ * free the visitor without completing the visit, if some other error
+ * is detected in the meantime.
+ *
+ * The clone and dealloc visitor should not be used directly outside
+ * of QAPI code. Use the qapi_free_FOO() and QAPI_CLONE() instead,
+ * described below.
*
* All QAPI types have a corresponding function with a signature
* roughly compatible with this:
*
- * void visit_type_FOO(Visitor *v, const char *name, T obj, Error **errp);
+ * bool visit_type_FOO(Visitor *v, const char *name, T obj, Error **errp);
*
* where T is FOO for scalar types, and FOO * otherwise. The scalar
* visitors are declared here; the remaining visitors are generated in
- * qapi-visit.h.
+ * qapi-visit-MODULE.h.
*
* The @name parameter of visit_type_FOO() describes the relation
* between this QAPI value and its parent container. When visiting
@@ -68,55 +75,58 @@
* alternate, @name should equal the name used for visiting the
* alternate.
*
- * The visit_type_FOO() functions expect a non-null @obj argument;
- * they allocate *@obj during input visits, leave it unchanged on
- * output visits, and recursively free any resources during a dealloc
- * visit. Each function also takes the customary @errp argument (see
+ * The visit_type_FOO() functions take a non-null @obj argument; they
+ * allocate *@obj during input visits, leave it unchanged during
+ * output and clone visits, and free it (recursively) during a dealloc
+ * visit.
+ *
+ * Each function also takes the customary @errp argument (see
* qapi/error.h for details), for reporting any errors (such as if a
* member @name is not present, or is present but not the specified
- * type).
+ * type). Only input visitors can fail.
*
* If an error is detected during visit_type_FOO() with an input
- * visitor, then *@obj will be NULL for pointer types, and left
- * unchanged for scalar types. Using an output or clone visitor with
- * an incomplete object has undefined behavior (other than a special
- * case for visit_type_str() treating NULL like ""), while the dealloc
- * visitor safely handles incomplete objects. Since input visitors
- * never produce an incomplete object, such an object is possible only
- * by manual construction.
+ * visitor, then *@obj will be set to NULL for pointer types, and left
+ * unchanged for scalar types.
+ *
+ * Using an output or clone visitor with an incomplete object has
+ * undefined behavior (other than a special case for visit_type_str()
+ * treating NULL like ""), while the dealloc visitor safely handles
+ * incomplete objects. Since input visitors never produce an
+ * incomplete object, such an object is possible only by manual
+ * construction.
+ *
+ * visit_type_FOO() returns true on success, false on error.
*
* For the QAPI object types (structs, unions, and alternates), there
- * is an additional generated function in qapi-visit.h compatible
- * with:
+ * is an additional generated function in qapi-visit-MODULE.h
+ * compatible with:
*
- * void visit_type_FOO_members(Visitor *v, FOO *obj, Error **errp);
+ * bool visit_type_FOO_members(Visitor *v, FOO *obj, Error **errp);
*
* for visiting the members of a type without also allocating the QAPI
- * struct.
+ * struct. It also returns true on success, false on error.
*
- * Additionally, in qapi-types.h, all QAPI pointer types (structs,
- * unions, alternates, and lists) have a generated function compatible
+ * Additionally, QAPI pointer types (structs, unions, alternates, and
+ * lists) have a generated function in qapi-types-MODULE.h compatible
* with:
*
* void qapi_free_FOO(FOO *obj);
*
- * where behaves like free() in that @obj may be NULL. Such objects
- * may also be used with the following macro, provided alongside the
- * clone visitor:
+ * Does nothing when @obj is NULL.
+ *
+ * Such objects may also be used with macro
*
* Type *QAPI_CLONE(Type, src);
*
- * in order to perform a deep clone of @src. Because of the generated
- * qapi_free functions and the QAPI_CLONE() macro, the clone and
- * dealloc visitor should not be used directly outside of QAPI code.
+ * in order to perform a deep clone of @src.
*
- * QAPI types can also inherit from a base class; when this happens, a
- * function is generated for easily going from the derived type to the
- * base type:
+ * For QAPI types that inherit from a base type, a function is
+ * generated for going from the derived type to the base type:
*
* BASE *qapi_CHILD_base(CHILD *obj);
*
- * For a real QAPI struct, typical input usage involves:
+ * Typical input visitor usage involves:
*
* <example>
* Foo *f;
@@ -124,8 +134,7 @@
* Visitor *v;
*
* v = FOO_visitor_new(...);
- * visit_type_Foo(v, NULL, &f, &err);
- * if (err) {
+ * if (!visit_type_Foo(v, NULL, &f, &err)) {
* ...handle error...
* } else {
* ...use f...
@@ -141,8 +150,7 @@
* Visitor *v;
*
* v = FOO_visitor_new(...);
- * visit_type_FooList(v, NULL, &l, &err);
- * if (err) {
+ * if (!visit_type_FooList(v, NULL, &l, &err)) {
* ...handle error...
* } else {
* for ( ; l; l = l->next) {
@@ -153,36 +161,22 @@
* qapi_free_FooList(l);
* </example>
*
- * Similarly, typical output usage is:
+ * Typical output visitor usage:
*
* <example>
* Foo *f = ...obtain populated object...
- * Error *err = NULL;
* Visitor *v;
* Type *result;
*
* v = FOO_visitor_new(..., &result);
- * visit_type_Foo(v, NULL, &f, &err);
- * if (err) {
- * ...handle error...
- * } else {
- * visit_complete(v, &result);
- * ...use result...
- * }
+ * visit_type_Foo(v, NULL, &f, &error_abort);
+ * visit_complete(v, &result);
* visit_free(v);
+ * ...use result...
* </example>
*
- * When visiting a real QAPI struct, this file provides several
- * helpers that rely on in-tree information to control the walk:
- * visit_optional() for the 'has_member' field associated with
- * optional 'member' in the C struct; and visit_next_list() for
- * advancing through a FooList linked list. Similarly, the
- * visit_is_input() helper makes it possible to write code that is
- * visitor-agnostic everywhere except for cleanup. Only the generated
- * visit_type functions need to use these helpers.
- *
* It is also possible to use the visitors to do a virtual walk, where
- * no actual QAPI struct is present. In this situation, decisions
+ * no actual QAPI object is present. In this situation, decisions
* about what needs to be walked are made by the calling code, and
* structured visits are split between pairs of start and end methods
* (where the end method must be called if the start function
@@ -193,38 +187,44 @@
* <example>
* Visitor *v;
* Error *err = NULL;
+ * bool ok = false;
* int value;
*
* v = FOO_visitor_new(...);
- * visit_start_struct(v, NULL, NULL, 0, &err);
- * if (err) {
+ * if (!visit_start_struct(v, NULL, NULL, 0, &err)) {
* goto out;
* }
- * visit_start_list(v, "list", NULL, 0, &err);
- * if (err) {
+ * if (!visit_start_list(v, "list", NULL, 0, &err)) {
* goto outobj;
* }
* value = 1;
- * visit_type_int(v, NULL, &value, &err);
- * if (err) {
+ * if (!visit_type_int(v, NULL, &value, &err)) {
* goto outlist;
* }
* value = 2;
- * visit_type_int(v, NULL, &value, &err);
- * if (err) {
+ * if (!visit_type_int(v, NULL, &value, &err)) {
* goto outlist;
* }
+ * ok = true;
* outlist:
+ * if (ok) {
+ * ok = visit_check_list(v, &err);
+ * }
* visit_end_list(v, NULL);
- * if (!err) {
- * visit_check_struct(v, &err);
+ * if (ok) {
+ * ok = visit_check_struct(v, &err);
* }
* outobj:
* visit_end_struct(v, NULL);
* out:
- * error_propagate(errp, err);
* visit_free(v);
* </example>
+ *
+ * This file provides helpers for use by the generated
+ * visit_type_FOO(): visit_optional() for the 'has_member' field
+ * associated with optional 'member' in the C struct,
+ * visit_next_list() for advancing through a FooList linked list, and
+ * visit_is_input() for cleaning up on failure.
*/
/*** Useful types ***/
@@ -282,9 +282,10 @@ void visit_free(Visitor *v);
* into *@obj. @obj may also be NULL for a virtual walk, in which
* case @size is ignored.
*
- * @errp obeys typical error usage, and reports failures such as a
- * member @name is not present, or present but not an object. On
- * error, input visitors set *@obj to NULL.
+ * On failure, set *@obj to NULL and store an error through @errp.
+ * Can happen only when @v is an input visitor.
+ *
+ * Return true on success, false on failure.
*
* After visit_start_struct() succeeds, the caller may visit its
* members one after the other, passing the member's name and address
@@ -295,21 +296,23 @@ void visit_free(Visitor *v);
* FIXME Should this be named visit_start_object, since it is also
* used for QAPI unions, and maps to JSON objects?
*/
-void visit_start_struct(Visitor *v, const char *name, void **obj,
+bool visit_start_struct(Visitor *v, const char *name, void **obj,
size_t size, Error **errp);
/*
* Prepare for completing an object visit.
*
- * @errp obeys typical error usage, and reports failures such as
- * unparsed keys remaining in the input stream.
+ * On failure, store an error through @errp. Can happen only when @v
+ * is an input visitor.
+ *
+ * Return true on success, false on failure.
*
* Should be called prior to visit_end_struct() if all other
* intermediate visit steps were successful, to allow the visitor one
* last chance to report errors. May be skipped on a cleanup path,
* where there is no need to check for further errors.
*/
-void visit_check_struct(Visitor *v, Error **errp);
+bool visit_check_struct(Visitor *v, Error **errp);
/*
* Complete an object visit started earlier.
@@ -338,21 +341,22 @@ void visit_end_struct(Visitor *v, void **obj);
* allow @list to be NULL for a virtual walk, in which case @size is
* ignored.
*
- * @errp obeys typical error usage, and reports failures such as a
- * member @name is not present, or present but not a list. On error,
- * input visitors set *@list to NULL.
+ * On failure, set *@list to NULL and store an error through @errp.
+ * Can happen only when @v is an input visitor.
+ *
+ * Return true on success, false on failure.
*
* After visit_start_list() succeeds, the caller may visit its members
- * one after the other. A real visit (where @obj is non-NULL) uses
+ * one after the other. A real visit (where @list is non-NULL) uses
* visit_next_list() for traversing the linked list, while a virtual
- * visit (where @obj is NULL) uses other means. For each list
+ * visit (where @list is NULL) uses other means. For each list
* element, call the appropriate visit_type_FOO() with name set to
* NULL and obj set to the address of the value member of the list
* element. Finally, visit_end_list() needs to be called with the
* same @list to clean up, even if intermediate visits fail. See the
* examples above.
*/
-void visit_start_list(Visitor *v, const char *name, GenericList **list,
+bool visit_start_list(Visitor *v, const char *name, GenericList **list,
size_t size, Error **errp);
/*
@@ -364,25 +368,27 @@ void visit_start_list(Visitor *v, const char *name, GenericList **list,
* @tail must not be NULL; on the first call, @tail is the value of
* *list after visit_start_list(), and on subsequent calls @tail must
* be the previously returned value. Should be called in a loop until
- * a NULL return or error occurs; for each non-NULL return, the caller
- * then calls the appropriate visit_type_*() for the element type of
- * the list, with that function's name parameter set to NULL and obj
- * set to the address of @tail->value.
+ * a NULL return; for each non-NULL return, the caller then calls the
+ * appropriate visit_type_*() for the element type of the list, with
+ * that function's name parameter set to NULL and obj set to the
+ * address of @tail->value.
*/
GenericList *visit_next_list(Visitor *v, GenericList *tail, size_t size);
/*
* Prepare for completing a list visit.
*
- * @errp obeys typical error usage, and reports failures such as
- * unvisited list tail remaining in the input stream.
+ * On failure, store an error through @errp. Can happen only when @v
+ * is an input visitor.
+ *
+ * Return true on success, false on failure.
*
* Should be called prior to visit_end_list() if all other
* intermediate visit steps were successful, to allow the visitor one
* last chance to report errors. May be skipped on a cleanup path,
* where there is no need to check for further errors.
*/
-void visit_check_list(Visitor *v, Error **errp);
+bool visit_check_list(Visitor *v, Error **errp);
/*
* Complete a list visit started earlier.
@@ -407,14 +413,19 @@ void visit_end_list(Visitor *v, void **list);
*
* @obj must not be NULL. Input and clone visitors use @size to
* determine how much memory to allocate into *@obj, then determine
- * the qtype of the next thing to be visited, stored in (*@obj)->type.
- * Other visitors will leave @obj unchanged.
+ * the qtype of the next thing to be visited, and store it in
+ * (*@obj)->type. Other visitors leave @obj unchanged.
+ *
+ * On failure, set *@obj to NULL and store an error through @errp.
+ * Can happen only when @v is an input visitor.
+ *
+ * Return true on success, false on failure.
*
* If successful, this must be paired with visit_end_alternate() with
* the same @obj to clean up, even if visiting the contents of the
* alternate fails.
*/
-void visit_start_alternate(Visitor *v, const char *name,
+bool visit_start_alternate(Visitor *v, const char *name,
GenericAlternate **obj, size_t size,
Error **errp);
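
A sketch of the qtype dispatch this enables, for a hypothetical alternate type FooAlt whose union members u.s (char *) and u.i (int64_t) are made up for illustration:

    #include "qapi/error.h"
    #include "qapi/visitor.h"

    static bool visit_foo_alt(Visitor *v, const char *name, FooAlt **obj,
                              Error **errp)
    {
        bool ok = false;

        if (!visit_start_alternate(v, name, (GenericAlternate **)obj,
                                   sizeof(**obj), errp)) {
            return false;
        }
        switch ((*obj)->type) {          /* set by input and clone visitors */
        case QTYPE_QSTRING:
            ok = visit_type_str(v, name, &(*obj)->u.s, errp);
            break;
        case QTYPE_QNUM:
            ok = visit_type_int(v, name, &(*obj)->u.i, errp);
            break;
        default:
            error_setg(errp, "unexpected type for '%s'", name ? name : "value");
            break;
        }
        visit_end_alternate(v, (void **)obj);
        return ok;
    }
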
@@ -450,6 +461,41 @@ void visit_end_alternate(Visitor *v, void **obj);
bool visit_optional(Visitor *v, const char *name, bool *present);
/*
+ * Should we reject member @name due to policy?
+ *
+ * @special_features is the member's special features encoded as a
+ * bitset of QapiSpecialFeature.
+ *
+ * @name must not be NULL. This function is only useful between
+ * visit_start_struct() and visit_end_struct(), since only objects
+ * have deprecated members.
+ */
+bool visit_policy_reject(Visitor *v, const char *name,
+ unsigned special_features, Error **errp);
+
+/*
+ *
+ * Should we skip member @name due to policy?
+ *
+ * @special_features is the member's special features encoded as a
+ * bitset of QapiSpecialFeature.
+ *
+ * @name must not be NULL. This function is only useful between
+ * visit_start_struct() and visit_end_struct(), since only objects
+ * have deprecated members.
+ */
+bool visit_policy_skip(Visitor *v, const char *name,
+ unsigned special_features);
+
+/*
+ * Set policy for handling deprecated management interfaces.
+ *
+ * Intended use: call visit_set_policy(v, &compat_policy) when
+ * visiting management interface input or output.
+ */
+void visit_set_policy(Visitor *v, CompatPolicy *policy);
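
A sketch of how a member visit might consult these hooks, assuming QAPI_DEPRECATED is the QapiSpecialFeature bit marking the member as deprecated and "old-member" is a made-up member name:

    #include "qapi/visitor.h"

    static bool visit_maybe_deprecated(Visitor *v, int64_t *val, Error **errp)
    {
        unsigned feats = 1u << QAPI_DEPRECATED;   /* assumed enum value */

        if (visit_policy_reject(v, "old-member", feats, errp)) {
            return false;                 /* policy says: error out */
        }
        if (visit_policy_skip(v, "old-member", feats)) {
            return true;                  /* policy says: silently drop */
        }
        return visit_type_int(v, "old-member", val, errp);
    }
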
+
+/*
* Visit an enum value.
*
* @name expresses the relationship of this enum to its parent
@@ -461,14 +507,19 @@ bool visit_optional(Visitor *v, const char *name, bool *present);
*
* Currently, all input visitors parse text input, and all output
* visitors produce text output. The mapping between enumeration
- * values and strings is done by the visitor core, using @strings; it
- * should be the ENUM_lookup array from visit-types.h.
+ * values and strings is done by the visitor core, using @lookup.
+ *
+ * On failure, store an error through @errp. Can happen only when @v
+ * is an input visitor.
+ *
+ * Return true on success, false on failure.
*
* May call visit_type_str() under the hood, and the enum visit may
* fail even if the corresponding string visit succeeded; this implies
- * that visit_type_str() must have no unwelcome side effects.
+ * that an input visitor's visit_type_str() must have no unwelcome
+ * side effects.
*/
-void visit_type_enum(Visitor *v, const char *name, int *obj,
+bool visit_type_enum(Visitor *v, const char *name, int *obj,
const QEnumLookup *lookup, Error **errp);
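
A sketch of an enum member visit, assuming the generator-produced enum OnOffAuto and its lookup table OnOffAuto_lookup from the QAPI headers; "policy" is an arbitrary member name:

    #include "qapi/visitor.h"

    static bool visit_on_off_auto(Visitor *v, OnOffAuto *val, Error **errp)
    {
        int tmp = *val;

        if (!visit_type_enum(v, "policy", &tmp, &OnOffAuto_lookup, errp)) {
            return false;
        }
        *val = tmp;
        return true;
    }
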
/*
@@ -476,6 +527,11 @@ void visit_type_enum(Visitor *v, const char *name, int *obj,
*/
bool visit_is_input(Visitor *v);
+/*
+ * Check if visitor is a dealloc visitor.
+ */
+bool visit_is_dealloc(Visitor *v);
+
/*** Visiting built-in types ***/
/*
@@ -486,28 +542,33 @@ bool visit_is_input(Visitor *v);
*
* @obj must be non-NULL. Input visitors set *@obj to the value;
* other visitors will leave *@obj unchanged.
+ *
+ * On failure, store an error through @errp. Can happen only when @v
+ * is an input visitor.
+ *
+ * Return true on success, false on failure.
*/
-void visit_type_int(Visitor *v, const char *name, int64_t *obj, Error **errp);
+bool visit_type_int(Visitor *v, const char *name, int64_t *obj, Error **errp);
/*
* Visit a uint8_t value.
* Like visit_type_int(), except clamps the value to uint8_t range.
*/
-void visit_type_uint8(Visitor *v, const char *name, uint8_t *obj,
+bool visit_type_uint8(Visitor *v, const char *name, uint8_t *obj,
Error **errp);
/*
* Visit a uint16_t value.
* Like visit_type_int(), except clamps the value to uint16_t range.
*/
-void visit_type_uint16(Visitor *v, const char *name, uint16_t *obj,
+bool visit_type_uint16(Visitor *v, const char *name, uint16_t *obj,
Error **errp);
/*
* Visit a uint32_t value.
* Like visit_type_int(), except clamps the value to uint32_t range.
*/
-void visit_type_uint32(Visitor *v, const char *name, uint32_t *obj,
+bool visit_type_uint32(Visitor *v, const char *name, uint32_t *obj,
Error **errp);
/*
@@ -515,34 +576,34 @@ void visit_type_uint32(Visitor *v, const char *name, uint32_t *obj,
* Like visit_type_int(), except clamps the value to uint64_t range,
* that is, ensures it is unsigned.
*/
-void visit_type_uint64(Visitor *v, const char *name, uint64_t *obj,
+bool visit_type_uint64(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
/*
* Visit an int8_t value.
* Like visit_type_int(), except clamps the value to int8_t range.
*/
-void visit_type_int8(Visitor *v, const char *name, int8_t *obj, Error **errp);
+bool visit_type_int8(Visitor *v, const char *name, int8_t *obj, Error **errp);
/*
* Visit an int16_t value.
* Like visit_type_int(), except clamps the value to int16_t range.
*/
-void visit_type_int16(Visitor *v, const char *name, int16_t *obj,
+bool visit_type_int16(Visitor *v, const char *name, int16_t *obj,
Error **errp);
/*
* Visit an int32_t value.
* Like visit_type_int(), except clamps the value to int32_t range.
*/
-void visit_type_int32(Visitor *v, const char *name, int32_t *obj,
+bool visit_type_int32(Visitor *v, const char *name, int32_t *obj,
Error **errp);
/*
* Visit an int64_t value.
* Identical to visit_type_int().
*/
-void visit_type_int64(Visitor *v, const char *name, int64_t *obj,
+bool visit_type_int64(Visitor *v, const char *name, int64_t *obj,
Error **errp);
/*
@@ -551,7 +612,7 @@ void visit_type_int64(Visitor *v, const char *name, int64_t *obj,
* recognize additional syntax, such as suffixes for easily scaling
* values.
*/
-void visit_type_size(Visitor *v, const char *name, uint64_t *obj,
+bool visit_type_size(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
/*
@@ -562,8 +623,13 @@ void visit_type_size(Visitor *v, const char *name, uint64_t *obj,
*
* @obj must be non-NULL. Input visitors set *@obj to the value;
* other visitors will leave *@obj unchanged.
+ *
+ * On failure, store an error through @errp. Can happen only when @v
+ * is an input visitor.
+ *
+ * Return true on success, false on failure.
*/
-void visit_type_bool(Visitor *v, const char *name, bool *obj, Error **errp);
+bool visit_type_bool(Visitor *v, const char *name, bool *obj, Error **errp);
/*
* Visit a string value.
@@ -579,9 +645,14 @@ void visit_type_bool(Visitor *v, const char *name, bool *obj, Error **errp);
* It is safe to cast away const when preparing a (const char *) value
* into @obj for use by an output visitor.
*
+ * On failure, set *@obj to NULL and store an error through @errp.
+ * Can happen only when @v is an input visitor.
+ *
+ * Return true on success, false on failure.
+ *
* FIXME: Callers that try to output NULL *obj should not be allowed.
*/
-void visit_type_str(Visitor *v, const char *name, char **obj, Error **errp);
+bool visit_type_str(Visitor *v, const char *name, char **obj, Error **errp);
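
The const-cast idiom mentioned above, in sketch form; it is only valid with an output visitor, which never modifies *@obj:

    #include "qapi/visitor.h"

    static bool put_name(Visitor *v, const char *value, Error **errp)
    {
        char *tmp = (char *)value;   /* output visitors only read *obj */

        return visit_type_str(v, "name", &tmp, errp);
    }
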
/*
* Visit a number (i.e. double) value.
@@ -592,8 +663,13 @@ void visit_type_str(Visitor *v, const char *name, char **obj, Error **errp);
* @obj must be non-NULL. Input visitors set *@obj to the value;
* other visitors will leave *@obj unchanged. Visitors should
* document if infinity or NaN are not permitted.
+ *
+ * On failure, store an error through @errp. Can happen only when @v
+ * is an input visitor.
+ *
+ * Return true on success, false on failure.
*/
-void visit_type_number(Visitor *v, const char *name, double *obj,
+bool visit_type_number(Visitor *v, const char *name, double *obj,
Error **errp);
/*
@@ -606,11 +682,16 @@ void visit_type_number(Visitor *v, const char *name, double *obj,
* other visitors will leave *@obj unchanged. *@obj must be non-NULL
* for output visitors.
*
+ * On failure, set *@obj to NULL and store an error through @errp.
+ * Can happen only when @v is an input visitor.
+ *
+ * Return true on success, false on failure.
+ *
* Note that some kinds of input can't express arbitrary QObject.
* E.g. the visitor returned by qobject_input_visitor_new_keyval()
* can't create numbers or booleans, only strings.
*/
-void visit_type_any(Visitor *v, const char *name, QObject **obj, Error **errp);
+bool visit_type_any(Visitor *v, const char *name, QObject **obj, Error **errp);
/*
* Visit a JSON null value.
@@ -620,8 +701,13 @@ void visit_type_any(Visitor *v, const char *name, QObject **obj, Error **errp);
*
* @obj must be non-NULL. Input visitors set *@obj to the value;
* other visitors ignore *@obj.
+ *
+ * On failure, set *@obj to NULL and store an error through @errp.
+ * Can happen only when @v is an input visitor.
+ *
+ * Return true on success, false on failure.
*/
-void visit_type_null(Visitor *v, const char *name, QNull **obj,
+bool visit_type_null(Visitor *v, const char *name, QNull **obj,
Error **errp);
#endif
diff --git a/include/qemu-common.h b/include/qemu-common.h
deleted file mode 100644
index ed60ba251d..0000000000
--- a/include/qemu-common.h
+++ /dev/null
@@ -1,163 +0,0 @@
-
-/* Common header file that is included by all of QEMU.
- *
- * This file is supposed to be included only by .c files. No header file should
- * depend on qemu-common.h, as this would easily lead to circular header
- * dependencies.
- *
- * If a header file uses a definition from qemu-common.h, that definition
- * must be moved to a separate header file, and the header that uses it
- * must include that header.
- */
-#ifndef QEMU_COMMON_H
-#define QEMU_COMMON_H
-
-#include "qemu/fprintf-fn.h"
-
-#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR)
-
-/* Copyright string for -version arguments, About dialogs, etc */
-#define QEMU_COPYRIGHT "Copyright (c) 2003-2018 " \
- "Fabrice Bellard and the QEMU Project developers"
-
-/* Bug reporting information for --help arguments, About dialogs, etc */
-#define QEMU_HELP_BOTTOM \
- "See <https://qemu.org/contribute/report-a-bug> for how to report bugs.\n" \
- "More information on the QEMU project at <https://qemu.org>."
-
-/* main function, renamed */
-#if defined(CONFIG_COCOA)
-int qemu_main(int argc, char **argv, char **envp);
-#endif
-
-void qemu_get_timedate(struct tm *tm, int offset);
-int qemu_timedate_diff(struct tm *tm);
-
-#define qemu_isalnum(c) isalnum((unsigned char)(c))
-#define qemu_isalpha(c) isalpha((unsigned char)(c))
-#define qemu_iscntrl(c) iscntrl((unsigned char)(c))
-#define qemu_isdigit(c) isdigit((unsigned char)(c))
-#define qemu_isgraph(c) isgraph((unsigned char)(c))
-#define qemu_islower(c) islower((unsigned char)(c))
-#define qemu_isprint(c) isprint((unsigned char)(c))
-#define qemu_ispunct(c) ispunct((unsigned char)(c))
-#define qemu_isspace(c) isspace((unsigned char)(c))
-#define qemu_isupper(c) isupper((unsigned char)(c))
-#define qemu_isxdigit(c) isxdigit((unsigned char)(c))
-#define qemu_tolower(c) tolower((unsigned char)(c))
-#define qemu_toupper(c) toupper((unsigned char)(c))
-#define qemu_isascii(c) isascii((unsigned char)(c))
-#define qemu_toascii(c) toascii((unsigned char)(c))
-
-void *qemu_oom_check(void *ptr);
-
-ssize_t qemu_write_full(int fd, const void *buf, size_t count)
- QEMU_WARN_UNUSED_RESULT;
-
-#ifndef _WIN32
-int qemu_pipe(int pipefd[2]);
-/* like openpty() but also makes it raw; return master fd */
-int qemu_openpty_raw(int *aslave, char *pty_name);
-#endif
-
-#ifdef _WIN32
-/* MinGW needs type casts for the 'buf' and 'optval' arguments. */
-#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
- getsockopt(sockfd, level, optname, (void *)optval, optlen)
-#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
- setsockopt(sockfd, level, optname, (const void *)optval, optlen)
-#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags)
-#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
- sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen)
-#else
-#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
- getsockopt(sockfd, level, optname, optval, optlen)
-#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
- setsockopt(sockfd, level, optname, optval, optlen)
-#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags)
-#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
- sendto(sockfd, buf, len, flags, destaddr, addrlen)
-#endif
-
-extern bool tcg_allowed;
-void tcg_exec_init(unsigned long tb_size);
-#ifdef CONFIG_TCG
-#define tcg_enabled() (tcg_allowed)
-#else
-#define tcg_enabled() 0
-#endif
-
-void cpu_exec_init_all(void);
-void cpu_exec_step_atomic(CPUState *cpu);
-
-/**
- * set_preferred_target_page_bits:
- * @bits: number of bits needed to represent an address within the page
- *
- * Set the preferred target page size (the actual target page
- * size may be smaller than any given CPU's preference).
- * Returns true on success, false on failure (which can only happen
- * if this is called after the system has already finalized its
- * choice of page size and the requested page size is smaller than that).
- */
-bool set_preferred_target_page_bits(int bits);
-
-/**
- * Sends a (part of) iovec down a socket, yielding when the socket is full, or
- * Receives data into a (part of) iovec from a socket,
- * yielding when there is no data in the socket.
- * The same interface as qemu_sendv_recvv(), with added yielding.
- * XXX should mark these as coroutine_fn
- */
-ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
- size_t offset, size_t bytes, bool do_send);
-#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
- qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
-#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
- qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)
-
-/**
- * The same as above, but with just a single buffer
- */
-ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send);
-#define qemu_co_recv(sockfd, buf, bytes) \
- qemu_co_send_recv(sockfd, buf, bytes, false)
-#define qemu_co_send(sockfd, buf, bytes) \
- qemu_co_send_recv(sockfd, buf, bytes, true)
-
-void qemu_progress_init(int enabled, float min_skip);
-void qemu_progress_end(void);
-void qemu_progress_print(float delta, int max);
-const char *qemu_get_vm_name(void);
-
-#define QEMU_FILE_TYPE_BIOS 0
-#define QEMU_FILE_TYPE_KEYMAP 1
-char *qemu_find_file(int type, const char *name);
-
-/* OS specific functions */
-void os_setup_early_signal_handling(void);
-char *os_find_datadir(void);
-int os_parse_cmd_args(int index, const char *optarg);
-
-#include "qemu/module.h"
-
-/*
- * Hexdump a buffer to a file. An optional string prefix is added to every line
- */
-
-void qemu_hexdump(const char *buf, FILE *fp, const char *prefix, size_t size);
-
-/*
- * helper to parse debug environment variables
- */
-int parse_debug_env(const char *name, int max, int initial);
-
-const char *qemu_ether_ntoa(const MACAddr *mac);
-char *size_to_str(uint64_t val);
-void page_size_init(void);
-
-/* returns non-zero if dump is in progress, otherwise zero is
- * returned. */
-bool dump_in_progress(void);
-
-#endif
diff --git a/include/qemu-io.h b/include/qemu-io.h
index 7433239372..3af513004a 100644
--- a/include/qemu-io.h
+++ b/include/qemu-io.h
@@ -18,7 +18,6 @@
#ifndef QEMU_IO_H
#define QEMU_IO_H
-#include "qemu-common.h"
#define CMD_FLAG_GLOBAL ((int)0x80000000) /* don't iterate "args" */
diff --git a/include/qemu-main.h b/include/qemu-main.h
new file mode 100644
index 0000000000..940960a7db
--- /dev/null
+++ b/include/qemu-main.h
@@ -0,0 +1,11 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_MAIN_H
+#define QEMU_MAIN_H
+
+int qemu_default_main(void);
+extern int (*qemu_main)(void);
+
+#endif /* QEMU_MAIN_H */
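
One plausible use of this hook, sketched under the assumption that a display frontend needs to own the process main loop; my_ui_main() and my_ui_register() are hypothetical:

    #include "qemu-main.h"

    static int my_ui_main(void)
    {
        /* run the toolkit's event loop here, then chain to the default */
        return qemu_default_main();
    }

    static void my_ui_register(void)
    {
        qemu_main = my_ui_main;   /* override the default entry point */
    }
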
diff --git a/include/sysemu/accel.h b/include/qemu/accel.h
index 5565e00a96..972a849a2b 100644
--- a/include/sysemu/accel.h
+++ b/include/qemu/accel.h
@@ -20,27 +20,35 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
-#ifndef HW_ACCEL_H
-#define HW_ACCEL_H
+#ifndef QEMU_ACCEL_H
+#define QEMU_ACCEL_H
#include "qom/object.h"
-#include "hw/qdev-properties.h"
+#include "exec/hwaddr.h"
-typedef struct AccelState {
+struct AccelState {
/*< private >*/
Object parent_obj;
-} AccelState;
+};
typedef struct AccelClass {
/*< private >*/
ObjectClass parent_class;
/*< public >*/
- const char *opt_name;
const char *name;
- int (*available)(void);
int (*init_machine)(MachineState *ms);
+#ifndef CONFIG_USER_ONLY
void (*setup_post)(MachineState *ms, AccelState *accel);
+ bool (*has_memory)(MachineState *ms, AddressSpace *as,
+ hwaddr start_addr, hwaddr size);
+#endif
+ bool (*cpu_common_realize)(CPUState *cpu, Error **errp);
+ void (*cpu_common_unrealize)(CPUState *cpu);
+
+ /* gdbstub related hooks */
+ int (*gdbstub_supported_sstep_flags)(void);
+
bool *allowed;
/*
* Array of global properties that would be applied when specific
@@ -64,10 +72,44 @@ typedef struct AccelClass {
#define ACCEL_GET_CLASS(obj) \
OBJECT_GET_CLASS(AccelClass, (obj), TYPE_ACCEL)
-extern unsigned long tcg_tb_size;
+AccelClass *accel_find(const char *opt_name);
+AccelState *current_accel(void);
+const char *current_accel_name(void);
+
+void accel_init_interfaces(AccelClass *ac);
+
+#ifndef CONFIG_USER_ONLY
+int accel_init_machine(AccelState *accel, MachineState *ms);
-void configure_accelerator(MachineState *ms, const char *progname);
/* Called just before os_setup_post (ie just before drop OS privs) */
void accel_setup_post(MachineState *ms);
+#endif /* !CONFIG_USER_ONLY */
-#endif
+/**
+ * accel_cpu_instance_init:
+ * @cpu: The CPU that needs to do accel-specific object initializations.
+ */
+void accel_cpu_instance_init(CPUState *cpu);
+
+/**
+ * accel_cpu_common_realize:
+ * @cpu: The CPU that needs to call accel-specific cpu realization.
+ * @errp: currently unused.
+ */
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp);
+
+/**
+ * accel_cpu_common_unrealize:
+ * @cpu: The CPU that needs to call accel-specific cpu unrealization.
+ */
+void accel_cpu_common_unrealize(CPUState *cpu);
+
+/**
+ * accel_supported_gdbstub_sstep_flags:
+ *
+ * Returns the supported single step modes for the configured
+ * accelerator.
+ */
+int accel_supported_gdbstub_sstep_flags(void);
+
+#endif /* QEMU_ACCEL_H */
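
A small sketch using only the declarations above, e.g. for a diagnostic that reports which accelerator is active; printf stands in for whatever logging the caller normally uses:

    #include <stdio.h>
    #include "qemu/accel.h"

    static void report_accel(void)
    {
        AccelClass *ac = accel_find("kvm");   /* NULL if KVM is not built in */

        if (ac) {
            printf("kvm accelerator class present: %s\n", ac->name);
        }
        printf("active accelerator: %s\n", current_accel_name());
    }
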
diff --git a/include/qemu/async-teardown.h b/include/qemu/async-teardown.h
new file mode 100644
index 0000000000..b281da005b
--- /dev/null
+++ b/include/qemu/async-teardown.h
@@ -0,0 +1,20 @@
+/*
+ * Asynchronous teardown
+ *
+ * Copyright IBM, Corp. 2022
+ *
+ * Authors:
+ * Claudio Imbrenda <imbrenda@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_ASYNC_TEARDOWN_H
+#define QEMU_ASYNC_TEARDOWN_H
+
+#ifdef CONFIG_LINUX
+void init_async_teardown(void);
+#endif
+
+#endif
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index a6ac188188..99110abefb 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -8,13 +8,15 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
- * See docs/devel/atomics.txt for discussion about the guarantees each
+ * See docs/devel/atomics.rst for discussion about the guarantees each
* atomic primitive is meant to provide.
*/
#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H
+#include "compiler.h"
+
/* Compiler barrier */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })
@@ -60,8 +62,9 @@
(unsigned short)1, \
(expr)+0))))))
-#ifdef __ATOMIC_RELAXED
-/* For C11 atomic ops */
+#ifndef __ATOMIC_RELAXED
+#error "Expecting C11 atomic ops"
+#endif
/* Manual memory barriers
*
@@ -80,7 +83,7 @@
* no processors except Alpha need a barrier here. Leave it in if
* using Thread Sanitizer to avoid warnings, otherwise optimize it away.
*/
-#if defined(__SANITIZE_THREAD__)
+#ifdef QEMU_SANITIZE_THREAD
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
@@ -88,6 +91,13 @@
#define smp_read_barrier_depends() barrier()
#endif
+/*
+ * A signal barrier forces all pending local memory ops to be observed before
+ * a SIGSEGV is delivered to the *same* thread. In practice this is exactly
+ * the same as barrier(), but since we have the correct builtin, use it.
+ */
+#define signal_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)
+
/* Sanity check that the size of an atomic operation isn't "overly large".
* Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
* want to use them because we ought not need them, and this lets us do a
@@ -118,371 +128,199 @@
* no effect on the generated code but not using the atomic primitives
* will get flagged by sanitizers as a violation.
*/
-#define atomic_read__nocheck(ptr) \
+#define qatomic_read__nocheck(ptr) \
__atomic_load_n(ptr, __ATOMIC_RELAXED)
-#define atomic_read(ptr) \
- ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_read__nocheck(ptr); \
+#define qatomic_read(ptr) \
+ ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_read__nocheck(ptr); \
})
-#define atomic_set__nocheck(ptr, i) \
+#define qatomic_set__nocheck(ptr, i) \
__atomic_store_n(ptr, i, __ATOMIC_RELAXED)
-#define atomic_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_set__nocheck(ptr, i); \
+#define qatomic_set(ptr, i) do { \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_set__nocheck(ptr, i); \
} while(0)
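
A minimal sketch of the renamed helpers in use, for a flag shared between threads; the build-time assert above guarantees the access is no wider than ATOMIC_REG_SIZE:

    #include <stdbool.h>
    #include "qemu/atomic.h"

    static int shutdown_requested_flag;

    static void request_shutdown(void)
    {
        qatomic_set(&shutdown_requested_flag, 1);
    }

    static bool shutdown_requested(void)
    {
        return qatomic_read(&shutdown_requested_flag) != 0;
    }
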
/* See above: most compilers currently treat consume and acquire the
- * same, but this slows down atomic_rcu_read unnecessarily.
+ * same, but this slows down qatomic_rcu_read unnecessarily.
*/
-#ifdef __SANITIZE_THREAD__
-#define atomic_rcu_read__nocheck(ptr, valptr) \
+#ifdef QEMU_SANITIZE_THREAD
+#define qatomic_rcu_read__nocheck(ptr, valptr) \
__atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
-#define atomic_rcu_read__nocheck(ptr, valptr) \
- __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
+#define qatomic_rcu_read__nocheck(ptr, valptr) \
+ __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
smp_read_barrier_depends();
#endif
-#define atomic_rcu_read(ptr) \
- ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- typeof_strip_qual(*ptr) _val; \
- atomic_rcu_read__nocheck(ptr, &_val); \
- _val; \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define qatomic_rcu_read_internal(ptr, _val) \
+ ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ typeof_strip_qual(*ptr) _val; \
+ qatomic_rcu_read__nocheck(ptr, &_val); \
+ _val; \
})
+#define qatomic_rcu_read(ptr) \
+ qatomic_rcu_read_internal((ptr), MAKE_IDENTFIER(_val))
-#define atomic_rcu_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
+#define qatomic_rcu_set(ptr, i) do { \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
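
A sketch of the publish/consume pairing these two macros provide, for a hypothetical config object; in real code the reader would additionally sit inside rcu_read_lock()/rcu_read_unlock() from qemu/rcu.h:

    #include "qemu/atomic.h"

    typedef struct MyConfig { int value; } MyConfig;
    static MyConfig *current_config;

    static void publish_config(MyConfig *newcfg)
    {
        /* release: newcfg's initialization happens before the pointer update */
        qatomic_rcu_set(&current_config, newcfg);
    }

    static int read_config_value(void)
    {
        MyConfig *c = qatomic_rcu_read(&current_config);   /* consume */

        return c ? c->value : 0;
    }
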
-#define atomic_load_acquire(ptr) \
+#define qatomic_load_acquire(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \
_val; \
})
-#define atomic_store_release(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+#define qatomic_store_release(ptr, i) do { \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
/* All the remaining operations are fully sequentially consistent */
-#define atomic_xchg__nocheck(ptr, i) ({ \
+#define qatomic_xchg__nocheck(ptr, i) ({ \
__atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \
})
-#define atomic_xchg(ptr, i) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_xchg__nocheck(ptr, i); \
+#define qatomic_xchg(ptr, i) ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_xchg__nocheck(ptr, i); \
})
-/* Returns the eventual value, failed or not */
-#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \
+/* Returns the old value of '*ptr' (whether the cmpxchg failed or not) */
+#define qatomic_cmpxchg__nocheck(ptr, old, new) ({ \
typeof_strip_qual(*ptr) _old = (old); \
(void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
_old; \
})
-#define atomic_cmpxchg(ptr, old, new) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_cmpxchg__nocheck(ptr, old, new); \
+#define qatomic_cmpxchg(ptr, old, new) ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_cmpxchg__nocheck(ptr, old, new); \
})
/* Provide shorter names for GCC atomic builtins, return old value */
-#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
-
-#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
+
+#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
+
+#define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
/* And even shorter names that return void. */
-#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_inc(ptr) \
+ ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_dec(ptr) \
+ ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_add(ptr, n) \
+ ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_sub(ptr, n) \
+ ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_and(ptr, n) \
+ ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_or(ptr, n) \
+ ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_xor(ptr, n) \
+ ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
-#else /* __ATOMIC_RELAXED */
+#define smp_wmb() smp_mb_release()
+#define smp_rmb() smp_mb_acquire()
/*
- * We use GCC builtin if it's available, as that can use mfence on
- * 32-bit as well, e.g. if built with -march=pentium-m. However, on
- * i386 the spec is buggy, and the implementation followed it until
- * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
+ * SEQ_CST is weaker than the older __sync_* builtins and Linux
+ * kernel read-modify-write atomics. Provide a macro to obtain
+ * the same semantics.
*/
-#if defined(__i386__) || defined(__x86_64__)
-#if !QEMU_GNUC_PREREQ(4, 4)
-#if defined __x86_64__
-#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; })
+#if !defined(QEMU_SANITIZE_THREAD) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+# define smp_mb__before_rmw() signal_barrier()
+# define smp_mb__after_rmw() signal_barrier()
#else
-#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
-#endif
-#endif
-#endif
-
-
-#ifdef __alpha__
-#define smp_read_barrier_depends() asm volatile("mb":::"memory")
+# define smp_mb__before_rmw() smp_mb()
+# define smp_mb__after_rmw() smp_mb()
#endif
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-
/*
- * Because of the strongly ordered storage model, wmb() and rmb() are nops
- * here (a compiler barrier only). QEMU doesn't do accesses to write-combining
- * qemu memory or non-temporal load/stores from C code.
+ * On some architectures, qatomic_set_mb is more efficient than a store
+ * plus a fence.
*/
-#define smp_mb_release() barrier()
-#define smp_mb_acquire() barrier()
-
-/*
- * __sync_lock_test_and_set() is documented to be an acquire barrier only,
- * but it is a full barrier at the hardware level. Add a compiler barrier
- * to make it a full barrier also at the compiler level.
- */
-#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
-
-#elif defined(_ARCH_PPC)
-/*
- * We use an eieio() for wmb() on powerpc. This assumes we don't
- * need to order cacheable and non-cacheable stores with respect to
- * each other.
- *
- * smp_mb has the same problem as on x86 for not-very-new GCC
- * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
- */
-#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
-#if defined(__powerpc64__)
-#define smp_mb_release() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
-#define smp_mb_acquire() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
+#if !defined(QEMU_SANITIZE_THREAD) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+# define qatomic_set_mb(ptr, i) \
+ ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
#else
-#define smp_mb_release() ({ asm volatile("sync" ::: "memory"); (void)0; })
-#define smp_mb_acquire() ({ asm volatile("sync" ::: "memory"); (void)0; })
-#endif
-#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })
-
-#endif /* _ARCH_PPC */
-
-/*
- * For (host) platforms we don't have explicit barrier definitions
- * for, we use the gcc __sync_synchronize() primitive to generate a
- * full barrier. This should be safe on all platforms, though it may
- * be overkill for smp_mb_acquire() and smp_mb_release().
- */
-#ifndef smp_mb
-#define smp_mb() __sync_synchronize()
-#endif
-
-#ifndef smp_mb_acquire
-#define smp_mb_acquire() __sync_synchronize()
-#endif
-
-#ifndef smp_mb_release
-#define smp_mb_release() __sync_synchronize()
-#endif
-
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() barrier()
+# define qatomic_set_mb(ptr, i) \
+ ({ qatomic_store_release(ptr, i); smp_mb(); })
#endif
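
A sketch of the store-then-load pairing qatomic_set_mb() is meant for, the Dekker-style pattern discussed in docs/devel/atomics.rst; flag_a and flag_b are illustrative only:

    #include <stdbool.h>
    #include "qemu/atomic.h"

    static int flag_a, flag_b;

    /* Thread A announces interest, then checks the peer; thread B mirrors
     * this with the roles of flag_a and flag_b swapped. */
    static bool thread_a_may_proceed(void)
    {
        qatomic_set_mb(&flag_a, 1);          /* store + full barrier */
        return qatomic_read(&flag_b) == 0;   /* cannot be hoisted above it */
    }
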
-/* These will only be atomic if the processor does the fetch or store
- * in a single issue memory operation
- */
-#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
-#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
-
-#define atomic_read(ptr) atomic_read__nocheck(ptr)
-#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i)
-
-/**
- * atomic_rcu_read - reads a RCU-protected pointer to a local variable
- * into a RCU read-side critical section. The pointer can later be safely
- * dereferenced within the critical section.
- *
- * This ensures that the pointer copy is invariant thorough the whole critical
- * section.
- *
- * Inserts memory barriers on architectures that require them (currently only
- * Alpha) and documents which pointers are protected by RCU.
- *
- * atomic_rcu_read also includes a compiler barrier to ensure that
- * value-speculative optimizations (e.g. VSS: Value Speculation
- * Scheduling) does not perform the data read before the pointer read
- * by speculating the value of the pointer.
- *
- * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
- */
-#define atomic_rcu_read(ptr) ({ \
- typeof(*ptr) _val = atomic_read(ptr); \
- smp_read_barrier_depends(); \
- _val; \
+#define qatomic_fetch_inc_nonzero(ptr) ({ \
+ typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr); \
+ while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \
+ _oldn = qatomic_read(ptr); \
+ } \
+ _oldn; \
})
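
The usual application of this helper is taking a reference only while the object is still alive, roughly as in this sketch:

    #include <stdbool.h>
    #include "qemu/atomic.h"

    typedef struct Obj { unsigned refcount; } Obj;

    /* Returns false if the refcount had already dropped to zero. */
    static bool obj_try_ref(Obj *obj)
    {
        return qatomic_fetch_inc_nonzero(&obj->refcount) != 0;
    }
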
-/**
- * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
- * meant to be read by RCU read-side critical sections.
+/*
+ * Abstractions to access atomically (i.e. "once") i64/u64 variables.
*
- * Documents which pointers will be dereferenced by RCU read-side critical
- * sections and adds the required memory barriers on architectures requiring
- * them. It also makes sure the compiler does not reorder code initializing the
- * data structure before its publication.
+ * The i386 abi is odd in that by default members are only aligned to
+ * 4 bytes, which means that 8-byte types can wind up mis-aligned.
+ * Clang will then warn about this, and emit a call into libatomic.
*
- * Should match atomic_rcu_read().
+ * Use of these types in structures when they will be used with atomic
+ * operations can avoid this.
*/
-#define atomic_rcu_set(ptr, i) do { \
- smp_wmb(); \
- atomic_set(ptr, i); \
-} while (0)
-
-#define atomic_load_acquire(ptr) ({ \
- typeof(*ptr) _val = atomic_read(ptr); \
- smp_mb_acquire(); \
- _val; \
-})
-
-#define atomic_store_release(ptr, i) do { \
- smp_mb_release(); \
- atomic_set(ptr, i); \
-} while (0)
-
-#ifndef atomic_xchg
-#if defined(__clang__)
-#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
-#else
-/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
-#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
-#endif
-#endif
-#define atomic_xchg__nocheck atomic_xchg
-
-/* Provide shorter names for GCC atomic builtins. */
-#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
-#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
-#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
-#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
-#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
-#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
-#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
-
-#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
-#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
-#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
-#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
-#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
-#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
-#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
-
-#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
-#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new)
-
-/* And even shorter names that return void. */
-#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
-#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
-#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
-#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
-#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
-#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
-#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
+typedef int64_t aligned_int64_t __attribute__((aligned(8)));
+typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
-#endif /* __ATOMIC_RELAXED */
-
-#ifndef smp_wmb
-#define smp_wmb() smp_mb_release()
-#endif
-#ifndef smp_rmb
-#define smp_rmb() smp_mb_acquire()
-#endif
-
-/* This is more efficient than a store plus a fence. */
-#if !defined(__SANITIZE_THREAD__)
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))
-#endif
-#endif
-
-/* atomic_mb_read/set semantics map Java volatile variables. They are
- * less expensive on some platforms (notably POWER) than fully
- * sequentially consistent operations.
- *
- * As long as they are used as paired operations they are safe to
- * use. See docs/devel/atomics.txt for more discussion.
- */
-
-#ifndef atomic_mb_read
-#define atomic_mb_read(ptr) \
- atomic_load_acquire(ptr)
-#endif
-
-#ifndef atomic_mb_set
-#define atomic_mb_set(ptr, i) do { \
- atomic_store_release(ptr, i); \
- smp_mb(); \
-} while(0)
-#endif
-
-#define atomic_fetch_inc_nonzero(ptr) ({ \
- typeof_strip_qual(*ptr) _oldn = atomic_read(ptr); \
- while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \
- _oldn = atomic_read(ptr); \
- } \
- _oldn; \
-})
-
-/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
#ifdef CONFIG_ATOMIC64
-static inline int64_t atomic_read_i64(const int64_t *ptr)
-{
- /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
- return atomic_read__nocheck(ptr);
-}
-
-static inline uint64_t atomic_read_u64(const uint64_t *ptr)
-{
- return atomic_read__nocheck(ptr);
-}
-
-static inline void atomic_set_i64(int64_t *ptr, int64_t val)
-{
- atomic_set__nocheck(ptr, val);
-}
-
-static inline void atomic_set_u64(uint64_t *ptr, uint64_t val)
-{
- atomic_set__nocheck(ptr, val);
-}
-
-static inline void atomic64_init(void)
+/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
+#define qatomic_read_i64(P) \
+ _Generic(*(P), int64_t: qatomic_read__nocheck(P))
+#define qatomic_read_u64(P) \
+ _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
+#define qatomic_set_i64(P, V) \
+ _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
+#define qatomic_set_u64(P, V) \
+ _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))
+
+static inline void qatomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
-int64_t atomic_read_i64(const int64_t *ptr);
-uint64_t atomic_read_u64(const uint64_t *ptr);
-void atomic_set_i64(int64_t *ptr, int64_t val);
-void atomic_set_u64(uint64_t *ptr, uint64_t val);
-void atomic64_init(void);
+int64_t qatomic_read_i64(const int64_t *ptr);
+uint64_t qatomic_read_u64(const uint64_t *ptr);
+void qatomic_set_i64(int64_t *ptr, int64_t val);
+void qatomic_set_u64(uint64_t *ptr, uint64_t val);
+void qatomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */
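
A sketch of the intended use of aligned_uint64_t together with the 64-bit "once" accessors, so that a 32-bit x86 build never falls back to libatomic:

    #include "qemu/atomic.h"

    typedef struct Stats {
        aligned_uint64_t bytes_transferred;   /* always 8-byte aligned */
    } Stats;

    /* Read and rewrite the counter with "once" semantics; note the update
     * as a whole is not a single atomic read-modify-write. */
    static void stats_add(Stats *s, uint64_t n)
    {
        uint64_t cur = qatomic_read_u64(&s->bytes_transferred);

        qatomic_set_u64(&s->bytes_transferred, cur + n);
    }
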
#endif /* QEMU_ATOMIC_H */
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
index a6af22ff10..88af6d4ea3 100644
--- a/include/qemu/atomic128.h
+++ b/include/qemu/atomic128.h
@@ -6,13 +6,32 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
- * See docs/devel/atomics.txt for discussion about the guarantees each
+ * See docs/devel/atomics.rst for discussion about the guarantees each
* atomic primitive is meant to provide.
*/
#ifndef QEMU_ATOMIC128_H
#define QEMU_ATOMIC128_H
+#include "qemu/int128.h"
+
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x. We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled. See
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
/*
* GCC is a house divided about supporting large atomic operations.
*
@@ -24,8 +43,8 @@
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
*
* This interpretation is not especially helpful for QEMU.
- * For softmmu, all RAM is always read/write from the hypervisor.
- * For user-only, if the guest doesn't implement such an __atomic_read
+ * For system-mode, all RAM is always read/write from the hypervisor.
+ * For user-mode, if the guest doesn't implement such an __atomic_read
* then the host need not worry about it either.
*
* Moreover, using libatomic is not an option, because its interface is
@@ -39,115 +58,7 @@
* Therefore, special case each platform.
*/
-#if defined(CONFIG_ATOMIC128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- return atomic_cmpxchg__nocheck(ptr, cmp, new);
-}
-# define HAVE_CMPXCHG128 1
-#elif defined(CONFIG_CMPXCHG128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- return __sync_val_compare_and_swap_16(ptr, cmp, new);
-}
-# define HAVE_CMPXCHG128 1
-#elif defined(__aarch64__)
-/* Through gcc 8, aarch64 has no support for 128-bit at all. */
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
- uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
- uint64_t oldl, oldh;
- uint32_t tmp;
-
- asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
- "cmp %[oldl], %[cmpl]\n\t"
- "ccmp %[oldh], %[cmph], #0, eq\n\t"
- "b.ne 1f\n\t"
- "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
- "cbnz %w[tmp], 0b\n"
- "1:"
- : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
- [oldl] "=&r"(oldl), [oldh] "=r"(oldh)
- : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
- [newl] "r"(newl), [newh] "r"(newh)
- : "memory", "cc");
-
- return int128_make128(oldl, oldh);
-}
-# define HAVE_CMPXCHG128 1
-#else
-/* Fallback definition that must be optimized away, or error. */
-Int128 QEMU_ERROR("unsupported atomic")
- atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
-# define HAVE_CMPXCHG128 0
-#endif /* Some definition for HAVE_CMPXCHG128 */
-
-
-#if defined(CONFIG_ATOMIC128)
-static inline Int128 atomic16_read(Int128 *ptr)
-{
- return atomic_read__nocheck(ptr);
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- atomic_set__nocheck(ptr, val);
-}
-
-# define HAVE_ATOMIC128 1
-#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__)
-/* We can do better than cmpxchg for AArch64. */
-static inline Int128 atomic16_read(Int128 *ptr)
-{
- uint64_t l, h;
- uint32_t tmp;
-
- /* The load must be paired with the store to guarantee not tearing. */
- asm("0: ldxp %[l], %[h], %[mem]\n\t"
- "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
- "cbnz %w[tmp], 0b"
- : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));
-
- return int128_make128(l, h);
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- uint64_t l = int128_getlo(val), h = int128_gethi(val);
- uint64_t t1, t2;
-
- /* Load into temporaries to acquire the exclusive access lock. */
- asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
- "stxp %w[t1], %[l], %[h], %[mem]\n\t"
- "cbnz %w[t1], 0b"
- : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
- : [l] "r"(l), [h] "r"(h));
-}
-
-# define HAVE_ATOMIC128 1
-#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
-static inline Int128 atomic16_read(Int128 *ptr)
-{
- /* Maybe replace 0 with 0, returning the old value. */
- return atomic16_cmpxchg(ptr, 0, 0);
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- Int128 old = *ptr, cmp;
- do {
- cmp = old;
- old = atomic16_cmpxchg(ptr, cmp, val);
- } while (old != cmp);
-}
-
-# define HAVE_ATOMIC128 1
-#else
-/* Fallback definitions that must be optimized away, or error. */
-Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr);
-void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val);
-# define HAVE_ATOMIC128 0
-#endif /* Some definition for HAVE_ATOMIC128 */
+#include "host/atomic128-cas.h"
+#include "host/atomic128-ldst.h"
#endif /* QEMU_ATOMIC128_H */
diff --git a/include/qemu/base64.h b/include/qemu/base64.h
index 815d85267d..46a75fbbeb 100644
--- a/include/qemu/base64.h
+++ b/include/qemu/base64.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,7 +21,6 @@
#ifndef QEMU_BASE64_H
#define QEMU_BASE64_H
-#include "qemu-common.h"
/**
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 509eeddece..97806811ee 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -22,41 +22,45 @@
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_to_le(dst, src, nbits) Convert bitmap to little endian
* bitmap_from_le(dst, src, nbits) Convert bitmap from little endian
+ * bitmap_copy_with_src_offset(dst, src, offset, nbits)
+ * *dst = *src (with an offset into src)
+ * bitmap_copy_with_dst_offset(dst, src, offset, nbits)
+ * *dst = *src (with an offset into dst)
*/
/*
* Also the following operations apply to bitmaps.
*
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
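
A short sketch of the declare/fill/scan pattern these helpers support, with a compile-time nbits as the note above recommends:

    #include "qemu/bitmap.h"

    #define MY_NBITS 128
    static DECLARE_BITMAP(my_map, MY_NBITS);

    static void mark_and_walk(void)
    {
        unsigned long i;

        bitmap_zero(my_map, MY_NBITS);
        bitmap_set(my_map, 10, 4);          /* set bits 10..13 */

        for (i = find_first_bit(my_map, MY_NBITS);
             i < MY_NBITS;
             i = find_next_bit(my_map, MY_NBITS, i + 1)) {
            /* i visits 10, 11, 12, 13 */
        }
    }
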
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
@@ -221,6 +225,10 @@ static inline int bitmap_intersects(const unsigned long *src1,
static inline long bitmap_count_one(const unsigned long *bitmap, long nbits)
{
+ if (unlikely(!nbits)) {
+ return 0;
+ }
+
if (small_nbits(nbits)) {
return ctpopl(*bitmap & BITMAP_LAST_WORD_MASK(nbits));
} else {
@@ -228,10 +236,24 @@ static inline long bitmap_count_one(const unsigned long *bitmap, long nbits)
}
}
+static inline long bitmap_count_one_with_offset(const unsigned long *bitmap,
+ long offset, long nbits)
+{
+ long aligned_offset = QEMU_ALIGN_DOWN(offset, BITS_PER_LONG);
+ long redundant_bits = offset - aligned_offset;
+ long bits_to_count = nbits + redundant_bits;
+ const unsigned long *bitmap_start = bitmap +
+ aligned_offset / BITS_PER_LONG;
+
+ return bitmap_count_one(bitmap_start, bits_to_count) -
+ bitmap_count_one(bitmap_start, redundant_bits);
+}
+
void bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
+bool bitmap_test_and_clear(unsigned long *map, long start, long nr);
void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,
@@ -254,4 +276,9 @@ void bitmap_to_le(unsigned long *dst, const unsigned long *src,
void bitmap_from_le(unsigned long *dst, const unsigned long *src,
long nbits);
+void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src,
+ unsigned long offset, unsigned long nbits);
+void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src,
+ unsigned long shift, unsigned long nbits);
+
#endif /* BITMAP_H */
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index 3f0926cf40..2c0a2fe751 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -20,6 +20,7 @@
#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE)
#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
@@ -50,7 +51,7 @@ static inline void set_bit_atomic(long nr, unsigned long *addr)
unsigned long mask = BIT_MASK(nr);
unsigned long *p = addr + BIT_WORD(nr);
- atomic_or(p, mask);
+ qatomic_or(p, mask);
}
/**
@@ -67,6 +68,19 @@ static inline void clear_bit(long nr, unsigned long *addr)
}
/**
+ * clear_bit_atomic - Clears a bit in memory atomically
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+static inline void clear_bit_atomic(long nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+
+ return qatomic_and(p, ~mask);
+}
+
+/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
@@ -139,7 +153,8 @@ static inline int test_bit(long nr, const unsigned long *addr)
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first set bit, or size.
+ * Returns the bit number of the last set bit,
+ * or @size if there is no set bit in the bitmap.
*/
unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
@@ -149,6 +164,9 @@ unsigned long find_last_bit(const unsigned long *addr,
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next set bit,
+ * or @size if there are no further set bits in the bitmap.
*/
unsigned long find_next_bit(const unsigned long *addr,
unsigned long size,
@@ -159,6 +177,9 @@ unsigned long find_next_bit(const unsigned long *addr,
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next cleared bit,
+ * or @size if there are no further clear bits in the bitmap.
*/
unsigned long find_next_zero_bit(const unsigned long *addr,
@@ -170,7 +191,8 @@ unsigned long find_next_zero_bit(const unsigned long *addr,
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first set bit.
+ * Returns the bit number of the first set bit,
+ * or @size if there is no set bit in the bitmap.
*/
static inline unsigned long find_first_bit(const unsigned long *addr,
unsigned long size)
@@ -193,7 +215,8 @@ static inline unsigned long find_first_bit(const unsigned long *addr,
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first cleared bit.
+ * Returns the bit number of the first cleared bit,
+ * or @size if there is no clear bit in the bitmap.
*/
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size)
@@ -208,7 +231,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
*/
static inline uint8_t rol8(uint8_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((8 - shift) & 7));
+ return (word << (shift & 7)) | (word >> (-shift & 7));
}
/**
@@ -218,7 +241,7 @@ static inline uint8_t rol8(uint8_t word, unsigned int shift)
*/
static inline uint8_t ror8(uint8_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((8 - shift) & 7));
+ return (word >> (shift & 7)) | (word << (-shift & 7));
}
/**
@@ -228,7 +251,7 @@ static inline uint8_t ror8(uint8_t word, unsigned int shift)
*/
static inline uint16_t rol16(uint16_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((16 - shift) & 15));
+ return (word << (shift & 15)) | (word >> (-shift & 15));
}
/**
@@ -238,7 +261,7 @@ static inline uint16_t rol16(uint16_t word, unsigned int shift)
*/
static inline uint16_t ror16(uint16_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((16 - shift) & 15));
+ return (word >> (shift & 15)) | (word << (-shift & 15));
}
/**
@@ -248,7 +271,7 @@ static inline uint16_t ror16(uint16_t word, unsigned int shift)
*/
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((32 - shift) & 31));
+ return (word << (shift & 31)) | (word >> (-shift & 31));
}
/**
@@ -258,7 +281,7 @@ static inline uint32_t rol32(uint32_t word, unsigned int shift)
*/
static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((32 - shift) & 31));
+ return (word >> (shift & 31)) | (word << (-shift & 31));
}
/**
@@ -268,7 +291,7 @@ static inline uint32_t ror32(uint32_t word, unsigned int shift)
*/
static inline uint64_t rol64(uint64_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((64 - shift) & 63));
+ return (word << (shift & 63)) | (word >> (-shift & 63));
}
/**
@@ -278,7 +301,36 @@ static inline uint64_t rol64(uint64_t word, unsigned int shift)
*/
static inline uint64_t ror64(uint64_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((64 - shift) & 63));
+ return (word >> (shift & 63)) | (word << (-shift & 63));
+}
+
+/**
+ * hswap32 - swap 16-bit halfwords within a 32-bit value
+ * @h: value to swap
+ */
+static inline uint32_t hswap32(uint32_t h)
+{
+ return rol32(h, 16);
+}
+
+/**
+ * hswap64 - swap 16-bit halfwords within a 64-bit value
+ * @h: value to swap
+ */
+static inline uint64_t hswap64(uint64_t h)
+{
+ uint64_t m = 0x0000ffff0000ffffull;
+ h = rol64(h, 32);
+ return ((h & m) << 16) | ((h >> 16) & m);
+}
+
+/**
+ * wswap64 - swap 32-bit words within a 64-bit value
+ * @h: value to swap
+ */
+static inline uint64_t wswap64(uint64_t h)
+{
+ return rol64(h, 32);
}
/**
@@ -301,6 +353,44 @@ static inline uint32_t extract32(uint32_t value, int start, int length)
}
/**
+ * extract8:
+ * @value: the value to extract the bit field from
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ *
+ * Extract from the 8 bit input @value the bit field specified by the
+ * @start and @length parameters, and return it. The bit field must
+ * lie entirely within the 8 bit word. It is valid to request that
+ * all 8 bits are returned (ie @length 8 and @start 0).
+ *
+ * Returns: the value of the bit field extracted from the input value.
+ */
+static inline uint8_t extract8(uint8_t value, int start, int length)
+{
+ assert(start >= 0 && length > 0 && length <= 8 - start);
+ return extract32(value, start, length);
+}
+
+/**
+ * extract16:
+ * @value: the value to extract the bit field from
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ *
+ * Extract from the 16 bit input @value the bit field specified by the
+ * @start and @length parameters, and return it. The bit field must
+ * lie entirely within the 16 bit word. It is valid to request that
+ * all 16 bits are returned (ie @length 16 and @start 0).
+ *
+ * Returns: the value of the bit field extracted from the input value.
+ */
+static inline uint16_t extract16(uint16_t value, int start, int length)
+{
+ assert(start >= 0 && length > 0 && length <= 16 - start);
+ return extract32(value, start, length);
+}
+
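
Like extract32()/extract64(), the narrow variants pull @length bits starting at @start. A hedged illustration with invented values, again assuming "qemu/bitops.h":

    #include <assert.h>
    #include "qemu/bitops.h"   /* assumed include path within a QEMU tree */

    static void extract_demo(void)
    {
        assert(extract8(0xA5, 4, 4) == 0x0A);     /* bits [7:4] of 1010 0101 */
        assert(extract16(0xBEEF, 0, 8) == 0xEF);  /* low byte */
        assert(extract16(0xBEEF, 12, 4) == 0x0B); /* top nibble */
    }
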
+/**
* extract64:
* @value: the value to extract the bit field from
* @start: the lowest bit in the bit field (numbered from 0)
@@ -423,13 +513,16 @@ static inline uint64_t deposit64(uint64_t value, int start, int length,
/**
* half_shuffle32:
- * @value: 32-bit value (of which only the bottom 16 bits are of interest)
+ * @x: 32-bit value (of which only the bottom 16 bits are of interest)
+ *
+ * Given an input value::
+ *
+ * xxxx xxxx xxxx xxxx ABCD EFGH IJKL MNOP
*
- * Given an input value:
- * xxxx xxxx xxxx xxxx ABCD EFGH IJKL MNOP
* return the value where the bottom 16 bits are spread out into
- * the odd bits in the word, and the even bits are zeroed:
- * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N 0O0P
+ * the odd bits in the word, and the even bits are zeroed::
+ *
+ * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N 0O0P
*
* Any bits set in the top half of the input are ignored.
*
@@ -449,13 +542,16 @@ static inline uint32_t half_shuffle32(uint32_t x)
/**
* half_shuffle64:
- * @value: 64-bit value (of which only the bottom 32 bits are of interest)
+ * @x: 64-bit value (of which only the bottom 32 bits are of interest)
+ *
+ * Given an input value::
+ *
+ * xxxx xxxx xxxx .... xxxx xxxx ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
*
- * Given an input value:
- * xxxx xxxx xxxx .... xxxx xxxx ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
* return the value where the bottom 32 bits are spread out into
- * the odd bits in the word, and the even bits are zeroed:
- * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N .... 0U0V 0W0X 0Y0Z 0a0b 0c0d 0e0f
+ * the odd bits in the word, and the even bits are zeroed::
+ *
+ * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N .... 0U0V 0W0X 0Y0Z 0a0b 0c0d 0e0f
*
* Any bits set in the top half of the input are ignored.
*
@@ -476,13 +572,16 @@ static inline uint64_t half_shuffle64(uint64_t x)
/**
* half_unshuffle32:
- * @value: 32-bit value (of which only the odd bits are of interest)
+ * @x: 32-bit value (of which only the odd bits are of interest)
+ *
+ * Given an input value::
+ *
+ * xAxB xCxD xExF xGxH xIxJ xKxL xMxN xOxP
*
- * Given an input value:
- * xAxB xCxD xExF xGxH xIxJ xKxL xMxN xOxP
* return the value where all the odd bits are compressed down
- * into the low half of the word, and the high half is zeroed:
- * 0000 0000 0000 0000 ABCD EFGH IJKL MNOP
+ * into the low half of the word, and the high half is zeroed::
+ *
+ * 0000 0000 0000 0000 ABCD EFGH IJKL MNOP
*
* Any even bits set in the input are ignored.
*
@@ -503,13 +602,16 @@ static inline uint32_t half_unshuffle32(uint32_t x)
/**
* half_unshuffle64:
- * @value: 64-bit value (of which only the odd bits are of interest)
+ * @x: 64-bit value (of which only the odd bits are of interest)
+ *
+ * Given an input value::
+ *
+ * xAxB xCxD xExF xGxH xIxJ xKxL xMxN .... xUxV xWxX xYxZ xaxb xcxd xexf
*
- * Given an input value:
- * xAxB xCxD xExF xGxH xIxJ xKxL xMxN .... xUxV xWxX xYxZ xaxb xcxd xexf
* return the value where all the odd bits are compressed down
- * into the low half of the word, and the high half is zeroed:
- * 0000 0000 0000 .... 0000 0000 ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
+ * into the low half of the word, and the high half is zeroed::
+ *
+ * 0000 0000 0000 .... 0000 0000 ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
*
* Any even bits set in the input are ignored.
*
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
index a684c1a7a2..bd67468e5e 100644
--- a/include/qemu/bswap.h
+++ b/include/qemu/bswap.h
@@ -1,83 +1,54 @@
#ifndef BSWAP_H
#define BSWAP_H
-#include "fpu/softfloat-types.h"
+#undef bswap16
+#define bswap16(_x) __builtin_bswap16(_x)
+#undef bswap32
+#define bswap32(_x) __builtin_bswap32(_x)
+#undef bswap64
+#define bswap64(_x) __builtin_bswap64(_x)
-#ifdef CONFIG_MACHINE_BSWAP_H
-# include <sys/endian.h>
-# include <machine/bswap.h>
-#elif defined(__FreeBSD__)
-# include <sys/endian.h>
-#elif defined(CONFIG_BYTESWAP_H)
-# include <byteswap.h>
-
-static inline uint16_t bswap16(uint16_t x)
-{
- return bswap_16(x);
-}
-
-static inline uint32_t bswap32(uint32_t x)
+static inline uint32_t bswap24(uint32_t x)
{
- return bswap_32(x);
+ return (((x & 0x000000ffU) << 16) |
+ ((x & 0x0000ff00U) << 0) |
+ ((x & 0x00ff0000U) >> 16));
}
-static inline uint64_t bswap64(uint64_t x)
-{
- return bswap_64(x);
-}
-# else
-static inline uint16_t bswap16(uint16_t x)
-{
- return (((x & 0x00ff) << 8) |
- ((x & 0xff00) >> 8));
-}
-
-static inline uint32_t bswap32(uint32_t x)
-{
- return (((x & 0x000000ffU) << 24) |
- ((x & 0x0000ff00U) << 8) |
- ((x & 0x00ff0000U) >> 8) |
- ((x & 0xff000000U) >> 24));
-}
-
-static inline uint64_t bswap64(uint64_t x)
+static inline void bswap16s(uint16_t *s)
{
- return (((x & 0x00000000000000ffULL) << 56) |
- ((x & 0x000000000000ff00ULL) << 40) |
- ((x & 0x0000000000ff0000ULL) << 24) |
- ((x & 0x00000000ff000000ULL) << 8) |
- ((x & 0x000000ff00000000ULL) >> 8) |
- ((x & 0x0000ff0000000000ULL) >> 24) |
- ((x & 0x00ff000000000000ULL) >> 40) |
- ((x & 0xff00000000000000ULL) >> 56));
+ *s = __builtin_bswap16(*s);
}
-#endif /* ! CONFIG_MACHINE_BSWAP_H */
-static inline void bswap16s(uint16_t *s)
+static inline void bswap24s(uint32_t *s)
{
- *s = bswap16(*s);
+ *s = bswap24(*s & 0x00ffffffU);
}
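
The 24-bit helpers reverse only the three low-order bytes, which suits the 3-byte fields found in storage protocols. A small hedged example, values invented:

    #include <assert.h>
    #include <stdint.h>
    #include "qemu/bswap.h"   /* assumed include path within a QEMU tree */

    static void bswap24_demo(void)
    {
        assert(bswap24(0x00123456u) == 0x00563412u);  /* bits 24-31 ignored */

        uint32_t v = 0x00abcdefu;
        bswap24s(&v);                 /* in-place variant, masks to 24 bits */
        assert(v == 0x00efcdabu);
    }
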
static inline void bswap32s(uint32_t *s)
{
- *s = bswap32(*s);
+ *s = __builtin_bswap32(*s);
}
static inline void bswap64s(uint64_t *s)
{
- *s = bswap64(*s);
+ *s = __builtin_bswap64(*s);
}
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
-#define le_bswap(v, size) glue(bswap, size)(v)
+#define le_bswap(v, size) glue(__builtin_bswap, size)(v)
+#define le_bswap24(v) bswap24(v)
#define be_bswaps(v, size)
-#define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
+#define le_bswaps(p, size) \
+ do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#else
#define le_bswap(v, size) (v)
-#define be_bswap(v, size) glue(bswap, size)(v)
+#define le_bswap24(v) (v)
+#define be_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswaps(v, size)
-#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
+#define be_bswaps(p, size) \
+ do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#endif
/**
@@ -167,18 +138,21 @@ CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
-/* len must be one of 1, 2, 4 */
-static inline uint32_t qemu_bswap_len(uint32_t value, int len)
-{
- return bswap32(value) >> (32 - 8 * len);
-}
-
/*
- * Same as cpu_to_le{16,32}, except that gcc will figure the result is
+ * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
* a compile-time constant if you pass in a constant. So this can be
* used to initialize static variables.
*/
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
+# define const_le64(_x) \
+ ((((_x) & 0x00000000000000ffULL) << 56) | \
+ (((_x) & 0x000000000000ff00ULL) << 40) | \
+ (((_x) & 0x0000000000ff0000ULL) << 24) | \
+ (((_x) & 0x00000000ff000000ULL) << 8) | \
+ (((_x) & 0x000000ff00000000ULL) >> 8) | \
+ (((_x) & 0x0000ff0000000000ULL) >> 24) | \
+ (((_x) & 0x00ff000000000000ULL) >> 40) | \
+ (((_x) & 0xff00000000000000ULL) >> 56))
# define const_le32(_x) \
((((_x) & 0x000000ffU) << 24) | \
(((_x) & 0x0000ff00U) << 8) | \
@@ -188,76 +162,19 @@ static inline uint32_t qemu_bswap_len(uint32_t value, int len)
((((_x) & 0x00ff) << 8) | \
(((_x) & 0xff00) >> 8))
#else
+# define const_le64(_x) (_x)
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
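
Like const_le16()/const_le32(), the new const_le64() folds to a compile-time constant, so it can appear in static initializers even on big-endian hosts. A hedged one-liner (the variable name is invented):

    /* Stored little-endian in memory regardless of host byte order. */
    static const uint64_t magic_le64 = const_le64(0x0123456789abcdefULL);
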
-/* Unions for reinterpreting between floats and integers. */
-
-typedef union {
- float32 f;
- uint32_t l;
-} CPU_FloatU;
-
-typedef union {
- float64 d;
-#if defined(HOST_WORDS_BIGENDIAN)
- struct {
- uint32_t upper;
- uint32_t lower;
- } l;
-#else
- struct {
- uint32_t lower;
- uint32_t upper;
- } l;
-#endif
- uint64_t ll;
-} CPU_DoubleU;
-
-typedef union {
- floatx80 d;
- struct {
- uint64_t lower;
- uint16_t upper;
- } l;
-} CPU_LDoubleU;
-
-typedef union {
- float128 q;
-#if defined(HOST_WORDS_BIGENDIAN)
- struct {
- uint32_t upmost;
- uint32_t upper;
- uint32_t lower;
- uint32_t lowest;
- } l;
- struct {
- uint64_t upper;
- uint64_t lower;
- } ll;
-#else
- struct {
- uint32_t lowest;
- uint32_t lower;
- uint32_t upper;
- uint32_t upmost;
- } l;
- struct {
- uint64_t lower;
- uint64_t upper;
- } ll;
-#endif
-} CPU_QuadU;
-
/* unaligned/endian-independent pointer access */
/*
* the generic syntax is:
*
- * load: ld{type}{sign}{size}{endian}_p(ptr)
+ * load: ld{type}{sign}{size}_{endian}_p(ptr)
*
- * store: st{type}{size}{endian}_p(ptr, val)
+ * store: st{type}{size}_{endian}_p(ptr, val)
*
* Note there are small differences with the softmmu access API!
*
@@ -273,6 +190,7 @@ typedef union {
* size is:
* b: 8 bits
* w: 16 bits
+ * 24: 24 bits
* l: 32 bits
* q: 64 bits
*
@@ -293,10 +211,10 @@ typedef union {
*
* For cases where the size to be used is not fixed at compile time,
* there are
- * stn{endian}_p(ptr, sz, val)
+ * stn_{endian}_p(ptr, sz, val)
* which stores @val to @ptr as an @endian-order number @sz bytes in size
* and
- * ldn{endian}_p(ptr, sz)
+ * ldn_{endian}_p(ptr, sz)
* which loads @sz bytes from @ptr as an unsigned @endian-order number
* and returns it in a uint64_t.
*/
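
Putting the naming scheme together, a hedged sketch of the accessors in use (buffer contents and values invented):

    #include <assert.h>
    #include <stdint.h>
    #include "qemu/bswap.h"   /* assumed include path within a QEMU tree */

    static void ldst_demo(void)
    {
        uint8_t buf[16] = { 0 };

        stw_le_p(buf, 0x1234);            /* st + w (16 bit) + _le_p */
        stl_be_p(buf + 2, 0xdeadbeef);    /* st + l (32 bit) + _be_p */
        stn_le_p(buf + 6, 3, 0xabcdef);   /* run-time size: 3 bytes, LE */

        int v16 = lduw_le_p(buf);             /* ld + u (unsigned) + w + _le_p */
        uint64_t v24 = ldn_le_p(buf + 6, 3);  /* run-time size load */

        assert(v16 == 0x1234 && v24 == 0xabcdef);
    }
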
@@ -316,51 +234,62 @@ static inline void stb_p(void *ptr, uint8_t v)
*(uint8_t *)ptr = v;
}
-/* Any compiler worth its salt will turn these memcpy into native unaligned
- operations. Thus we don't need to play games with packed attributes, or
- inline byte-by-byte stores. */
+/*
+ * Any compiler worth its salt will turn these memcpy into native unaligned
+ * operations. Thus we don't need to play games with packed attributes, or
+ * inline byte-by-byte stores.
+ * Some compilation environments (eg some fortify-source implementations)
+ * may intercept memcpy() in a way that defeats the compiler optimization,
+ * though, so we use __builtin_memcpy() to give ourselves the best chance
+ * of good performance.
+ */
static inline int lduw_he_p(const void *ptr)
{
uint16_t r;
- memcpy(&r, ptr, sizeof(r));
+ __builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
static inline int ldsw_he_p(const void *ptr)
{
int16_t r;
- memcpy(&r, ptr, sizeof(r));
+ __builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
static inline void stw_he_p(void *ptr, uint16_t v)
{
- memcpy(ptr, &v, sizeof(v));
+ __builtin_memcpy(ptr, &v, sizeof(v));
+}
+
+static inline void st24_he_p(void *ptr, uint32_t v)
+{
+ __builtin_memcpy(ptr, &v, 3);
}
static inline int ldl_he_p(const void *ptr)
{
int32_t r;
- memcpy(&r, ptr, sizeof(r));
+ __builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
static inline void stl_he_p(void *ptr, uint32_t v)
{
- memcpy(ptr, &v, sizeof(v));
+ __builtin_memcpy(ptr, &v, sizeof(v));
}
static inline uint64_t ldq_he_p(const void *ptr)
{
uint64_t r;
- memcpy(&r, ptr, sizeof(r));
+ __builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
static inline void stq_he_p(void *ptr, uint64_t v)
{
- memcpy(ptr, &v, sizeof(v));
+ __builtin_memcpy(ptr, &v, sizeof(v));
}
static inline int lduw_le_p(const void *ptr)
@@ -388,6 +317,11 @@ static inline void stw_le_p(void *ptr, uint16_t v)
stw_he_p(ptr, le_bswap(v, 16));
}
+static inline void st24_le_p(void *ptr, uint32_t v)
+{
+ st24_he_p(ptr, le_bswap24(v));
+}
+
static inline void stl_le_p(void *ptr, uint32_t v)
{
stl_he_p(ptr, le_bswap(v, 32));
@@ -398,36 +332,6 @@ static inline void stq_le_p(void *ptr, uint64_t v)
stq_he_p(ptr, le_bswap(v, 64));
}
-/* float access */
-
-static inline float32 ldfl_le_p(const void *ptr)
-{
- CPU_FloatU u;
- u.l = ldl_le_p(ptr);
- return u.f;
-}
-
-static inline void stfl_le_p(void *ptr, float32 v)
-{
- CPU_FloatU u;
- u.f = v;
- stl_le_p(ptr, u.l);
-}
-
-static inline float64 ldfq_le_p(const void *ptr)
-{
- CPU_DoubleU u;
- u.ll = ldq_le_p(ptr);
- return u.d;
-}
-
-static inline void stfq_le_p(void *ptr, float64 v)
-{
- CPU_DoubleU u;
- u.d = v;
- stq_le_p(ptr, u.ll);
-}
-
static inline int lduw_be_p(const void *ptr)
{
return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
@@ -463,36 +367,6 @@ static inline void stq_be_p(void *ptr, uint64_t v)
stq_he_p(ptr, be_bswap(v, 64));
}
-/* float access */
-
-static inline float32 ldfl_be_p(const void *ptr)
-{
- CPU_FloatU u;
- u.l = ldl_be_p(ptr);
- return u.f;
-}
-
-static inline void stfl_be_p(void *ptr, float32 v)
-{
- CPU_FloatU u;
- u.f = v;
- stl_be_p(ptr, u.l);
-}
-
-static inline float64 ldfq_be_p(const void *ptr)
-{
- CPU_DoubleU u;
- u.ll = ldq_be_p(ptr);
- return u.d;
-}
-
-static inline void stfq_be_p(void *ptr, float64 v)
-{
- CPU_DoubleU u;
- u.d = v;
- stq_be_p(ptr, u.ll);
-}
-
static inline unsigned long leul_to_cpu(unsigned long v)
{
#if HOST_LONG_BITS == 32
diff --git a/include/qemu/buffer.h b/include/qemu/buffer.h
index b2ead1f051..e95dfd696c 100644
--- a/include/qemu/buffer.h
+++ b/include/qemu/buffer.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,7 +21,6 @@
#ifndef QEMU_BUFFER_H
#define QEMU_BUFFER_H
-#include "qemu-common.h"
typedef struct Buffer Buffer;
@@ -50,7 +49,7 @@ struct Buffer {
* to identify in debug traces.
*/
void buffer_init(Buffer *buffer, const char *name, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/**
* buffer_shrink:
diff --git a/include/qemu/cacheflush.h b/include/qemu/cacheflush.h
new file mode 100644
index 0000000000..ae20bcda73
--- /dev/null
+++ b/include/qemu/cacheflush.h
@@ -0,0 +1,35 @@
+/*
+ * Flush the host cpu caches.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CACHEFLUSH_H
+#define QEMU_CACHEFLUSH_H
+
+/**
+ * flush_idcache_range:
+ * @rx: instruction address
+ * @rw: data address
+ * @len: length to flush
+ *
+ * Flush @len bytes of the data cache at @rw and the icache at @rx
+ * to bring them in sync. The two addresses may be different virtual
+ * mappings of the same physical page(s).
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
+
+static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
+{
+ /* icache is coherent and does not require flushing. */
+}
+
+#else
+
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len);
+
+#endif
+
+#endif /* QEMU_CACHEFLUSH_H */
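
For context, a hedged sketch of the typical caller: a JIT-style user that writes generated code through a read-write mapping and executes it through a read-execute alias. The function and parameter names are invented:

    #include <stddef.h>
    #include <stdint.h>
    #include "qemu/cacheflush.h"   /* header added above */

    /* Make freshly generated code at the RX alias safe to execute. */
    static void finish_codegen(void *rx_ptr, void *rw_ptr, size_t code_size)
    {
        flush_idcache_range((uintptr_t)rx_ptr, (uintptr_t)rw_ptr, code_size);
    }
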
diff --git a/include/qemu/cacheinfo.h b/include/qemu/cacheinfo.h
new file mode 100644
index 0000000000..019a157ea0
--- /dev/null
+++ b/include/qemu/cacheinfo.h
@@ -0,0 +1,21 @@
+/*
+ * QEMU host cacheinfo information
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_CACHEINFO_H
+#define QEMU_CACHEINFO_H
+
+/*
+ * These variables represent our best guess at the host icache and
+ * dcache line sizes, expressed both as the size in bytes and as the
+ * base-2 log of the size in bytes. They are initialized at startup
+ * (via an attribute 'constructor' function).
+ */
+extern int qemu_icache_linesize;
+extern int qemu_icache_linesize_log;
+extern int qemu_dcache_linesize;
+extern int qemu_dcache_linesize_log;
+
+#endif
diff --git a/include/qemu/chardev_open.h b/include/qemu/chardev_open.h
new file mode 100644
index 0000000000..64e8fcfdcb
--- /dev/null
+++ b/include/qemu/chardev_open.h
@@ -0,0 +1,16 @@
+/*
+ * QEMU Chardev Helper
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CHARDEV_OPEN_H
+#define QEMU_CHARDEV_OPEN_H
+
+int open_cdev(const char *devpath, dev_t cdev);
+#endif
diff --git a/include/qemu/clang-tsa.h b/include/qemu/clang-tsa.h
new file mode 100644
index 0000000000..ba06fb8c92
--- /dev/null
+++ b/include/qemu/clang-tsa.h
@@ -0,0 +1,114 @@
+#ifndef CLANG_TSA_H
+#define CLANG_TSA_H
+
+/*
+ * Copyright 2018 Jarkko Hietaniemi <jhi@iki.fi>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without
+ * limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+ *
+ * TSA is available since clang 3.6-ish.
+ */
+#ifdef __clang__
+# define TSA(x) __attribute__((x))
+#else
+# define TSA(x) /* No TSA, make TSA attributes no-ops. */
+#endif
+
+/* TSA_CAPABILITY() is used to annotate typedefs:
+ *
+ * typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex;
+ */
+#define TSA_CAPABILITY(x) TSA(capability(x))
+
+/* TSA_GUARDED_BY() is used to annotate global variables,
+ * the data is guarded:
+ *
+ * Foo foo TSA_GUARDED_BY(mutex);
+ */
+#define TSA_GUARDED_BY(x) TSA(guarded_by(x))
+
+/* TSA_PT_GUARDED_BY() is used to annotate global pointers, the data
+ * behind the pointer is guarded.
+ *
+ * Foo* ptr TSA_PT_GUARDED_BY(mutex);
+ */
+#define TSA_PT_GUARDED_BY(x) TSA(pt_guarded_by(x))
+
+/* The TSA_REQUIRES() is used to annotate functions: the caller of the
+ * function MUST hold the resource, the function will NOT release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_REQUIRES(mutex);
+ */
+#define TSA_REQUIRES(...) TSA(requires_capability(__VA_ARGS__))
+#define TSA_REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__))
+
+/* TSA_EXCLUDES() is used to annotate functions: the caller of the
+ * function MUST NOT hold the resource; the function first acquires the
+ * resource, and then releases it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_EXCLUDES(mutex);
+ */
+#define TSA_EXCLUDES(...) TSA(locks_excluded(__VA_ARGS__))
+
+/* TSA_ACQUIRE() is used to annotate functions: the caller of the
+ * function MUST NOT hold the resource, the function will acquire the
+ * resource, but NOT release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_ACQUIRE(mutex);
+ */
+#define TSA_ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__))
+#define TSA_ACQUIRE_SHARED(...) TSA(acquire_shared_capability(__VA_ARGS__))
+
+/* TSA_RELEASE() is used to annotate functions: the caller of the
+ * function MUST hold the resource, but the function will then release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_RELEASE(mutex);
+ */
+#define TSA_RELEASE(...) TSA(release_capability(__VA_ARGS__))
+#define TSA_RELEASE_SHARED(...) TSA(release_shared_capability(__VA_ARGS__))
+
+/* TSA_NO_TSA is used to annotate functions. Use only when you need to.
+ *
+ * void Foo(void) TSA_NO_TSA;
+ */
+#define TSA_NO_TSA TSA(no_thread_safety_analysis)
+
+/*
+ * TSA_ASSERT() is used to annotate functions: This function will assert that
+ * the lock is held. When it returns, the caller of the function is assumed to
+ * already hold the resource.
+ *
+ * More than one mutex may be specified, comma-separated.
+ */
+#define TSA_ASSERT(...) TSA(assert_capability(__VA_ARGS__))
+#define TSA_ASSERT_SHARED(...) TSA(assert_shared_capability(__VA_ARGS__))
+
+#endif /* #ifndef CLANG_TSA_H */
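
A hedged usage sketch of the annotations above; the lock, the guarded counter, and the helper functions are invented for illustration:

    #include <pthread.h>
    #include "qemu/clang-tsa.h"   /* header added above */

    typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex;

    static tsa_mutex lock;
    static int counter TSA_GUARDED_BY(lock);

    /* Declarations only; clang's analysis checks call sites against them. */
    void take_lock(void) TSA_ACQUIRE(lock);
    void drop_lock(void) TSA_RELEASE(lock);

    /* Callers must already hold 'lock'; the attribute lives on the prototype. */
    void bump(void) TSA_REQUIRES(lock);

    void bump(void)
    {
        counter++;
    }
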
diff --git a/include/qemu/co-shared-resource.h b/include/qemu/co-shared-resource.h
new file mode 100644
index 0000000000..78ca5850f8
--- /dev/null
+++ b/include/qemu/co-shared-resource.h
@@ -0,0 +1,69 @@
+/*
+ * Helper functionality for distributing a fixed total amount of
+ * an abstract resource among multiple coroutines.
+ *
+ * Copyright (c) 2019 Virtuozzo International GmbH
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_CO_SHARED_RESOURCE_H
+#define QEMU_CO_SHARED_RESOURCE_H
+
+/* Accesses to co-shared-resource API are thread-safe */
+typedef struct SharedResource SharedResource;
+
+/*
+ * Create SharedResource structure
+ *
+ * @total: total amount of some resource to be shared between clients
+ */
+SharedResource *shres_create(uint64_t total);
+
+/*
+ * Release SharedResource structure
+ *
+ * This function may only be called once everything allocated by all
+ * clients has been deallocated.
+ */
+void shres_destroy(SharedResource *s);
+
+/*
+ * Try to allocate an amount of @n. Return true on success, and false
+ * if there is too little left of the collective resource to fulfill
+ * the request.
+ */
+bool co_try_get_from_shres(SharedResource *s, uint64_t n);
+
+/*
+ * Allocate an amount of @n, and, if necessary, yield until
+ * that becomes possible.
+ */
+void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n);
+
+/*
+ * Deallocate an amount of @n. The total amount allocated by a caller
+ * does not need to be deallocated/released with a single call, but may
+ * be split over several calls. For example, get(4), get(3), and then
+ * put(5), put(2).
+ */
+void coroutine_fn co_put_to_shres(SharedResource *s, uint64_t n);
+
+
+#endif /* QEMU_CO_SHARED_RESOURCE_H */
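
A hedged sketch of the intended pattern, bounding the buffer memory used by in-flight requests; the budget size and function names are invented:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "qemu/co-shared-resource.h"

    static SharedResource *mem_budget;   /* created once at setup time */

    static void init_budget(void)
    {
        mem_budget = shres_create(64 * 1024 * 1024);   /* invented 64 MiB cap */
    }

    static void coroutine_fn do_request(uint64_t bytes)
    {
        co_get_from_shres(mem_budget, bytes);  /* may yield until space frees */
        /* ... perform the I/O using at most @bytes of buffer memory ... */
        co_put_to_shres(mem_budget, bytes);
    }
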
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index 261842beae..c797f0d457 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -7,28 +7,22 @@
#ifndef COMPILER_H
#define COMPILER_H
+#define HOST_BIG_ENDIAN (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+
+/* HOST_LONG_BITS is the size of a native pointer in bits. */
+#define HOST_LONG_BITS (__SIZEOF_POINTER__ * 8)
+
#if defined __clang_analyzer__ || defined __COVERITY__
#define QEMU_STATIC_ANALYSIS 1
#endif
-/*----------------------------------------------------------------------------
-| The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler.
-| The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h.
-*----------------------------------------------------------------------------*/
-#if defined(__GNUC__) && defined(__GNUC_MINOR__)
-# define QEMU_GNUC_PREREQ(maj, min) \
- ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#ifdef __cplusplus
+#define QEMU_EXTERN_C extern "C"
#else
-# define QEMU_GNUC_PREREQ(maj, min) 0
+#define QEMU_EXTERN_C extern
#endif
-#define QEMU_NORETURN __attribute__ ((__noreturn__))
-
-#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-
-#define QEMU_SENTINEL __attribute__((sentinel))
-
-#if defined(_WIN32)
+#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define QEMU_PACKED __attribute__((gcc_struct, packed))
#else
# define QEMU_PACKED __attribute__((packed))
@@ -39,15 +33,14 @@
#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
-#define stringify(s) tostring(s)
-#define tostring(s) #s
+#define stringify(s) tostring(s)
+#define tostring(s) #s
#endif
-#ifndef likely
-#if __GNUC__ < 3
-#define __builtin_expect(x, n) (x)
-#endif
+/* Expands into an identifier stemN, where N is another number each time */
+#define MAKE_IDENTFIER(stem) glue(stem, __COUNTER__)
+#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
@@ -60,15 +53,18 @@
#define sizeof_field(type, field) sizeof(((type *)0)->field)
+/*
+ * Calculate the number of bytes up to and including the given 'field' of
+ * 'container'.
+ */
+#define endof(container, field) \
+ (offsetof(container, field) + sizeof_field(container, field))
+
/* Convert from a base type to a parent type, with compile time checking. */
-#ifdef __GNUC__
#define DO_UPCAST(type, field, dev) ( __extension__ ( { \
char __attribute__((unused)) offset_must_be_zero[ \
-offsetof(type, field)]; \
container_of(dev, type, field);}))
-#else
-#define DO_UPCAST(type, field, dev) container_of(dev, type, field)
-#endif
#define typeof_field(type, field) typeof(((type *)0)->field)
#define type_check(t1,t2) ((t1*)0 - (t2*)0)
@@ -78,39 +74,23 @@
int:(x) ? -1 : 1; \
}
-/* QEMU_BUILD_BUG_MSG() emits the message given if _Static_assert is
- * supported; otherwise, it will be omitted from the compiler error
- * message (but as it remains present in the source code, it can still
- * be useful when debugging). */
-#if defined(CONFIG_STATIC_ASSERT)
#define QEMU_BUILD_BUG_MSG(x, msg) _Static_assert(!(x), msg)
-#elif defined(__COUNTER__)
-#define QEMU_BUILD_BUG_MSG(x, msg) typedef QEMU_BUILD_BUG_ON_STRUCT(x) \
- glue(qemu_build_bug_on__, __COUNTER__) __attribute__((unused))
-#else
-#define QEMU_BUILD_BUG_MSG(x, msg)
-#endif
#define QEMU_BUILD_BUG_ON(x) QEMU_BUILD_BUG_MSG(x, "not expecting: " #x)
#define QEMU_BUILD_BUG_ON_ZERO(x) (sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)) - \
sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)))
-#if defined __GNUC__
-# if !QEMU_GNUC_PREREQ(4, 4)
- /* gcc versions before 4.4.x don't support gnu_printf, so use printf. */
-# define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m)))
-# else
- /* Use gnu_printf when supported (qemu uses standard format strings). */
-# define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m)))
-# if defined(_WIN32)
- /* Map __printf__ to __gnu_printf__ because we want standard format strings
- * even when MinGW or GLib include files use __printf__. */
-# define __printf__ __gnu_printf__
-# endif
-# endif
-#else
-#define GCC_FMT_ATTR(n, m)
+#if !defined(__clang__) && defined(_WIN32)
+/*
+ * Map __printf__ to __gnu_printf__ because we want standard format strings even
+ * when MinGW or GLib include files use __printf__.
+ */
+# define __printf__ __gnu_printf__
+#endif
+
+#ifndef __has_warning
+#define __has_warning(x) 0 /* compatibility with non-clang compilers */
#endif
#ifndef __has_feature
@@ -129,6 +109,14 @@
#define __has_attribute(x) 0 /* compatibility with older GCC */
#endif
+#if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)
+# define QEMU_SANITIZE_ADDRESS 1
+#endif
+
+#if defined(__SANITIZE_THREAD__) || __has_feature(thread_sanitizer)
+# define QEMU_SANITIZE_THREAD 1
+#endif
+
/*
* GCC doesn't provide __has_attribute() until GCC 5, but we know all the GCC
* versions we support have the "flatten" attribute. Clang may not have the
@@ -151,44 +139,92 @@
# define QEMU_ERROR(X)
#endif
-/* Implement C11 _Generic via GCC builtins. Example:
- *
- * QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
- *
- * The first argument is the discriminator. The last is the default value.
- * The middle ones are tuples in "(type, expansion)" format.
+/*
+ * The nonstring variable attribute specifies that an object or member
+ * declaration with type array of char or pointer to char is intended
+ * to store character arrays that do not necessarily contain a terminating
+ * NUL character. This is useful in detecting uses of such arrays or pointers
+ * with functions that expect NUL-terminated strings, and to avoid warnings
+ * when such an array or pointer is used as an argument to a bounded string
+ * manipulation function such as strncpy.
*/
+#if __has_attribute(nonstring)
+# define QEMU_NONSTRING __attribute__((nonstring))
+#else
+# define QEMU_NONSTRING
+#endif
-/* First, find out the number of generic cases. */
-#define QEMU_GENERIC(x, ...) \
- QEMU_GENERIC_(typeof(x), __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+/*
+ * Forced inlining may be desired to encourage constant propagation
+ * of function parameters. However, it can also make debugging harder,
+ * so disable it for a non-optimizing build.
+ */
+#if defined(__OPTIMIZE__)
+#define QEMU_ALWAYS_INLINE __attribute__((always_inline))
+#else
+#define QEMU_ALWAYS_INLINE
+#endif
-/* There will be extra arguments, but they are not used. */
-#define QEMU_GENERIC_(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, count, ...) \
- QEMU_GENERIC##count(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+/**
+ * In most cases, normal "fallthrough" comments are good enough for
+ * switch-case statements, but sometimes the compiler has problems
+ * with those. In that case you can use QEMU_FALLTHROUGH instead.
+ */
+#if __has_attribute(fallthrough)
+# define QEMU_FALLTHROUGH __attribute__((fallthrough))
+#else
+# define QEMU_FALLTHROUGH do {} while (0) /* fallthrough */
+#endif
-/* Two more helper macros, this time to extract items from a parenthesized
- * list.
+#ifdef CONFIG_CFI
+/*
+ * If CFI is enabled, use an attribute to disable cfi-icall on the following
+ * function
*/
-#define QEMU_FIRST_(a, b) a
-#define QEMU_SECOND_(a, b) b
-
-/* ... and a final one for the common part of the "recursion". */
-#define QEMU_GENERIC_IF(x, type_then, else_) \
- __builtin_choose_expr(__builtin_types_compatible_p(x, \
- QEMU_FIRST_ type_then), \
- QEMU_SECOND_ type_then, else_)
-
-/* CPP poor man's "recursion". */
-#define QEMU_GENERIC1(x, a0, ...) (a0)
-#define QEMU_GENERIC2(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC1(x, __VA_ARGS__))
-#define QEMU_GENERIC3(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC2(x, __VA_ARGS__))
-#define QEMU_GENERIC4(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC3(x, __VA_ARGS__))
-#define QEMU_GENERIC5(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC4(x, __VA_ARGS__))
-#define QEMU_GENERIC6(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC5(x, __VA_ARGS__))
-#define QEMU_GENERIC7(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC6(x, __VA_ARGS__))
-#define QEMU_GENERIC8(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC7(x, __VA_ARGS__))
-#define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
-#define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
+#define QEMU_DISABLE_CFI __attribute__((no_sanitize("cfi-icall")))
+#else
+/* If CFI is not enabled, use an empty define to not change the behavior */
+#define QEMU_DISABLE_CFI
+#endif
+
+/*
+ * Apple clang version 14 has a bug in its __builtin_subcll(); define
+ * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it.
+ * When a version of Apple clang which has this bug fixed is released
+ * we can add an upper bound to this check.
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1631
+ * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details.
+ * The bug never made it into any upstream LLVM releases, only Apple ones.
+ */
+#if defined(__apple_build_version__) && __clang_major__ >= 14
+#define BUILTIN_SUBCLL_BROKEN
+#endif
+
+#if __has_attribute(annotate)
+#define QEMU_ANNOTATE(x) __attribute__((annotate(x)))
+#else
+#define QEMU_ANNOTATE(x)
+#endif
+
+#if __has_attribute(used)
+# define QEMU_USED __attribute__((used))
+#else
+# define QEMU_USED
+#endif
+
+/*
+ * Ugly CPP trick that is like "defined FOO", but also works in C
+ * code. Useful to replace #ifdef with "if" statements; assumes
+ * the symbol was defined with Meson's "config.set()", so it is empty
+ * if defined.
+ */
+#define IS_ENABLED(x) IS_EMPTY(x)
+
+#define IS_EMPTY_JUNK_ junk,
+#define IS_EMPTY(value) IS_EMPTY_(IS_EMPTY_JUNK_##value)
+
+/* Expands to either SECOND_ARG(junk, 1, 0) or SECOND_ARG(IS_EMPTY_JUNK_CONFIG_FOO 1, 0) */
+#define SECOND_ARG(first, second, ...) second
+#define IS_EMPTY_(junk_maybecomma) SECOND_ARG(junk_maybecomma 1, 0)
#endif /* COMPILER_H */
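
For illustration, a hedged sketch of IS_ENABLED() replacing an #ifdef so that both branches are always compiled and type-checked; CONFIG_FOO and the two helper functions are invented:

    #include "qemu/compiler.h"

    void enable_foo_fast_path(void);   /* hypothetical */
    void use_generic_path(void);       /* hypothetical */

    static void setup(void)
    {
        /* Expands to 1 only if CONFIG_FOO was defined (empty) by Meson. */
        if (IS_ENABLED(CONFIG_FOO)) {
            enable_foo_fast_path();
        } else {
            use_generic_path();
        }
    }
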
diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h
index d74f920152..51b310fa3b 100644
--- a/include/qemu/config-file.h
+++ b/include/qemu/config-file.h
@@ -1,24 +1,31 @@
#ifndef QEMU_CONFIG_FILE_H
#define QEMU_CONFIG_FILE_H
+typedef void QEMUConfigCB(const char *group, QDict *qdict, void *opaque, Error **errp);
+void qemu_load_module_for_opts(const char *group);
QemuOptsList *qemu_find_opts(const char *group);
QemuOptsList *qemu_find_opts_err(const char *group, Error **errp);
QemuOpts *qemu_find_opts_singleton(const char *group);
+extern QemuOptsList *vm_config_groups[];
+extern QemuOptsList *drive_config_groups[];
+
void qemu_add_opts(QemuOptsList *list);
void qemu_add_drive_opts(QemuOptsList *list);
-int qemu_set_option(const char *str);
int qemu_global_option(const char *str);
-void qemu_config_write(FILE *fp);
-int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname);
+int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname,
+ Error **errp);
+
+/* A default callback for qemu_read_config_file(). */
+void qemu_config_do_parse(const char *group, QDict *qdict, void *opaque, Error **errp);
-int qemu_read_config_file(const char *filename);
+int qemu_read_config_file(const char *filename, QEMUConfigCB *f, Error **errp);
/* Parse QDict options as a replacement for a config file (allowing multiple
enumerated (0..(n-1)) configuration "sections") */
-void qemu_config_parse_qdict(QDict *options, QemuOptsList **lists,
+bool qemu_config_parse_qdict(QDict *options, QemuOptsList **lists,
Error **errp);
#endif /* QEMU_CONFIG_FILE_H */
diff --git a/include/qemu/coroutine-core.h b/include/qemu/coroutine-core.h
new file mode 100644
index 0000000000..503bad6e0e
--- /dev/null
+++ b/include/qemu/coroutine-core.h
@@ -0,0 +1,154 @@
+/*
+ * QEMU coroutine implementation
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Kevin Wolf <kwolf@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_COROUTINE_CORE_H
+#define QEMU_COROUTINE_CORE_H
+
+/**
+ * Coroutines are a mechanism for stack switching and can be used for
+ * cooperative userspace threading. These functions provide a simple but
+ * useful flavor of coroutines that is suitable for writing sequential code,
+ * rather than callbacks, for operations that need to give up control while
+ * waiting for events to complete.
+ *
+ * These functions are re-entrant and may be used outside the BQL.
+ *
+ * Functions that execute in coroutine context cannot be called
+ * directly from normal functions. Use @coroutine_fn to mark such
+ * functions. For example:
+ *
+ * static void coroutine_fn foo(void) {
+ * ....
+ * }
+ *
+ * In the future it would be nice to have the compiler or a static
+ * checker catch misuse of such functions. This annotation might make
+ * it possible and in the meantime it serves as documentation.
+ */
+
+/**
+ * Mark a function that executes in coroutine context
+ *
+ *
+ * Functions that execute in coroutine context cannot be called
+ * directly from normal functions. Use @coroutine_fn to mark such
+ * functions. For example:
+ *
+ * static void coroutine_fn foo(void) {
+ * ....
+ * }
+ *
+ * In the future it would be nice to have the compiler or a static
+ * checker catch misuse of such functions. This annotation might make
+ * it possible and in the meantime it serves as documentation.
+ */
+
+typedef struct Coroutine Coroutine;
+typedef struct CoMutex CoMutex;
+
+/**
+ * Coroutine entry point
+ *
+ * When the coroutine is entered for the first time, opaque is passed in as an
+ * argument.
+ *
+ * When this function returns, the coroutine is destroyed automatically and
+ * execution continues in the caller who last entered the coroutine.
+ */
+typedef void coroutine_fn CoroutineEntry(void *opaque);
+
+/**
+ * Create a new coroutine
+ *
+ * Use qemu_coroutine_enter() to actually transfer control to the coroutine.
+ * The opaque argument is passed as the argument to the entry point.
+ */
+Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque);
+
+/**
+ * Transfer control to a coroutine
+ */
+void qemu_coroutine_enter(Coroutine *coroutine);
+
+/**
+ * Transfer control to a coroutine if it's not active (i.e. part of the call
+ * stack of the running coroutine). Otherwise, do nothing.
+ */
+void qemu_coroutine_enter_if_inactive(Coroutine *co);
+
+/**
+ * Transfer control to a coroutine and associate it with ctx
+ */
+void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co);
+
+/**
+ * Transfer control back to a coroutine's caller
+ *
+ * This function does not return until the coroutine is re-entered using
+ * qemu_coroutine_enter().
+ */
+void coroutine_fn qemu_coroutine_yield(void);
+
+/**
+ * Get the AioContext of the given coroutine
+ */
+AioContext *qemu_coroutine_get_aio_context(Coroutine *co);
+
+/**
+ * Get the currently executing coroutine
+ */
+Coroutine *qemu_coroutine_self(void);
+
+/**
+ * Return whether or not currently inside a coroutine
+ *
+ * This can be used to write functions that work both when in coroutine context
+ * and when not in coroutine context. Note that such functions cannot use the
+ * coroutine_fn annotation since they work outside coroutine context.
+ */
+bool qemu_in_coroutine(void);
+
+/**
+ * Return true if the coroutine is currently entered
+ *
+ * A coroutine is "entered" if it has not yielded from the current
+ * qemu_coroutine_enter() call used to run it. This does not mean that the
+ * coroutine is currently executing code since it may have transferred control
+ * to another coroutine using qemu_coroutine_enter().
+ *
+ * When several coroutines enter each other there may be no way to know which
+ * ones have already been entered. In such situations this function can be
+ * used to avoid recursively entering coroutines.
+ */
+bool qemu_coroutine_entered(Coroutine *co);
+
+/**
+ * Initialises a CoMutex. This must be called before any other operation is used
+ * on the CoMutex.
+ */
+void qemu_co_mutex_init(CoMutex *mutex);
+
+/**
+ * Locks the mutex. If the lock cannot be taken immediately, control is
+ * transferred to the caller of the current coroutine.
+ */
+void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
+
+/**
+ * Unlocks the mutex and schedules the next coroutine that was waiting for this
+ * lock to be run.
+ */
+void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
+
+#endif
diff --git a/include/qemu/coroutine-tls.h b/include/qemu/coroutine-tls.h
new file mode 100644
index 0000000000..1558a826aa
--- /dev/null
+++ b/include/qemu/coroutine-tls.h
@@ -0,0 +1,165 @@
+/*
+ * QEMU Thread Local Storage for coroutines
+ *
+ * Copyright Red Hat
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ * It is forbidden to access Thread Local Storage in coroutines because
+ * compiler optimizations may cause values to be cached across coroutine
+ * re-entry. Coroutines can run in more than one thread through the course of
+ * their life, leading to bugs when stale TLS values from the wrong thread are
+ * used as a result of compiler optimization.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ * :caption: A coroutine that may see the wrong TLS value
+ *
+ * static __thread AioContext *current_aio_context;
+ * ...
+ * static void coroutine_fn foo(void)
+ * {
+ * aio_notify(current_aio_context);
+ * qemu_coroutine_yield();
+ * aio_notify(current_aio_context); // <-- may be stale after yielding!
+ * }
+ *
+ * This header provides macros for safely defining variables in Thread Local
+ * Storage:
+ *
+ * .. code-block:: c
+ * :caption: A coroutine that safely uses TLS
+ *
+ * QEMU_DEFINE_STATIC_CO_TLS(AioContext *, current_aio_context)
+ * ...
+ * static void coroutine_fn foo(void)
+ * {
+ * aio_notify(get_current_aio_context());
+ * qemu_coroutine_yield();
+ * aio_notify(get_current_aio_context()); // <-- safe
+ * }
+ */
+
+#ifndef QEMU_COROUTINE_TLS_H
+#define QEMU_COROUTINE_TLS_H
+
+/*
+ * To stop the compiler from caching TLS values we define accessor functions
+ * with __attribute__((noinline)) plus asm volatile("") to prevent
+ * optimizations that override noinline.
+ *
+ * The compiler can still analyze noinline code and make optimizations based on
+ * that knowledge, so an inline asm output operand is used to prevent
+ * optimizations that make assumptions about the address of the TLS variable.
+ *
+ * This is fragile and ultimately needs to be solved by a mechanism that is
+ * guaranteed to work by the compiler (e.g. stackless coroutines), but for now
+ * we use this approach to prevent issues.
+ */
+
+/**
+ * QEMU_DECLARE_CO_TLS:
+ * @type: the variable's C type
+ * @var: the variable name
+ *
+ * Declare an extern variable in Thread Local Storage from a header file:
+ *
+ * .. code-block:: c
+ * :caption: Declaring an extern variable in Thread Local Storage
+ *
+ * QEMU_DECLARE_CO_TLS(int, my_count)
+ * ...
+ * int c = get_my_count();
+ * set_my_count(c + 1);
+ * *get_ptr_my_count() = 0;
+ *
+ * This is a coroutine-safe replacement for the __thread keyword and is
+ * equivalent to the following code:
+ *
+ * .. code-block:: c
+ * :caption: Declaring a TLS variable using __thread
+ *
+ * extern __thread int my_count;
+ * ...
+ * int c = my_count;
+ * my_count = c + 1;
+ * *(&my_count) = 0;
+ */
+#define QEMU_DECLARE_CO_TLS(type, var) \
+ __attribute__((noinline)) type get_##var(void); \
+ __attribute__((noinline)) void set_##var(type v); \
+ __attribute__((noinline)) type *get_ptr_##var(void);
+
+/**
+ * QEMU_DEFINE_CO_TLS:
+ * @type: the variable's C type
+ * @var: the variable name
+ *
+ * Define a variable in Thread Local Storage that was previously declared from
+ * a header file with QEMU_DECLARE_CO_TLS():
+ *
+ * .. code-block:: c
+ * :caption: Defining a variable in Thread Local Storage
+ *
+ * QEMU_DEFINE_CO_TLS(int, my_count)
+ *
+ * This is a coroutine-safe replacement for the __thread keyword and is
+ * equivalent to the following code:
+ *
+ * .. code-block:: c
+ * :caption: Defining a TLS variable using __thread
+ *
+ * __thread int my_count;
+ */
+#define QEMU_DEFINE_CO_TLS(type, var) \
+ static __thread type co_tls_##var; \
+ type get_##var(void) { asm volatile(""); return co_tls_##var; } \
+ void set_##var(type v) { asm volatile(""); co_tls_##var = v; } \
+ type *get_ptr_##var(void) \
+ { type *ptr = &co_tls_##var; asm volatile("" : "+rm" (ptr)); return ptr; }
+
+/**
+ * QEMU_DEFINE_STATIC_CO_TLS:
+ * @type: the variable's C type
+ * @var: the variable name
+ *
+ * Define a static variable in Thread Local Storage:
+ *
+ * .. code-block:: c
+ * :caption: Defining a static variable in Thread Local Storage
+ *
+ * QEMU_DEFINE_STATIC_CO_TLS(int, my_count)
+ * ...
+ * int c = get_my_count();
+ * set_my_count(c + 1);
+ * *get_ptr_my_count() = 0;
+ *
+ * This is a coroutine-safe replacement for the __thread keyword and is
+ * equivalent to the following code:
+ *
+ * .. code-block:: c
+ * :caption: Defining a static TLS variable using __thread
+ *
+ * static __thread int my_count;
+ * ...
+ * int c = my_count;
+ * my_count = c + 1;
+ * *(&my_count) = 0;
+ */
+#define QEMU_DEFINE_STATIC_CO_TLS(type, var) \
+ static __thread type co_tls_##var; \
+ static __attribute__((noinline, unused)) \
+ type get_##var(void) \
+ { asm volatile(""); return co_tls_##var; } \
+ static __attribute__((noinline, unused)) \
+ void set_##var(type v) \
+ { asm volatile(""); co_tls_##var = v; } \
+ static __attribute__((noinline, unused)) \
+ type *get_ptr_##var(void) \
+ { type *ptr = &co_tls_##var; asm volatile("" : "+rm" (ptr)); return ptr; }
+
+#endif /* QEMU_COROUTINE_TLS_H */
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index 9801e7f5a4..e6aff45301 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -15,6 +15,7 @@
#ifndef QEMU_COROUTINE_H
#define QEMU_COROUTINE_H
+#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
@@ -25,102 +26,20 @@
* rather than callbacks, for operations that need to give up control while
* waiting for events to complete.
*
- * These functions are re-entrant and may be used outside the global mutex.
- */
-
-/**
- * Mark a function that executes in coroutine context
+ * These functions are re-entrant and may be used outside the BQL.
*
- * Functions that execute in coroutine context cannot be called directly from
- * normal functions. In the future it would be nice to enable compiler or
- * static checker support for catching such errors. This annotation might make
- * it possible and in the meantime it serves as documentation.
- *
- * For example:
+ * Functions that execute in coroutine context cannot be called
+ * directly from normal functions. Use @coroutine_fn to mark such
+ * functions. For example:
*
* static void coroutine_fn foo(void) {
* ....
* }
- */
-#define coroutine_fn
-
-typedef struct Coroutine Coroutine;
-
-/**
- * Coroutine entry point
- *
- * When the coroutine is entered for the first time, opaque is passed in as an
- * argument.
- *
- * When this function returns, the coroutine is destroyed automatically and
- * execution continues in the caller who last entered the coroutine.
- */
-typedef void coroutine_fn CoroutineEntry(void *opaque);
-
-/**
- * Create a new coroutine
- *
- * Use qemu_coroutine_enter() to actually transfer control to the coroutine.
- * The opaque argument is passed as the argument to the entry point.
- */
-Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque);
-
-/**
- * Transfer control to a coroutine
- */
-void qemu_coroutine_enter(Coroutine *coroutine);
-
-/**
- * Transfer control to a coroutine if it's not active (i.e. part of the call
- * stack of the running coroutine). Otherwise, do nothing.
- */
-void qemu_coroutine_enter_if_inactive(Coroutine *co);
-
-/**
- * Transfer control to a coroutine and associate it with ctx
- */
-void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co);
-
-/**
- * Transfer control back to a coroutine's caller
- *
- * This function does not return until the coroutine is re-entered using
- * qemu_coroutine_enter().
- */
-void coroutine_fn qemu_coroutine_yield(void);
-
-/**
- * Get the AioContext of the given coroutine
- */
-AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
-
-/**
- * Get the currently executing coroutine
- */
-Coroutine *coroutine_fn qemu_coroutine_self(void);
-
-/**
- * Return whether or not currently inside a coroutine
- *
- * This can be used to write functions that work both when in coroutine context
- * and when not in coroutine context. Note that such functions cannot use the
- * coroutine_fn annotation since they work outside coroutine context.
- */
-bool qemu_in_coroutine(void);
-
-/**
- * Return true if the coroutine is currently entered
- *
- * A coroutine is "entered" if it has not yielded from the current
- * qemu_coroutine_enter() call used to run it. This does not mean that the
- * coroutine is currently executing code since it may have transferred control
- * to another coroutine using qemu_coroutine_enter().
*
- * When several coroutines enter each other there may be no way to know which
- * ones have already been entered. In such situations this function can be
- * used to avoid recursively entering coroutines.
+ * In the future it would be nice to have the compiler or a static
+ * checker catch misuse of such functions. This annotation might make
+ * it possible and in the meantime it serves as documentation.
*/
-bool qemu_coroutine_entered(Coroutine *co);
/**
* Provides a mutex that can be used to synchronise coroutines
@@ -150,23 +69,20 @@ struct CoMutex {
};
/**
- * Initialises a CoMutex. This must be called before any other operation is used
- * on the CoMutex.
- */
-void qemu_co_mutex_init(CoMutex *mutex);
-
-/**
- * Locks the mutex. If the lock cannot be taken immediately, control is
- * transferred to the caller of the current coroutine.
- */
-void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
-
-/**
- * Unlocks the mutex and schedules the next coroutine that was waiting for this
- * lock to be run.
- */
-void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
-
+ * Assert that the current coroutine holds @mutex.
+ */
+static inline coroutine_fn void qemu_co_mutex_assert_locked(CoMutex *mutex)
+{
+ /*
+ * mutex->holder doesn't need any synchronisation if the assertion holds
+ * true because the mutex protects it. If it doesn't hold true, we still
+ * don't mind if another thread takes or releases mutex behind our back,
+ * because the condition will be false no matter whether we read NULL or
+ * the pointer for any other coroutine.
+ */
+ assert(qatomic_read(&mutex->locked) &&
+ mutex->holder == qemu_coroutine_self());
+}
/**
* CoQueues are a mechanism to queue coroutines in order to continue executing
@@ -183,23 +99,38 @@ typedef struct CoQueue {
*/
void qemu_co_queue_init(CoQueue *queue);
+typedef enum {
+ /*
+ * Enqueue at front instead of back. Use this to re-queue a request when
+ * its wait condition is not satisfied after being woken up.
+ */
+ CO_QUEUE_WAIT_FRONT = 0x1,
+} CoQueueWaitFlags;
+
/**
* Adds the current coroutine to the CoQueue and transfers control to the
* caller of the coroutine. The mutex is unlocked during the wait and
* locked again afterwards.
*/
#define qemu_co_queue_wait(queue, lock) \
- qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock))
-void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock);
+ qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), 0)
+#define qemu_co_queue_wait_flags(queue, lock, flags) \
+ qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), (flags))
+void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock,
+ CoQueueWaitFlags flags);
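
A hedged sketch of the new flag in use: after a wake-up that does not satisfy the wait condition, re-queue at the front so the request keeps its place in line. The struct, queue, lock, and predicate are invented:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    typedef struct MyState {
        CoMutex lock;
        CoQueue waiters;
        /* ... */
    } MyState;

    static bool enough_space(MyState *s);   /* hypothetical predicate */

    /* Called with s->lock held. */
    static void coroutine_fn wait_for_space(MyState *s)
    {
        bool first = true;

        while (!enough_space(s)) {
            qemu_co_queue_wait_flags(&s->waiters, &s->lock,
                                     first ? 0 : CO_QUEUE_WAIT_FRONT);
            first = false;
        }
    }
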
/**
- * Removes the next coroutine from the CoQueue, and wake it up.
+ * Removes the next coroutine from the CoQueue, and queues it to run after
+ * the currently-running coroutine yields.
* Returns true if a coroutine was removed, false if the queue is empty.
+ * Used from coroutine context; use qemu_co_enter_next outside.
*/
bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
/**
- * Empties the CoQueue; all coroutines are woken up.
+ * Empties the CoQueue and queues the coroutines to run after
+ * the currently-running coroutine yields.
+ * Used from coroutine context; use qemu_co_enter_all outside.
*/
void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
@@ -217,16 +148,33 @@ void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock);
/**
+ * Empties the CoQueue, waking the waiting coroutines one at a time. Unlike
+ * qemu_co_queue_all, this function releases the lock during aio_co_wake
+ * because it is meant to be used outside coroutine context; in that case, the
+ * coroutine is entered immediately, before qemu_co_enter_all returns.
+ *
+ * If used in coroutine context, qemu_co_enter_all is equivalent to
+ * qemu_co_queue_all.
+ */
+#define qemu_co_enter_all(queue, lock) \
+ qemu_co_enter_all_impl(queue, QEMU_MAKE_LOCKABLE(lock))
+void qemu_co_enter_all_impl(CoQueue *queue, QemuLockable *lock);
+
+/**
* Checks if the CoQueue is empty.
*/
bool qemu_co_queue_empty(CoQueue *queue);
+typedef struct CoRwTicket CoRwTicket;
typedef struct CoRwlock {
- int pending_writer;
- int reader;
CoMutex mutex;
- CoQueue queue;
+
+ /* Number of readers, or -1 if owned for writing. */
+ int owners;
+
+ /* Waiting coroutines. */
+ QSIMPLEQ_HEAD(, CoRwTicket) tickets;
} CoRwlock;
/**
@@ -240,17 +188,16 @@ void qemu_co_rwlock_init(CoRwlock *lock);
* of a parallel writer, control is transferred to the caller of the current
* coroutine.
*/
-void qemu_co_rwlock_rdlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock);
/**
* Write Locks the CoRwlock from a reader. This is a bit more efficient than
* @qemu_co_rwlock_unlock followed by a separate @qemu_co_rwlock_wrlock.
- * However, if the lock cannot be upgraded immediately, control is transferred
- * to the caller of the current coroutine. Also, @qemu_co_rwlock_upgrade
- * only overrides CoRwlock fairness if there are no concurrent readers, so
- * another writer might run while @qemu_co_rwlock_upgrade blocks.
+ * Note that if the lock cannot be upgraded immediately, control is transferred
+ * to the caller of the current coroutine; another writer might run while
+ * @qemu_co_rwlock_upgrade blocks.
*/
-void qemu_co_rwlock_upgrade(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock);
/**
* Downgrades a write-side critical section to a reader. Downgrading with
@@ -258,25 +205,64 @@ void qemu_co_rwlock_upgrade(CoRwlock *lock);
* followed by @qemu_co_rwlock_rdlock. This makes it more efficient, but
* may also sometimes be necessary for correctness.
*/
-void qemu_co_rwlock_downgrade(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock);
/**
* Write Locks the mutex. If the lock cannot be taken immediately because
* of a parallel reader, control is transferred to the caller of the current
* coroutine.
*/
-void qemu_co_rwlock_wrlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock);
/**
* Unlocks the read/write lock and schedules the next coroutine that was
* waiting for this lock to be run.
*/
-void qemu_co_rwlock_unlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock);
+
+typedef struct QemuCoSleep {
+ Coroutine *to_wake;
+} QemuCoSleep;
/**
- * Yield the coroutine for a given duration
+ * Yield the coroutine for a given duration. Initializes @w so that,
+ * during this yield, it can be passed to qemu_co_sleep_wake() to
+ * terminate the sleep.
*/
-void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns);
+void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
+ QEMUClockType type, int64_t ns);
+
+/**
+ * Yield the coroutine until the next call to qemu_co_sleep_wake.
+ */
+void coroutine_fn qemu_co_sleep(QemuCoSleep *w);
+
+static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
+{
+ QemuCoSleep w = { 0 };
+ qemu_co_sleep_ns_wakeable(&w, type, ns);
+}
+
+typedef void CleanupFunc(void *opaque);
+/**
+ * Run @entry in a coroutine and start a timer. Wait for @entry to finish or
+ * for the timer to elapse, whichever happens first. Return 0 if @entry
+ * finished, or -ETIMEDOUT if the timer elapsed earlier.
+ *
+ * Be careful: execution of @entry is not cancelled on timeout, so the caller
+ * must handle that case. If @clean is provided, it is called once the
+ * coroutine finishes after a timeout occurred.
+ */
+int coroutine_fn qemu_co_timeout(CoroutineEntry *entry, void *opaque,
+ uint64_t timeout_ns, CleanupFunc clean);
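Editor's note: a sketch (not part of the patch) of bounding a slow coroutine with qemu_co_timeout(); slow_op_co and its opaque state are hypothetical, and no CleanupFunc is passed.

    static void coroutine_fn slow_op_co(void *opaque)
    {
        /* ... potentially slow work on opaque ... */
    }

    static int coroutine_fn run_with_deadline(void *state)
    {
        /* On -ETIMEDOUT the coroutine keeps running, so @state must stay
         * valid until it finishes (or pass a CleanupFunc instead). */
        return qemu_co_timeout(slow_op_co, state,
                               5ULL * 1000 * 1000 * 1000, NULL);
    }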
+
+/**
+ * Wake a coroutine if it is sleeping in a wakeable sleep; its timer is
+ * deleted. @w must be the same QemuCoSleep that was passed to
+ * qemu_co_sleep_ns_wakeable() or qemu_co_sleep() by the sleeping coroutine.
+ */
+void qemu_co_sleep_wake(QemuCoSleep *w);
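Editor's note: a sketch (not part of the patch) of a cancellable delay built on the wakeable sleep; the global QemuCoSleep and both helper names are illustrative.

    static QemuCoSleep retry_sleep;

    static void coroutine_fn wait_before_retry(void)
    {
        /* Sleeps up to 1 s, or less if qemu_co_sleep_wake() is called. */
        qemu_co_sleep_ns_wakeable(&retry_sleep, QEMU_CLOCK_REALTIME,
                                  1000 * 1000 * 1000);
    }

    static void cancel_retry_wait(void)
    {
        qemu_co_sleep_wake(&retry_sleep);
    }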
/**
* Yield until a file descriptor becomes readable
@@ -285,6 +271,41 @@ void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns);
*/
void coroutine_fn yield_until_fd_readable(int fd);
+/**
+ * Increase coroutine pool size
+ */
+void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size);
+
+/**
+ * Decrease coroutine pool size
+ */
+void qemu_coroutine_dec_pool_size(unsigned int additional_pool_size);
+
#include "qemu/lockable.h"
+/**
+ * Sends (part of) an iovec down a socket, yielding when the socket is full,
+ * or receives data into (part of) an iovec from a socket, yielding when
+ * there is no data in the socket.
+ * The same interface as qemu_sendv_recvv(), with added yielding.
+ */
+ssize_t coroutine_fn qemu_co_sendv_recvv(int sockfd, struct iovec *iov,
+ unsigned iov_cnt, size_t offset,
+ size_t bytes, bool do_send);
+#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
+#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)
+
+/**
+ * The same as above, but with just a single buffer
+ */
+ssize_t coroutine_fn qemu_co_send_recv(int sockfd, void *buf, size_t bytes,
+ bool do_send);
+#define qemu_co_recv(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, false)
+#define qemu_co_send(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, true)
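Editor's note: a sketch (not part of the patch) of the coroutine socket helpers; the wrapper name is illustrative.

    static ssize_t coroutine_fn read_header(int sockfd, void *hdr, size_t len)
    {
        /* Yields whenever the socket would block while transferring
         * up to @len bytes. */
        return qemu_co_recv(sockfd, hdr, len);
    }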
+
#endif /* QEMU_COROUTINE_H */
diff --git a/include/qemu/coroutine_int.h b/include/qemu/coroutine_int.h
index bd6b0468e1..1da148552f 100644
--- a/include/qemu/coroutine_int.h
+++ b/include/qemu/coroutine_int.h
@@ -28,6 +28,11 @@
#include "qemu/queue.h"
#include "qemu/coroutine.h"
+#ifdef CONFIG_SAFESTACK
+/* Pointer to the unsafe stack, defined by the compiler */
+extern __thread void *__safestack_unsafe_stack_ptr;
+#endif
+
#define COROUTINE_STACK_SIZE (1 << 20)
typedef enum {
diff --git a/include/qemu/cpu-float.h b/include/qemu/cpu-float.h
new file mode 100644
index 0000000000..a26b9faf68
--- /dev/null
+++ b/include/qemu/cpu-float.h
@@ -0,0 +1,64 @@
+#ifndef QEMU_CPU_FLOAT_H
+#define QEMU_CPU_FLOAT_H
+
+#include "fpu/softfloat-types.h"
+
+/* Unions for reinterpreting between floats and integers. */
+
+typedef union {
+ float32 f;
+ uint32_t l;
+} CPU_FloatU;
+
+typedef union {
+ float64 d;
+#if HOST_BIG_ENDIAN
+ struct {
+ uint32_t upper;
+ uint32_t lower;
+ } l;
+#else
+ struct {
+ uint32_t lower;
+ uint32_t upper;
+ } l;
+#endif
+ uint64_t ll;
+} CPU_DoubleU;
+
+typedef union {
+ floatx80 d;
+ struct {
+ uint64_t lower;
+ uint16_t upper;
+ } l;
+} CPU_LDoubleU;
+
+typedef union {
+ float128 q;
+#if HOST_BIG_ENDIAN
+ struct {
+ uint32_t upmost;
+ uint32_t upper;
+ uint32_t lower;
+ uint32_t lowest;
+ } l;
+ struct {
+ uint64_t upper;
+ uint64_t lower;
+ } ll;
+#else
+ struct {
+ uint32_t lowest;
+ uint32_t lower;
+ uint32_t upper;
+ uint32_t upmost;
+ } l;
+ struct {
+ uint64_t lower;
+ uint64_t upper;
+ } ll;
+#endif
+} CPU_QuadU;
+
+#endif /* QEMU_CPU_FLOAT_H */
diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h
index 69301700bd..b11161555b 100644
--- a/include/qemu/cpuid.h
+++ b/include/qemu/cpuid.h
@@ -25,6 +25,9 @@
#endif
/* Leaf 1, %ecx */
+#ifndef bit_PCLMUL
+#define bit_PCLMUL (1 << 1)
+#endif
#ifndef bit_SSE4_1
#define bit_SSE4_1 (1 << 19)
#endif
@@ -48,10 +51,52 @@
#ifndef bit_BMI2
#define bit_BMI2 (1 << 8)
#endif
+#ifndef bit_AVX512F
+#define bit_AVX512F (1 << 16)
+#endif
+#ifndef bit_AVX512DQ
+#define bit_AVX512DQ (1 << 17)
+#endif
+#ifndef bit_AVX512BW
+#define bit_AVX512BW (1 << 30)
+#endif
+#ifndef bit_AVX512VL
+#define bit_AVX512VL (1u << 31)
+#endif
+
+/* Leaf 7, %ecx */
+#ifndef bit_AVX512VBMI2
+#define bit_AVX512VBMI2 (1 << 6)
+#endif
/* Leaf 0x80000001, %ecx */
#ifndef bit_LZCNT
#define bit_LZCNT (1 << 5)
#endif
+/*
+ * Signatures for different CPU implementations as returned from Leaf 0.
+ */
+
+#ifndef signature_INTEL_ecx
+/* "Genu" "ineI" "ntel" */
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_edx 0x49656e69
+#define signature_INTEL_ecx 0x6c65746e
+#endif
+
+#ifndef signature_AMD_ecx
+/* "Auth" "enti" "cAMD" */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_edx 0x69746e65
+#define signature_AMD_ecx 0x444d4163
+#endif
+
+static inline unsigned xgetbv_low(unsigned c)
+{
+ unsigned a, d;
+ asm("xgetbv" : "=a"(a), "=d"(d) : "c"(c));
+ return a;
+}
+
#endif /* QEMU_CPUID_H */
diff --git a/include/qemu/crc-ccitt.h b/include/qemu/crc-ccitt.h
new file mode 100644
index 0000000000..8918dafe07
--- /dev/null
+++ b/include/qemu/crc-ccitt.h
@@ -0,0 +1,33 @@
+/*
+ * CRC16 (CCITT) Checksum Algorithm
+ *
+ * Copyright (c) 2021 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * From Linux kernel v5.10 include/linux/crc-ccitt.h
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef CRC_CCITT_H
+#define CRC_CCITT_H
+
+extern uint16_t const crc_ccitt_table[256];
+extern uint16_t const crc_ccitt_false_table[256];
+
+uint16_t crc_ccitt(uint16_t crc, const uint8_t *buffer, size_t len);
+uint16_t crc_ccitt_false(uint16_t crc, const uint8_t *buffer, size_t len);
+
+static inline uint16_t crc_ccitt_byte(uint16_t crc, const uint8_t c)
+{
+ return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
+}
+
+static inline uint16_t crc_ccitt_false_byte(uint16_t crc, const uint8_t c)
+{
+ return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
+#endif /* CRC_CCITT_H */
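Editor's note: a sketch (not part of the patch) of computing a frame checksum with the new helpers; the 0xffff seed is an assumption and depends on the protocol in use.

    static uint16_t frame_crc(const uint8_t *buf, size_t len)
    {
        /* Seed value is protocol-specific; 0xffff is a common choice. */
        return crc_ccitt(0xffff, buf, len);
    }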
diff --git a/include/qemu/crc32c.h b/include/qemu/crc32c.h
index dafb6a1ada..88b4d2b3b3 100644
--- a/include/qemu/crc32c.h
+++ b/include/qemu/crc32c.h
@@ -28,8 +28,8 @@
#ifndef QEMU_CRC32C_H
#define QEMU_CRC32C_H
-#include "qemu-common.h"
uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length);
+uint32_t iov_crc32c(uint32_t crc, const struct iovec *iov, size_t iov_cnt);
#endif
diff --git a/include/qemu/ctype.h b/include/qemu/ctype.h
new file mode 100644
index 0000000000..3691f0984d
--- /dev/null
+++ b/include/qemu/ctype.h
@@ -0,0 +1,27 @@
+/*
+ * QEMU TCG support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CTYPE_H
+#define QEMU_CTYPE_H
+
+#define qemu_isalnum(c) isalnum((unsigned char)(c))
+#define qemu_isalpha(c) isalpha((unsigned char)(c))
+#define qemu_iscntrl(c) iscntrl((unsigned char)(c))
+#define qemu_isdigit(c) isdigit((unsigned char)(c))
+#define qemu_isgraph(c) isgraph((unsigned char)(c))
+#define qemu_islower(c) islower((unsigned char)(c))
+#define qemu_isprint(c) isprint((unsigned char)(c))
+#define qemu_ispunct(c) ispunct((unsigned char)(c))
+#define qemu_isspace(c) isspace((unsigned char)(c))
+#define qemu_isupper(c) isupper((unsigned char)(c))
+#define qemu_isxdigit(c) isxdigit((unsigned char)(c))
+#define qemu_tolower(c) tolower((unsigned char)(c))
+#define qemu_toupper(c) toupper((unsigned char)(c))
+#define qemu_isascii(c) isascii((unsigned char)(c))
+#define qemu_toascii(c) toascii((unsigned char)(c))
+
+#endif
diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h
index d2dad3057c..92c927a6a3 100644
--- a/include/qemu/cutils.h
+++ b/include/qemu/cutils.h
@@ -1,7 +1,23 @@
#ifndef QEMU_CUTILS_H
#define QEMU_CUTILS_H
-#include "qemu/fprintf-fn.h"
+/*
+ * si_prefix:
+ * @exp10: exponent of 10, a multiple of 3 between -18 and 18 inclusive.
+ *
+ * Return an SI prefix (n, u, m, K, M, etc.) corresponding
+ * to the given exponent of 10.
+ */
+const char *si_prefix(unsigned int exp10);
+
+/*
+ * iec_binary_prefix:
+ * @exp2: exponent of 2, a multiple of 10 between 0 and 60 inclusive.
+ *
+ * Return an IEC binary prefix (Ki, Mi, etc.) corresponding
+ * to the given exponent of 2.
+ */
+const char *iec_binary_prefix(unsigned int exp2);
/**
* pstrcpy:
@@ -131,8 +147,6 @@ static inline const char *qemu_strchrnul(const char *s, int c)
const char *qemu_strchrnul(const char *s, int c);
#endif
time_t mktimegm(struct tm *tm);
-int qemu_fdatasync(int fd);
-int fcntl_setfl(int fd, int flag);
int qemu_parse_fd(const char *param);
int qemu_strtoi(const char *nptr, const char **endptr, int base,
int *result);
@@ -149,14 +163,27 @@ int qemu_strtou64(const char *nptr, const char **endptr, int base,
int qemu_strtod(const char *nptr, const char **endptr, double *result);
int qemu_strtod_finite(const char *nptr, const char **endptr, double *result);
-int parse_uint(const char *s, unsigned long long *value, char **endptr,
- int base);
-int parse_uint_full(const char *s, unsigned long long *value, int base);
+int parse_uint(const char *s, const char **endptr, int base, uint64_t *value);
+int parse_uint_full(const char *s, int base, uint64_t *value);
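Editor's note: a sketch (not part of the patch) using the reworked parse_uint_full() signature (base before the uint64_t out-parameter); the port-number wrapper is illustrative.

    static int parse_port(const char *s, uint64_t *port)
    {
        int ret = parse_uint_full(s, 10, port);

        if (ret < 0 || *port > 65535) {
            return -1;
        }
        return 0;
    }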
int qemu_strtosz(const char *nptr, const char **end, uint64_t *result);
int qemu_strtosz_MiB(const char *nptr, const char **end, uint64_t *result);
int qemu_strtosz_metric(const char *nptr, const char **end, uint64_t *result);
+char *size_to_str(uint64_t val);
+
+/**
+ * freq_to_str:
+ * @freq_hz: frequency to stringify
+ *
+ * Return human readable string for frequency @freq_hz.
+ * Use SI units like kHz, MHz, and so forth.
+ *
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
+char *freq_to_str(uint64_t freq_hz);
+
/* used to print char* safely */
#define STR_OR_NULL(str) ((str) ? (str) : "null")
@@ -183,4 +210,61 @@ int uleb128_decode_small(const uint8_t *in, uint32_t *n);
*/
int qemu_pstrcmp0(const char **str1, const char **str2);
+/* Find the program directory and save it for later use with
+ * qemu_get_exec_dir().
+ * Try the OS-specific API first; if that fails, parse it from argv0. */
+void qemu_init_exec_dir(const char *argv0);
+
+/* Get the saved exec dir. */
+const char *qemu_get_exec_dir(void);
+
+/**
+ * get_relocated_path:
+ * @dir: the directory (typically a `CONFIG_*DIR` variable) to be relocated.
+ *
+ * Returns a path for @dir that uses the directory of the running executable
+ * as the prefix.
+ *
+ * When a directory named `qemu-bundle` exists in the directory of the running
+ * executable, the path to the directory will be prepended to @dir. For
+ * example, if the directory of the running executable is `/qemu/build` and
+ * @dir is `/usr/share/qemu`, the result will be
+ * `/qemu/build/qemu-bundle/usr/share/qemu`. The directory is expected to exist
+ * in the build tree.
+ *
+ * Otherwise, the directory of the running executable is used as the prefix,
+ * with the relative path from `bindir` to @dir appended. For example,
+ * if the directory of the running executable is `/opt/qemu/bin`, `bindir` is
+ * `/usr/bin` and @dir is `/usr/share/qemu`, the result will be
+ * `/opt/qemu/bin/../share/qemu`.
+ *
+ * The returned string should be freed by the caller.
+ */
+char *get_relocated_path(const char *dir);
+
+static inline const char *yes_no(bool b)
+{
+ return b ? "yes" : "no";
+}
+
+/*
+ * helper to parse debug environment variables
+ */
+int parse_debug_env(const char *name, int max, int initial);
+
+/*
+ * Hexdump a line of a byte buffer into a hexadecimal/ASCII buffer
+ */
+#define QEMU_HEXDUMP_LINE_BYTES 16 /* Number of bytes to dump */
+#define QEMU_HEXDUMP_LINE_LEN 75 /* Number of characters in line */
+void qemu_hexdump_line(char *line, unsigned int b, const void *bufptr,
+ unsigned int len, bool ascii);
+
+/*
+ * Hexdump a buffer to a file. An optional string prefix is added to every line
+ */
+
+void qemu_hexdump(FILE *fp, const char *prefix,
+ const void *bufptr, size_t size);
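Editor's note: a sketch (not part of the patch) dumping a received buffer with the prefixed hexdump helper; the prefix string is illustrative.

    static void dump_packet(const void *buf, size_t len)
    {
        qemu_hexdump(stderr, "net-rx", buf, len);
    }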
+
#endif
diff --git a/include/qemu/datadir.h b/include/qemu/datadir.h
new file mode 100644
index 0000000000..21f9097f58
--- /dev/null
+++ b/include/qemu/datadir.h
@@ -0,0 +1,28 @@
+#ifndef QEMU_DATADIR_H
+#define QEMU_DATADIR_H
+
+#define QEMU_FILE_TYPE_BIOS 0
+#define QEMU_FILE_TYPE_KEYMAP 1
+/**
+ * qemu_find_file:
+ * @type: QEMU_FILE_TYPE_BIOS (for BIOS, VGA BIOS)
+ * or QEMU_FILE_TYPE_KEYMAP (for keymaps).
+ * @name: Relative or absolute file name
+ *
+ * If @name exists on disk as an absolute path, or a path relative
+ * to the current directory, then returns @name unchanged.
+ * Otherwise searches for @name file in the data directories, either
+ * configured at build time (DATADIR) or registered with the -L command
+ * line option.
+ *
+ * The caller must use g_free() to free the returned data when it is
+ * no longer required.
+ *
+ * Returns: a path that can access @name, or NULL if no matching file exists.
+ */
+char *qemu_find_file(int type, const char *name);
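Editor's note: a sketch (not part of the patch) of resolving a firmware blob; the returned path must be released with g_free().

    static bool have_firmware(const char *name)
    {
        char *path = qemu_find_file(QEMU_FILE_TYPE_BIOS, name);
        bool found = path != NULL;

        g_free(path);
        return found;
    }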
+void qemu_add_default_firmwarepath(void);
+void qemu_add_data_dir(char *path);
+void qemu_list_data_dirs(void);
+
+#endif
diff --git a/include/qemu/dbus.h b/include/qemu/dbus.h
new file mode 100644
index 0000000000..81d3de8a5a
--- /dev/null
+++ b/include/qemu/dbus.h
@@ -0,0 +1,42 @@
+/*
+ * Helpers for using D-Bus
+ *
+ * Copyright (C) 2019 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef DBUS_H
+#define DBUS_H
+
+#include <gio/gio.h>
+
+#include "qom/object.h"
+#include "chardev/char.h"
+#include "qemu/notify.h"
+
+/* glib/gio 2.68 */
+#define DBUS_METHOD_INVOCATION_HANDLED TRUE
+#define DBUS_METHOD_INVOCATION_UNHANDLED FALSE
+
+/* in msec */
+#define DBUS_DEFAULT_TIMEOUT 1000
+
+#define DBUS_DISPLAY1_ROOT "/org/qemu/Display1"
+
+#define DBUS_DISPLAY_ERROR (dbus_display_error_quark())
+GQuark dbus_display_error_quark(void);
+
+typedef enum {
+ DBUS_DISPLAY_ERROR_FAILED,
+ DBUS_DISPLAY_ERROR_INVALID,
+ DBUS_DISPLAY_ERROR_UNSUPPORTED,
+ DBUS_DISPLAY_N_ERRORS,
+} DBusDisplayError;
+
+GStrv qemu_dbus_get_queued_owners(GDBusConnection *connection,
+ const char *name,
+ Error **errp);
+
+#endif /* DBUS_H */
diff --git a/include/qemu/defer-call.h b/include/qemu/defer-call.h
new file mode 100644
index 0000000000..e2c1d24572
--- /dev/null
+++ b/include/qemu/defer-call.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Deferred calls
+ *
+ * Copyright Red Hat.
+ */
+
+#ifndef QEMU_DEFER_CALL_H
+#define QEMU_DEFER_CALL_H
+
+/* See documentation in util/defer-call.c */
+void defer_call_begin(void);
+void defer_call_end(void);
+void defer_call(void (*fn)(void *), void *opaque);
+
+#endif /* QEMU_DEFER_CALL_H */
diff --git a/include/qemu/drm.h b/include/qemu/drm.h
index 4c3e622f5c..fab74d4be9 100644
--- a/include/qemu/drm.h
+++ b/include/qemu/drm.h
@@ -1,5 +1,5 @@
-#ifndef QEMU_DRM_H_
-#define QEMU_DRM_H_
+#ifndef QEMU_DRM_H
+#define QEMU_DRM_H
int qemu_drm_rendernode_open(const char *rendernode);
diff --git a/include/qemu/envlist.h b/include/qemu/envlist.h
index b9addcc11f..6006dfae44 100644
--- a/include/qemu/envlist.h
+++ b/include/qemu/envlist.h
@@ -1,10 +1,6 @@
#ifndef ENVLIST_H
#define ENVLIST_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
typedef struct envlist envlist_t;
envlist_t *envlist_create(void);
@@ -15,8 +11,4 @@ int envlist_parse_set(envlist_t *, const char *);
int envlist_parse_unset(envlist_t *, const char *);
char **envlist_to_environ(const envlist_t *, size_t *);
-#ifdef __cplusplus
-}
-#endif
-
#endif /* ENVLIST_H */
diff --git a/include/qemu/error-report.h b/include/qemu/error-report.h
index 0a8d9cc9ea..3ae2357fda 100644
--- a/include/qemu/error-report.h
+++ b/include/qemu/error-report.h
@@ -30,24 +30,23 @@ void loc_set_none(void);
void loc_set_cmdline(char **argv, int idx, int cnt);
void loc_set_file(const char *fname, int lno);
-void error_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-void error_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-void error_vprintf_unless_qmp(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-void error_printf_unless_qmp(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-void error_set_progname(const char *argv0);
+int error_vprintf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+int error_printf(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
-void error_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-void warn_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-void info_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
+void error_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+void warn_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+void info_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
-void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-void warn_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-void info_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void error_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
+void warn_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
+void info_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
bool error_report_once_cond(bool *printed, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
bool warn_report_once_cond(bool *printed, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
+
+void error_init(const char *argv0);
/*
* Similar to error_report(), except it prints the message just once.
@@ -71,7 +70,8 @@ bool warn_report_once_cond(bool *printed, const char *fmt, ...)
fmt, ##__VA_ARGS__); \
})
-const char *error_get_progname(void);
-extern bool enable_timestamp_msg;
+extern bool message_with_timestamp;
+extern bool error_with_guestname;
+extern const char *error_guest_name;
#endif
diff --git a/include/qemu/event_notifier.h b/include/qemu/event_notifier.h
index 599c99f1a5..8a4ff308e1 100644
--- a/include/qemu/event_notifier.h
+++ b/include/qemu/event_notifier.h
@@ -13,7 +13,6 @@
#ifndef QEMU_EVENT_NOTIFIER_H
#define QEMU_EVENT_NOTIFIER_H
-#include "qemu-common.h"
#ifdef _WIN32
#include <windows.h>
@@ -25,6 +24,7 @@ struct EventNotifier {
#else
int rfd;
int wfd;
+ bool initialized;
#endif
};
@@ -38,6 +38,7 @@ int event_notifier_test_and_clear(EventNotifier *);
#ifdef CONFIG_POSIX
void event_notifier_init_fd(EventNotifier *, int fd);
int event_notifier_get_fd(const EventNotifier *);
+int event_notifier_get_wfd(const EventNotifier *);
#else
HANDLE event_notifier_get_handle(EventNotifier *);
#endif
diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h
index 24b364462d..c6295c6ff0 100644
--- a/include/qemu/fifo8.h
+++ b/include/qemu/fifo8.h
@@ -1,7 +1,6 @@
#ifndef QEMU_FIFO8_H
#define QEMU_FIFO8_H
-#include "migration/vmstate.h"
typedef struct {
/* All fields are private */
@@ -47,7 +46,7 @@ void fifo8_push(Fifo8 *fifo, uint8_t data);
* fifo8_push_all:
* @fifo: FIFO to push to
* @data: data to push
- * @size: number of bytes to push
+ * @num: number of bytes to push
*
* Push a byte array to the FIFO. Behaviour is undefined if the FIFO is full.
* Clients are responsible for checking the space left in the FIFO using
@@ -72,7 +71,7 @@ uint8_t fifo8_pop(Fifo8 *fifo);
* fifo8_pop_buf:
* @fifo: FIFO to pop from
* @max: maximum number of bytes to pop
- * @num: actual number of returned bytes
+ * @numptr: pointer filled with number of bytes returned (can be NULL)
*
* Pop a number of elements from the FIFO up to a maximum of max. The buffer
* containing the popped data is returned. This buffer points directly into
@@ -83,16 +82,43 @@ uint8_t fifo8_pop(Fifo8 *fifo);
* around in the ring buffer; in this case only a contiguous part of the data
* is returned.
*
- * The number of valid bytes returned is populated in *num; will always return
- * at least 1 byte. max must not be 0 or greater than the number of bytes in
- * the FIFO.
+ * The number of valid bytes returned is populated in *numptr; will always
+ * return at least 1 byte. max must not be 0 or greater than the number of
+ * bytes in the FIFO.
*
* Clients are responsible for checking the availability of requested data
* using fifo8_num_used().
*
* Returns: A pointer to popped data.
*/
-const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num);
+const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
+
+/**
+ * fifo8_peek_buf: read up to max bytes from the FIFO
+ * @fifo: FIFO to read from
+ * @max: maximum number of bytes to peek
+ * @numptr: pointer filled with number of bytes returned (can be NULL)
+ *
+ * Peek at a number of elements from the FIFO, up to a maximum of max.
+ * The buffer containing the peeked data is returned. This buffer points
+ * directly into the FIFO backing store. Since the data is invalidated once
+ * any of the fifo8_* APIs is called on the FIFO, it is the caller's
+ * responsibility to access it before making further API calls.
+ *
+ * The function may return fewer bytes than requested when the data wraps
+ * around in the ring buffer; in this case only a contiguous part of the data
+ * is returned.
+ *
+ * The number of valid bytes returned is populated in *numptr; will always
+ * return at least 1 byte. max must not be 0 or greater than the number of
+ * bytes in the FIFO.
+ *
+ * Clients are responsible for checking the availability of requested data
+ * using fifo8_num_used().
+ *
+ * Returns: A pointer to peekable data.
+ */
+const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
/**
* fifo8_reset:
@@ -149,12 +175,16 @@ uint32_t fifo8_num_used(Fifo8 *fifo);
extern const VMStateDescription vmstate_fifo8;
-#define VMSTATE_FIFO8(_field, _state) { \
- .name = (stringify(_field)), \
- .size = sizeof(Fifo8), \
- .vmsd = &vmstate_fifo8, \
- .flags = VMS_STRUCT, \
- .offset = vmstate_offset_value(_state, _field, Fifo8), \
+#define VMSTATE_FIFO8_TEST(_field, _state, _test) { \
+ .name = (stringify(_field)), \
+ .field_exists = (_test), \
+ .size = sizeof(Fifo8), \
+ .vmsd = &vmstate_fifo8, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, Fifo8), \
}
+#define VMSTATE_FIFO8(_field, _state) \
+ VMSTATE_FIFO8_TEST(_field, _state, NULL)
+
#endif /* QEMU_FIFO8_H */
diff --git a/include/qemu/filemonitor.h b/include/qemu/filemonitor.h
new file mode 100644
index 0000000000..8715d5c048
--- /dev/null
+++ b/include/qemu/filemonitor.h
@@ -0,0 +1,127 @@
+/*
+ * QEMU file monitor helper
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QEMU_FILEMONITOR_H
+#define QEMU_FILEMONITOR_H
+
+
+
+typedef struct QFileMonitor QFileMonitor;
+
+typedef enum {
+ /* File has been created in a dir */
+ QFILE_MONITOR_EVENT_CREATED,
+ /* File has been modified in a dir */
+ QFILE_MONITOR_EVENT_MODIFIED,
+ /* File has been deleted in a dir */
+ QFILE_MONITOR_EVENT_DELETED,
+ /* File has attributes changed */
+ QFILE_MONITOR_EVENT_ATTRIBUTES,
+ /* Dir is no longer being monitored (due to deletion) */
+ QFILE_MONITOR_EVENT_IGNORED,
+} QFileMonitorEvent;
+
+
+/**
+ * QFileMonitorHandler:
+ * @id: id from qemu_file_monitor_add_watch()
+ * @event: the file change that occurred
+ * @filename: the name of the file affected
+ * @opaque: opaque data provided to qemu_file_monitor_add_watch()
+ *
+ * Invoked whenever a file changes. If @event is
+ * QFILE_MONITOR_EVENT_IGNORED, @filename will be
+ * empty.
+ *
+ */
+typedef void (*QFileMonitorHandler)(int64_t id,
+ QFileMonitorEvent event,
+ const char *filename,
+ void *opaque);
+
+/**
+ * qemu_file_monitor_new:
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Create a handle for a file monitoring object.
+ *
+ * This object does locking internally so that it is
+ * safe to use from multiple threads.
+ *
+ * If the platform does not support file monitoring, an
+ * error will be reported. Likewise if file monitoring
+ * is supported but cannot be initialized.
+ *
+ * Currently this is implemented on Linux platforms with
+ * the inotify subsystem.
+ *
+ * Returns: the new monitoring object, or NULL on error
+ */
+QFileMonitor *qemu_file_monitor_new(Error **errp);
+
+/**
+ * qemu_file_monitor_free:
+ * @mon: the file monitor context
+ *
+ * Free resources associated with the file monitor,
+ * including any currently registered watches.
+ */
+void qemu_file_monitor_free(QFileMonitor *mon);
+
+/**
+ * qemu_file_monitor_add_watch:
+ * @mon: the file monitor context
+ * @dirpath: the directory whose contents to watch
+ * @filename: optional filename to filter on
+ * @cb: the function to invoke when @dirpath has changes
+ * @opaque: data to pass to @cb
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Register to receive notifications of changes
+ * in the directory @dirpath. All files in the
+ * directory will be monitored. If the caller is
+ * only interested in one specific file, @filename
+ * can be used to filter events.
+ *
+ * Returns: a positive integer watch ID, or -1 on error
+ */
+int64_t qemu_file_monitor_add_watch(QFileMonitor *mon,
+ const char *dirpath,
+ const char *filename,
+ QFileMonitorHandler cb,
+ void *opaque,
+ Error **errp);
+
+/**
+ * qemu_file_monitor_remove_watch:
+ * @mon: the file monitor context
+ * @dirpath: the directory whose contents to unwatch
+ * @id: id of the watch to remove
+ *
+ * Removes the file monitoring watch @id, associated
+ * with the directory @dirpath. This must never be
+ * called from a QFileMonitorHandler callback, or a
+ * deadlock will result.
+ */
+void qemu_file_monitor_remove_watch(QFileMonitor *mon,
+ const char *dirpath,
+ int64_t id);
+
+#endif /* QEMU_FILEMONITOR_H */
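Editor's note: a sketch (not part of the patch) of watching a single file in a directory; the paths, handler, and wrapper names are illustrative.

    static void image_changed(int64_t id, QFileMonitorEvent ev,
                              const char *filename, void *opaque)
    {
        if (ev == QFILE_MONITOR_EVENT_MODIFIED) {
            /* react to the change */
        }
    }

    static int64_t watch_image(QFileMonitor *mon, Error **errp)
    {
        return qemu_file_monitor_add_watch(mon, "/var/lib/images", "disk.img",
                                           image_changed, NULL, errp);
    }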
diff --git a/include/qemu/fprintf-fn.h b/include/qemu/fprintf-fn.h
deleted file mode 100644
index 9068a960b3..0000000000
--- a/include/qemu/fprintf-fn.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Typedef for fprintf-alike function pointers.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef QEMU_FPRINTF_FN_H
-#define QEMU_FPRINTF_FN_H
-
-typedef int (*fprintf_function)(FILE *f, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
-
-#endif
diff --git a/include/qemu/guest-random.h b/include/qemu/guest-random.h
new file mode 100644
index 0000000000..5060d49d60
--- /dev/null
+++ b/include/qemu/guest-random.h
@@ -0,0 +1,68 @@
+/*
+ * QEMU guest-visible random functions
+ *
+ * Copyright 2019 Linaro, Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef QEMU_GUEST_RANDOM_H
+#define QEMU_GUEST_RANDOM_H
+
+/**
+ * qemu_guest_random_seed_main(const char *seedstr, Error **errp)
+ * @seedstr: a non-NULL pointer to a C string
+ * @errp: an error indicator
+ *
+ * The @seedstr value is that which accompanies the -seed argument.
+ * This forces qemu_guest_getrandom into deterministic mode.
+ *
+ * Returns 0 on success, < 0 on failure while setting *errp.
+ */
+int qemu_guest_random_seed_main(const char *seedstr, Error **errp);
+
+/**
+ * qemu_guest_random_seed_thread_part1(void)
+ *
+ * If qemu_guest_getrandom is in deterministic mode, returns an
+ * independent seed for the new thread. Otherwise returns 0.
+ */
+uint64_t qemu_guest_random_seed_thread_part1(void);
+
+/**
+ * qemu_guest_random_seed_thread_part2(uint64_t seed)
+ * @seed: a value for the new thread.
+ *
+ * If qemu_guest_getrandom is in deterministic mode, this stores an
+ * independent seed for the new thread. Otherwise a no-op.
+ */
+void qemu_guest_random_seed_thread_part2(uint64_t seed);
+
+/**
+ * qemu_guest_getrandom(void *buf, size_t len, Error **errp)
+ * @buf: a buffer of bytes to be written
+ * @len: the number of bytes in @buf
+ * @errp: an error indicator
+ *
+ * Fills len bytes in buf with random data. This should only be used
+ * for data presented to the guest. Host-side crypto services should
+ * use qcrypto_random_bytes.
+ *
+ * Returns 0 on success, < 0 on failure while setting *errp.
+ */
+int qemu_guest_getrandom(void *buf, size_t len, Error **errp);
+
+/**
+ * qemu_guest_getrandom_nofail(void *buf, size_t len)
+ * @buf: a buffer of bytes to be written
+ * @len: the number of bytes in @buf
+ *
+ * Like qemu_guest_getrandom, but will assert for failure.
+ * Use this when there is no reasonable recovery.
+ */
+void qemu_guest_getrandom_nofail(void *buf, size_t len);
+
+#endif /* QEMU_GUEST_RANDOM_H */
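Editor's note: a sketch (not part of the patch) of filling a guest-visible nonce; host-side crypto should keep using qcrypto_random_bytes() as noted above.

    static int fill_guest_nonce(uint8_t *buf, size_t len, Error **errp)
    {
        return qemu_guest_getrandom(buf, len, errp);
    }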
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index 4afbe6292e..8136e33674 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -76,20 +76,9 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size);
*
* Store result of merging @a and @b into @result.
* @result is allowed to be equal to @a or @b.
- *
- * Return true if the merge was successful,
- * false if it was not attempted.
+ * All bitmaps must have the same size.
*/
-bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result);
-
-/**
- * hbitmap_can_merge:
- *
- * hbitmap_can_merge(a, b) && hbitmap_can_merge(a, result) is sufficient and
- * necessary for hbitmap_merge will not fail.
- *
- */
-bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b);
+void hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result);
/**
* hbitmap_empty:
@@ -132,6 +121,11 @@ void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count);
* @count: Number of bits to reset.
*
* Reset a consecutive range of bits in an HBitmap.
+ * @start and @count must be aligned to bitmap granularity. The only exception
+ * is resetting the tail of the bitmap: @count may be equal to hb->orig_size -
+ * @start, in which case @count need not be aligned. The sum @start + @count is
+ * allowed to be greater than hb->orig_size, but only if @start < hb->orig_size
+ * and @start + @count = ALIGN_UP(hb->orig_size, granularity).
*/
void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count);
@@ -292,12 +286,18 @@ void hbitmap_free(HBitmap *hb);
*/
void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
-/* hbitmap_iter_skip_words:
- * @hbi: HBitmapIter to operate on.
+/*
+ * hbitmap_next_dirty:
+ *
+ * Find next dirty bit within selected range. If not found, return -1.
*
- * Internal function used by hbitmap_iter_next and hbitmap_iter_next_word.
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from.
+ * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole
+ * bitmap is looked through. You can use INT64_MAX as @count to search up to
+ * the bitmap end.
*/
-unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
+int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count);
/* hbitmap_next_zero:
*
@@ -306,47 +306,40 @@ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
* @hb: The HBitmap to operate on
* @start: The bit to start from.
* @count: Number of bits to proceed. If @start+@count > bitmap size, the whole
- * bitmap is looked through. You can use UINT64_MAX as @count to search up to
+ * bitmap is looked through. You can use INT64_MAX as @count to search up to
* the bitmap end.
*/
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count);
+int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count);
/* hbitmap_next_dirty_area:
* @hb: The HBitmap to operate on
- * @start: in-out parameter.
- * in: the offset to start from
- * out: (if area found) start of found area
- * @count: in-out parameter.
- * in: length of requested region
- * out: length of found area
- *
- * If dirty area found within [@start, @start + @count), returns true and sets
- * @offset and @bytes appropriately. Otherwise returns false and leaves @offset
- * and @bytes unchanged.
- */
-bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
- uint64_t *count);
+ * @start: the offset to start from
+ * @end: end of requested area
+ * @max_dirty_count: limit for out parameter dirty_count
+ * @dirty_start: on success: start of found area
+ * @dirty_count: on success: length of found area
+ *
+ * If dirty area found within [@start, @end), returns true and sets
+ * @dirty_start and @dirty_count appropriately. @dirty_count will not exceed
+ * @max_dirty_count.
+ * If dirty area was not found, returns false and leaves @dirty_start and
+ * @dirty_count unchanged.
+ */
+bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
+ int64_t max_dirty_count,
+ int64_t *dirty_start, int64_t *dirty_count);
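Editor's note: a sketch (not part of the patch) of walking every dirty area of a bitmap in bounded chunks using the new signature; the loop is illustrative.

    static void walk_dirty(const HBitmap *hb, int64_t size, int64_t max_chunk)
    {
        int64_t start = 0, dirty_start, dirty_count;

        while (hbitmap_next_dirty_area(hb, start, size, max_chunk,
                                       &dirty_start, &dirty_count)) {
            /* process [dirty_start, dirty_start + dirty_count) */
            start = dirty_start + dirty_count;
        }
    }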
-/* hbitmap_create_meta:
- * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
- * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to
- * free it.
- *
- * Currently, we only guarantee that if a bit in the hbitmap is changed it
- * will be reflected in the meta bitmap, but we do not yet guarantee the
- * opposite.
- *
- * @hb: The HBitmap to operate on.
- * @chunk_size: How many bits in @hb does one bit in the meta track.
- */
-HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
-
-/* hbitmap_free_meta:
- * Free the meta bitmap of @hb.
+/*
+ * hbitmap_status:
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from
+ * @count: Number of bits to proceed
+ * @pnum: Out-parameter. How many bits have the same value starting from @start
*
- * @hb: The HBitmap whose meta bitmap should be freed.
+ * Returns true if bitmap is dirty at @start, false otherwise.
*/
-void hbitmap_free_meta(HBitmap *hb);
+bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count,
+ int64_t *pnum);
/**
* hbitmap_iter_next:
@@ -357,34 +350,4 @@ void hbitmap_free_meta(HBitmap *hb);
*/
int64_t hbitmap_iter_next(HBitmapIter *hbi);
-/**
- * hbitmap_iter_next_word:
- * @hbi: HBitmapIter to operate on.
- * @p_cur: Location where to store the next non-zero word.
- *
- * Return the index of the next nonzero word that is set in @hbi's
- * associated HBitmap, and set *p_cur to the content of that word
- * (bits before the index that was passed to hbitmap_iter_init are
- * trimmed on the first call). Return -1, and set *p_cur to zero,
- * if all remaining words are zero.
- */
-static inline size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur)
-{
- unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1];
-
- if (cur == 0) {
- cur = hbitmap_iter_skip_words(hbi);
- if (cur == 0) {
- *p_cur = 0;
- return -1;
- }
- }
-
- /* The next call will resume work from the next word. */
- hbi->cur[HBITMAP_LEVELS - 1] = 0;
- *p_cur = cur;
- return hbi->pos;
-}
-
-
#endif
diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h
new file mode 100644
index 0000000000..353ab2ad8b
--- /dev/null
+++ b/include/qemu/help-texts.h
@@ -0,0 +1,13 @@
+#ifndef QEMU_HELP_TEXTS_H
+#define QEMU_HELP_TEXTS_H
+
+/* Copyright string for -version arguments, About dialogs, etc */
+#define QEMU_COPYRIGHT "Copyright (c) 2003-2024 " \
+ "Fabrice Bellard and the QEMU Project developers"
+
+/* Bug reporting information for --help arguments, About dialogs, etc */
+#define QEMU_HELP_BOTTOM \
+ "See <https://qemu.org/contribute/report-a-bug> for how to report bugs.\n" \
+ "More information on the QEMU project at <https://qemu.org>."
+
+#endif
diff --git a/include/qemu/help_option.h b/include/qemu/help_option.h
index 328d2a89fd..ca6389a154 100644
--- a/include/qemu/help_option.h
+++ b/include/qemu/help_option.h
@@ -19,4 +19,15 @@ static inline bool is_help_option(const char *s)
return !strcmp(s, "?") || !strcmp(s, "help");
}
+static inline int starts_with_help_option(const char *s)
+{
+ if (*s == '?') {
+ return 1;
+ }
+ if (g_str_has_prefix(s, "help")) {
+ return 4;
+ }
+ return 0;
+}
+
#endif
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index 4cd170e6cd..ead97d354d 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -23,10 +23,15 @@
* THE SOFTWARE.
*/
+/* Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
#ifndef HOST_UTILS_H
#define HOST_UTILS_H
#include "qemu/bswap.h"
+#include "qemu/int128.h"
#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
@@ -51,43 +56,45 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
return (__int128_t)a * b / c;
}
-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
- if (divisor == 0) {
- return 1;
- } else {
- __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
- __uint128_t result = dividend / divisor;
- *plow = result;
- *phigh = dividend % divisor;
- return result > UINT64_MAX;
- }
+ return ((__int128_t)a * b + c - 1) / c;
}
-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+ uint64_t divisor)
{
- if (divisor == 0) {
- return 1;
- } else {
- __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
- __int128_t result = dividend / divisor;
- *plow = result;
- *phigh = dividend % divisor;
- return result != *plow;
- }
+ __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
+ __uint128_t result = dividend / divisor;
+
+ *plow = result;
+ *phigh = result >> 64;
+ return dividend % divisor;
+}
+
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+ int64_t divisor)
+{
+ __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
+ __int128_t result = dividend / divisor;
+
+ *plow = result;
+ *phigh = result >> 64;
+ return dividend % divisor;
}
#else
-void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b);
-void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
+void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
-static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
+static inline uint64_t muldiv64_rounding(uint64_t a, uint32_t b, uint32_t c,
+ bool round_up)
{
union {
uint64_t ll;
struct {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint32_t high, low;
#else
uint32_t low, high;
@@ -98,15 +105,58 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
u.ll = a;
rl = (uint64_t)u.l.low * (uint64_t)b;
+ if (round_up) {
+ rl += c - 1;
+ }
rh = (uint64_t)u.l.high * (uint64_t)b;
rh += (rl >> 32);
res.l.high = rh / c;
res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
return res.ll;
}
+
+static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
+{
+ return muldiv64_rounding(a, b, c, false);
+}
+
+static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
+{
+ return muldiv64_rounding(a, b, c, true);
+}
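Editor's note: a sketch (not part of the patch) of the classic muldiv64() use case, scaling device ticks to nanoseconds without intermediate overflow; the frequency parameter is illustrative.

    static uint64_t ticks_to_ns(uint64_t ticks, uint32_t freq_hz)
    {
        return muldiv64(ticks, 1000000000u, freq_hz);
    }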
#endif
/**
+ * clz8 - count leading zeros in a 8-bit value.
+ * @val: The value to search
+ *
+ * Returns 8 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ *
+ * Note that the GCC builtin will upcast its argument to an `unsigned int`
+ * so this function subtracts off the number of prepended zeroes.
+ */
+static inline int clz8(uint8_t val)
+{
+ return val ? __builtin_clz(val) - 24 : 8;
+}
+
+/**
+ * clz16 - count leading zeros in a 16-bit value.
+ * @val: The value to search
+ *
+ * Returns 16 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ *
+ * Note that the GCC builtin will upcast its argument to an `unsigned int`
+ * so this function subtracts off the number of prepended zeroes.
+ */
+static inline int clz16(uint16_t val)
+{
+ return val ? __builtin_clz(val) - 16 : 16;
+}
+
+/**
* clz32 - count leading zeros in a 32-bit value.
* @val: The value to search
*
@@ -153,6 +203,30 @@ static inline int clo64(uint64_t val)
}
/**
+ * ctz8 - count trailing zeros in a 8-bit value.
+ * @val: The value to search
+ *
+ * Returns 8 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ */
+static inline int ctz8(uint8_t val)
+{
+ return val ? __builtin_ctz(val) : 8;
+}
+
+/**
+ * ctz16 - count trailing zeros in a 16-bit value.
+ * @val: The value to search
+ *
+ * Returns 16 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ */
+static inline int ctz16(uint16_t val)
+{
+ return val ? __builtin_ctz(val) : 16;
+}
+
+/**
* ctz32 - count trailing zeros in a 32-bit value.
* @val: The value to search
*
@@ -272,6 +346,9 @@ static inline int ctpop64(uint64_t val)
*/
static inline uint8_t revbit8(uint8_t x)
{
+#if __has_builtin(__builtin_bitreverse8)
+ return __builtin_bitreverse8(x);
+#else
/* Assign the correct nibble position. */
x = ((x & 0xf0) >> 4)
| ((x & 0x0f) << 4);
@@ -281,6 +358,7 @@ static inline uint8_t revbit8(uint8_t x)
| ((x & 0x22) << 1)
| ((x & 0x11) << 3);
return x;
+#endif
}
/**
@@ -289,6 +367,9 @@ static inline uint8_t revbit8(uint8_t x)
*/
static inline uint16_t revbit16(uint16_t x)
{
+#if __has_builtin(__builtin_bitreverse16)
+ return __builtin_bitreverse16(x);
+#else
/* Assign the correct byte position. */
x = bswap16(x);
/* Assign the correct nibble position. */
@@ -300,6 +381,7 @@ static inline uint16_t revbit16(uint16_t x)
| ((x & 0x2222) << 1)
| ((x & 0x1111) << 3);
return x;
+#endif
}
/**
@@ -308,6 +390,9 @@ static inline uint16_t revbit16(uint16_t x)
*/
static inline uint32_t revbit32(uint32_t x)
{
+#if __has_builtin(__builtin_bitreverse32)
+ return __builtin_bitreverse32(x);
+#else
/* Assign the correct byte position. */
x = bswap32(x);
/* Assign the correct nibble position. */
@@ -319,6 +404,7 @@ static inline uint32_t revbit32(uint32_t x)
| ((x & 0x22222222u) << 1)
| ((x & 0x11111111u) << 3);
return x;
+#endif
}
/**
@@ -327,6 +413,9 @@ static inline uint32_t revbit32(uint32_t x)
*/
static inline uint64_t revbit64(uint64_t x)
{
+#if __has_builtin(__builtin_bitreverse64)
+ return __builtin_bitreverse64(x);
+#else
/* Assign the correct byte position. */
x = bswap64(x);
/* Assign the correct nibble position. */
@@ -338,6 +427,259 @@ static inline uint64_t revbit64(uint64_t x)
| ((x & 0x2222222222222222ull) << 1)
| ((x & 0x1111111111111111ull) << 3);
return x;
+#endif
+}
+
+/**
+ * Return the absolute value of a 64-bit integer as an unsigned 64-bit value
+ */
+static inline uint64_t uabs64(int64_t v)
+{
+ return v < 0 ? -v : v;
+}
+
+/**
+ * sadd32_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
+
+/**
+ * sadd64_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
+
+/**
+ * uadd32_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
+
+/**
+ * uadd64_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
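Editor's note: a sketch (not part of the patch) of a saturating unsigned add built on the overflow helper; the wrapper name is illustrative.

    static uint64_t sat_add_u64(uint64_t a, uint64_t b)
    {
        uint64_t sum;

        return uadd64_overflow(a, b, &sum) ? UINT64_MAX : sum;
    }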
+
+/**
+ * ssub32_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * ssub64_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * usub32_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * usub64_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * smul32_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
+
+/**
+ * smul64_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
+
+/**
+ * umul32_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
+
+/**
+ * umul64_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
+
+/*
+ * Unsigned 128x64 multiplication.
+ * Returns true if the result was truncated to 128 bits (i.e. it did not fit).
+ * Otherwise, returns false and stores the product via plow and phigh.
+ */
+static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
+{
+#if defined(CONFIG_INT128)
+ bool res;
+ __uint128_t r;
+ __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
+ res = __builtin_mul_overflow(f, factor, &r);
+
+ *plow = r;
+ *phigh = r >> 64;
+
+ return res;
+#else
+ uint64_t dhi = *phigh;
+ uint64_t dlo = *plow;
+ uint64_t ahi;
+ uint64_t blo, bhi;
+
+ if (dhi == 0) {
+ mulu64(plow, phigh, dlo, factor);
+ return false;
+ }
+
+ mulu64(plow, &ahi, dlo, factor);
+ mulu64(&blo, &bhi, dhi, factor);
+
+ return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
+#endif
+}
+
+/**
+ * uadd64_carry - addition with carry-in and carry-out
+ * @x, @y: addends
+ * @pcarry: in-out carry value
+ *
+ * Computes @x + @y + *@pcarry, placing the carry-out back
+ * into *@pcarry and returning the 64-bit sum.
+ */
+static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
+{
+#if __has_builtin(__builtin_addcll)
+ unsigned long long c = *pcarry;
+ x = __builtin_addcll(x, y, c, &c);
+ *pcarry = c & 1;
+ return x;
+#else
+ bool c = *pcarry;
+ /* This is clang's internal expansion of __builtin_addc. */
+ c = uadd64_overflow(x, c, &x);
+ c |= uadd64_overflow(x, y, &x);
+ *pcarry = c;
+ return x;
+#endif
+}
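Editor's note: a sketch (not part of the patch) of chaining uadd64_carry() to add two 256-bit values stored as four little-endian 64-bit limbs; the helper is illustrative.

    static void add256(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
    {
        bool carry = false;

        for (int i = 0; i < 4; i++) {
            r[i] = uadd64_carry(a[i], b[i], &carry);
        }
    }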
+
+/**
+ * usub64_borrow - subtraction with borrow-in and borrow-out
+ * @x, @y: minuend and subtrahend
+ * @pborrow: in-out borrow value
+ *
+ * Computes @x - @y - *@pborrow, placing the borrow-out back
+ * into *@pborrow and returning the 64-bit difference.
+ */
+static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
+{
+#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
+ unsigned long long b = *pborrow;
+ x = __builtin_subcll(x, y, b, &b);
+ *pborrow = b & 1;
+ return x;
+#else
+ bool b = *pborrow;
+ b = usub64_overflow(x, b, &x);
+ b |= usub64_overflow(x, y, &x);
+ *pborrow = b;
+ return x;
+#endif
}
/* Host type specific sizes of these routines. */
@@ -437,4 +779,83 @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
*/
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
+ *
+ * Licensed under the GPLv2/LGPLv3
+ */
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
+ uint64_t n0, uint64_t d)
+{
+#if defined(__x86_64__)
+ uint64_t q;
+ asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
+ return q;
+#elif defined(__s390x__) && !defined(__clang__)
+ /* Need to use a TImode type to get an even register pair for DLGR. */
+ unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
+ asm("dlgr %0, %1" : "+r"(n) : "r"(d));
+ *r = n >> 64;
+ return n;
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
+ /* From Power ISA 2.06, programming note for divdeu. */
+ uint64_t q1, q2, Q, r1, r2, R;
+ asm("divdeu %0,%2,%4; divdu %1,%3,%4"
+ : "=&r"(q1), "=r"(q2)
+ : "r"(n1), "r"(n0), "r"(d));
+ r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
+ r2 = n0 - (q2 * d);
+ Q = q1 + q2;
+ R = r1 + r2;
+ if (R >= d || R < r2) { /* overflow implies R > d */
+ Q += 1;
+ R -= d;
+ }
+ *r = R;
+ return Q;
+#else
+ uint64_t d0, d1, q0, q1, r1, r0, m;
+
+ d0 = (uint32_t)d;
+ d1 = d >> 32;
+
+ r1 = n1 % d1;
+ q1 = n1 / d1;
+ m = q1 * d0;
+ r1 = (r1 << 32) | (n0 >> 32);
+ if (r1 < m) {
+ q1 -= 1;
+ r1 += d;
+ if (r1 >= d) {
+ if (r1 < m) {
+ q1 -= 1;
+ r1 += d;
+ }
+ }
+ }
+ r1 -= m;
+
+ r0 = r1 % d1;
+ q0 = r1 / d1;
+ m = q0 * d0;
+ r0 = (r0 << 32) | (uint32_t)n0;
+ if (r0 < m) {
+ q0 -= 1;
+ r0 += d;
+ if (r0 >= d) {
+ if (r0 < m) {
+ q0 -= 1;
+ r0 += d;
+ }
+ }
+ }
+ r0 -= m;
+
+ *r = r0;
+ return (q1 << 32) | q0;
+#endif
+}
+
+Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
+Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);
#endif
diff --git a/include/qemu/hw-version.h b/include/qemu/hw-version.h
new file mode 100644
index 0000000000..730a8c904d
--- /dev/null
+++ b/include/qemu/hw-version.h
@@ -0,0 +1,27 @@
+/*
+ * QEMU "hardware version" machinery
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_HW_VERSION_H
+#define QEMU_HW_VERSION_H
+
+/*
+ * Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default
+ * instead of QEMU_VERSION, so setting hw_version on MachineClass
+ * is no longer mandatory.
+ *
+ * Do NOT change this string, or it will break compatibility on all
+ * machine classes that don't set hw_version.
+ */
+#define QEMU_HW_VERSION "2.5+"
+
+/* QEMU "hardware version" setting. Used to replace code that exposed
+ * QEMU_VERSION to guests in the past and need to keep compatibility.
+ * Do not use qemu_hw_version() in new code.
+ */
+void qemu_set_hw_version(const char *);
+const char *qemu_hw_version(void);
+
+#endif
diff --git a/include/qemu/id.h b/include/qemu/id.h
index 40c70103e4..46b759b284 100644
--- a/include/qemu/id.h
+++ b/include/qemu/id.h
@@ -4,6 +4,8 @@
typedef enum IdSubSystems {
ID_QDEV,
ID_BLOCK,
+ ID_CHR,
+ ID_NET,
ID_MAX /* last element, used as array size */
} IdSubSystems;
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index 5c9890db8b..174bd7dafb 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -1,16 +1,27 @@
#ifndef INT128_H
#define INT128_H
-#ifdef CONFIG_INT128
#include "qemu/bswap.h"
+/*
+ * With TCI, we need to use libffi for interfacing with TCG helpers.
+ * But libffi does not support __int128_t, and therefore cannot pass
+ * or return values of this type, force use of the Int128 struct.
+ */
+#if defined(CONFIG_INT128) && !defined(CONFIG_TCG_INTERPRETER)
typedef __int128_t Int128;
+typedef __int128_t __attribute__((aligned(16))) Int128Aligned;
static inline Int128 int128_make64(uint64_t a)
{
return a;
}
+static inline Int128 int128_makes64(int64_t a)
+{
+ return a;
+}
+
static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
return (__uint128_t)hi << 64 | lo;
@@ -53,16 +64,41 @@ static inline Int128 int128_exts64(int64_t a)
return a;
}
+static inline Int128 int128_not(Int128 a)
+{
+ return ~a;
+}
+
static inline Int128 int128_and(Int128 a, Int128 b)
{
return a & b;
}
+static inline Int128 int128_or(Int128 a, Int128 b)
+{
+ return a | b;
+}
+
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+ return a ^ b;
+}
+
static inline Int128 int128_rshift(Int128 a, int n)
{
return a >> n;
}
+static inline Int128 int128_urshift(Int128 a, int n)
+{
+ return (__uint128_t)a >> n;
+}
+
+static inline Int128 int128_lshift(Int128 a, int n)
+{
+ return a << n;
+}
+
static inline Int128 int128_add(Int128 a, Int128 b)
{
return a + b;
@@ -98,11 +134,21 @@ static inline bool int128_ge(Int128 a, Int128 b)
return a >= b;
}
+static inline bool int128_uge(Int128 a, Int128 b)
+{
+ return ((__uint128_t)a) >= ((__uint128_t)b);
+}
+
static inline bool int128_lt(Int128 a, Int128 b)
{
return a < b;
}
+static inline bool int128_ult(Int128 a, Int128 b)
+{
+ return (__uint128_t)a < (__uint128_t)b;
+}
+
static inline bool int128_le(Int128 a, Int128 b)
{
return a <= b;
@@ -140,26 +186,78 @@ static inline void int128_subfrom(Int128 *a, Int128 b)
static inline Int128 bswap128(Int128 a)
{
+#if __has_builtin(__builtin_bswap128)
+ return __builtin_bswap128(a);
+#else
return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a)));
+#endif
+}
+
+static inline int clz128(Int128 a)
+{
+ if (a >> 64) {
+ return __builtin_clzll(a >> 64);
+ } else {
+ return (a) ? __builtin_clzll((uint64_t)a) + 64 : 128;
+ }
+}
+
+static inline Int128 int128_divu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a / (__uint128_t)b;
+}
+
+static inline Int128 int128_remu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a % (__uint128_t)b;
+}
+
+static inline Int128 int128_divs(Int128 a, Int128 b)
+{
+ return a / b;
+}
+
+static inline Int128 int128_rems(Int128 a, Int128 b)
+{
+ return a % b;
}
#else /* !CONFIG_INT128 */
typedef struct Int128 Int128;
-
+typedef struct Int128 __attribute__((aligned(16))) Int128Aligned;
+
+/*
+ * We guarantee that the in-memory byte representation of an
+ * Int128 is that of a host-endian-order 128-bit integer
+ * (whether using this struct or the __int128_t version of the type).
+ * Some code using this type relies on this (eg when copying it into
+ * guest memory or a gdb protocol buffer, or by using Int128 in
+ * a union with other integer types).
+ */
struct Int128 {
+#if HOST_BIG_ENDIAN
+ int64_t hi;
+ uint64_t lo;
+#else
uint64_t lo;
int64_t hi;
+#endif
};
static inline Int128 int128_make64(uint64_t a)
{
- return (Int128) { a, 0 };
+ return (Int128) { .lo = a, .hi = 0 };
+}
+
+static inline Int128 int128_makes64(int64_t a)
+{
+ return (Int128) { .lo = a, .hi = a >> 63 };
}
static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
- return (Int128) { lo, hi };
+ return (Int128) { .lo = lo, .hi = hi };
}
static inline uint64_t int128_get64(Int128 a)
@@ -190,17 +288,32 @@ static inline Int128 int128_one(void)
static inline Int128 int128_2_64(void)
{
- return (Int128) { 0, 1 };
+ return int128_make128(0, 1);
}
static inline Int128 int128_exts64(int64_t a)
{
- return (Int128) { .lo = a, .hi = (a < 0) ? -1 : 0 };
+ return int128_make128(a, (a < 0) ? -1 : 0);
+}
+
+static inline Int128 int128_not(Int128 a)
+{
+ return int128_make128(~a.lo, ~a.hi);
}
static inline Int128 int128_and(Int128 a, Int128 b)
{
- return (Int128) { a.lo & b.lo, a.hi & b.hi };
+ return int128_make128(a.lo & b.lo, a.hi & b.hi);
+}
+
+static inline Int128 int128_or(Int128 a, Int128 b)
+{
+ return int128_make128(a.lo | b.lo, a.hi | b.hi);
+}
+
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+ return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
}
static inline Int128 int128_rshift(Int128 a, int n)
@@ -217,6 +330,31 @@ static inline Int128 int128_rshift(Int128 a, int n)
}
}
+static inline Int128 int128_urshift(Int128 a, int n)
+{
+ uint64_t h = a.hi;
+ if (!n) {
+ return a;
+ }
+ h = h >> (n & 63);
+ if (n >= 64) {
+ return int128_make64(h);
+ } else {
+ return int128_make128((a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h);
+ }
+}
+
+static inline Int128 int128_lshift(Int128 a, int n)
+{
+ uint64_t l = a.lo << (n & 63);
+ if (n >= 64) {
+ return int128_make128(0, l);
+ } else if (n > 0) {
+ return int128_make128(l, (a.hi << n) | (a.lo >> (64 - n)));
+ }
+ return a;
+}
+
static inline Int128 int128_add(Int128 a, Int128 b)
{
uint64_t lo = a.lo + b.lo;
@@ -261,11 +399,21 @@ static inline bool int128_ge(Int128 a, Int128 b)
return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo);
}
+static inline bool int128_uge(Int128 a, Int128 b)
+{
+ return (uint64_t)a.hi > (uint64_t)b.hi || (a.hi == b.hi && a.lo >= b.lo);
+}
+
static inline bool int128_lt(Int128 a, Int128 b)
{
return !int128_ge(a, b);
}
+static inline bool int128_ult(Int128 a, Int128 b)
+{
+ return !int128_uge(a, b);
+}
+
static inline bool int128_le(Int128 a, Int128 b)
{
return int128_ge(b, a);
@@ -301,5 +449,48 @@ static inline void int128_subfrom(Int128 *a, Int128 b)
*a = int128_sub(*a, b);
}
-#endif /* CONFIG_INT128 */
+static inline Int128 bswap128(Int128 a)
+{
+ return int128_make128(bswap64(a.hi), bswap64(a.lo));
+}
+
+static inline int clz128(Int128 a)
+{
+ if (a.hi) {
+ return __builtin_clzll(a.hi);
+ } else {
+ return (a.lo) ? __builtin_clzll(a.lo) + 64 : 128;
+ }
+}
+
+Int128 int128_divu(Int128, Int128);
+Int128 int128_remu(Int128, Int128);
+Int128 int128_divs(Int128, Int128);
+Int128 int128_rems(Int128, Int128);
+#endif /* CONFIG_INT128 && !CONFIG_TCG_INTERPRETER */
+
+static inline void bswap128s(Int128 *s)
+{
+ *s = bswap128(*s);
+}
+
+#define UINT128_MAX int128_make128(~0LL, ~0LL)
+#define INT128_MAX int128_make128(UINT64_MAX, INT64_MAX)
+#define INT128_MIN int128_make128(0, INT64_MIN)
+
+/*
+ * When the compiler supports a 128-bit type, define a combination of
+ * the possible struct and the native types, easing parameter passing
+ * via the transparent union extension.
+ */
+#ifdef CONFIG_INT128_TYPE
+typedef union {
+ __uint128_t u;
+ __int128_t i;
+ Int128 s;
+} Int128Alias __attribute__((transparent_union));
+#else
+typedef Int128 Int128Alias;
+#endif /* CONFIG_INT128_TYPE */
+
#endif /* INT128_H */
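
Not part of the patch: a minimal usage sketch of the Int128 helpers added above. It only uses functions visible in this hunk and assumes, as is usual for QEMU sources, that "qemu/osdep.h" is included first.

#include "qemu/osdep.h"
#include "qemu/int128.h"

/*
 * Compute a + (a << 4) from two 64-bit halves and report how many leading
 * zero bits the result has. The same code compiles for both the
 * __int128_t-based and the struct-based variants of Int128.
 */
static int int128_demo(uint64_t lo, uint64_t hi)
{
    Int128 a = int128_make128(lo, hi);          /* build from low/high halves */
    Int128 b = int128_add(a, int128_lshift(a, 4));

    return clz128(b);                           /* 0..128 leading zero bits */
}
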
diff --git a/include/qemu/interval-tree.h b/include/qemu/interval-tree.h
new file mode 100644
index 0000000000..25006debe8
--- /dev/null
+++ b/include/qemu/interval-tree.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Interval trees.
+ *
+ * Derived from include/linux/interval_tree.h and its dependencies.
+ */
+
+#ifndef QEMU_INTERVAL_TREE_H
+#define QEMU_INTERVAL_TREE_H
+
+/*
+ * For now, don't expose Linux Red-Black Trees separately, but retain the
+ * separate type definitions to keep the implementation sane, and allow
+ * the possibility of disentangling them later.
+ */
+typedef struct RBNode
+{
+ /* Encodes parent with color in the lsb. */
+ uintptr_t rb_parent_color;
+ struct RBNode *rb_right;
+ struct RBNode *rb_left;
+} RBNode;
+
+typedef struct RBRoot
+{
+ RBNode *rb_node;
+} RBRoot;
+
+typedef struct RBRootLeftCached {
+ RBRoot rb_root;
+ RBNode *rb_leftmost;
+} RBRootLeftCached;
+
+typedef struct IntervalTreeNode
+{
+ RBNode rb;
+
+ uint64_t start; /* Start of interval */
+ uint64_t last; /* Last location _in_ interval */
+ uint64_t subtree_last;
+} IntervalTreeNode;
+
+typedef RBRootLeftCached IntervalTreeRoot;
+
+/**
+ * interval_tree_is_empty
+ * @root: root of the tree.
+ *
+ * Returns true if the tree contains no nodes.
+ */
+static inline bool interval_tree_is_empty(const IntervalTreeRoot *root)
+{
+ return root->rb_root.rb_node == NULL;
+}
+
+/**
+ * interval_tree_insert
+ * @node: node to insert,
+ * @root: root of the tree.
+ *
+ * Insert @node into @root, and rebalance.
+ */
+void interval_tree_insert(IntervalTreeNode *node, IntervalTreeRoot *root);
+
+/**
+ * interval_tree_remove
+ * @node: node to remove,
+ * @root: root of the tree.
+ *
+ * Remove @node from @root, and rebalance.
+ */
+void interval_tree_remove(IntervalTreeNode *node, IntervalTreeRoot *root);
+
+/**
+ * interval_tree_iter_first:
+ * @root: root of the tree,
+ * @start, @last: the inclusive interval [start, last].
+ *
+ * Locate the "first" of a set of nodes within the tree at @root
+ * that overlap the interval, where "first" is sorted by start.
+ * Returns NULL if no overlap found.
+ */
+IntervalTreeNode *interval_tree_iter_first(IntervalTreeRoot *root,
+ uint64_t start, uint64_t last);
+
+/**
+ * interval_tree_iter_next:
+ * @node: previous search result
+ * @start, @last: the inclusive interval [start, last].
+ *
+ * Locate the "next" of a set of nodes within the tree that overlap the
+ * interval; @node is the result of a previous call to
+ * interval_tree_iter_{first,next}. Returns NULL if @node was the last
+ * node in the set.
+ */
+IntervalTreeNode *interval_tree_iter_next(IntervalTreeNode *node,
+ uint64_t start, uint64_t last);
+
+#endif /* QEMU_INTERVAL_TREE_H */
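
Not part of the patch: a minimal sketch of how the new interval-tree API is meant to be used, assuming the matching implementation file from the same series is linked in and that "qemu/osdep.h" comes first; g_new0() is GLib.

#include "qemu/osdep.h"
#include "qemu/interval-tree.h"

static void track_range(IntervalTreeRoot *root, uint64_t start, uint64_t last)
{
    IntervalTreeNode *n = g_new0(IntervalTreeNode, 1);

    n->start = start;               /* inclusive start of the interval */
    n->last = last;                 /* inclusive last location */
    interval_tree_insert(n, root);  /* insert and rebalance */
}

static int count_overlaps(IntervalTreeRoot *root, uint64_t start, uint64_t last)
{
    int count = 0;

    /* Walk every node whose [start, last] range overlaps the query range. */
    for (IntervalTreeNode *n = interval_tree_iter_first(root, start, last);
         n != NULL;
         n = interval_tree_iter_next(n, start, last)) {
        count++;
    }
    return count;
}
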
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
index 5f433c7768..63a1c01965 100644
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -130,22 +130,111 @@ size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
size_t bytes);
+/* Information needed to undo an iov_discard_*() operation */
+typedef struct {
+ struct iovec *modified_iov;
+ struct iovec orig;
+} IOVDiscardUndo;
+
+/*
+ * Undo an iov_discard_front_undoable() or iov_discard_back_undoable()
+ * operation. If multiple operations are made then each one needs a separate
+ * IOVDiscardUndo and iov_discard_undo() must be called in the reverse order
+ * that the operations were made.
+ */
+void iov_discard_undo(IOVDiscardUndo *undo);
+
+/*
+ * Undoable versions of iov_discard_front() and iov_discard_back(). Use
+ * iov_discard_undo() to reset to the state before the discard operations.
+ */
+size_t iov_discard_front_undoable(struct iovec **iov, unsigned int *iov_cnt,
+ size_t bytes, IOVDiscardUndo *undo);
+size_t iov_discard_back_undoable(struct iovec *iov, unsigned int *iov_cnt,
+ size_t bytes, IOVDiscardUndo *undo);
+
typedef struct QEMUIOVector {
struct iovec *iov;
int niov;
- int nalloc;
- size_t size;
+
+ /*
+ * For external @iov (qemu_iovec_init_external()) or allocated @iov
+ * (qemu_iovec_init()), @size is the cumulative size of iovecs and
+ * @local_iov is invalid and unused.
+ *
+ * For embedded @iov (QEMU_IOVEC_INIT_BUF() or qemu_iovec_init_buf()),
+ * @iov is equal to &@local_iov, and @size is valid, as it has the same
+ * offset and type as @local_iov.iov_len, which is guaranteed by
+ * static assertion below.
+ *
+ * @nalloc is always valid and is -1 both for embedded and external
+ * cases. It is included in the union only to ensure the padding prior
+ * to the @size field will not result in a 0-length array.
+ */
+ union {
+ struct {
+ int nalloc;
+ struct iovec local_iov;
+ };
+ struct {
+ char __pad[sizeof(int) + offsetof(struct iovec, iov_len)];
+ size_t size;
+ };
+ };
} QEMUIOVector;
+QEMU_BUILD_BUG_ON(offsetof(QEMUIOVector, size) !=
+ offsetof(QEMUIOVector, local_iov.iov_len));
+
+#define QEMU_IOVEC_INIT_BUF(self, buf, len) \
+{ \
+ .iov = &(self).local_iov, \
+ .niov = 1, \
+ .nalloc = -1, \
+ .local_iov = { \
+ .iov_base = (void *)(buf), /* cast away const */ \
+ .iov_len = (len), \
+ }, \
+}
+
+/*
+ * qemu_iovec_init_buf
+ *
+ * Initialize embedded QEMUIOVector.
+ *
+ * Note: "const" is used over @buf pointer to make it simple to pass
+ * const pointers, appearing in read functions. Then this "const" is
+ * cast away by QEMU_IOVEC_INIT_BUF().
+ */
+static inline void qemu_iovec_init_buf(QEMUIOVector *qiov,
+ const void *buf, size_t len)
+{
+ *qiov = (QEMUIOVector) QEMU_IOVEC_INIT_BUF(*qiov, buf, len);
+}
+
+static inline void *qemu_iovec_buf(QEMUIOVector *qiov)
+{
+ /* Only supports embedded iov */
+ assert(qiov->nalloc == -1 && qiov->iov == &qiov->local_iov);
+
+ return qiov->local_iov.iov_base;
+}
+
void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
+void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source,
+ size_t offset, size_t len);
+struct iovec *qemu_iovec_slice(QEMUIOVector *qiov,
+ size_t offset, size_t len,
+ size_t *head, size_t *tail, int *niov);
+int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len);
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
void qemu_iovec_concat(QEMUIOVector *dst,
QEMUIOVector *src, size_t soffset, size_t sbytes);
size_t qemu_iovec_concat_iov(QEMUIOVector *dst,
struct iovec *src_iov, unsigned int src_cnt,
size_t soffset, size_t sbytes);
-bool qemu_iovec_is_zero(QEMUIOVector *qiov);
+bool qemu_iovec_is_zero(QEMUIOVector *qiov, size_t qiov_offset, size_t bytes);
void qemu_iovec_destroy(QEMUIOVector *qiov);
void qemu_iovec_reset(QEMUIOVector *qiov);
size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
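
Not part of the patch: a short sketch of the embedded-iovec and undoable-discard additions above. The buffer and the 512-byte discard are illustrative; "qemu/osdep.h" is assumed first, as usual.

#include "qemu/osdep.h"
#include "qemu/iov.h"

/* A one-element QEMUIOVector embedded in the struct itself: no heap
 * allocation is needed for the iovec array. */
static void use_embedded_qiov(void *buf, size_t len)
{
    QEMUIOVector qiov;

    qemu_iovec_init_buf(&qiov, buf, len);
    assert(qemu_iovec_buf(&qiov) == buf);   /* only valid for embedded qiovs */
}

/* Temporarily drop bytes from the front of an iovec, then restore it. */
static void use_undoable_discard(struct iovec *iov, unsigned int *iov_cnt)
{
    IOVDiscardUndo undo;

    iov_discard_front_undoable(&iov, iov_cnt, 512, &undo);
    /* ... submit the shortened request here ... */
    iov_discard_undo(&undo);                /* undo in reverse order if nested */
}
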
diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h
index b66cf93c4b..2a10a7052e 100644
--- a/include/qemu/iova-tree.h
+++ b/include/qemu/iova-tree.h
@@ -15,7 +15,7 @@
* Currently the iova tree will only allow to keep ranges
* information, and no extra user data is allowed for each element. A
* benefit is that we can merge adjacent ranges internally within the
- * tree. It can save a lot of memory when the ranges are splitted but
+ * tree. It can save a lot of memory when the ranges are split but
* mostly continuous.
*
* Note that current implementation does not provide any thread
@@ -29,6 +29,7 @@
#define IOVA_OK (0)
#define IOVA_ERR_INVALID (-1) /* Invalid parameters */
#define IOVA_ERR_OVERLAP (-2) /* IOVA range overlapped */
+#define IOVA_ERR_NOMEM (-3) /* Cannot allocate */
typedef struct IOVATree IOVATree;
typedef struct DMAMap {
@@ -59,7 +60,7 @@ IOVATree *iova_tree_new(void);
*
* Return: 0 if succeeded, or <0 if error.
*/
-int iova_tree_insert(IOVATree *tree, DMAMap *map);
+int iova_tree_insert(IOVATree *tree, const DMAMap *map);
/**
* iova_tree_remove:
@@ -71,10 +72,8 @@ int iova_tree_insert(IOVATree *tree, DMAMap *map);
* provided. The range does not need to be exactly what has inserted,
* all the mappings that are included in the provided range will be
* removed from the tree. Here map->translated_addr is meaningless.
- *
- * Return: 0 if succeeded, or <0 if error.
*/
-int iova_tree_remove(IOVATree *tree, DMAMap *map);
+void iova_tree_remove(IOVATree *tree, DMAMap map);
/**
* iova_tree_find:
@@ -82,7 +81,7 @@ int iova_tree_remove(IOVATree *tree, DMAMap *map);
* @tree: the iova tree to search from
* @map: the mapping to search
*
- * Search for a mapping in the iova tree that overlaps with the
+ * Search for a mapping in the iova tree whose iova range overlaps with the
* mapping range specified. Only the first found mapping will be
* returned.
*
@@ -92,7 +91,25 @@ int iova_tree_remove(IOVATree *tree, DMAMap *map);
* user is responsible to make sure the pointer is valid (say, no
* concurrent deletion in progress).
*/
-DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map);
+const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map);
+
+/**
+ * iova_tree_find_iova:
+ *
+ * @tree: the iova tree to search from
+ * @map: the mapping to search
+ *
+ * Search for a mapping in the iova tree whose translated_addr overlaps with the
+ * mapping range specified. Only the first found mapping will be
+ * returned.
+ *
+ * Return: DMAMap pointer if found, or NULL if not found. Note that
+ * the returned DMAMap pointer is maintained internally. User should
+ * only read the content but never modify or free the content. Also,
+ * user is responsible to make sure the pointer is valid (say, no
+ * concurrent deletion in progress).
+ */
+const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map);
/**
* iova_tree_find_address:
@@ -105,13 +122,13 @@ DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map);
*
* Return: same as iova_tree_find().
*/
-DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova);
+const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova);
/**
* iova_tree_foreach:
*
* @tree: the iova tree to iterate on
- * @iterator: the interator for the mappings, return true to stop
+ * @iterator: the iterator for the mappings, return true to stop
*
* Iterate over the iova tree.
*
@@ -120,6 +137,23 @@ DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova);
void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator);
/**
+ * iova_tree_alloc_map:
+ *
+ * @tree: the iova tree to allocate from
+ * @map: the new map (as translated addr & size) to allocate in the iova region
+ * @iova_begin: the minimum address of the allocation
+ * @iova_end: the maximum address of the allocation
+ *
+ * Allocates a new region of the requested size between @iova_begin and @iova_end.
+ *
+ * Return: same as iova_tree_insert(), but the allocated region cannot overlap
+ * an existing one, and an error is returned if the tree has no free contiguous
+ * range large enough. The caller gets the assigned iova in map->iova.
+ */
+int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin,
+ hwaddr iova_end);
+
+/**
* iova_tree_destroy:
*
* @tree: the iova tree to destroy
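
Not part of the patch: a sketch of the constified iova-tree API. The DMAMap field names (iova, translated_addr, size) follow the comments in this header; the full struct definition is not visible in this hunk, so treat the initializer as an assumption.

#include "qemu/osdep.h"
#include "qemu/iova-tree.h"

static int map_one_range(IOVATree *tree, hwaddr iova, hwaddr taddr, hwaddr size)
{
    DMAMap map = {
        .iova = iova,
        .translated_addr = taddr,
        .size = size,           /* see the header for the exact size convention */
    };

    int ret = iova_tree_insert(tree, &map);     /* now takes const DMAMap * */
    if (ret != IOVA_OK) {
        return ret;                             /* e.g. IOVA_ERR_OVERLAP */
    }

    /* The returned pointer is owned by the tree: read-only for the caller. */
    const DMAMap *found = iova_tree_find_address(tree, iova);
    assert(found && found->translated_addr == taddr);

    iova_tree_remove(tree, map);                /* by value, cannot fail now */
    return IOVA_OK;
}
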
diff --git a/include/qemu/jhash.h b/include/qemu/jhash.h
index 7222242615..84d14dc7f7 100644
--- a/include/qemu/jhash.h
+++ b/include/qemu/jhash.h
@@ -21,8 +21,8 @@
* Jozsef
*/
-#ifndef QEMU_JHASH_H__
-#define QEMU_JHASH_H__
+#ifndef QEMU_JHASH_H
+#define QEMU_JHASH_H
#include "qemu/bitops.h"
@@ -56,4 +56,4 @@
/* An arbitrary initial parameter */
#define JHASH_INITVAL 0xdeadbeef
-#endif /* QEMU_JHASH_H__ */
+#endif /* QEMU_JHASH_H */
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 9e7cd1e4a0..2b873f2576 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -26,8 +26,9 @@
#ifndef JOB_H
#define JOB_H
-#include "qapi/qapi-types-block-core.h"
+#include "qapi/qapi-types-job.h"
#include "qemu/queue.h"
+#include "qemu/progress_meter.h"
#include "qemu/coroutine.h"
#include "block/aio.h"
@@ -39,27 +40,60 @@ typedef struct JobTxn JobTxn;
* Long-running operation.
*/
typedef struct Job {
+
+ /* Fields set at initialization (job_create), and never modified */
+
/** The ID of the job. May be NULL for internal jobs. */
char *id;
- /** The type of this job. */
+ /**
+ * The type of this job.
+ * All callbacks are called with job_mutex *not* held.
+ */
const JobDriver *driver;
- /** Reference count of the block job */
- int refcnt;
-
- /** Current state; See @JobStatus for details. */
- JobStatus status;
-
- /** AioContext to run the job coroutine in */
- AioContext *aio_context;
-
/**
* The coroutine that executes the job. If not NULL, it is reentered when
* busy is false and the job is cancelled.
+ * Initialized in job_start()
*/
Coroutine *co;
+ /** True if this job should automatically finalize itself */
+ bool auto_finalize;
+
+ /** True if this job should automatically dismiss itself */
+ bool auto_dismiss;
+
+ /**
+ * The completion function that will be called when the job completes.
+ */
+ BlockCompletionFunc *cb;
+
+ /** The opaque value that is passed to the completion function. */
+ void *opaque;
+
+ /* ProgressMeter API is thread-safe */
+ ProgressMeter progress;
+
+ /**
+ * AioContext to run the job coroutine in.
+ * The job AioContext can be read when holding *either*
+ * the BQL (so we are in the main loop) or the job_mutex.
+ * It can only be written when we hold *both* BQL
+ * and the job_mutex.
+ */
+ AioContext *aio_context;
+
+
+ /** Protected by job_mutex */
+
+ /** Reference count of the block job */
+ int refcnt;
+
+ /** Current state; See @JobStatus for details. */
+ JobStatus status;
+
/**
* Timer that is used by @job_sleep_ns. Accessed under job_mutex (in
* job.c).
@@ -75,7 +109,7 @@ typedef struct Job {
/**
* Set to false by the job while the coroutine has yielded and may be
* re-entered by job_enter(). There may still be I/O or event loop activity
- * pending. Accessed under block_job_mutex (in blockjob.c).
+ * pending. Accessed under job_mutex.
*
* When the job is deferred to the main loop, busy is true as long as the
* bottom half is still pending.
@@ -111,22 +145,6 @@ typedef struct Job {
/** Set to true when the job has deferred work to the main loop. */
bool deferred_to_main_loop;
- /** True if this job should automatically finalize itself */
- bool auto_finalize;
-
- /** True if this job should automatically dismiss itself */
- bool auto_dismiss;
-
- /**
- * Current progress. The unit is arbitrary as long as the ratio between
- * progress_current and progress_total represents the estimated percentage
- * of work already done.
- */
- int64_t progress_current;
-
- /** Estimated progress_current value at the completion of the job */
- int64_t progress_total;
-
/**
* Return code from @run and/or @prepare callback(s).
* Not final until the job has reached the CONCLUDED status.
@@ -141,12 +159,6 @@ typedef struct Job {
*/
Error *err;
- /** The completion function that will be called when the job completes. */
- BlockCompletionFunc *cb;
-
- /** The opaque value that is passed to the completion function. */
- void *opaque;
-
/** Notifiers called when a cancelled job is finalised */
NotifierList on_finalize_cancelled;
@@ -174,8 +186,15 @@ typedef struct Job {
/**
* Callbacks and other information about a Job driver.
+ * All callbacks are invoked with job_mutex *not* held.
*/
struct JobDriver {
+
+ /*
+ * These fields are initialized when this object is created,
+ * and are never changed afterwards
+ */
+
/** Derived Job struct size */
size_t instance_size;
@@ -191,9 +210,18 @@ struct JobDriver {
* aborted. If it returns zero, the job moves into the WAITING state. If it
* is the last job to complete in its transaction, all jobs in the
* transaction move from WAITING to PENDING.
+ *
+ * This callback must be run in the job's context.
*/
int coroutine_fn (*run)(Job *job, Error **errp);
+ /*
+ * Functions that run without regard to the BQL and may run in any
+ * arbitrary thread. These functions do not need to be thread-safe
+ * because the caller ensures that they are invoked from one
+ * thread at a time.
+ */
+
/**
* If the callback is not NULL, it will be invoked when the job transitions
* into the paused state. Paused jobs must not perform any asynchronous
@@ -208,6 +236,13 @@ struct JobDriver {
*/
void coroutine_fn (*resume)(Job *job);
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
/**
* Called when the job is resumed by the user (i.e. user_paused becomes
* false). .user_resume is called before .resume.
@@ -220,13 +255,6 @@ struct JobDriver {
*/
void (*complete)(Job *job, Error **errp);
- /*
- * If the callback is not NULL, it will be invoked when the job has to be
- * synchronously cancelled or completed; it should drain any activities
- * as required to ensure progress.
- */
- void (*drain)(Job *job);
-
/**
* If the callback is not NULL, prepare will be invoked when all the jobs
* belonging to the same transaction complete; or upon this job's completion
@@ -265,8 +293,24 @@ struct JobDriver {
*/
void (*clean)(Job *job);
+ /**
+ * If the callback is not NULL, it will be invoked in job_cancel_async
+ *
+ * This function must return true if the job will be cancelled
+ * immediately without any further I/O (mandatory if @force is
+ * true), and false otherwise. This lets the generic job layer
+ * know whether a job has been truly (force-)cancelled, or whether
+ * it is just in a special completion mode (like mirror after
+ * READY).
+ * (If the callback is NULL, the job is assumed to terminate
+ * without I/O.)
+ */
+ bool (*cancel)(Job *job, bool force);
+
- /** Called when the job is freed */
+ /**
+ * Called when the job is freed.
+ */
void (*free)(Job *job);
};
@@ -281,6 +325,30 @@ typedef enum JobCreateFlags {
JOB_MANUAL_DISMISS = 0x04,
} JobCreateFlags;
+extern QemuMutex job_mutex;
+
+#define JOB_LOCK_GUARD() QEMU_LOCK_GUARD(&job_mutex)
+
+#define WITH_JOB_LOCK_GUARD() WITH_QEMU_LOCK_GUARD(&job_mutex)
+
+/**
+ * job_lock:
+ *
+ * Take the mutex protecting the list of jobs and their status.
+ * Most functions called by the monitor need to call job_lock
+ * and job_unlock manually. On the other hand, functions called
+ * by the block jobs themselves and by the block layer take the
+ * lock for you.
+ */
+void job_lock(void);
+
+/**
+ * job_unlock:
+ *
+ * Release the mutex protecting the list of jobs and their status.
+ */
+void job_unlock(void);
+
/**
* Allocate and return a new job transaction. Jobs can be added to the
* transaction using job_txn_add_job().
@@ -297,23 +365,20 @@ JobTxn *job_txn_new(void);
/**
* Release a reference that was previously acquired with job_txn_add_job or
* job_txn_new. If it's the last reference to the object, it will be freed.
+ *
+ * Called with job lock *not* held.
*/
void job_txn_unref(JobTxn *txn);
-/**
- * @txn: The transaction (may be NULL)
- * @job: Job to add to the transaction
- *
- * Add @job to the transaction. The @job must not already be in a transaction.
- * The caller must call either job_txn_unref() or job_completed() to release
- * the reference that is automatically grabbed here.
- *
- * If @txn is NULL, the function does nothing.
+/*
+ * Same as job_txn_unref(), but called with job lock held.
+ * Might release the lock temporarily.
*/
-void job_txn_add_job(JobTxn *txn, Job *job);
+void job_txn_unref_locked(JobTxn *txn);
/**
* Create a new long-running job and return it.
+ * Called with job_mutex *not* held.
*
* @job_id: The id of the newly-created job, or %NULL for internal jobs
* @driver: The class object for the newly-created job.
@@ -331,20 +396,26 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
/**
* Add a reference to Job refcnt, it will be decreased with job_unref, and then
* be freed if it comes to be the last reference.
+ *
+ * Called with job lock held.
*/
-void job_ref(Job *job);
+void job_ref_locked(Job *job);
/**
- * Release a reference that was previously acquired with job_ref() or
+ * Release a reference that was previously acquired with job_ref_locked() or
* job_create(). If it's the last reference to the object, it will be freed.
+ *
+ * Called with job lock held.
*/
-void job_unref(Job *job);
+void job_unref_locked(Job *job);
/**
* @job: The job that has made progress
* @done: How much progress the job made since the last call
*
* Updates the progress counter of the job.
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_update(Job *job, uint64_t done);
@@ -355,6 +426,8 @@ void job_progress_update(Job *job, uint64_t done);
*
* Sets the expected end value of the progress counter of a job so that a
* completion percentage can be calculated when the progress is updated.
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_set_remaining(Job *job, uint64_t remaining);
@@ -370,27 +443,27 @@ void job_progress_set_remaining(Job *job, uint64_t remaining);
* length before, and job_progress_update() afterwards.
* (So the operation acts as a parenthesis in regards to the main job
* operation running in background.)
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_increase_remaining(Job *job, uint64_t delta);
-/** To be called when a cancelled job is finalised. */
-void job_event_cancelled(Job *job);
-
-/** To be called when a successfully completed job is finalised. */
-void job_event_completed(Job *job);
-
/**
* Conditionally enter the job coroutine if the job is ready to run, not
* already busy and fn() returns true. fn() is called while under the job_lock
* critical section.
+ *
+ * Called with job lock held, but might release it temporarily.
*/
-void job_enter_cond(Job *job, bool(*fn)(Job *job));
+void job_enter_cond_locked(Job *job, bool(*fn)(Job *job));
/**
* @job: A job that has not yet been started.
*
* Begins execution of a job.
* Takes ownership of one reference to the job object.
+ *
+ * Called with job_mutex *not* held.
*/
void job_start(Job *job);
@@ -398,6 +471,7 @@ void job_start(Job *job);
* @job: The job to enter.
*
* Continue the specified job by entering the coroutine.
+ * Called with job_mutex *not* held.
*/
void job_enter(Job *job);
@@ -406,15 +480,18 @@ void job_enter(Job *job);
*
* Pause now if job_pause() has been called. Jobs that perform lots of I/O
* must call this between requests so that the job can be paused.
+ *
+ * Called with job_mutex *not* held.
*/
-void coroutine_fn job_pause_point(Job *job);
+void coroutine_fn GRAPH_UNLOCKED job_pause_point(Job *job);
/**
* @job: The job that calls the function.
*
* Yield the job coroutine.
+ * Called with job_mutex *not* held.
*/
-void job_yield(Job *job);
+void coroutine_fn job_yield(Job *job);
/**
* @job: The job that calls the function.
@@ -423,10 +500,11 @@ void job_yield(Job *job);
* Put the job to sleep (assuming that it wasn't canceled) for @ns
* %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
* interrupt the wait.
+ *
+ * Called with job_mutex *not* held.
*/
void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
-
/** Returns the JobType of a given Job. */
JobType job_type(const Job *job);
@@ -436,108 +514,165 @@ const char *job_type_str(const Job *job);
/** Returns true if the job should not be visible to the management layer. */
bool job_is_internal(Job *job);
-/** Returns whether the job is scheduled for cancellation. */
+/**
+ * Returns whether the job is being cancelled.
+ * Called with job_mutex *not* held.
+ */
bool job_is_cancelled(Job *job);
-/** Returns whether the job is in a completed state. */
-bool job_is_completed(Job *job);
+/* Same as job_is_cancelled(), but called with job lock held. */
+bool job_is_cancelled_locked(Job *job);
+
+/**
+ * Returns whether the job is scheduled for cancellation (at an
+ * indefinite point).
+ * Called with job_mutex *not* held.
+ */
+bool job_cancel_requested(Job *job);
-/** Returns whether the job is ready to be completed. */
+/**
+ * Returns whether the job is in a completed state.
+ * Called with job lock held.
+ */
+bool job_is_completed_locked(Job *job);
+
+/**
+ * Returns whether the job is ready to be completed.
+ * Called with job_mutex *not* held.
+ */
bool job_is_ready(Job *job);
+/* Same as job_is_ready(), but called with job lock held. */
+bool job_is_ready_locked(Job *job);
+
/**
* Request @job to pause at the next pause point. Must be paired with
* job_resume(). If the job is supposed to be resumed by user action, call
- * job_user_pause() instead.
+ * job_user_pause_locked() instead.
+ *
+ * Called with job lock *not* held.
*/
void job_pause(Job *job);
-/** Resumes a @job paused with job_pause. */
+/* Same as job_pause(), but called with job lock held. */
+void job_pause_locked(Job *job);
+
+/** Resumes a @job paused with job_pause. Called with job lock *not* held. */
void job_resume(Job *job);
+/*
+ * Same as job_resume(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_resume_locked(Job *job);
+
/**
* Asynchronously pause the specified @job.
* Do not allow a resume until a matching call to job_user_resume.
+ * Called with job lock held.
*/
-void job_user_pause(Job *job, Error **errp);
-
-/** Returns true if the job is user-paused. */
-bool job_user_paused(Job *job);
+void job_user_pause_locked(Job *job, Error **errp);
/**
- * Resume the specified @job.
- * Must be paired with a preceding job_user_pause.
+ * Returns true if the job is user-paused.
+ * Called with job lock held.
*/
-void job_user_resume(Job *job, Error **errp);
+bool job_user_paused_locked(Job *job);
-/*
- * Drain any activities as required to ensure progress. This can be called in a
- * loop to synchronously complete a job.
+/**
+ * Resume the specified @job.
+ * Must be paired with a preceding job_user_pause_locked.
+ * Called with job lock held, but might release it temporarily.
*/
-void job_drain(Job *job);
+void job_user_resume_locked(Job *job, Error **errp);
/**
* Get the next element from the list of block jobs after @job, or the
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
+ * Called with job lock *not* held.
*/
Job *job_next(Job *job);
+/* Same as job_next(), but called with job lock held. */
+Job *job_next_locked(Job *job);
+
/**
* Get the job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
+ * Called with job lock held.
*/
-Job *job_get(const char *id);
+Job *job_get_locked(const char *id);
/**
* Check whether the verb @verb can be applied to @job in its current state.
* Returns 0 if the verb can be applied; otherwise errp is set and -EPERM
* returned.
+ *
+ * Called with job lock held.
*/
-int job_apply_verb(Job *job, JobVerb verb, Error **errp);
+int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp);
-/** The @job could not be started, free it. */
+/**
+ * The @job could not be started, free it.
+ * Called with job_mutex *not* held.
+ */
void job_early_fail(Job *job);
-/** Moves the @job from RUNNING to READY */
+/**
+ * Moves the @job from RUNNING to READY.
+ * Called with job_mutex *not* held.
+ */
void job_transition_to_ready(Job *job);
-/** Asynchronously complete the specified @job. */
-void job_complete(Job *job, Error **errp);
+/**
+ * Asynchronously complete the specified @job.
+ * Called with job lock held, but might release it temporarily.
+ */
+void job_complete_locked(Job *job, Error **errp);
/**
* Asynchronously cancel the specified @job. If @force is true, the job should
* be cancelled immediately without waiting for a consistent state.
+ * Called with job lock held.
*/
-void job_cancel(Job *job, bool force);
+void job_cancel_locked(Job *job, bool force);
/**
- * Cancels the specified job like job_cancel(), but may refuse to do so if the
- * operation isn't meaningful in the current state of the job.
+ * Cancels the specified job like job_cancel_locked(), but may refuse
+ * to do so if the operation isn't meaningful in the current state of the job.
+ * Called with job lock held.
*/
-void job_user_cancel(Job *job, bool force, Error **errp);
+void job_user_cancel_locked(Job *job, bool force, Error **errp);
/**
* Synchronously cancel the @job. The completion callback is called
- * before the function returns. The job may actually complete
- * instead of canceling itself; the circumstances under which this
- * happens depend on the kind of job that is active.
+ * before the function returns. If @force is false, the job may
+ * actually complete instead of canceling itself; the circumstances
+ * under which this happens depend on the kind of job that is active.
*
* Returns the return value from the job if the job actually completed
* during the call, or -ECANCELED if it was canceled.
*
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock *not* held.
*/
-int job_cancel_sync(Job *job);
+int job_cancel_sync(Job *job, bool force);
+
+/* Same as job_cancel_sync, but called with job lock held. */
+int job_cancel_sync_locked(Job *job, bool force);
-/** Synchronously cancels all jobs using job_cancel_sync(). */
+/**
+ * Synchronously force-cancels all jobs using job_cancel_sync_locked().
+ *
+ * Called with job_lock *not* held.
+ */
void job_cancel_sync_all(void);
/**
* @job: The job to be completed.
- * @errp: Error object which may be set by job_complete(); this is not
+ * @errp: Error object which may be set by job_complete_locked(); this is not
* necessarily set on every error, the job return value has to be
* checked as well.
*
@@ -546,10 +681,9 @@ void job_cancel_sync_all(void);
* function).
*
* Returns the return value from the job.
- *
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock held.
*/
-int job_complete_sync(Job *job, Error **errp);
+int job_complete_sync_locked(Job *job, Error **errp);
/**
* For a @job that has finished its work and is pending awaiting explicit
@@ -558,14 +692,18 @@ int job_complete_sync(Job *job, Error **errp);
* FIXME: Make the below statement universally true:
* For jobs that support the manual workflow mode, all graph changes that occur
* as a result will occur after this command and before a successful reply.
+ *
+ * Called with job lock held.
*/
-void job_finalize(Job *job, Error **errp);
+void job_finalize_locked(Job *job, Error **errp);
/**
* Remove the concluded @job from the query list and resets the passed pointer
* to %NULL. Returns an error if the job is not actually concluded.
+ *
+ * Called with job lock held.
*/
-void job_dismiss(Job **job, Error **errp);
+void job_dismiss_locked(Job **job, Error **errp);
/**
* Synchronously finishes the given @job. If @finish is given, it is called to
@@ -574,8 +712,20 @@ void job_dismiss(Job **job, Error **errp);
* Returns 0 if the job is successfully completed, -ECANCELED if the job was
* cancelled before completing, and -errno in other error cases.
*
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock held, but might release it temporarily.
+ */
+int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp),
+ Error **errp);
+
+/**
+ * Sets the @job->aio_context.
+ * Called with job_mutex *not* held.
+ *
+ * This function must run in the main thread to protect against
+ * concurrent read in job_finish_sync_locked(), takes the job_mutex
+ * lock to protect against the read in job_do_yield_locked(), and must
+ * be called when the job is quiescent.
*/
-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
+void job_set_aio_context(Job *job, AioContext *ctx);
#endif
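
Not part of the patch: a minimal sketch of the locked/unlocked split introduced above, iterating the job list under job_mutex with the new *_locked helpers; "qemu/osdep.h" first, as usual.

#include "qemu/osdep.h"
#include "qemu/job.h"

static int count_unfinished_jobs(void)
{
    int unfinished = 0;

    /* job_next_locked() and job_is_completed_locked() require job_mutex. */
    WITH_JOB_LOCK_GUARD() {
        for (Job *job = job_next_locked(NULL); job;
             job = job_next_locked(job)) {
            if (!job_is_completed_locked(job)) {
                unfinished++;
            }
        }
    }
    return unfinished;
}
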
diff --git a/include/qemu/keyval.h b/include/qemu/keyval.h
new file mode 100644
index 0000000000..1187b68303
--- /dev/null
+++ b/include/qemu/keyval.h
@@ -0,0 +1,15 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef KEYVAL_H
+#define KEYVAL_H
+
+QDict *keyval_parse_into(QDict *qdict, const char *params, const char *implied_key,
+ bool *p_help, Error **errp);
+QDict *keyval_parse(const char *params, const char *implied_key,
+ bool *help, Error **errp);
+void keyval_merge(QDict *old, const QDict *new, Error **errp);
+
+#endif /* KEYVAL_H */
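
Not part of the patch: a sketch of keyval_parse(). The option string and implied key are made up, and the QDict include path is an assumption on the caller's side.

#include "qemu/osdep.h"
#include "qapi/qmp/qdict.h"     /* assumed location of the QDict type */
#include "qemu/keyval.h"

static QDict *parse_drive_opts(Error **errp)
{
    bool help = false;

    /* With "file" as the implied key, the bare leading value binds to it,
     * so this parses as file=disk.img,readonly=on. */
    return keyval_parse("disk.img,readonly=on", "file", &help, errp);
}
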
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
index 84ea794bcf..9823220446 100644
--- a/include/qemu/lockable.h
+++ b/include/qemu/lockable.h
@@ -13,7 +13,7 @@
#ifndef QEMU_LOCKABLE_H
#define QEMU_LOCKABLE_H
-#include "qemu/coroutine.h"
+#include "qemu/coroutine-core.h"
#include "qemu/thread.h"
typedef void QemuLockUnlockFunc(void *);
@@ -24,64 +24,71 @@ struct QemuLockable {
QemuLockUnlockFunc *unlock;
};
-/* This function gives an error if an invalid, non-NULL pointer type is passed
- * to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
- * from the compiler, and give the errors already at link time.
- */
-#if defined(__OPTIMIZE__) && !defined(__SANITIZE_ADDRESS__)
-void unknown_lock_type(void *);
-#else
-static inline void unknown_lock_type(void *unused)
-{
- abort();
-}
-#endif
-
static inline __attribute__((__always_inline__)) QemuLockable *
qemu_make_lockable(void *x, QemuLockable *lockable)
{
- /* We cannot test this in a macro, otherwise we get compiler
+ /*
+ * We cannot test this in a macro, otherwise we get compiler
* warnings like "the address of 'm' will always evaluate as 'true'".
*/
return x ? lockable : NULL;
}
-/* Auxiliary macros to simplify QEMU_MAKE_LOCABLE. */
-#define QEMU_LOCK_FUNC(x) ((QemuLockUnlockFunc *) \
- QEMU_GENERIC(x, \
- (QemuMutex *, qemu_mutex_lock), \
- (CoMutex *, qemu_co_mutex_lock), \
- (QemuSpin *, qemu_spin_lock), \
- unknown_lock_type))
-
-#define QEMU_UNLOCK_FUNC(x) ((QemuLockUnlockFunc *) \
- QEMU_GENERIC(x, \
- (QemuMutex *, qemu_mutex_unlock), \
- (CoMutex *, qemu_co_mutex_unlock), \
- (QemuSpin *, qemu_spin_unlock), \
- unknown_lock_type))
-
-/* In C, compound literals have the lifetime of an automatic variable.
+static inline __attribute__((__always_inline__)) QemuLockable *
+qemu_null_lockable(void *x)
+{
+ if (x != NULL) {
+ qemu_build_not_reached();
+ }
+ return NULL;
+}
+
+/*
+ * In C, compound literals have the lifetime of an automatic variable.
* In C++ it would be different, but then C++ wouldn't need QemuLockable
* either...
*/
-#define QEMU_MAKE_LOCKABLE_(x) qemu_make_lockable((x), &(QemuLockable) { \
- .object = (x), \
- .lock = QEMU_LOCK_FUNC(x), \
- .unlock = QEMU_UNLOCK_FUNC(x), \
+#define QML_OBJ_(x, name) (&(QemuLockable) { \
+ .object = (x), \
+ .lock = (QemuLockUnlockFunc *) qemu_ ## name ## _lock, \
+ .unlock = (QemuLockUnlockFunc *) qemu_ ## name ## _unlock \
})
-/* QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
+/**
+ * QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
*
- * @x: a lock object (currently one of QemuMutex, CoMutex, QemuSpin).
+ * @x: a lock object (currently one of QemuMutex, QemuRecMutex,
+ * CoMutex, QemuSpin).
+ *
+ * Returns a QemuLockable object that can be passed around
+ * to a function that can operate with locks of any kind, or
+ * NULL if @x is %NULL.
+ *
+ * Note the special case for void *, so that we may pass "NULL".
+ */
+#define QEMU_MAKE_LOCKABLE(x) \
+ _Generic((x), QemuLockable *: (x), \
+ void *: qemu_null_lockable(x), \
+ QemuMutex *: qemu_make_lockable(x, QML_OBJ_(x, mutex)), \
+ QemuRecMutex *: qemu_make_lockable(x, QML_OBJ_(x, rec_mutex)), \
+ CoMutex *: qemu_make_lockable(x, QML_OBJ_(x, co_mutex)), \
+ QemuSpin *: qemu_make_lockable(x, QML_OBJ_(x, spin)))
+
+/**
+ * QEMU_MAKE_LOCKABLE_NONNULL - Make a polymorphic QemuLockable
+ *
+ * @x: a lock object (currently one of QemuMutex, QemuRecMutex,
+ * CoMutex, QemuSpin).
*
* Returns a QemuLockable object that can be passed around
* to a function that can operate with locks of any kind.
*/
-#define QEMU_MAKE_LOCKABLE(x) \
- QEMU_GENERIC(x, \
- (QemuLockable *, (x)), \
- QEMU_MAKE_LOCKABLE_(x))
+#define QEMU_MAKE_LOCKABLE_NONNULL(x) \
+ _Generic((x), QemuLockable *: (x), \
+ QemuMutex *: QML_OBJ_(x, mutex), \
+ QemuRecMutex *: QML_OBJ_(x, rec_mutex), \
+ CoMutex *: QML_OBJ_(x, co_mutex), \
+ QemuSpin *: QML_OBJ_(x, spin))
static inline void qemu_lockable_lock(QemuLockable *x)
{
@@ -93,4 +100,70 @@ static inline void qemu_lockable_unlock(QemuLockable *x)
x->unlock(x->object);
}
+static inline QemuLockable *qemu_lockable_auto_lock(QemuLockable *x)
+{
+ qemu_lockable_lock(x);
+ return x;
+}
+
+static inline void qemu_lockable_auto_unlock(QemuLockable *x)
+{
+ if (x) {
+ qemu_lockable_unlock(x);
+ }
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
+
+#define WITH_QEMU_LOCK_GUARD_(x, var) \
+ for (g_autoptr(QemuLockable) var = \
+ qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE_NONNULL((x))); \
+ var; \
+ qemu_lockable_auto_unlock(var), var = NULL)
+
+/**
+ * WITH_QEMU_LOCK_GUARD - Lock a lock object for scope
+ *
+ * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin).
+ *
+ * This macro defines a lock scope such that entering the scope takes the lock
+ * and leaving the scope releases the lock. Return statements are allowed
+ * within the scope and release the lock. Break and continue statements leave
+ * the scope early and release the lock.
+ *
+ * WITH_QEMU_LOCK_GUARD(&mutex) {
+ * ...
+ * if (error) {
+ * return; <-- mutex is automatically unlocked
+ * }
+ *
+ * if (early_exit) {
+ * break; <-- leave this scope early
+ * }
+ * ...
+ * }
+ */
+#define WITH_QEMU_LOCK_GUARD(x) \
+ WITH_QEMU_LOCK_GUARD_((x), glue(qemu_lockable_auto, __COUNTER__))
+
+/**
+ * QEMU_LOCK_GUARD - Lock an object until the end of the scope
+ *
+ * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin).
+ *
+ * This macro takes a lock until the end of the scope. Return statements
+ * release the lock.
+ *
+ * ... <-- mutex not locked
+ * QEMU_LOCK_GUARD(&mutex); <-- mutex locked from here onwards
+ * ...
+ * if (error) {
+ * return; <-- mutex is automatically unlocked
+ * }
+ */
+#define QEMU_LOCK_GUARD(x) \
+ g_autoptr(QemuLockable) \
+ glue(qemu_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
+ qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x)))
+
#endif
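
Not part of the patch: a sketch showing the _Generic-based polymorphism of QEMU_MAKE_LOCKABLE, where the same helper accepts either a QemuMutex or a QemuSpin; "qemu/osdep.h" is assumed first.

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/lockable.h"

static void locked_increment(QemuLockable *l, int *counter)
{
    qemu_lockable_lock(l);      /* dispatches to the right lock function */
    (*counter)++;
    qemu_lockable_unlock(l);
}

static void bump_both(QemuMutex *m, QemuSpin *s, int *counter)
{
    locked_increment(QEMU_MAKE_LOCKABLE(m), counter);   /* qemu_mutex_* */
    locked_increment(QEMU_MAKE_LOCKABLE(s), counter);   /* qemu_spin_* */
}
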
diff --git a/include/qemu/log-for-trace.h b/include/qemu/log-for-trace.h
index 2f0a5b080e..d47c9cd446 100644
--- a/include/qemu/log-for-trace.h
+++ b/include/qemu/log-for-trace.h
@@ -30,6 +30,6 @@ static inline bool qemu_loglevel_mask(int mask)
}
/* main logging function */
-int GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...);
+void G_GNUC_PRINTF(1, 2) qemu_log(const char *fmt, ...);
#endif
diff --git a/include/qemu/log.h b/include/qemu/log.h
index b097a6cae1..df59bfabcd 100644
--- a/include/qemu/log.h
+++ b/include/qemu/log.h
@@ -4,29 +4,15 @@
/* A small part of this API is split into its own header */
#include "qemu/log-for-trace.h"
-/* Private global variable, don't use */
-extern FILE *qemu_logfile;
-
/*
* The new API:
- *
*/
-/* Log settings checking macros: */
+/* Returns true if qemu_log() will really write somewhere. */
+bool qemu_log_enabled(void);
-/* Returns true if qemu_log() will really write somewhere
- */
-static inline bool qemu_log_enabled(void)
-{
- return qemu_logfile != NULL;
-}
-
-/* Returns true if qemu_log() will write somewhere else than stderr
- */
-static inline bool qemu_log_separate(void)
-{
- return qemu_logfile != NULL && qemu_logfile != stderr;
-}
+/* Returns true if qemu_log() will write somewhere other than stderr. */
+bool qemu_log_separate(void);
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM (1 << 1)
@@ -45,35 +31,19 @@ static inline bool qemu_log_separate(void)
/* LOG_TRACE (1 << 15) is defined in log-for-trace.h */
#define CPU_LOG_TB_OP_IND (1 << 16)
#define CPU_LOG_TB_FPU (1 << 17)
+#define CPU_LOG_PLUGIN (1 << 18)
+/* LOG_STRACE is used for user-mode strace logging. */
+#define LOG_STRACE (1 << 19)
+#define LOG_PER_THREAD (1 << 20)
+#define CPU_LOG_TB_VPU (1 << 21)
-/* Lock output for a series of related logs. Since this is not needed
- * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we
- * assume that qemu_loglevel_mask has already been tested, and that
- * qemu_loglevel is never set when qemu_logfile is unset.
- */
+/* Lock/unlock output. */
-static inline void qemu_log_lock(void)
-{
- qemu_flockfile(qemu_logfile);
-}
-
-static inline void qemu_log_unlock(void)
-{
- qemu_funlockfile(qemu_logfile);
-}
+FILE *qemu_log_trylock(void) G_GNUC_WARN_UNUSED_RESULT;
+void qemu_log_unlock(FILE *fd);
/* Logging functions: */
-/* vfprintf-like logging function
- */
-static inline void GCC_FMT_ATTR(1, 0)
-qemu_log_vprintf(const char *fmt, va_list va)
-{
- if (qemu_logfile) {
- vfprintf(qemu_logfile, fmt, va);
- }
-}
-
/* log only if a bit is set on the current loglevel mask:
* @mask: bit to check in the mask
* @fmt: printf-style format string
@@ -112,9 +82,9 @@ typedef struct QEMULogItem {
extern const QEMULogItem qemu_log_items[];
-void qemu_set_log(int log_flags);
-void qemu_log_needs_buffers(void);
-void qemu_set_log_filename(const char *filename, Error **errp);
+bool qemu_set_log(int log_flags, Error **errp);
+bool qemu_set_log_filename(const char *filename, Error **errp);
+bool qemu_set_log_filename_flags(const char *name, int flags, Error **errp);
void qemu_set_dfilter_ranges(const char *ranges, Error **errp);
bool qemu_log_in_addr_range(uint64_t addr);
int qemu_str_to_log_mask(const char *str);
@@ -124,9 +94,4 @@ int qemu_str_to_log_mask(const char *str);
*/
void qemu_print_log_usage(FILE *f);
-/* fflush() the log file */
-void qemu_log_flush(void);
-/* Close the log file */
-void qemu_log_close(void);
-
#endif
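
Not part of the patch: a sketch of the reworked locking in the logging API, where qemu_log_trylock() hands back the FILE * to write to and must be paired with qemu_log_unlock(); the message text is illustrative.

#include "qemu/osdep.h"
#include "qemu/log.h"

static void log_tb_event(uint64_t pc)
{
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        FILE *f = qemu_log_trylock();   /* NULL if the log is unavailable */
        if (f) {
            fprintf(f, "translating block at 0x%" PRIx64 "\n", pc);
            qemu_log_unlock(f);
        }
    }

    /* One-shot messages can still go through the printf-style qemu_log(). */
    qemu_log("tb event at 0x%" PRIx64 "\n", pc);
}
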
diff --git a/include/qemu/madvise.h b/include/qemu/madvise.h
new file mode 100644
index 0000000000..e155f59a0d
--- /dev/null
+++ b/include/qemu/madvise.h
@@ -0,0 +1,95 @@
+/*
+ * QEMU madvise wrapper functions
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_MADVISE_H
+#define QEMU_MADVISE_H
+
+#define QEMU_MADV_INVALID -1
+
+#if defined(CONFIG_MADVISE)
+
+#define QEMU_MADV_WILLNEED MADV_WILLNEED
+#define QEMU_MADV_DONTNEED MADV_DONTNEED
+#ifdef MADV_DONTFORK
+#define QEMU_MADV_DONTFORK MADV_DONTFORK
+#else
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#endif
+#ifdef MADV_MERGEABLE
+#define QEMU_MADV_MERGEABLE MADV_MERGEABLE
+#else
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_UNMERGEABLE
+#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE
+#else
+#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_DODUMP
+#define QEMU_MADV_DODUMP MADV_DODUMP
+#else
+#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
+#endif
+#ifdef MADV_DONTDUMP
+#define QEMU_MADV_DONTDUMP MADV_DONTDUMP
+#else
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#endif
+#ifdef MADV_HUGEPAGE
+#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE
+#else
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_NOHUGEPAGE
+#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE
+#else
+#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_REMOVE
+#define QEMU_MADV_REMOVE MADV_REMOVE
+#else
+#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
+#endif
+#ifdef MADV_POPULATE_WRITE
+#define QEMU_MADV_POPULATE_WRITE MADV_POPULATE_WRITE
+#else
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+#endif
+
+#elif defined(CONFIG_POSIX_MADVISE)
+
+#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED
+#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+
+#else /* no-op */
+
+#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID
+#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+
+#endif
+
+int qemu_madvise(void *addr, size_t len, int advice);
+
+#endif
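
Not part of the patch: a sketch of the qemu_madvise() wrapper. Because every QEMU_MADV_* constant degrades to QEMU_MADV_INVALID when the host lacks it, callers can probe for support before issuing the advice.

#include "qemu/osdep.h"
#include "qemu/madvise.h"

static void drop_backing_pages(void *addr, size_t len)
{
    /* Skip the call entirely on hosts without any madvise support. */
    if (QEMU_MADV_DONTNEED != QEMU_MADV_INVALID) {
        if (qemu_madvise(addr, len, QEMU_MADV_DONTNEED) < 0) {
            /* Advice is best-effort; failure is not fatal here. */
        }
    }
}
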
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index e59f9ae1e9..5764db157c 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -26,9 +26,19 @@
#define QEMU_MAIN_LOOP_H
#include "block/aio.h"
+#include "qom/object.h"
+#include "sysemu/event-loop-base.h"
#define SIG_IPI SIGUSR1
+#define TYPE_MAIN_LOOP "main-loop"
+OBJECT_DECLARE_TYPE(MainLoop, MainLoopClass, MAIN_LOOP)
+
+struct MainLoop {
+ EventLoopBase parent_obj;
+};
+typedef struct MainLoop MainLoop;
+
/**
* qemu_init_main_loop: Set up the process so that it can run the main loop.
*
@@ -52,7 +62,7 @@ int qemu_init_main_loop(Error **errp);
* repeatedly calls main_loop_wait(false).
*
* Main loop services include file descriptor callbacks, bottom halves
- * and timers (defined in qemu-timer.h). Bottom halves are similar to timers
+ * and timers (defined in qemu/timer.h). Bottom halves are similar to timers
* that execute immediately, but have a lower overhead and scheduling them
* is wait-free, thread-safe and signal-safe.
*
@@ -147,6 +157,8 @@ typedef void WaitObjectFunc(void *opaque);
* in the main loop's calls to WaitForMultipleObjects. When the handle
* is in a signaled state, QEMU will call @func.
*
+ * If the same HANDLE is added twice, this function returns -1.
+ *
* @handle: The Windows handle to be observed.
* @func: A function to be called when @handle is in a signaled state.
* @opaque: A pointer-size value that is passed to @func.
@@ -234,72 +246,166 @@ void event_notifier_set_handler(EventNotifier *e,
GSource *iohandler_get_g_source(void);
AioContext *iohandler_get_aio_context(void);
-#ifdef CONFIG_POSIX
+
/**
- * qemu_add_child_watch: Register a child process for reaping.
- *
- * Under POSIX systems, a parent process must read the exit status of
- * its child processes using waitpid, or the operating system will not
- * free some of the resources attached to that process.
+ * bql_locked: Return lock status of the Big QEMU Lock (BQL)
*
- * This function directs the QEMU main loop to observe a child process
- * and call waitpid as soon as it exits; the watch is then removed
- * automatically. It is useful whenever QEMU forks a child process
- * but will find out about its termination by other means such as a
- * "broken pipe".
+ * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it
+ * must always be taken outside other locks. This function helps
+ * functions take different paths depending on whether the current
+ * thread is running within the BQL.
*
- * @pid: The pid that QEMU should observe.
+ * This function should never be used in the block layer, because
+ * unit tests, block layer tools and qemu-storage-daemon do not
+ * have a BQL.
+ * Please instead refer to qemu_in_main_thread().
*/
-int qemu_add_child_watch(pid_t pid);
-#endif
+bool bql_locked(void);
/**
- * qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
+ * qemu_in_main_thread: return whether it's possible to safely access
+ * the global state of the block layer.
*
- * The main loop mutex is the coarsest lock in QEMU, and as such it
- * must always be taken outside other locks. This function helps
- * functions take different paths depending on whether the current
- * thread is running within the main loop mutex.
+ * Global state of the block layer is not accessible from I/O threads
+ * or worker threads; only from threads that "own" the default
+ * AioContext that qemu_get_aio_context() returns. For tests, block
+ * layer tools and qemu-storage-daemon there is a designated thread that
+ * runs the event loop for qemu_get_aio_context(), and that is the
+ * main thread.
+ *
+ * For emulators, however, any thread that holds the BQL can act
+ * as the block layer main thread; this will be any of the actual
+ * main thread, the vCPU threads or the RCU thread.
+ *
+ * For clarity, do not use this function outside the block layer.
+ */
+bool qemu_in_main_thread(void);
+
+/*
+ * Mark and check that the function is part of the Global State API.
+ * Please refer to include/block/block-global-state.h for more
+ * information about GS API.
+ */
+#define GLOBAL_STATE_CODE() \
+ do { \
+ assert(qemu_in_main_thread()); \
+ } while (0)
+
+/*
+ * Mark and check that the function is part of the I/O API.
+ * Please refer to include/block/block-io.h for more
+ * information about IO API.
+ */
+#define IO_CODE() \
+ do { \
+ /* nop */ \
+ } while (0)
+
+/*
+ * Mark and check that the function is part of the "I/O OR GS" API.
+ * Please refer to include/block/block-io.h for more
+ * information about "IO or GS" API.
*/
-bool qemu_mutex_iothread_locked(void);
+#define IO_OR_GS_CODE() \
+ do { \
+ /* nop */ \
+ } while (0)
/**
- * qemu_mutex_lock_iothread: Lock the main loop mutex.
+ * bql_lock: Lock the Big QEMU Lock (BQL).
*
- * This function locks the main loop mutex. The mutex is taken by
+ * This function locks the Big QEMU Lock (BQL). The lock is taken by
* main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be taken
+ * external events (such as with select). The lock should be taken
* by threads other than the main loop thread when calling
* qemu_bh_new(), qemu_set_fd_handler() and basically all other
* functions documented in this file.
*
- * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread
+ * NOTE: tools currently are single-threaded and bql_lock
* is a no-op there.
*/
-#define qemu_mutex_lock_iothread() \
- qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)
-void qemu_mutex_lock_iothread_impl(const char *file, int line);
+#define bql_lock() bql_lock_impl(__FILE__, __LINE__)
+void bql_lock_impl(const char *file, int line);
/**
- * qemu_mutex_unlock_iothread: Unlock the main loop mutex.
+ * bql_unlock: Unlock the Big QEMU Lock (BQL).
*
- * This function unlocks the main loop mutex. The mutex is taken by
+ * This function unlocks the Big QEMU Lock. The lock is taken by
* main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be unlocked
+ * external events (such as with select). The lock should be unlocked
* as soon as possible by threads other than the main loop thread,
* because it prevents the main loop from processing callbacks,
* including timers and bottom halves.
*
- * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread
+ * NOTE: tools currently are single-threaded and bql_unlock
* is a no-op there.
*/
-void qemu_mutex_unlock_iothread(void);
+void bql_unlock(void);
-/* internal interfaces */
+/**
+ * BQL_LOCK_GUARD
+ *
+ * Wrap a block of code in a conditional bql_{lock,unlock}.
+ */
+typedef struct BQLLockAuto BQLLockAuto;
+
+static inline BQLLockAuto *bql_auto_lock(const char *file, int line)
+{
+ if (bql_locked()) {
+ return NULL;
+ }
+ bql_lock_impl(file, line);
+ /* Anything non-NULL causes the cleanup function to be called */
+ return (BQLLockAuto *)(uintptr_t)1;
+}
+
+static inline void bql_auto_unlock(BQLLockAuto *l)
+{
+ bql_unlock();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock)
+
+#define BQL_LOCK_GUARD() \
+ g_autoptr(BQLLockAuto) _bql_lock_auto __attribute__((unused)) \
+ = bql_auto_lock(__FILE__, __LINE__)
+
+/*
+ * qemu_cond_wait_bql: Wait on condition for the Big QEMU Lock (BQL)
+ *
+ * This function atomically releases the Big QEMU Lock (BQL) and causes
+ * the calling thread to block on the condition.
+ */
+void qemu_cond_wait_bql(QemuCond *cond);
+
+/*
+ * qemu_cond_timedwait_bql: like the previous, but with timeout
+ */
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms);
-void qemu_fd_register(int fd);
+/* internal interfaces */
-QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
+#define qemu_bh_new_guarded(cb, opaque, guard) \
+ qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard)
+#define qemu_bh_new(cb, opaque) \
+ qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+ MemReentrancyGuard *reentrancy_guard);
void qemu_bh_schedule_idle(QEMUBH *bh);
+enum {
+ MAIN_LOOP_POLL_FILL,
+ MAIN_LOOP_POLL_ERR,
+ MAIN_LOOP_POLL_OK,
+};
+
+typedef struct MainLoopPoll {
+ int state;
+ uint32_t timeout;
+ GArray *pollfds;
+} MainLoopPoll;
+
+void main_loop_poll_add_notifier(Notifier *notify);
+void main_loop_poll_remove_notifier(Notifier *notify);
+
#endif
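
Not part of the patch: a sketch of the renamed BQL helpers. BQL_LOCK_GUARD() only takes the lock when the current thread does not already hold it; "qemu/osdep.h" first, as usual.

#include "qemu/osdep.h"
#include "qemu/main-loop.h"

static void touch_bql_protected_state(void)
{
    /* No-op if bql_locked() is already true, otherwise the BQL is held
     * until the end of this scope. */
    BQL_LOCK_GUARD();

    assert(bql_locked());
    /* ... read or modify state protected by the BQL ... */
}
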
diff --git a/include/qemu/memalign.h b/include/qemu/memalign.h
new file mode 100644
index 0000000000..fa299f3bf6
--- /dev/null
+++ b/include/qemu/memalign.h
@@ -0,0 +1,61 @@
+/*
+ * Allocation and free functions for aligned memory
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_MEMALIGN_H
+#define QEMU_MEMALIGN_H
+
+/**
+ * qemu_try_memalign: Allocate aligned memory
+ * @alignment: required alignment, in bytes
+ * @size: size of allocation, in bytes
+ *
+ * Allocate memory on an aligned boundary (i.e. the returned
+ * address will be an exact multiple of @alignment).
+ * @alignment must be a power of 2, or the function will assert().
+ * On success, returns allocated memory; on failure, returns NULL.
+ *
+ * The memory allocated through this function must be freed via
+ * qemu_vfree() (and not via free()).
+ */
+void *qemu_try_memalign(size_t alignment, size_t size);
+/**
+ * qemu_memalign: Allocate aligned memory, without failing
+ * @alignment: required alignment, in bytes
+ * @size: size of allocation, in bytes
+ *
+ * Allocate memory in the same way as qemu_try_memalign(), but
+ * abort() with an error message if the memory allocation fails.
+ *
+ * The memory allocated through this function must be freed via
+ * qemu_vfree() (and not via free()).
+ */
+void *qemu_memalign(size_t alignment, size_t size);
+/**
+ * qemu_vfree: Free memory allocated through qemu_memalign
+ * @ptr: memory to free
+ *
+ * This function must be used to free memory allocated via qemu_memalign()
+ * or qemu_try_memalign(). (Using the wrong free function will cause
+ * subtle bugs on Windows hosts.)
+ */
+void qemu_vfree(void *ptr);
+/*
+ * It's an analog of GLIB's g_autoptr_cleanup_generic_gfree(), used to define
+ * g_autofree macro.
+ */
+static inline void qemu_cleanup_generic_vfree(void *p)
+{
+ void **pp = (void **)p;
+ qemu_vfree(*pp);
+}
+
+/*
+ * Analog of g_autofree, but qemu_vfree is called on cleanup instead of g_free.
+ */
+#define QEMU_AUTO_VFREE __attribute__((cleanup(qemu_cleanup_generic_vfree)))
+
+#endif
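
A short usage sketch of the three entry points above plus QEMU_AUTO_VFREE; the 512-byte alignment and the buffer length are arbitrary illustration values.

    #include <string.h>
    #include "qemu/memalign.h"

    static void example_aligned_buffers(size_t len)
    {
        /* Released automatically with qemu_vfree() when it goes out of scope. */
        QEMU_AUTO_VFREE char *bounce = qemu_memalign(512, len);
        memset(bounce, 0, len);

        /* The fallible variant returns NULL instead of aborting. */
        char *maybe = qemu_try_memalign(512, len);
        if (maybe) {
            qemu_vfree(maybe);   /* never free() memory from these allocators */
        }
    }
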
diff --git a/include/qemu/memfd.h b/include/qemu/memfd.h
index d551c28b68..975b6bdb77 100644
--- a/include/qemu/memfd.h
+++ b/include/qemu/memfd.h
@@ -32,6 +32,10 @@
#define MFD_HUGE_SHIFT 26
#endif
+#if defined CONFIG_LINUX && !defined CONFIG_MEMFD
+int memfd_create(const char *name, unsigned int flags);
+#endif
+
int qemu_memfd_create(const char *name, size_t size, bool hugetlb,
uint64_t hugetlbsize, unsigned int seals, Error **errp);
bool qemu_memfd_alloc_check(void);
diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h
index 50385e3f81..8344daaa03 100644
--- a/include/qemu/mmap-alloc.h
+++ b/include/qemu/mmap-alloc.h
@@ -1,14 +1,66 @@
#ifndef QEMU_MMAP_ALLOC_H
#define QEMU_MMAP_ALLOC_H
-#include "qemu-common.h"
+typedef enum {
+ QEMU_FS_TYPE_UNKNOWN = 0,
+ QEMU_FS_TYPE_TMPFS,
+ QEMU_FS_TYPE_HUGETLBFS,
+ QEMU_FS_TYPE_NUM,
+} QemuFsType;
size_t qemu_fd_getpagesize(int fd);
+QemuFsType qemu_fd_getfs(int fd);
-size_t qemu_mempath_getpagesize(const char *mem_path);
+/**
+ * qemu_ram_mmap: mmap anonymous memory, the specified file or device.
+ *
+ * mmap() abstraction to map guest RAM, simplifying flag handling, taking
+ * care of alignment requirements and installing guard pages.
+ *
+ * Parameters:
+ * @fd: the file or the device to mmap
+ * @size: the number of bytes to be mmapped
+ * @align: if not zero, specify the alignment of the starting mapping address;
+ * otherwise, the alignment in use will be determined by QEMU.
+ * @qemu_map_flags: QEMU_MAP_* flags
+ * @map_offset: map starts at offset of map_offset from the start of fd
+ *
+ * Internally, MAP_PRIVATE, MAP_ANONYMOUS and MAP_SHARED_VALIDATE are set
+ * implicitly based on other parameters.
+ *
+ * Return:
+ * On success, return a pointer to the mapped area.
+ * On failure, return MAP_FAILED.
+ */
+void *qemu_ram_mmap(int fd,
+ size_t size,
+ size_t align,
+ uint32_t qemu_map_flags,
+ off_t map_offset);
-void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared);
+void qemu_ram_munmap(int fd, void *ptr, size_t size);
-void qemu_ram_munmap(void *ptr, size_t size);
+/*
+ * Abstraction of PROT_ and MAP_ flags as passed to mmap(), for example,
+ * consumed by qemu_ram_mmap().
+ */
+
+/* Map PROT_READ instead of PROT_READ | PROT_WRITE. */
+#define QEMU_MAP_READONLY (1 << 0)
+
+/* Use MAP_SHARED instead of MAP_PRIVATE. */
+#define QEMU_MAP_SHARED (1 << 1)
+
+/*
+ * Use MAP_SYNC | MAP_SHARED_VALIDATE if supported. Ignored without
+ * QEMU_MAP_SHARED. If mapping fails, warn and fall back to !QEMU_MAP_SYNC.
+ */
+#define QEMU_MAP_SYNC (1 << 2)
+
+/*
+ * Use MAP_NORESERVE to skip reservation of swap space (or huge pages if
+ * applicable). Bail out if not supported/effective.
+ */
+#define QEMU_MAP_NORESERVE (1 << 3)
#endif
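
A hedged sketch of the new flag-based interface: map a file-backed region shared and writable, at an alignment chosen by QEMU (align == 0); error handling is reduced to the MAP_FAILED check documented above, and the fd/size come from the caller.

    #include "qemu/mmap-alloc.h"

    static void *map_backing_file(int fd, size_t size)
    {
        void *ptr = qemu_ram_mmap(fd, size, 0 /* align */, QEMU_MAP_SHARED,
                                  0 /* map_offset */);
        if (ptr == MAP_FAILED) {
            return NULL;
        }
        return ptr;
    }
    /* later: qemu_ram_munmap(fd, ptr, size); */
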
diff --git a/include/qemu/module.h b/include/qemu/module.h
index 55dd2beea8..c37ce74b16 100644
--- a/include/qemu/module.h
+++ b/include/qemu/module.h
@@ -40,11 +40,14 @@ static void __attribute__((constructor)) do_qemu_init_ ## function(void) \
#endif
typedef enum {
+ MODULE_INIT_MIGRATION,
MODULE_INIT_BLOCK,
MODULE_INIT_OPTS,
MODULE_INIT_QOM,
MODULE_INIT_TRACE,
MODULE_INIT_XEN_BACKEND,
+ MODULE_INIT_LIBQOS,
+ MODULE_INIT_FUZZ_TARGET,
MODULE_INIT_MAX
} module_init_type;
@@ -54,15 +57,136 @@ typedef enum {
#define trace_init(function) module_init(function, MODULE_INIT_TRACE)
#define xen_backend_init(function) module_init(function, \
MODULE_INIT_XEN_BACKEND)
-
-#define block_module_load_one(lib) module_load_one("block-", lib)
-#define ui_module_load_one(lib) module_load_one("ui-", lib)
-#define audio_module_load_one(lib) module_load_one("audio-", lib)
+#define libqos_init(function) module_init(function, MODULE_INIT_LIBQOS)
+#define fuzz_target_init(function) module_init(function, \
+ MODULE_INIT_FUZZ_TARGET)
+#define migration_init(function) module_init(function, MODULE_INIT_MIGRATION)
+#define block_module_load(lib, errp) module_load("block-", lib, errp)
+#define ui_module_load(lib, errp) module_load("ui-", lib, errp)
+#define audio_module_load(lib, errp) module_load("audio-", lib, errp)
void register_module_init(void (*fn)(void), module_init_type type);
void register_dso_module_init(void (*fn)(void), module_init_type type);
void module_call_init(module_init_type type);
-void module_load_one(const char *prefix, const char *lib_name);
+
+/*
+ * module_load: attempt to load a module from a set of directories
+ *
+ * directories searched are:
+ * - getenv("QEMU_MODULE_DIR")
+ * - get_relocated_path(CONFIG_QEMU_MODDIR);
+ * - /var/run/qemu/${version_dir}
+ *
+ * prefix: a subsystem prefix, or the empty string ("audio-", ..., "")
+ * name: name of the module
+ * errp: error to set in case the module is found, but load failed.
+ *
+ * Return value: -1 on error (errp set if not NULL).
+ * 0 if the module or one of its dependencies is not installed,
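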
+ * 1 if the module is found and loaded,
+ * 2 if the module is already loaded, or module is built-in.
+ */
+int module_load(const char *prefix, const char *name, Error **errp);
+
+/*
+ * module_load_qom: attempt to load a module to provide a QOM type
+ *
+ * type: the type to be provided
+ * errp: error to set.
+ *
+ * Return value: as per module_load.
+ */
+int module_load_qom(const char *type, Error **errp);
+void module_load_qom_all(void);
+void module_allow_arch(const char *arch);
+
+/**
+ * DOC: module info annotation macros
+ *
+ * ``scripts/modinfo-collect.py`` will collect module info,
+ * using the preprocessor and -DQEMU_MODINFO.
+ *
+ * ``scripts/modinfo-generate.py`` will create a module meta-data database
+ * from the collected information so qemu knows about module
+ * dependencies and QOM objects implemented by modules.
+ *
+ * See ``*.modinfo`` and ``modinfo.c`` in the build directory to check the
+ * script results.
+ */
+#ifdef QEMU_MODINFO
+# define modinfo(kind, value) \
+ MODINFO_START kind value MODINFO_END
+#else
+# define modinfo(kind, value)
+#endif
+
+/**
+ * module_obj
+ *
+ * @name: QOM type.
+ *
+ * This module implements QOM type @name.
+ */
+#define module_obj(name) modinfo(obj, name)
+
+/**
+ * module_dep
+ *
+ * @name: module name
+ *
+ * This module depends on module @name.
+ */
+#define module_dep(name) modinfo(dep, name)
+
+/**
+ * module_arch
+ *
+ * @name: target architecture
+ *
+ * This module is for target architecture @name.
+ *
+ * Note that target-dependent modules are tagged automatically, so
+ * this is only needed in case target-independent modules should be
+ * restricted. Use case example: the ccw bus is implemented by s390x
+ * only.
+ */
+#define module_arch(name) modinfo(arch, name)
+
+/**
+ * module_opts
+ *
+ * @name: QemuOpts name
+ *
+ * This module registers QemuOpts @name.
+ */
+#define module_opts(name) modinfo(opts, name)
+
+/**
+ * module_kconfig
+ *
+ * @name: Kconfig requirement necessary to load the module
+ *
+ * This module requires a core module that should be implemented and
+ * enabled in Kconfig.
+ */
+#define module_kconfig(name) modinfo(kconfig, name)
+
+/*
+ * module info database
+ *
+ * scripts/modinfo-generate.c will build this using the data collected
+ * by scripts/modinfo-collect.py
+ */
+typedef struct QemuModinfo QemuModinfo;
+struct QemuModinfo {
+ const char *name;
+ const char *arch;
+ const char **objs;
+ const char **deps;
+ const char **opts;
+};
+extern const QemuModinfo qemu_modinfo[];
+void module_init_info(const QemuModinfo *info);
#endif
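
Two illustrative fragments of the reworked interface: a module annotating what it provides, and core code loading the module that implements a QOM type. The type and dependency names are invented, and the error handling assumes the usual error_report_err() helper.

    /* In the module source: */
    module_obj("hypothetical-object");    /* QOM type implemented here */
    module_dep("block-curl");             /* hypothetical dependency */

    /* In core code: */
    static bool ensure_type_loaded(const char *type)
    {
        Error *local_err = NULL;
        int rc = module_load_qom(type, &local_err);

        if (rc < 0) {              /* module found, but loading failed */
            error_report_err(local_err);
            return false;
        }
        return rc > 0;             /* 1 = loaded now, 2 = already available */
    }
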
diff --git a/include/qemu/mprotect.h b/include/qemu/mprotect.h
new file mode 100644
index 0000000000..1e83d1433e
--- /dev/null
+++ b/include/qemu/mprotect.h
@@ -0,0 +1,14 @@
+/*
+ * QEMU mprotect functions
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_MPROTECT_H
+#define QEMU_MPROTECT_H
+
+int qemu_mprotect_rw(void *addr, size_t size);
+int qemu_mprotect_rwx(void *addr, size_t size);
+int qemu_mprotect_none(void *addr, size_t size);
+
+#endif
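
A small sketch of toggling protections on a region, for example a generated-code buffer; it assumes a non-zero return indicates failure, and the region pointer and size come from the caller.

    #include "qemu/mprotect.h"

    static int fill_then_seal(void *region, size_t size)
    {
        if (qemu_mprotect_rw(region, size)) {
            return -1;            /* could not make the region writable */
        }
        /* ... write into the region ... */
        return qemu_mprotect_none(region, size);
    }
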
diff --git a/include/qemu/notify.h b/include/qemu/notify.h
index a3d73e4bc7..abf18dbf59 100644
--- a/include/qemu/notify.h
+++ b/include/qemu/notify.h
@@ -40,15 +40,21 @@ void notifier_remove(Notifier *notifier);
void notifier_list_notify(NotifierList *list, void *data);
+bool notifier_list_empty(NotifierList *list);
+
/* Same as Notifier but allows .notify() to return errors */
typedef struct NotifierWithReturn NotifierWithReturn;
+/* Return int to allow for different failure modes and recovery actions */
+typedef int (*NotifierWithReturnFunc)(NotifierWithReturn *notifier, void *data,
+ Error **errp);
+
struct NotifierWithReturn {
/**
* Return 0 on success (next notifier will be invoked), otherwise
* notifier_with_return_list_notify() will stop and return the value.
*/
- int (*notify)(NotifierWithReturn *notifier, void *data);
+ NotifierWithReturnFunc notify;
QLIST_ENTRY(NotifierWithReturn) node;
};
@@ -67,6 +73,6 @@ void notifier_with_return_list_add(NotifierWithReturnList *list,
void notifier_with_return_remove(NotifierWithReturn *notifier);
int notifier_with_return_list_notify(NotifierWithReturnList *list,
- void *data);
+ void *data, Error **errp);
#endif
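
A sketch of the updated NotifierWithReturn contract, with the callback now taking an Error ** and a non-zero return stopping the chain; the names and the availability check are hypothetical.

    static int example_notify(NotifierWithReturn *notifier, void *data,
                              Error **errp)
    {
        if (!example_resource_available()) {    /* hypothetical check */
            error_setg(errp, "example resource is not available");
            return -1;                          /* stops the notifier chain */
        }
        return 0;                               /* next notifier runs */
    }

    static NotifierWithReturn example_notifier = {
        .notify = example_notify,
    };

    /* Registered with notifier_with_return_list_add() and fired via
     * notifier_with_return_list_notify(&list, opaque, errp). */
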
diff --git a/include/qemu/nvdimm-utils.h b/include/qemu/nvdimm-utils.h
new file mode 100644
index 0000000000..5f45774c2c
--- /dev/null
+++ b/include/qemu/nvdimm-utils.h
@@ -0,0 +1,6 @@
+#ifndef NVDIMM_UTILS_H
+#define NVDIMM_UTILS_H
+
+
+GSList *nvdimm_get_device_list(void);
+#endif
diff --git a/include/qemu/option.h b/include/qemu/option.h
index 844587cab3..b349828782 100644
--- a/include/qemu/option.h
+++ b/include/qemu/option.h
@@ -28,12 +28,24 @@
#include "qemu/queue.h"
+/**
+ * get_opt_value
+ * @p: a pointer to the option name, delimited by commas
+ * @value: a non-NULL pointer that will receive the delimited options
+ *
+ * The @value char pointer will be allocated and filled with
+ * the delimited options.
+ *
+ * Returns the position of the comma delimiter/zero byte after the
+ * option name in @p.
+ * The memory pointer in @value must be released with a call to g_free()
+ * when no longer required.
+ */
const char *get_opt_value(const char *p, char **value);
-void parse_option_size(const char *name, const char *value,
+bool parse_option_size(const char *name, const char *value,
uint64_t *ret, Error **errp);
bool has_help_option(const char *param);
-bool is_valid_option_list(const char *param);
enum QemuOptType {
QEMU_OPT_STRING = 0, /* no parsing (use string as-is) */
@@ -81,11 +93,11 @@ uint64_t qemu_opt_get_number_del(QemuOpts *opts, const char *name,
uint64_t qemu_opt_get_size_del(QemuOpts *opts, const char *name,
uint64_t defval);
int qemu_opt_unset(QemuOpts *opts, const char *name);
-void qemu_opt_set(QemuOpts *opts, const char *name, const char *value,
+bool qemu_opt_set(QemuOpts *opts, const char *name, const char *value,
Error **errp);
-void qemu_opt_set_bool(QemuOpts *opts, const char *name, bool val,
+bool qemu_opt_set_bool(QemuOpts *opts, const char *name, bool val,
Error **errp);
-void qemu_opt_set_number(QemuOpts *opts, const char *name, int64_t val,
+bool qemu_opt_set_number(QemuOpts *opts, const char *name, int64_t val,
Error **errp);
typedef int (*qemu_opt_loopfunc)(void *opaque,
const char *name, const char *value,
@@ -107,26 +119,22 @@ QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id,
int fail_if_exists, Error **errp);
void qemu_opts_reset(QemuOptsList *list);
void qemu_opts_loc_restore(QemuOpts *opts);
-void qemu_opts_set(QemuOptsList *list, const char *id,
- const char *name, const char *value, Error **errp);
const char *qemu_opts_id(QemuOpts *opts);
void qemu_opts_set_id(QemuOpts *opts, char *id);
void qemu_opts_del(QemuOpts *opts);
-void qemu_opts_validate(QemuOpts *opts, const QemuOptDesc *desc, Error **errp);
-void qemu_opts_do_parse(QemuOpts *opts, const char *params,
+bool qemu_opts_validate(QemuOpts *opts, const QemuOptDesc *desc, Error **errp);
+bool qemu_opts_do_parse(QemuOpts *opts, const char *params,
const char *firstname, Error **errp);
QemuOpts *qemu_opts_parse_noisily(QemuOptsList *list, const char *params,
bool permit_abbrev);
QemuOpts *qemu_opts_parse(QemuOptsList *list, const char *params,
bool permit_abbrev, Error **errp);
-void qemu_opts_set_defaults(QemuOptsList *list, const char *params,
- int permit_abbrev);
QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict,
Error **errp);
QDict *qemu_opts_to_qdict_filtered(QemuOpts *opts, QDict *qdict,
QemuOptsList *list, bool del);
QDict *qemu_opts_to_qdict(QemuOpts *opts, QDict *qdict);
-void qemu_opts_absorb_qdict(QemuOpts *opts, QDict *qdict, Error **errp);
+bool qemu_opts_absorb_qdict(QemuOpts *opts, QDict *qdict, Error **errp);
typedef int (*qemu_opts_loopfunc)(void *opaque, QemuOpts *opts, Error **errp);
int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func,
@@ -136,7 +144,6 @@ void qemu_opts_print_help(QemuOptsList *list, bool print_caption);
void qemu_opts_free(QemuOptsList *list);
QemuOptsList *qemu_opts_append(QemuOptsList *dst, QemuOptsList *list);
-QDict *keyval_parse(const char *params, const char *implied_key,
- Error **errp);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuOpts, qemu_opts_del)
#endif
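
Since the setters above now return bool, error checks can be chained directly against errp instead of probing a local Error; a sketch with invented option names and values.

    static bool apply_example_opts(QemuOpts *opts, Error **errp)
    {
        uint64_t size;

        if (!qemu_opt_set(opts, "backend", "file", errp) ||
            !qemu_opt_set_bool(opts, "read-only", true, errp) ||
            !qemu_opt_set_number(opts, "queues", 2, errp)) {
            return false;
        }
        return parse_option_size("size", "128M", &size, errp);
    }
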
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 80df7253db..c7053cdc2b 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -27,26 +27,28 @@
#ifndef QEMU_OSDEP_H
#define QEMU_OSDEP_H
+#if !defined _FORTIFY_SOURCE && defined __OPTIMIZE__ && __OPTIMIZE__ && defined __linux__
+# define _FORTIFY_SOURCE 2
+#endif
+
#include "config-host.h"
#ifdef NEED_CPU_H
-#include "config-target.h"
+#include CONFIG_TARGET
#else
#include "exec/poison.h"
#endif
-#ifdef __COVERITY__
-/* Coverity does not like the new _Float* types that are used by
- * recent glibc, and croaks on every single file that includes
- * stdlib.h. These typedefs are enough to please it.
- *
- * Note that these fix parse errors so they cannot be placed in
- * scripts/coverity-model.c.
+
+/*
+ * HOST_WORDS_BIGENDIAN was replaced with HOST_BIG_ENDIAN. Prevent it from
+ * creeping back in.
*/
-typedef float _Float32;
-typedef double _Float32x;
-typedef double _Float64;
-typedef __float80 _Float64x;
-typedef __float128 _Float128;
-#endif
+#pragma GCC poison HOST_WORDS_BIGENDIAN
+
+/*
+ * TARGET_WORDS_BIGENDIAN was replaced with TARGET_BIG_ENDIAN. Prevent it from
+ * creeping back in.
+ */
+#pragma GCC poison TARGET_WORDS_BIGENDIAN
#include "qemu/compiler.h"
@@ -71,13 +73,13 @@ typedef __float128 _Float128;
#define daemon qemu_fake_daemon_function
#include <stdlib.h>
#undef daemon
-extern int daemon(int, int);
+QEMU_EXTERN_C int daemon(int, int);
#endif
#ifdef _WIN32
/* as defined in sdkddkver.h */
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x0600 /* Vista */
+#define _WIN32_WINNT 0x0602 /* Windows 8 API (should be >= the one from glib) */
#endif
/* reduces the number of implicitly included headers */
#ifndef WIN32_LEAN_AND_MEAN
@@ -85,17 +87,30 @@ extern int daemon(int, int);
#endif
#endif
+/* enable C99/POSIX format strings (needs mingw32-runtime 3.15 or later) */
+#ifdef __MINGW32__
+#define __USE_MINGW_ANSI_STDIO 1
+#endif
+
+/*
+ * We need the FreeBSD "legacy" definitions. Rust needs the FreeBSD 11 system
+ * calls since it doesn't use libc at all, so we have to emulate that despite
+ * FreeBSD 11 being EOL'd.
+ */
+#ifdef __FreeBSD__
+#define _WANT_FREEBSD11_STAT
+#define _WANT_FREEBSD11_STATFS
+#define _WANT_FREEBSD11_DIRENT
+#define _WANT_KERNEL_ERRNO
+#define _WANT_SEMUN
+#endif
+
#include <stdarg.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>
#include <stdlib.h>
-
-/* enable C99/POSIX format strings (needs mingw32-runtime 3.15 or later) */
-#ifdef __MINGW32__
-#define __USE_MINGW_ANSI_STDIO 1
-#endif
#include <stdio.h>
#include <string.h>
@@ -109,6 +124,7 @@ extern int daemon(int, int);
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
+#include <getopt.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <assert.h>
@@ -117,8 +133,13 @@ extern int daemon(int, int);
#include <setjmp.h>
#include <signal.h>
-#ifdef __OpenBSD__
-#include <sys/signal.h>
+#ifdef CONFIG_IOVEC
+#include <sys/uio.h>
+#endif
+
+#if defined(__linux__) && defined(__sparc__)
+/* The SPARC definition of QEMU_VMALLOC_ALIGN needs SHMLBA */
+#include <sys/shm.h>
#endif
#ifndef _WIN32
@@ -128,6 +149,17 @@ extern int daemon(int, int);
#define WEXITSTATUS(x) (x)
#endif
+#ifdef __APPLE__
+#include <AvailabilityMacros.h>
+#endif
+
+/*
+ * This is somewhat like a system header; it must be outside any extern "C"
+ * block because it includes system headers itself, including glib.h,
+ * which will not compile if inside an extern "C" block.
+ */
+#include "glib-compat.h"
+
#ifdef _WIN32
#include "sysemu/os-win32.h"
#endif
@@ -136,9 +168,72 @@ extern int daemon(int, int);
#include "sysemu/os-posix.h"
#endif
-#include "glib-compat.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "qemu/typedefs.h"
+/**
+ * Mark a function that executes in coroutine context
+ *
+ * Functions that execute in coroutine context cannot be called directly from
+ * normal functions. In the future it would be nice to enable compiler or
+ * static checker support for catching such errors. This annotation might make
+ * it possible and in the meantime it serves as documentation.
+ *
+ * For example:
+ *
+ * static void coroutine_fn foo(void) {
+ * ....
+ * }
+ */
+#ifdef __clang__
+#define coroutine_fn QEMU_ANNOTATE("coroutine_fn")
+#else
+#define coroutine_fn
+#endif
+
+/**
+ * Mark a function that can suspend when executed in coroutine context,
+ * but can handle running in non-coroutine context too.
+ */
+#ifdef __clang__
+#define coroutine_mixed_fn QEMU_ANNOTATE("coroutine_mixed_fn")
+#else
+#define coroutine_mixed_fn
+#endif
+
+/**
+ * Mark a function that should not be called from a coroutine context.
+ * Usually there will be an analogous, coroutine_fn function that should
+ * be used instead.
+ *
+ * When the function is also marked as coroutine_mixed_fn, the function should
+ * only be called if the caller does not know whether it is in coroutine
+ * context.
+ *
+ * Functions that are only no_coroutine_fn, on the other hand, should not
+ * be called from within coroutines at all. This for example includes
+ * functions that block.
+ *
+ * In the future it would be nice to enable compiler or static checker
+ * support for catching such errors. This annotation is the first step
+ * towards this, and in the meantime it serves as documentation.
+ *
+ * For example:
+ *
+ * static void no_coroutine_fn foo(void) {
+ * ....
+ * }
+ */
+#ifdef __clang__
+#define no_coroutine_fn QEMU_ANNOTATE("no_coroutine_fn")
+#else
+#define no_coroutine_fn
+#endif
+
+
/*
* For mingw, as of v6.0.0, the function implementing the assert macro is
* not marked as noreturn, so the compiler cannot delete code following an
@@ -151,6 +246,31 @@ extern int daemon(int, int);
#define assert(x) g_assert(x)
#endif
+/**
+ * qemu_build_not_reached()
+ *
+ * The compiler, during optimization, is expected to prove that a call
+ * to this function cannot be reached and remove it. If the compiler
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
+ * this will be reported at link time due to the missing symbol.
+ */
+G_NORETURN
+void QEMU_ERROR("code path is reachable")
+ qemu_build_not_reached_always(void);
+#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
+#define qemu_build_not_reached() qemu_build_not_reached_always()
+#else
+#define qemu_build_not_reached() g_assert_not_reached()
+#endif
+
+/**
+ * qemu_build_assert()
+ *
+ * The compiler, during optimization, is expected to prove that the
+ * assertion is true.
+ */
+#define qemu_build_assert(test) while (!(test)) qemu_build_not_reached()
+
/*
* According to waitpid man page:
* WCOREDUMP
@@ -186,6 +306,9 @@ extern int daemon(int, int);
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
#ifndef ENOMEDIUM
#define ENOMEDIUM ENODEV
#endif
@@ -202,6 +325,14 @@ extern int daemon(int, int);
#define ESHUTDOWN 4099
#endif
+#define RETRY_ON_EINTR(expr) \
+ (__extension__ \
+ ({ typeof(expr) __result; \
+ do { \
+ __result = (expr); \
+ } while (__result == -1 && errno == EINTR); \
+ __result; }))
+
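
For illustration, a call that may be interrupted can be wrapped directly, avoiding an open-coded retry loop (fd, buf and len are supplied by the caller):

    ssize_t n = RETRY_ON_EINTR(read(fd, buf, len));
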
/* time_t may be either 32 or 64 bits depending on the host OS, and
* can be either signed or unsigned, so we can't just hardcode a
* specific maximum value. This is not a C preprocessor constant,
@@ -232,42 +363,95 @@ extern int daemon(int, int);
#define TIME_MAX TYPE_MAXIMUM(time_t)
#endif
-/* HOST_LONG_BITS is the size of a native pointer in bits. */
-#if UINTPTR_MAX == UINT32_MAX
-# define HOST_LONG_BITS 32
-#elif UINTPTR_MAX == UINT64_MAX
-# define HOST_LONG_BITS 64
-#else
-# error Unknown pointer size
-#endif
-
/* Mac OSX has a <stdint.h> bug that incorrectly defines SIZE_MAX with
* the wrong type. Our replacement isn't usable in preprocessor
* expressions, but it is sufficient for our needs. */
-#if defined(HAVE_BROKEN_SIZE_MAX) && HAVE_BROKEN_SIZE_MAX
+#ifdef HAVE_BROKEN_SIZE_MAX
#undef SIZE_MAX
#define SIZE_MAX ((size_t)-1)
#endif
-#ifndef MIN
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#endif
-#ifndef MAX
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#endif
+/*
+ * Two variations of MIN/MAX macros. The first is for runtime use, and
+ * evaluates arguments only once (so it is safe even with side
+ * effects), but will not work in constant contexts (such as array
+ * size declarations) because of the '{}'. The second is for constant
+ * expression use, where evaluating arguments twice is safe because
+ * the result is going to be constant anyway, but will not work in a
+ * runtime context because of a void expression where a value is
+ * expected. Thus, both gcc and clang will fail to compile if you use
+ * the wrong macro (even if the error may seem a bit cryptic).
+ *
+ * Note that neither form is usable as an #if condition; if you truly
+ * need to write conditional code that depends on a minimum or maximum
+ * determined by the pre-processor instead of the compiler, you'll
+ * have to open-code it. Sadly, Coverity is severely confused by the
+ * constant variants, so we have to dumb things down there.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
+ */
+#define MIN_INTERNAL(a, b, _a, _b) \
+ ({ \
+ typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
+ _a < _b ? _a : _b; \
+ })
+#undef MIN
+#define MIN(a, b) \
+ MIN_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+
+#define MAX_INTERNAL(a, b, _a, _b) \
+ ({ \
+ typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
+ _a > _b ? _a : _b; \
+ })
+#undef MAX
+#define MAX(a, b) \
+ MAX_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
-/* Minimum function that returns zero only iff both values are zero.
- * Intended for use with unsigned values only. */
-#ifndef MIN_NON_ZERO
-#define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : \
- ((b) == 0 ? (a) : (MIN(a, b))))
+#ifdef __COVERITY__
+# define MIN_CONST(a, b) ((a) < (b) ? (a) : (b))
+# define MAX_CONST(a, b) ((a) > (b) ? (a) : (b))
+#else
+# define MIN_CONST(a, b) \
+ __builtin_choose_expr( \
+ __builtin_constant_p(a) && __builtin_constant_p(b), \
+ (a) < (b) ? (a) : (b), \
+ ((void)0))
+# define MAX_CONST(a, b) \
+ __builtin_choose_expr( \
+ __builtin_constant_p(a) && __builtin_constant_p(b), \
+ (a) > (b) ? (a) : (b), \
+ ((void)0))
#endif
-/* Round number down to multiple */
+/*
+ * Minimum function that returns zero only if both values are zero.
+ * Intended for use with unsigned values only.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
+ */
+#define MIN_NON_ZERO_INTERNAL(a, b, _a, _b) \
+ ({ \
+ typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
+ _a == 0 ? _b : (_b == 0 || _b > _a) ? _a : _b; \
+ })
+#define MIN_NON_ZERO(a, b) \
+ MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+
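
Illustrative uses of the two families (variable names invented): the statement-expression forms for runtime values, which evaluate each argument once, and the *_CONST forms for constant contexts such as array sizes.

    size_t chunk = MIN(request_len, buffer_len);            /* runtime, evaluated once */
    uint64_t gran = MIN_NON_ZERO(cluster_size, alignment);  /* zero only if both are zero */
    static uint8_t scratch[MAX_CONST(512, 4096)];           /* constant context */
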
+/*
+ * Round number down to multiple. Safe when m is not a power of 2 (see
+ * ROUND_DOWN for a faster version when a power of 2 is guaranteed).
+ */
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
-/* Round number up to multiple. Safe when m is not a power of 2 (see
- * ROUND_UP for a faster version when a power of 2 is guaranteed) */
+/*
+ * Round number up to multiple. Safe when m is not a power of 2 (see
+ * ROUND_UP for a faster version when a power of 2 is guaranteed).
+ */
#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
/* Check if n is a multiple of m */
@@ -284,11 +468,22 @@ extern int daemon(int, int);
/* Check if pointer p is n-bytes aligned */
#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))
-/* Round number up to multiple. Requires that d be a power of 2 (see
+/*
+ * Round number down to multiple. Requires that d be a power of 2 (see
+ * QEMU_ALIGN_DOWN for a safer but slower version on arbitrary
+ * numbers); works even if d is a smaller type than n.
+ */
+#ifndef ROUND_DOWN
+#define ROUND_DOWN(n, d) ((n) & -(0 ? (n) : (d)))
+#endif
+
+/*
+ * Round number up to multiple. Requires that d be a power of 2 (see
* QEMU_ALIGN_UP for a safer but slower version on arbitrary
- * numbers); works even if d is a smaller type than n. */
+ * numbers); works even if d is a smaller type than n.
+ */
#ifndef ROUND_UP
-#define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d)))
+#define ROUND_UP(n, d) ROUND_DOWN((n) + (d) - 1, (d))
#endif
#ifndef DIV_ROUND_UP
@@ -307,98 +502,35 @@ extern int daemon(int, int);
#endif
int qemu_daemon(int nochdir, int noclose);
-void *qemu_try_memalign(size_t alignment, size_t size);
-void *qemu_memalign(size_t alignment, size_t size);
-void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared);
-void qemu_vfree(void *ptr);
+void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared,
+ bool noreserve);
void qemu_anon_ram_free(void *ptr, size_t size);
-#define QEMU_MADV_INVALID -1
-
-#if defined(CONFIG_MADVISE)
-
-#define QEMU_MADV_WILLNEED MADV_WILLNEED
-#define QEMU_MADV_DONTNEED MADV_DONTNEED
-#ifdef MADV_DONTFORK
-#define QEMU_MADV_DONTFORK MADV_DONTFORK
-#else
-#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
-#endif
-#ifdef MADV_MERGEABLE
-#define QEMU_MADV_MERGEABLE MADV_MERGEABLE
-#else
-#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
-#endif
-#ifdef MADV_UNMERGEABLE
-#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE
-#else
-#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
-#endif
-#ifdef MADV_DODUMP
-#define QEMU_MADV_DODUMP MADV_DODUMP
-#else
-#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
-#endif
-#ifdef MADV_DONTDUMP
-#define QEMU_MADV_DONTDUMP MADV_DONTDUMP
-#else
-#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
-#endif
-#ifdef MADV_HUGEPAGE
-#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE
+#ifdef _WIN32
+#define HAVE_CHARDEV_SERIAL 1
+#define HAVE_CHARDEV_PARALLEL 1
#else
-#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#if defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \
+ || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \
+ || defined(__GLIBC__) || defined(__APPLE__)
+#define HAVE_CHARDEV_SERIAL 1
#endif
-#ifdef MADV_NOHUGEPAGE
-#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE
-#else
-#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
+#if defined(__linux__) || defined(__FreeBSD__) \
+ || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#define HAVE_CHARDEV_PARALLEL 1
#endif
-#ifdef MADV_REMOVE
-#define QEMU_MADV_REMOVE MADV_REMOVE
-#else
-#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
#endif
-#elif defined(CONFIG_POSIX_MADVISE)
-
-#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED
-#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED
-#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
-#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
-#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
-#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
-
-#else /* no-op */
-
-#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID
-#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID
-#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
-#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
-#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
-#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
-
+#if defined(__HAIKU__)
+#define SIGIO SIGPOLL
#endif
-#ifdef _WIN32
-#define HAVE_CHARDEV_SERIAL 1
-#elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \
- || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \
- || defined(__GLIBC__)
-#define HAVE_CHARDEV_SERIAL 1
-#endif
-
-#if defined(__linux__) || defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || defined(__DragonFly__)
-#define HAVE_CHARDEV_PARPORT 1
+#ifdef HAVE_MADVISE_WITHOUT_PROTOTYPE
+/*
+ * See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for discussion
+ * about Solaris missing the madvise() prototype.
+ */
+int madvise(char *, size_t, int);
#endif
#if defined(CONFIG_LINUX)
@@ -421,10 +553,17 @@ void qemu_anon_ram_free(void *ptr, size_t size);
/* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
# define QEMU_VMALLOC_ALIGN (256 * 4096)
#elif defined(__linux__) && defined(__sparc__)
-#include <sys/shm.h>
-# define QEMU_VMALLOC_ALIGN MAX(getpagesize(), SHMLBA)
+# define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size(), SHMLBA)
+#elif defined(__linux__) && defined(__loongarch__)
+ /*
+ * For transparent hugepage optimization, it had better be huge-page
+ * aligned. The LoongArch host system supports two page sizes, 4K and
+ * 16K; the huge page size is calculated here from the host page size.
+ */
+# define QEMU_VMALLOC_ALIGN (qemu_real_host_page_size() * \
+ qemu_real_host_page_size() / sizeof(long))
#else
-# define QEMU_VMALLOC_ALIGN getpagesize()
+# define QEMU_VMALLOC_ALIGN qemu_real_host_page_size()
#endif
#ifdef CONFIG_POSIX
@@ -455,19 +594,23 @@ void sigaction_invoke(struct sigaction *action,
struct qemu_signalfd_siginfo *info);
#endif
-int qemu_madvise(void *addr, size_t len, int advice);
-int qemu_mprotect_rwx(void *addr, size_t size);
-int qemu_mprotect_none(void *addr, size_t size);
-
-int qemu_open(const char *name, int flags, ...);
+/*
+ * Don't introduce new usage of this function, prefer the following
+ * qemu_open/qemu_create that take an "Error **errp"
+ */
+int qemu_open_old(const char *name, int flags, ...);
+int qemu_open(const char *name, int flags, Error **errp);
+int qemu_create(const char *name, int flags, mode_t mode, Error **errp);
int qemu_close(int fd);
+int qemu_unlink(const char *name);
#ifndef _WIN32
+int qemu_dup_flags(int fd, int flags);
int qemu_dup(int fd);
-#endif
int qemu_lock_fd(int fd, int64_t start, int64_t len, bool exclusive);
int qemu_unlock_fd(int fd, int64_t start, int64_t len);
int qemu_lock_fd_test(int fd, int64_t start, int64_t len, bool exclusive);
bool qemu_has_ofd_lock(void);
+#endif
#if defined(__HAIKU__) && defined(__i386__)
#define FMT_pid "%ld"
@@ -493,8 +636,6 @@ struct iovec {
ssize_t readv(int fd, const struct iovec *iov, int iov_cnt);
ssize_t writev(int fd, const struct iovec *iov, int iov_cnt);
-#else
-#include <sys/uio.h>
#endif
#ifdef _WIN32
@@ -514,46 +655,18 @@ static inline void qemu_timersub(const struct timeval *val1,
#define qemu_timersub timersub
#endif
-void qemu_set_cloexec(int fd);
-
-/* Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default
- * instead of QEMU_VERSION, so setting hw_version on MachineClass
- * is no longer mandatory.
- *
- * Do NOT change this string, or it will break compatibility on all
- * machine classes that don't set hw_version.
- */
-#define QEMU_HW_VERSION "2.5+"
-
-/* QEMU "hardware version" setting. Used to replace code that exposed
- * QEMU_VERSION to guests in the past and need to keep compatibility.
- * Do not use qemu_hw_version() in new code.
- */
-void qemu_set_hw_version(const char *);
-const char *qemu_hw_version(void);
+ssize_t qemu_write_full(int fd, const void *buf, size_t count)
+ G_GNUC_WARN_UNUSED_RESULT;
-void fips_set_state(bool requested);
-bool fips_get_state(void);
+void qemu_set_cloexec(int fd);
-/* Return a dynamically allocated pathname denoting a file or directory that is
- * appropriate for storing local state.
- *
- * @relative_pathname need not start with a directory separator; one will be
- * added automatically.
+/* Return a dynamically allocated directory path that is appropriate for storing
+ * local state.
*
* The caller is responsible for releasing the value returned with g_free()
* after use.
*/
-char *qemu_get_local_state_pathname(const char *relative_pathname);
-
-/* Find program directory, and save it for later usage with
- * qemu_get_exec_dir().
- * Try OS specific API first, if not working, parse from argv0. */
-void qemu_init_exec_dir(const char *argv0);
-
-/* Get the saved exec dir.
- * Caller needs to release the returned string by g_free() */
-char *qemu_get_exec_dir(void);
+char *qemu_get_local_state_dir(void);
/**
* qemu_getauxval:
@@ -566,8 +679,41 @@ unsigned long qemu_getauxval(unsigned long type);
void qemu_set_tty_echo(int fd, bool echo);
-void os_mem_prealloc(int fd, char *area, size_t sz, int smp_cpus,
- Error **errp);
+typedef struct ThreadContext ThreadContext;
+
+/**
+ * qemu_prealloc_mem:
+ * @fd: the fd mapped into the area, -1 for anonymous memory
+ * @area: start address of the area to preallocate
+ * @sz: the size of the area to preallocate
+ * @max_threads: maximum number of threads to use
+ * @tc: prealloc context threads pointer, NULL if not in use
+ * @async: request asynchronous preallocation, requires @tc
+ * @errp: returns an error if this function fails
+ *
+ * Preallocate memory (populate/prefault page tables writable) for the virtual
+ * memory area starting at @area with the size of @sz. After a successful call,
+ * each page in the area was faulted in writable at least once, for example,
+ * after allocating file blocks for mapped files.
+ *
+ * When setting @async, allocation might be performed asynchronously.
+ * qemu_finish_async_prealloc_mem() must be called to finish any asynchronous
+ * preallocation.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
+ ThreadContext *tc, bool async, Error **errp);
+
+/**
+ * qemu_finish_async_prealloc_mem:
+ * @errp: returns an error if this function fails
+ *
+ * Finish all outstanding asynchronous memory preallocation.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool qemu_finish_async_prealloc_mem(Error **errp);
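
A sketch of synchronous preallocation following the contract documented above: anonymous memory (fd == -1), no ThreadContext and no async request; the thread count is an arbitrary choice.

    static bool prealloc_region(char *area, size_t sz, Error **errp)
    {
        return qemu_prealloc_mem(-1 /* fd */, area, sz, 4 /* max_threads */,
                                 NULL /* tc */, false /* async */, errp);
    }
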
/**
* qemu_get_pid_name:
@@ -579,29 +725,95 @@ void os_mem_prealloc(int fd, char *area, size_t sz, int smp_cpus,
*/
char *qemu_get_pid_name(pid_t pid);
+/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
+ * when intptr_t is 32-bit and we are aligning a long long.
+ */
+static inline uintptr_t qemu_real_host_page_size(void)
+{
+ return getpagesize();
+}
+
+static inline intptr_t qemu_real_host_page_mask(void)
+{
+ return -(intptr_t)qemu_real_host_page_size();
+}
+
+/*
+ * After using getopt or getopt_long, if you need to parse another set
+ * of options, then you must reset optind. Unfortunately the way to
+ * do this varies between implementations of getopt.
+ */
+static inline void qemu_reset_optind(void)
+{
+#ifdef HAVE_OPTRESET
+ optind = 1;
+ optreset = 1;
+#else
+ optind = 0;
+#endif
+}
+
+int qemu_fdatasync(int fd);
+
+/**
+ * Sync changes made to the memory mapped file back to the backing
+ * storage. For POSIX compliant systems this will fallback
+ * to regular msync call. Otherwise it will trigger whole file sync
+ * (including the metadata case there is no support to skip that otherwise)
+ *
+ * @addr - start of the memory area to be synced
+ * @length - length of the area to be synced
+ * @fd - file descriptor for the file to be synced
+ * (mandatory only for POSIX non-compliant systems)
+ */
+int qemu_msync(void *addr, size_t length, int fd);
+
/**
- * qemu_fork:
+ * qemu_get_host_physmem:
*
- * A version of fork that avoids signal handler race
- * conditions that can lead to child process getting
- * signals that are otherwise only expected by the
- * parent. It also resets all signal handlers to the
- * default settings.
+ * Operating system agnostic way of querying host memory.
*
- * Returns 0 to child process, pid number to parent
- * or -1 on failure.
+ * Returns the amount of physical memory on the system. This is purely
+ * advisory and may return 0 if we can't work it out. At the other
+ * end we saturate to SIZE_MAX if you are lucky enough to have that
+ * much memory.
*/
-pid_t qemu_fork(Error **errp);
+size_t qemu_get_host_physmem(void);
-/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
- * when intptr_t is 32-bit and we are aligning a long long.
+/*
+ * Toggle write/execute on the pages marked MAP_JIT
+ * for the current thread.
+ */
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+static inline void qemu_thread_jit_execute(void)
+{
+ pthread_jit_write_protect_np(true);
+}
+
+static inline void qemu_thread_jit_write(void)
+{
+ pthread_jit_write_protect_np(false);
+}
+#else
+static inline void qemu_thread_jit_write(void) {}
+static inline void qemu_thread_jit_execute(void) {}
+#endif
+
+/**
+ * Platforms which do not support system() return ENOSYS
*/
-extern uintptr_t qemu_real_host_page_size;
-extern intptr_t qemu_real_host_page_mask;
+#ifndef HAVE_SYSTEM_FUNCTION
+#define system platform_does_not_support_system
+static inline int platform_does_not_support_system(const char *command)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif /* !HAVE_SYSTEM_FUNCTION */
-extern int qemu_icache_linesize;
-extern int qemu_icache_linesize_log;
-extern int qemu_dcache_linesize;
-extern int qemu_dcache_linesize_log;
+#ifdef __cplusplus
+}
+#endif
#endif
diff --git a/include/qemu/plugin-event.h b/include/qemu/plugin-event.h
new file mode 100644
index 0000000000..7056d8427b
--- /dev/null
+++ b/include/qemu/plugin-event.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_PLUGIN_EVENT_H
+#define QEMU_PLUGIN_EVENT_H
+
+/*
+ * Events that plugins can subscribe to.
+ */
+enum qemu_plugin_event {
+ QEMU_PLUGIN_EV_VCPU_INIT,
+ QEMU_PLUGIN_EV_VCPU_EXIT,
+ QEMU_PLUGIN_EV_VCPU_TB_TRANS,
+ QEMU_PLUGIN_EV_VCPU_IDLE,
+ QEMU_PLUGIN_EV_VCPU_RESUME,
+ QEMU_PLUGIN_EV_VCPU_SYSCALL,
+ QEMU_PLUGIN_EV_VCPU_SYSCALL_RET,
+ QEMU_PLUGIN_EV_FLUSH,
+ QEMU_PLUGIN_EV_ATEXIT,
+ QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */
+};
+
+#endif /* QEMU_PLUGIN_EVENT_H */
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
new file mode 100644
index 0000000000..71c1123308
--- /dev/null
+++ b/include/qemu/plugin-memory.h
@@ -0,0 +1,36 @@
+/*
+ * Plugin Memory API
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PLUGIN_MEMORY_H
+#define PLUGIN_MEMORY_H
+
+#include "exec/cpu-defs.h"
+#include "exec/hwaddr.h"
+
+struct qemu_plugin_hwaddr {
+ bool is_io;
+ bool is_store;
+ hwaddr phys_addr;
+ MemoryRegion *mr;
+};
+
+/**
+ * tlb_plugin_lookup: query last TLB lookup
+ * @cpu: cpu environment
+ * @addr: the virtual address of the access
+ * @mmu_idx: the MMU index of the access
+ * @is_store: true if the access was a store
+ * @data: output structure filled in with details of the access
+ *
+ * This function can be used directly after a memory operation to
+ * query information about the access. It is used by the plugin
+ * infrastructure to expose more information about the address.
+ *
+ * It would only fail if not called from an instrumented memory access
+ * which would be an abuse of the API.
+ */
+bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
+ bool is_store, struct qemu_plugin_hwaddr *data);
+
+#endif /* PLUGIN_MEMORY_H */
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
new file mode 100644
index 0000000000..12a96cea2a
--- /dev/null
+++ b/include/qemu/plugin.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_PLUGIN_H
+#define QEMU_PLUGIN_H
+
+#include "qemu/config-file.h"
+#include "qemu/qemu-plugin.h"
+#include "qemu/error-report.h"
+#include "qemu/queue.h"
+#include "qemu/option.h"
+#include "qemu/plugin-event.h"
+#include "exec/memopidx.h"
+#include "hw/core/cpu.h"
+
+/*
+ * Option parsing/processing.
+ * Note that we can load an arbitrary number of plugins.
+ */
+struct qemu_plugin_desc;
+typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList;
+
+/*
+ * Construct a qemu_plugin_meminfo_t.
+ */
+static inline qemu_plugin_meminfo_t
+make_plugin_meminfo(MemOpIdx oi, enum qemu_plugin_mem_rw rw)
+{
+ return oi | (rw << 16);
+}
+
+/*
+ * Extract the memory operation direction from a qemu_plugin_meminfo_t.
+ * Other portions may be extracted via get_memop and get_mmuidx.
+ */
+static inline enum qemu_plugin_mem_rw
+get_plugin_meminfo_rw(qemu_plugin_meminfo_t i)
+{
+ return i >> 16;
+}
+
+#ifdef CONFIG_PLUGIN
+extern QemuOptsList qemu_plugin_opts;
+
+static inline void qemu_plugin_add_opts(void)
+{
+ qemu_add_opts(&qemu_plugin_opts);
+}
+
+void qemu_plugin_opt_parse(const char *optstr, QemuPluginList *head);
+int qemu_plugin_load_list(QemuPluginList *head, Error **errp);
+
+union qemu_plugin_cb_sig {
+ qemu_plugin_simple_cb_t simple;
+ qemu_plugin_udata_cb_t udata;
+ qemu_plugin_vcpu_simple_cb_t vcpu_simple;
+ qemu_plugin_vcpu_udata_cb_t vcpu_udata;
+ qemu_plugin_vcpu_tb_trans_cb_t vcpu_tb_trans;
+ qemu_plugin_vcpu_mem_cb_t vcpu_mem;
+ qemu_plugin_vcpu_syscall_cb_t vcpu_syscall;
+ qemu_plugin_vcpu_syscall_ret_cb_t vcpu_syscall_ret;
+ void *generic;
+};
+
+enum plugin_dyn_cb_type {
+ PLUGIN_CB_INSN,
+ PLUGIN_CB_MEM,
+ PLUGIN_N_CB_TYPES,
+};
+
+enum plugin_dyn_cb_subtype {
+ PLUGIN_CB_REGULAR,
+ PLUGIN_CB_REGULAR_R,
+ PLUGIN_CB_INLINE,
+ PLUGIN_N_CB_SUBTYPES,
+};
+
+/*
+ * A dynamic callback has an insertion point that is determined at run-time.
+ * Usually the insertion point is somewhere in the code cache; think for
+ * instance of a callback to be called upon the execution of a particular TB.
+ */
+struct qemu_plugin_dyn_cb {
+ union qemu_plugin_cb_sig f;
+ void *userp;
+ enum plugin_dyn_cb_subtype type;
+ /* @rw applies to mem callbacks only (both regular and inline) */
+ enum qemu_plugin_mem_rw rw;
+ /* fields specific to each dyn_cb type go here */
+ union {
+ struct {
+ qemu_plugin_u64 entry;
+ enum qemu_plugin_op op;
+ uint64_t imm;
+ } inline_insn;
+ };
+};
+
+/* Internal context for instrumenting an instruction */
+struct qemu_plugin_insn {
+ GByteArray *data;
+ uint64_t vaddr;
+ void *haddr;
+ GArray *cbs[PLUGIN_N_CB_TYPES][PLUGIN_N_CB_SUBTYPES];
+ bool calls_helpers;
+
+ /* if set, the instruction calls helpers that might access guest memory */
+ bool mem_helper;
+
+ bool mem_only;
+};
+
+/* A scoreboard is an array of values, indexed by vcpu_index */
+struct qemu_plugin_scoreboard {
+ GArray *data;
+ QLIST_ENTRY(qemu_plugin_scoreboard) entry;
+};
+
+/*
+ * qemu_plugin_insn allocate and cleanup functions. We don't expect to
+ * cleanup many of these structures. They are reused for each fresh
+ * translation.
+ */
+
+static inline void qemu_plugin_insn_cleanup_fn(gpointer data)
+{
+ struct qemu_plugin_insn *insn = (struct qemu_plugin_insn *) data;
+ g_byte_array_free(insn->data, true);
+}
+
+static inline struct qemu_plugin_insn *qemu_plugin_insn_alloc(void)
+{
+ int i, j;
+ struct qemu_plugin_insn *insn = g_new0(struct qemu_plugin_insn, 1);
+ insn->data = g_byte_array_sized_new(4);
+
+ for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
+ for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
+ insn->cbs[i][j] = g_array_new(false, false,
+ sizeof(struct qemu_plugin_dyn_cb));
+ }
+ }
+ return insn;
+}
+
+/* Internal context for this TranslationBlock */
+struct qemu_plugin_tb {
+ GPtrArray *insns;
+ size_t n;
+ uint64_t vaddr;
+ uint64_t vaddr2;
+ void *haddr1;
+ void *haddr2;
+ bool mem_only;
+
+ /* if set, the TB calls helpers that might access guest memory */
+ bool mem_helper;
+
+ GArray *cbs[PLUGIN_N_CB_SUBTYPES];
+};
+
+/**
+ * qemu_plugin_tb_insn_get(): get next plugin record for translation.
+ * @tb: the internal tb context
+ * @pc: address of instruction
+ */
+static inline
+struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb,
+ uint64_t pc)
+{
+ struct qemu_plugin_insn *insn;
+ int i, j;
+
+ if (unlikely(tb->n == tb->insns->len)) {
+ struct qemu_plugin_insn *new_insn = qemu_plugin_insn_alloc();
+ g_ptr_array_add(tb->insns, new_insn);
+ }
+ insn = g_ptr_array_index(tb->insns, tb->n++);
+ g_byte_array_set_size(insn->data, 0);
+ insn->calls_helpers = false;
+ insn->mem_helper = false;
+ insn->vaddr = pc;
+
+ for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
+ for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
+ g_array_set_size(insn->cbs[i][j], 0);
+ }
+ }
+
+ return insn;
+}
+
+/**
+ * struct CPUPluginState - per-CPU state for plugins
+ * @event_mask: plugin event bitmap. Modified only via async work.
+ */
+struct CPUPluginState {
+ DECLARE_BITMAP(event_mask, QEMU_PLUGIN_EV_MAX);
+};
+
+/**
+ * qemu_plugin_create_vcpu_state: allocate plugin state
+ */
+CPUPluginState *qemu_plugin_create_vcpu_state(void);
+
+void qemu_plugin_vcpu_init_hook(CPUState *cpu);
+void qemu_plugin_vcpu_exit_hook(CPUState *cpu);
+void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb);
+void qemu_plugin_vcpu_idle_cb(CPUState *cpu);
+void qemu_plugin_vcpu_resume_cb(CPUState *cpu);
+void
+qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1,
+ uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5,
+ uint64_t a6, uint64_t a7, uint64_t a8);
+void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret);
+
+void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw);
+
+void qemu_plugin_flush_cb(void);
+
+void qemu_plugin_atexit_cb(void);
+
+void qemu_plugin_add_dyn_cb_arr(GArray *arr);
+
+static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu)
+{
+ cpu->plugin_mem_cbs = NULL;
+}
+
+/**
+ * qemu_plugin_user_exit(): clean-up callbacks before calling exit callbacks
+ *
+ * This is a user-mode only helper that ensures we have fully cleared
+ * callbacks from all threads before calling the exit callbacks. This
+ * is so the plugins themselves don't have to jump through hoops to
+ * guard against race conditions.
+ */
+void qemu_plugin_user_exit(void);
+
+/**
+ * qemu_plugin_user_prefork_lock(): take plugin lock before forking
+ *
+ * This is a user-mode only helper to take the internal plugin lock
+ * before a fork event. This ensures a consistent lock state.
+ */
+void qemu_plugin_user_prefork_lock(void);
+
+/**
+ * qemu_plugin_user_postfork(): reset the plugin lock
+ * @is_child: is this thread the child
+ *
+ * This user-mode only helper resets the lock state after a fork so we
+ * can continue using the plugin interface.
+ */
+void qemu_plugin_user_postfork(bool is_child);
+
+#else /* !CONFIG_PLUGIN */
+
+static inline void qemu_plugin_add_opts(void)
+{ }
+
+static inline void qemu_plugin_opt_parse(const char *optstr,
+ QemuPluginList *head)
+{
+ error_report("plugin interface not enabled in this build");
+ exit(1);
+}
+
+static inline int qemu_plugin_load_list(QemuPluginList *head, Error **errp)
+{
+ return 0;
+}
+
+static inline void qemu_plugin_vcpu_init_hook(CPUState *cpu)
+{ }
+
+static inline void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
+{ }
+
+static inline void qemu_plugin_tb_trans_cb(CPUState *cpu,
+ struct qemu_plugin_tb *tb)
+{ }
+
+static inline void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
+{ }
+
+static inline void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
+{ }
+
+static inline void
+qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
+ uint64_t a3, uint64_t a4, uint64_t a5, uint64_t a6,
+ uint64_t a7, uint64_t a8)
+{ }
+
+static inline
+void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
+{ }
+
+static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+ MemOpIdx oi,
+ enum qemu_plugin_mem_rw rw)
+{ }
+
+static inline void qemu_plugin_flush_cb(void)
+{ }
+
+static inline void qemu_plugin_atexit_cb(void)
+{ }
+
+static inline
+void qemu_plugin_add_dyn_cb_arr(GArray *arr)
+{ }
+
+static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu)
+{ }
+
+static inline void qemu_plugin_user_exit(void)
+{ }
+
+static inline void qemu_plugin_user_prefork_lock(void)
+{ }
+
+static inline void qemu_plugin_user_postfork(bool is_child)
+{ }
+
+#endif /* !CONFIG_PLUGIN */
+
+#endif /* QEMU_PLUGIN_H */
diff --git a/include/qemu/pmem.h b/include/qemu/pmem.h
index dfb6d0da62..d2d7ad085c 100644
--- a/include/qemu/pmem.h
+++ b/include/qemu/pmem.h
@@ -33,4 +33,4 @@ pmem_persist(const void *addr, size_t len)
#endif /* CONFIG_LIBPMEM */
-#endif /* !QEMU_PMEM_H */
+#endif /* QEMU_PMEM_H */
diff --git a/include/qemu/processor.h b/include/qemu/processor.h
index 8e16c9277d..9f0dcdf28f 100644
--- a/include/qemu/processor.h
+++ b/include/qemu/processor.h
@@ -7,8 +7,6 @@
#ifndef QEMU_PROCESSOR_H
#define QEMU_PROCESSOR_H
-#include "qemu/atomic.h"
-
#if defined(__i386__) || defined(__x86_64__)
# define cpu_relax() asm volatile("rep; nop" ::: "memory")
diff --git a/include/qemu/progress_meter.h b/include/qemu/progress_meter.h
new file mode 100644
index 0000000000..0f2c0a32d2
--- /dev/null
+++ b/include/qemu/progress_meter.h
@@ -0,0 +1,62 @@
+/*
+ * Helper functionality for some process progress tracking.
+ *
+ * Copyright (c) 2011 IBM Corp.
+ * Copyright (c) 2012, 2018 Red Hat, Inc.
+ * Copyright (c) 2020 Virtuozzo International GmbH
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_PROGRESS_METER_H
+#define QEMU_PROGRESS_METER_H
+
+#include "qemu/thread.h"
+
+typedef struct ProgressMeter {
+ /**
+ * Current progress. The unit is arbitrary as long as the ratio between
+ * current and total represents the estimated percentage
+ * of work already done.
+ */
+ uint64_t current;
+
+ /** Estimated current value at the completion of the process */
+ uint64_t total;
+
+ QemuMutex lock; /* protects concurrent access to above fields */
+} ProgressMeter;
+
+void progress_init(ProgressMeter *pm);
+void progress_destroy(ProgressMeter *pm);
+
+/* Get a snapshot of internal current and total values */
+void progress_get_snapshot(ProgressMeter *pm, uint64_t *current,
+ uint64_t *total);
+
+/* Increases the amount of work done so far by @done */
+void progress_work_done(ProgressMeter *pm, uint64_t done);
+
+/* Sets how much work has to be done to complete to @remaining */
+void progress_set_remaining(ProgressMeter *pm, uint64_t remaining);
+
+/* Increases the total work to do by @delta */
+void progress_increase_remaining(ProgressMeter *pm, uint64_t delta);
+
+#endif /* QEMU_PROGRESS_METER_H */
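
A usage sketch of the meter; the work amounts are arbitrary, and the lock declared in the struct is assumed to be managed internally by the helpers.

    #include "qemu/progress_meter.h"

    static void example_progress(void)
    {
        ProgressMeter pm;
        uint64_t current, total;

        progress_init(&pm);
        progress_set_remaining(&pm, 1000);      /* 1000 units still to do */
        progress_work_done(&pm, 250);           /* a quarter finished */
        progress_increase_remaining(&pm, 100);  /* more work discovered */

        progress_get_snapshot(&pm, &current, &total);
        /* current / total approximates the fraction completed */

        progress_destroy(&pm);
    }
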
diff --git a/include/qemu/qdist.h b/include/qemu/qdist.h
index 54ece760d6..bfb3211537 100644
--- a/include/qemu/qdist.h
+++ b/include/qemu/qdist.h
@@ -7,7 +7,6 @@
#ifndef QEMU_QDIST_H
#define QEMU_QDIST_H
-#include "qemu-common.h"
#include "qemu/bitops.h"
/*
diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
new file mode 100644
index 0000000000..4fc6c3739b
--- /dev/null
+++ b/include/qemu/qemu-plugin.h
@@ -0,0 +1,840 @@
+/*
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * Copyright (C) 2019, Linaro
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_QEMU_PLUGIN_H
+#define QEMU_QEMU_PLUGIN_H
+
+#include <glib.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/*
+ * For best performance, build the plugin with -fvisibility=hidden so that
+ * QEMU_PLUGIN_LOCAL is implicit. Then, just mark qemu_plugin_install with
+ * QEMU_PLUGIN_EXPORT. For more info, see
+ * https://gcc.gnu.org/wiki/Visibility
+ */
+#if defined _WIN32 || defined __CYGWIN__
+ #ifdef CONFIG_PLUGIN
+ #define QEMU_PLUGIN_EXPORT __declspec(dllimport)
+ #define QEMU_PLUGIN_API __declspec(dllexport)
+ #else
+ #define QEMU_PLUGIN_EXPORT __declspec(dllexport)
+ #define QEMU_PLUGIN_API __declspec(dllimport)
+ #endif
+ #define QEMU_PLUGIN_LOCAL
+#else
+ #define QEMU_PLUGIN_EXPORT __attribute__((visibility("default")))
+ #define QEMU_PLUGIN_LOCAL __attribute__((visibility("hidden")))
+ #define QEMU_PLUGIN_API
+#endif
+
+/**
+ * typedef qemu_plugin_id_t - Unique plugin ID
+ */
+typedef uint64_t qemu_plugin_id_t;
+
+/*
+ * Versioning plugins:
+ *
+ * The plugin API will pass a minimum and current API version that
+ * QEMU currently supports. The minimum API will be incremented if an
+ * API needs to be deprecated.
+ *
+ * The plugins export the API they were built against by exposing the
+ * symbol qemu_plugin_version which can be checked.
+ *
+ * version 2:
+ * - removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus
+ * - removed qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline.
+ * Those functions are replaced by *_per_vcpu variants, which guarantee
+ * thread-safety for operations.
+ */
+
+extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
+
+#define QEMU_PLUGIN_VERSION 2
+
+/**
+ * struct qemu_info_t - system information for plugins
+ *
+ * This structure provides some limited information about the
+ * system to allow the plugin to make decisions on how to proceed. For
+ * example it might only be suitable for running on some guest
+ * architectures or when under full system emulation.
+ */
+typedef struct qemu_info_t {
+ /** @target_name: string describing architecture */
+ const char *target_name;
+ /** @version: minimum and current plugin API level */
+ struct {
+ int min;
+ int cur;
+ } version;
+ /** @system_emulation: is this a full system emulation? */
+ bool system_emulation;
+ union {
+ /** @system: information relevant to system emulation */
+ struct {
+ /** @system.smp_vcpus: initial number of vCPUs */
+ int smp_vcpus;
+ /** @system.max_vcpus: maximum possible number of vCPUs */
+ int max_vcpus;
+ } system;
+ };
+} qemu_info_t;
+
+/**
+ * qemu_plugin_install() - Install a plugin
+ * @id: this plugin's opaque ID
+ * @info: a block describing some details about the guest
+ * @argc: number of arguments
+ * @argv: array of arguments (@argc elements)
+ *
+ * All plugins must export this symbol which is called when the plugin
+ * is first loaded. Calling qemu_plugin_uninstall() from this function
+ * is a bug.
+ *
+ * Note: @info is only live during the call. Copy any information you
+ * want to keep. @argv remains valid throughout the lifetime of the
+ * loaded plugin.
+ *
+ * Return: 0 on successful loading, !0 for an error.
+ */
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv);
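For reference, a minimal plugin skeleton assembled from the declarations above; this is a sketch, with error handling and real work elided.

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        /* @info is only live during this call; copy anything needed now */
        bool sysemu = info->system_emulation;
        (void)sysemu;

        /* callback registrations (see the declarations below) go here */
        return 0;
    }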
+
+/**
+ * typedef qemu_plugin_simple_cb_t - simple callback
+ * @id: the unique qemu_plugin_id_t
+ *
+ * This callback passes no information aside from the unique @id.
+ */
+typedef void (*qemu_plugin_simple_cb_t)(qemu_plugin_id_t id);
+
+/**
+ * typedef qemu_plugin_udata_cb_t - callback with user data
+ * @id: the unique qemu_plugin_id_t
+ * @userdata: a pointer to some user data supplied when the callback
+ * was registered.
+ */
+typedef void (*qemu_plugin_udata_cb_t)(qemu_plugin_id_t id, void *userdata);
+
+/**
+ * typedef qemu_plugin_vcpu_simple_cb_t - vcpu callback
+ * @id: the unique qemu_plugin_id_t
+ * @vcpu_index: the current vcpu context
+ */
+typedef void (*qemu_plugin_vcpu_simple_cb_t)(qemu_plugin_id_t id,
+ unsigned int vcpu_index);
+
+/**
+ * typedef qemu_plugin_vcpu_udata_cb_t - vcpu callback
+ * @vcpu_index: the current vcpu context
+ * @userdata: a pointer to some user data supplied when the callback
+ * was registered.
+ */
+typedef void (*qemu_plugin_vcpu_udata_cb_t)(unsigned int vcpu_index,
+ void *userdata);
+
+/**
+ * qemu_plugin_uninstall() - Uninstall a plugin
+ * @id: this plugin's opaque ID
+ * @cb: callback to be called once the plugin has been removed
+ *
+ * Do NOT assume that the plugin has been uninstalled once this function
+ * returns. Plugins are uninstalled asynchronously, and therefore the given
+ * plugin receives callbacks until @cb is called.
+ *
+ * Note: Calling this function from qemu_plugin_install() is a bug.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb);
+
+/**
+ * qemu_plugin_reset() - Reset a plugin
+ * @id: this plugin's opaque ID
+ * @cb: callback to be called once the plugin has been reset
+ *
+ * Unregisters all callbacks for the plugin given by @id.
+ *
+ * Do NOT assume that the plugin has been reset once this function returns.
+ * Plugins are reset asynchronously, and therefore the given plugin receives
+ * callbacks until @cb is called.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb);
+
+/**
+ * qemu_plugin_register_vcpu_init_cb() - register a vCPU initialization callback
+ * @id: plugin ID
+ * @cb: callback function
+ *
+ * The @cb function is called every time a vCPU is initialized.
+ *
+ * See also: qemu_plugin_register_vcpu_exit_cb()
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_simple_cb_t cb);
+
+/**
+ * qemu_plugin_register_vcpu_exit_cb() - register a vCPU exit callback
+ * @id: plugin ID
+ * @cb: callback function
+ *
+ * The @cb function is called every time a vCPU exits.
+ *
+ * See also: qemu_plugin_register_vcpu_init_cb()
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_simple_cb_t cb);
+
+/**
+ * qemu_plugin_register_vcpu_idle_cb() - register a vCPU idle callback
+ * @id: plugin ID
+ * @cb: callback function
+ *
+ * The @cb function is called every time a vCPU idles.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_simple_cb_t cb);
+
+/**
+ * qemu_plugin_register_vcpu_resume_cb() - register a vCPU resume callback
+ * @id: plugin ID
+ * @cb: callback function
+ *
+ * The @cb function is called every time a vCPU resumes execution.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_simple_cb_t cb);
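A short sketch of wiring these lifecycle hooks up from qemu_plugin_install(); the handler and helper names are illustrative, and qemu_plugin_outs() is declared further down in this header.

    static void vcpu_lifecycle_cb(qemu_plugin_id_t id, unsigned int vcpu_index)
    {
        g_autofree gchar *msg = g_strdup_printf("vcpu %u event\n", vcpu_index);
        qemu_plugin_outs(msg);
    }

    static void register_lifecycle_hooks(qemu_plugin_id_t id)
    {
        qemu_plugin_register_vcpu_init_cb(id, vcpu_lifecycle_cb);
        qemu_plugin_register_vcpu_exit_cb(id, vcpu_lifecycle_cb);
        qemu_plugin_register_vcpu_idle_cb(id, vcpu_lifecycle_cb);
        qemu_plugin_register_vcpu_resume_cb(id, vcpu_lifecycle_cb);
    }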
+
+/** struct qemu_plugin_tb - Opaque handle for a translation block */
+struct qemu_plugin_tb;
+/** struct qemu_plugin_insn - Opaque handle for a translated instruction */
+struct qemu_plugin_insn;
+/** struct qemu_plugin_scoreboard - Opaque handle for a scoreboard */
+struct qemu_plugin_scoreboard;
+
+/**
+ * typedef qemu_plugin_u64 - uint64_t member of an entry in a scoreboard
+ *
+ * This type gives access to a specific uint64_t member in one given
+ * entry, located at a specified offset. Inline operations expect this
+ * as their entry argument.
+typedef struct {
+ struct qemu_plugin_scoreboard *score;
+ size_t offset;
+} qemu_plugin_u64;
+
+/**
+ * enum qemu_plugin_cb_flags - type of callback
+ *
+ * @QEMU_PLUGIN_CB_NO_REGS: callback does not access the CPU's regs
+ * @QEMU_PLUGIN_CB_R_REGS: callback reads the CPU's regs
+ * @QEMU_PLUGIN_CB_RW_REGS: callback reads and writes the CPU's regs
+ *
+ * Note: currently QEMU_PLUGIN_CB_RW_REGS is unused; plugins cannot
+ * change system register state.
+ */
+enum qemu_plugin_cb_flags {
+ QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_CB_R_REGS,
+ QEMU_PLUGIN_CB_RW_REGS,
+};
+
+enum qemu_plugin_mem_rw {
+ QEMU_PLUGIN_MEM_R = 1,
+ QEMU_PLUGIN_MEM_W,
+ QEMU_PLUGIN_MEM_RW,
+};
+
+/**
+ * typedef qemu_plugin_vcpu_tb_trans_cb_t - translation callback
+ * @id: unique plugin id
+ * @tb: opaque handle used for querying and instrumenting a block.
+ */
+typedef void (*qemu_plugin_vcpu_tb_trans_cb_t)(qemu_plugin_id_t id,
+ struct qemu_plugin_tb *tb);
+
+/**
+ * qemu_plugin_register_vcpu_tb_trans_cb() - register a translate cb
+ * @id: plugin ID
+ * @cb: callback function
+ *
+ * The @cb function is called every time a translation occurs. The @cb
+ * function is passed an opaque struct qemu_plugin_tb handle which it
+ * can query for additional information, including the list of translated
+ * instructions. At this point the plugin can register further
+ * callbacks to be triggered when the block or individual instruction
+ * executes.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_tb_trans_cb_t cb);
+
+/**
+ * qemu_plugin_register_vcpu_tb_exec_cb() - register execution callback
+ * @tb: the opaque qemu_plugin_tb handle for the translation
+ * @cb: callback function
+ * @flags: whether the plugin reads or writes the CPU's registers
+ * @userdata: any plugin data to pass to the @cb
+ *
+ * The @cb function is called every time a translated unit executes.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
+ qemu_plugin_vcpu_udata_cb_t cb,
+ enum qemu_plugin_cb_flags flags,
+ void *userdata);
+
+/**
+ * enum qemu_plugin_op - describes an inline op
+ *
+ * @QEMU_PLUGIN_INLINE_ADD_U64: add an immediate value uint64_t
+ *
+ * Note: currently only a single inline op is supported.
+ */
+
+enum qemu_plugin_op {
+ QEMU_PLUGIN_INLINE_ADD_U64,
+};
+
+/**
+ * qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() - execution inline op
+ * @tb: the opaque qemu_plugin_tb handle for the translation
+ * @op: the type of qemu_plugin_op (e.g. ADD_U64)
+ * @entry: entry to run op
+ * @imm: the op data (e.g. 1)
+ *
+ * Insert an inline op on a given scoreboard entry.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ struct qemu_plugin_tb *tb,
+ enum qemu_plugin_op op,
+ qemu_plugin_u64 entry,
+ uint64_t imm);
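A sketch of using the inline variant from a translation callback; tb_count is assumed to have been built from a scoreboard created at install time (see the scoreboard helpers later in this header).

    static qemu_plugin_u64 tb_count;   /* set up at install time (assumed) */

    static void tb_trans_inline_cb(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        /* add 1 to this vCPU's counter each time the block executes */
        qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
            tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_count, 1);
    }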
+
+/**
+ * qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb
+ * @insn: the opaque qemu_plugin_insn handle for an instruction
+ * @cb: callback function
+ * @flags: whether the plugin reads or writes the CPU's registers
+ * @userdata: any plugin data to pass to the @cb
+ *
+ * The @cb function is called every time an instruction is executed.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
+ qemu_plugin_vcpu_udata_cb_t cb,
+ enum qemu_plugin_cb_flags flags,
+ void *userdata);
+
+/**
+ * qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu() - insn exec inline op
+ * @insn: the opaque qemu_plugin_insn handle for an instruction
+ * @op: the type of qemu_plugin_op (e.g. ADD_U64)
+ * @entry: entry to run op
+ * @imm: the op data (e.g. 1)
+ *
+ * Insert an inline op that is executed every time the instruction executes.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ struct qemu_plugin_insn *insn,
+ enum qemu_plugin_op op,
+ qemu_plugin_u64 entry,
+ uint64_t imm);
+
+/**
+ * qemu_plugin_tb_n_insns() - query helper for number of insns in TB
+ * @tb: opaque handle to TB passed to callback
+ *
+ * Returns: number of instructions in this block
+ */
+QEMU_PLUGIN_API
+size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb);
+
+/**
+ * qemu_plugin_tb_vaddr() - query helper for vaddr of TB start
+ * @tb: opaque handle to TB passed to callback
+ *
+ * Returns: virtual address of block start
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb);
+
+/**
+ * qemu_plugin_tb_get_insn() - retrieve handle for instruction
+ * @tb: opaque handle to TB passed to callback
+ * @idx: instruction number, 0 indexed
+ *
+ * The returned handle can be used in follow-up helper queries as well
+ * as when instrumenting an instruction. It is only valid for the
+ * lifetime of the callback.
+ *
+ * Returns: opaque handle to instruction
+ */
+QEMU_PLUGIN_API
+struct qemu_plugin_insn *
+qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx);
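Putting the query helpers together, a sketch of the usual translation-time pattern; the callback names are illustrative.

    static void insn_exec_cb(unsigned int vcpu_index, void *userdata)
    {
        /* runs once per executed instruction */
    }

    static void tb_trans_cb(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n = qemu_plugin_tb_n_insns(tb);

        for (size_t i = 0; i < n; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

            qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec_cb,
                                                   QEMU_PLUGIN_CB_NO_REGS,
                                                   NULL);
        }
    }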
+
+/**
+ * qemu_plugin_insn_data() - return ptr to instruction data
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Note: data is only valid for duration of callback. See
+ * qemu_plugin_insn_size() to calculate size of stream.
+ *
+ * Returns: pointer to a stream of bytes containing the value of this
+ * instruction's opcode.
+ */
+QEMU_PLUGIN_API
+const void *qemu_plugin_insn_data(const struct qemu_plugin_insn *insn);
+
+/**
+ * qemu_plugin_insn_size() - return size of instruction
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Returns: size of instruction in bytes
+ */
+QEMU_PLUGIN_API
+size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn);
+
+/**
+ * qemu_plugin_insn_vaddr() - return vaddr of instruction
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Returns: virtual address of instruction
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn);
+
+/**
+ * qemu_plugin_insn_haddr() - return hardware addr of instruction
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Returns: hardware (physical) target address of instruction
+ */
+QEMU_PLUGIN_API
+void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn);
+
+/**
+ * typedef qemu_plugin_meminfo_t - opaque memory transaction handle
+ *
+ * This can be further queried using the qemu_plugin_mem_* query
+ * functions.
+ */
+typedef uint32_t qemu_plugin_meminfo_t;
+/** struct qemu_plugin_hwaddr - opaque hw address handle */
+struct qemu_plugin_hwaddr;
+
+/**
+ * qemu_plugin_mem_size_shift() - get size of access
+ * @info: opaque memory transaction handle
+ *
+ * Returns: log2 of the size of the access (0=byte, 1=16bit, 2=32bit, etc.)
+ */
+QEMU_PLUGIN_API
+unsigned int qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info);
+/**
+ * qemu_plugin_mem_is_sign_extended() - was the access sign extended
+ * @info: opaque memory transaction handle
+ *
+ * Returns: true if it was, otherwise false
+ */
+QEMU_PLUGIN_API
+bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info);
+/**
+ * qemu_plugin_mem_is_big_endian() - was the access big endian
+ * @info: opaque memory transaction handle
+ *
+ * Returns: true if it was, otherwise false
+ */
+QEMU_PLUGIN_API
+bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info);
+/**
+ * qemu_plugin_mem_is_store() - was the access a store
+ * @info: opaque memory transaction handle
+ *
+ * Returns: true if it was, otherwise false
+ */
+QEMU_PLUGIN_API
+bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info);
+
+/**
+ * qemu_plugin_get_hwaddr() - return handle for memory operation
+ * @info: opaque memory info structure
+ * @vaddr: the virtual address of the memory operation
+ *
+ * For system emulation returns a qemu_plugin_hwaddr handle to query
+ * details about the actual physical address backing the virtual
+ * address. For linux-user guests it just returns NULL.
+ *
+ * This handle is *only* valid for the duration of the callback. Any
+ * information needed from the handle should be recovered before the
+ * callback returns.
+ */
+QEMU_PLUGIN_API
+struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
+ uint64_t vaddr);
+
+/*
+ * The following additional queries can be run on the hwaddr structure to
+ * return information about it - namely whether it is for an IO access and the
+ * physical address associated with the access.
+ */
+
+/**
+ * qemu_plugin_hwaddr_is_io() - query whether memory operation is IO
+ * @haddr: address handle from qemu_plugin_get_hwaddr()
+ *
+ * Returns true if the handle's memory operation is to memory-mapped IO, or
+ * false if it is to RAM
+ */
+QEMU_PLUGIN_API
+bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr);
+
+/**
+ * qemu_plugin_hwaddr_phys_addr() - query physical address for memory operation
+ * @haddr: address handle from qemu_plugin_get_hwaddr()
+ *
+ * Returns the physical address associated with the memory operation
+ *
+ * Note that the returned physical address may not be unique if you are dealing
+ * with multiple address spaces.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr);
+
+/**
+ * qemu_plugin_hwaddr_device_name() - query the device name for the access
+ * @h: address handle from qemu_plugin_get_hwaddr()
+ *
+ * Returns a string representing the device. The string is valid for
+ * the lifetime of the plugin.
+ */
+QEMU_PLUGIN_API
+const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h);
+
+/**
+ * typedef qemu_plugin_vcpu_mem_cb_t - memory callback function type
+ * @vcpu_index: the executing vCPU
+ * @info: an opaque handle for further queries about the memory
+ * @vaddr: the virtual address of the transaction
+ * @userdata: any user data attached to the callback
+ */
+typedef void (*qemu_plugin_vcpu_mem_cb_t) (unsigned int vcpu_index,
+ qemu_plugin_meminfo_t info,
+ uint64_t vaddr,
+ void *userdata);
+
+/**
+ * qemu_plugin_register_vcpu_mem_cb() - register memory access callback
+ * @insn: handle for instruction to instrument
+ * @cb: callback of type qemu_plugin_vcpu_mem_cb_t
+ * @flags: (currently unused) callback flags
+ * @rw: monitor reads, writes or both
+ * @userdata: opaque pointer for userdata
+ *
+ * This registers a full callback for every memory access generated by
+ * an instruction. If the instruction doesn't access memory no
+ * callback will be made.
+ *
+ * The callback reports the vCPU the access took place on, the virtual
+ * address of the access and a handle for further queries. The user
+ * can attach some userdata to the callback for additional purposes.
+ *
+ * Other execution threads will continue to execute during the
+ * callback, so the plugin is responsible for protecting its own state
+ * with appropriate locking if required.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
+ qemu_plugin_vcpu_mem_cb_t cb,
+ enum qemu_plugin_cb_flags flags,
+ enum qemu_plugin_mem_rw rw,
+ void *userdata);
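A sketch of a memory callback that resolves the physical address under system emulation; the function names introduced here are illustrative.

    static void mem_access_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                              uint64_t vaddr, void *userdata)
    {
        struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);

        if (hw && !qemu_plugin_hwaddr_is_io(hw)) {
            uint64_t paddr = qemu_plugin_hwaddr_phys_addr(hw);
            (void)paddr;   /* e.g. feed a histogram; distinguish loads from
                              stores via qemu_plugin_mem_is_store(info) */
        }
    }

    static void instrument_mem(struct qemu_plugin_insn *insn)
    {
        qemu_plugin_register_vcpu_mem_cb(insn, mem_access_cb,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }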
+
+/**
+ * qemu_plugin_register_vcpu_mem_inline_per_vcpu() - inline op for mem access
+ * @insn: handle for instruction to instrument
+ * @rw: apply to reads, writes or both
+ * @op: the op, of type qemu_plugin_op
+ * @entry: entry to run op
+ * @imm: immediate data for @op
+ *
+ * This registers an inline op for every memory access generated by the
+ * instruction.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ struct qemu_plugin_insn *insn,
+ enum qemu_plugin_mem_rw rw,
+ enum qemu_plugin_op op,
+ qemu_plugin_u64 entry,
+ uint64_t imm);
+
+typedef void
+(*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index,
+ int64_t num, uint64_t a1, uint64_t a2,
+ uint64_t a3, uint64_t a4, uint64_t a5,
+ uint64_t a6, uint64_t a7, uint64_t a8);
+
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_syscall_cb_t cb);
+
+typedef void
+(*qemu_plugin_vcpu_syscall_ret_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_idx,
+ int64_t num, int64_t ret);
+
+QEMU_PLUGIN_API
+void
+qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_syscall_ret_cb_t cb);
+
+
+/**
+ * qemu_plugin_insn_disas() - return disassembly string for instruction
+ * @insn: instruction reference
+ *
+ * Returns an allocated string containing the disassembly
+ */
+
+QEMU_PLUGIN_API
+char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn);
+
+/**
+ * qemu_plugin_insn_symbol() - best effort symbol lookup
+ * @insn: instruction reference
+ *
+ * Return a static string referring to the symbol. This requires the
+ * binary QEMU is running to have provided a symbol table.
+ */
+QEMU_PLUGIN_API
+const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn);
+
+/**
+ * qemu_plugin_vcpu_for_each() - iterate over the existing vCPUs
+ * @id: plugin ID
+ * @cb: callback function
+ *
+ * The @cb function is called once for each existing vCPU.
+ *
+ * See also: qemu_plugin_register_vcpu_init_cb()
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
+ qemu_plugin_vcpu_simple_cb_t cb);
+
+QEMU_PLUGIN_API
+void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
+ qemu_plugin_simple_cb_t cb);
+
+/**
+ * qemu_plugin_register_atexit_cb() - register exit callback
+ * @id: plugin ID
+ * @cb: callback
+ * @userdata: user data for callback
+ *
+ * The @cb function is called once execution has finished. Plugins
+ * should be able to free all their resources at this point, much like
+ * after a reset/uninstall callback is called.
+ *
+ * In user-mode it is possible that a few un-instrumented instructions
+ * from child threads may run before the host kernel reaps the threads.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
+ qemu_plugin_udata_cb_t cb, void *userdata);
+
+/* returns how many vcpus were started at this point */
+int qemu_plugin_num_vcpus(void);
+
+/**
+ * qemu_plugin_outs() - output string via QEMU's logging system
+ * @string: a string
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_outs(const char *string);
+
+/**
+ * qemu_plugin_bool_parse() - parses a boolean argument in the form of
+ * "<argname>=[on|yes|true|off|no|false]"
+ *
+ * @name: argument name, the part before the equals sign
+ * @val: argument value, what's after the equals sign
+ * @ret: output return value
+ *
+ * Returns: true if the combination @name=@val parses correctly to a
+ * boolean argument, and false otherwise
+ */
+QEMU_PLUGIN_API
+bool qemu_plugin_bool_parse(const char *name, const char *val, bool *ret);
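A sketch of argument parsing inside qemu_plugin_install(); the "inline" option name follows the convention of the in-tree example plugins and is an assumption, not something this header mandates.

    static bool do_inline;

    static bool parse_plugin_args(int argc, char **argv)
    {
        for (int i = 0; i < argc; i++) {
            g_auto(GStrv) tokens = g_strsplit(argv[i], "=", 2);

            if (g_strcmp0(tokens[0], "inline") == 0 && tokens[1]) {
                if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) {
                    return false;   /* malformed boolean value */
                }
            }
        }
        return true;
    }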
+
+/**
+ * qemu_plugin_path_to_binary() - path to binary file being executed
+ *
+ * Return a string representing the path to the binary. For user-mode
+ * this is the main executable. For system emulation we currently
+ * return NULL. The user should g_free() the string once no longer
+ * needed.
+ */
+QEMU_PLUGIN_API
+const char *qemu_plugin_path_to_binary(void);
+
+/**
+ * qemu_plugin_start_code() - returns start of text segment
+ *
+ * Returns the nominal start address of the main text segment in
+ * user-mode. Currently returns 0 for system emulation.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_start_code(void);
+
+/**
+ * qemu_plugin_end_code() - returns end of text segment
+ *
+ * Returns the nominal end address of the main text segment in
+ * user-mode. Currently returns 0 for system emulation.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_end_code(void);
+
+/**
+ * qemu_plugin_entry_code() - returns start address for module
+ *
+ * Returns the nominal entry address of the main text segment in
+ * user-mode. Currently returns 0 for system emulation.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_entry_code(void);
+
+/** struct qemu_plugin_register - Opaque handle for register access */
+struct qemu_plugin_register;
+
+/**
+ * typedef qemu_plugin_reg_descriptor - register descriptions
+ *
+ * @handle: opaque handle for retrieving value with qemu_plugin_read_register
+ * @name: register name
+ * @feature: optional feature descriptor, can be NULL
+ */
+typedef struct {
+ struct qemu_plugin_register *handle;
+ const char *name;
+ const char *feature;
+} qemu_plugin_reg_descriptor;
+
+/**
+ * qemu_plugin_get_registers() - return register list for current vCPU
+ *
+ * Returns a potentially empty GArray of qemu_plugin_reg_descriptor.
+ * Caller frees the array (but not the const strings).
+ *
+ * Should be used from a qemu_plugin_register_vcpu_init_cb() callback
+ * after the vCPU is initialised, i.e. in the vCPU context.
+ */
+QEMU_PLUGIN_API
+GArray *qemu_plugin_get_registers(void);
+
+/**
+ * qemu_plugin_read_register() - read register for current vCPU
+ *
+ * @handle: a struct qemu_plugin_register handle from qemu_plugin_get_registers()
+ * @buf: A GByteArray for the data owned by the plugin
+ *
+ * This function is only available in a context where register read
+ * access has been explicitly requested via the QEMU_PLUGIN_CB_R_REGS flag.
+ *
+ * Returns the size of the read register. The content of @buf is in target byte
+ * order. On failure returns -1.
+ */
+QEMU_PLUGIN_API
+int qemu_plugin_read_register(struct qemu_plugin_register *handle,
+ GByteArray *buf);
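For illustration, one way to combine the two calls above: collect handles from a vCPU init callback, then read them from an execution callback registered with QEMU_PLUGIN_CB_R_REGS. The single-vCPU simplification and the names are assumptions.

    static GArray *reg_handles;   /* simplification: one vCPU assumed */

    static void vcpu_init_regs_cb(qemu_plugin_id_t id, unsigned int vcpu_index)
    {
        reg_handles = qemu_plugin_get_registers();
    }

    static void insn_exec_regs_cb(unsigned int vcpu_index, void *userdata)
    {
        /* valid only because the cb was registered with QEMU_PLUGIN_CB_R_REGS */
        g_autoptr(GByteArray) buf = g_byte_array_new();

        if (reg_handles && reg_handles->len > 0) {
            qemu_plugin_reg_descriptor *rd =
                &g_array_index(reg_handles, qemu_plugin_reg_descriptor, 0);

            if (qemu_plugin_read_register(rd->handle, buf) > 0) {
                /* buf->data now holds the value in target byte order */
            }
        }
    }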
+
+/**
+ * qemu_plugin_scoreboard_new() - alloc a new scoreboard
+ *
+ * @element_size: size (in bytes) for one entry
+ *
+ * Returns a pointer to a new scoreboard. It must be freed using
+ * qemu_plugin_scoreboard_free.
+ */
+QEMU_PLUGIN_API
+struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size);
+
+/**
+ * qemu_plugin_scoreboard_free() - free a scoreboard
+ * @score: scoreboard to free
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
+
+/**
+ * qemu_plugin_scoreboard_find() - get pointer to an entry of a scoreboard
+ * @score: scoreboard to query
+ * @vcpu_index: entry index
+ *
+ * Returns the address of the scoreboard entry matching a given
+ * vcpu_index. Note that this address may change if the scoreboard is
+ * resized, so it should not be cached.
+ */
+QEMU_PLUGIN_API
+void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
+ unsigned int vcpu_index);
+
+/* Macros to define a qemu_plugin_u64 */
+#define qemu_plugin_scoreboard_u64(score) \
+ (qemu_plugin_u64) {score, 0}
+#define qemu_plugin_scoreboard_u64_in_struct(score, type, member) \
+ (qemu_plugin_u64) {score, offsetof(type, member)}
+
+/**
+ * qemu_plugin_u64_add() - add a value to a qemu_plugin_u64 for a given vcpu
+ * @entry: entry to query
+ * @vcpu_index: entry index
+ * @added: value to add
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
+ uint64_t added);
+
+/**
+ * qemu_plugin_u64_get() - get value of a qemu_plugin_u64 for a given vcpu
+ * @entry: entry to query
+ * @vcpu_index: entry index
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry, unsigned int vcpu_index);
+
+/**
+ * qemu_plugin_u64_set() - set value of a qemu_plugin_u64 for a given vcpu
+ * @entry: entry to query
+ * @vcpu_index: entry index
+ * @val: new value
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
+ uint64_t val);
+
+/**
+ * qemu_plugin_u64_sum() - return sum of all vcpu entries in a scoreboard
+ * @entry: entry to sum
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry);
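Putting the scoreboard pieces together, a hypothetical per-vCPU instruction counter that is summed at exit; the struct layout and names are illustrative.

    typedef struct {
        uint64_t insns;
    } VCPUStats;

    static struct qemu_plugin_scoreboard *stats;
    static qemu_plugin_u64 insns;

    static void counters_init(void)
    {
        stats = qemu_plugin_scoreboard_new(sizeof(VCPUStats));
        insns = qemu_plugin_scoreboard_u64_in_struct(stats, VCPUStats, insns);
    }

    static void counters_report(qemu_plugin_id_t id, void *userdata)
    {
        g_autofree gchar *msg =
            g_strdup_printf("insns: %" PRIu64 "\n", qemu_plugin_u64_sum(insns));

        qemu_plugin_outs(msg);
        qemu_plugin_scoreboard_free(stats);
    }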
+
+#endif /* QEMU_QEMU_PLUGIN_H */
diff --git a/include/qemu/qemu-print.h b/include/qemu/qemu-print.h
new file mode 100644
index 0000000000..1b70920648
--- /dev/null
+++ b/include/qemu/qemu-print.h
@@ -0,0 +1,23 @@
+/*
+ * Print to stream or current monitor
+ *
+ * Copyright (C) 2019 Red Hat Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_PRINT_H
+#define QEMU_PRINT_H
+
+int qemu_vprintf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+int qemu_printf(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
+
+int qemu_vfprintf(FILE *stream, const char *fmt, va_list ap)
+ G_GNUC_PRINTF(2, 0);
+int qemu_fprintf(FILE *stream, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
+
+#endif
diff --git a/include/qemu/qemu-progress.h b/include/qemu/qemu-progress.h
new file mode 100644
index 0000000000..137e1c316f
--- /dev/null
+++ b/include/qemu/qemu-progress.h
@@ -0,0 +1,8 @@
+#ifndef QEMU_PROGRESS_H
+#define QEMU_PROGRESS_H
+
+void qemu_progress_init(int enabled, float min_skip);
+void qemu_progress_end(void);
+void qemu_progress_print(float delta, int max);
+
+#endif /* QEMU_PROGRESS_H */
diff --git a/include/qemu/qsp.h b/include/qemu/qsp.h
index a94c464f90..bf36aabfa8 100644
--- a/include/qemu/qsp.h
+++ b/include/qemu/qsp.h
@@ -11,15 +11,13 @@
#ifndef QEMU_QSP_H
#define QEMU_QSP_H
-#include "qemu/fprintf-fn.h"
-
enum QSPSortBy {
QSP_SORT_BY_TOTAL_WAIT_TIME,
QSP_SORT_BY_AVG_WAIT_TIME,
};
-void qsp_report(FILE *f, fprintf_function cpu_fprintf, size_t max,
- enum QSPSortBy sort_by, bool callsite_coalesce);
+void qsp_report(size_t max, enum QSPSortBy sort_by,
+ bool callsite_coalesce);
bool qsp_is_enabled(void);
void qsp_enable(void);
diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h
new file mode 100644
index 0000000000..dc2b14d258
--- /dev/null
+++ b/include/qemu/qtree.h
@@ -0,0 +1,200 @@
+/*
+ * GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * QTree is a partial import of Glib's GTree. The parts excluded correspond
+ * to API calls either deprecated (e.g. g_tree_traverse) or recently added
+ * (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU.
+ *
+ * The reason for this import is to allow us to control the memory allocator
+ * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's
+ * slice allocator, which causes problems when forking in user-mode;
+ * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's
+ * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead".
+ *
+ * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
+ */
+
+#ifndef QEMU_QTREE_H
+#define QEMU_QTREE_H
+
+
+#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR
+
+typedef struct _QTree QTree;
+
+typedef struct _QTreeNode QTreeNode;
+
+typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node,
+ gpointer user_data);
+
+/*
+ * Balanced binary trees
+ */
+QTree *q_tree_new(GCompareFunc key_compare_func);
+QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data);
+QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data,
+ GDestroyNotify key_destroy_func,
+ GDestroyNotify value_destroy_func);
+QTree *q_tree_ref(QTree *tree);
+void q_tree_unref(QTree *tree);
+void q_tree_destroy(QTree *tree);
+void q_tree_insert(QTree *tree,
+ gpointer key,
+ gpointer value);
+void q_tree_replace(QTree *tree,
+ gpointer key,
+ gpointer value);
+gboolean q_tree_remove(QTree *tree,
+ gconstpointer key);
+gboolean q_tree_steal(QTree *tree,
+ gconstpointer key);
+gpointer q_tree_lookup(QTree *tree,
+ gconstpointer key);
+gboolean q_tree_lookup_extended(QTree *tree,
+ gconstpointer lookup_key,
+ gpointer *orig_key,
+ gpointer *value);
+void q_tree_foreach(QTree *tree,
+ GTraverseFunc func,
+ gpointer user_data);
+gpointer q_tree_search(QTree *tree,
+ GCompareFunc search_func,
+ gconstpointer user_data);
+gint q_tree_height(QTree *tree);
+gint q_tree_nnodes(QTree *tree);
+
+#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+typedef GTree QTree;
+typedef GTreeNode QTreeNode;
+typedef GTraverseNodeFunc QTraverseNodeFunc;
+
+static inline QTree *q_tree_new(GCompareFunc key_compare_func)
+{
+ return g_tree_new(key_compare_func);
+}
+
+static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data)
+{
+ return g_tree_new_with_data(key_compare_func, key_compare_data);
+}
+
+static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data,
+ GDestroyNotify key_destroy_func,
+ GDestroyNotify value_destroy_func)
+{
+ return g_tree_new_full(key_compare_func, key_compare_data,
+ key_destroy_func, value_destroy_func);
+}
+
+static inline QTree *q_tree_ref(QTree *tree)
+{
+ return g_tree_ref(tree);
+}
+
+static inline void q_tree_unref(QTree *tree)
+{
+ g_tree_unref(tree);
+}
+
+static inline void q_tree_destroy(QTree *tree)
+{
+ g_tree_destroy(tree);
+}
+
+static inline void q_tree_insert(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ g_tree_insert(tree, key, value);
+}
+
+static inline void q_tree_replace(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ g_tree_replace(tree, key, value);
+}
+
+static inline gboolean q_tree_remove(QTree *tree,
+ gconstpointer key)
+{
+ return g_tree_remove(tree, key);
+}
+
+static inline gboolean q_tree_steal(QTree *tree,
+ gconstpointer key)
+{
+ return g_tree_steal(tree, key);
+}
+
+static inline gpointer q_tree_lookup(QTree *tree,
+ gconstpointer key)
+{
+ return g_tree_lookup(tree, key);
+}
+
+static inline gboolean q_tree_lookup_extended(QTree *tree,
+ gconstpointer lookup_key,
+ gpointer *orig_key,
+ gpointer *value)
+{
+ return g_tree_lookup_extended(tree, lookup_key, orig_key, value);
+}
+
+static inline void q_tree_foreach(QTree *tree,
+ GTraverseFunc func,
+ gpointer user_data)
+{
+    g_tree_foreach(tree, func, user_data);
+}
+
+static inline gpointer q_tree_search(QTree *tree,
+ GCompareFunc search_func,
+ gconstpointer user_data)
+{
+ return g_tree_search(tree, search_func, user_data);
+}
+
+static inline gint q_tree_height(QTree *tree)
+{
+ return g_tree_height(tree);
+}
+
+static inline gint q_tree_nnodes(QTree *tree)
+{
+ return g_tree_nnodes(tree);
+}
+
+#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+#endif /* QEMU_QTREE_H */
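As a usage sketch (key and value choices are purely illustrative), the QTree entry points mirror their GTree counterparts one-to-one:

    static gint cmp_int_keys(gconstpointer a, gconstpointer b)
    {
        return GPOINTER_TO_INT(a) - GPOINTER_TO_INT(b);
    }

    static void qtree_example(void)
    {
        QTree *t = q_tree_new(cmp_int_keys);

        q_tree_insert(t, GINT_TO_POINTER(42), (gpointer)"forty-two");
        g_assert_cmpstr(q_tree_lookup(t, GINT_TO_POINTER(42)), ==, "forty-two");
        g_assert(q_tree_remove(t, GINT_TO_POINTER(42)));
        q_tree_destroy(t);
    }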
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index 1f8e219412..e029e7bf66 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -78,8 +78,6 @@
* For details on the use of these macros, see the queue(3) manual page.
*/
-#include "qemu/atomic.h" /* for smp_wmb() */
-
/*
* List definitions.
*/
@@ -144,8 +142,27 @@ struct { \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Like QLIST_REMOVE() but safe to call when elm is not in a list
+ */
+#define QLIST_SAFE_REMOVE(elm, field) do { \
+ if ((elm)->field.le_prev != NULL) { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+ } \
} while (/*CONSTCOND*/0)
+/* Is elm in a list? */
+#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)
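A brief sketch of why the cleared linkage matters; the Request type is illustrative.

    typedef struct Request {
        QLIST_ENTRY(Request) node;
    } Request;

    static void request_retire(Request *req)
    {
        /*
         * Because QLIST_REMOVE() now clears the linkage, an element can
         * be tested and removed idempotently:
         */
        if (QLIST_IS_INSERTED(req, node)) {
            QLIST_REMOVE(req, node);
        }
        /* or, equivalently: QLIST_SAFE_REMOVE(req, node); */
    }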
+
#define QLIST_FOREACH(var, head, field) \
for ((var) = ((head)->lh_first); \
(var); \
@@ -201,21 +218,36 @@ struct { \
typeof(elm) save_sle_next; \
do { \
save_sle_next = (elm)->field.sle_next = (head)->slh_first; \
- } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \
+ } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\
save_sle_next); \
} while (/*CONSTCOND*/0)
#define QSLIST_MOVE_ATOMIC(dest, src) do { \
- (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \
+ (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD(head, field) do { \
- (head)->slh_first = (head)->slh_first->field.sle_next; \
+ typeof((head)->slh_first) elm = (head)->slh_first; \
+ (head)->slh_first = elm->field.sle_next; \
+ elm->field.sle_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
+ typeof(slistelm) next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = next->field.sle_next; \
+ next->field.sle_next = NULL; \
} while (/*CONSTCOND*/0)
-#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
- (slistelm)->field.sle_next = \
- QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \
+#define QSLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ QSLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->slh_first; \
+ while (curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \
+ (elm)->field.sle_next = NULL; \
+ } \
} while (/*CONSTCOND*/0)
#define QSLIST_FOREACH(var, head, field) \
@@ -278,8 +310,10 @@ struct { \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \
- if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\
+ typeof((head)->sqh_first) elm = (head)->sqh_first; \
+ if (((head)->sqh_first = elm->field.sqe_next) == NULL) \
(head)->sqh_last = &(head)->sqh_first; \
+ elm->field.sqe_next = NULL; \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \
@@ -303,6 +337,7 @@ struct { \
if ((curelm->field.sqe_next = \
curelm->field.sqe_next->field.sqe_next) == NULL) \
(head)->sqh_last = &(curelm)->field.sqe_next; \
+ (elm)->field.sqe_next = NULL; \
} \
} while (/*CONSTCOND*/0)
@@ -341,7 +376,8 @@ struct { \
/*
* Simple queue access methods.
*/
-#define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL)
+#define QSIMPLEQ_EMPTY_ATOMIC(head) \
+ (qatomic_read(&((head)->sqh_first)) == NULL)
#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
@@ -420,8 +456,20 @@ union { \
(head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
(elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \
(elm)->field.tqe_circ.tql_prev = NULL; \
+ (elm)->field.tqe_circ.tql_next = NULL; \
+ (elm)->field.tqe_next = NULL; \
} while (/*CONSTCOND*/0)
+/* remove @left, @right and all elements in between from @head */
+#define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do { \
+ if (((right)->field.tqe_next) != NULL) \
+ (right)->field.tqe_next->field.tqe_circ.tql_prev = \
+ (left)->field.tqe_circ.tql_prev; \
+ else \
+ (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \
+ (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \
+ } while (/*CONSTCOND*/0)
+
#define QTAILQ_FOREACH(var, head, field) \
for ((var) = ((head)->tqh_first); \
(var); \
@@ -439,7 +487,7 @@ union { \
#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var) \
for ((var) = QTAILQ_LAST(head); \
- (var) && ((prev_var) = QTAILQ_PREV(var, field)); \
+ (var) && ((prev_var) = QTAILQ_PREV(var, field), 1); \
(var) = (prev_var))
/*
@@ -493,4 +541,36 @@ union { \
QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \
} while (/*CONSTCOND*/0)
+#define QLIST_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+
+#define QLIST_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+
+#define QLIST_RAW_PREVIOUS(elm, entry) \
+ field_at_offset(elm, entry + sizeof(void *), void *)
+
+#define QLIST_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QLIST_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QLIST_RAW_NEXT(elm, entry))
+
+#define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do { \
+ *QLIST_RAW_NEXT(prev, entry) = elem; \
+ *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \
+ *QLIST_RAW_NEXT(elem, entry) = NULL; \
+} while (0)
+
+#define QLIST_RAW_INSERT_HEAD(head, elm, entry) do { \
+ void *first = *QLIST_RAW_FIRST(head); \
+ *QLIST_RAW_FIRST(head) = elm; \
+ *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head); \
+ if (first) { \
+ *QLIST_RAW_NEXT(elm, entry) = first; \
+ *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \
+ } else { \
+ *QLIST_RAW_NEXT(elm, entry) = NULL; \
+ } \
+} while (0)
+
#endif /* QEMU_SYS_QUEUE_H */
diff --git a/include/qemu/range.h b/include/qemu/range.h
index ba606c6bc0..205e1da76d 100644
--- a/include/qemu/range.h
+++ b/include/qemu/range.h
@@ -3,26 +3,23 @@
*
* Copyright (c) 2015-2016 Red Hat, Inc.
*
- * This library is free software; you can redistribute it and/or
+ * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ * General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef QEMU_RANGE_H
#define QEMU_RANGE_H
-#include "qemu/queue.h"
-
/*
* Operations on 64 bit address ranges.
* Notes:
@@ -117,8 +114,8 @@ static inline uint64_t range_upb(Range *range)
* @size may be 0. If the range would overflow, returns -ERANGE, otherwise
* 0.
*/
-static inline int QEMU_WARN_UNUSED_RESULT range_init(Range *range, uint64_t lob,
- uint64_t size)
+G_GNUC_WARN_UNUSED_RESULT
+static inline int range_init(Range *range, uint64_t lob, uint64_t size)
{
if (lob + size < lob) {
return -ERANGE;
@@ -220,6 +217,20 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1,
return !(last2 < first1 || last1 < first2);
}
+/*
+ * Return -1 if @a < @b, 1 if @a > @b, and 0 if they touch or overlap.
+ * Both @a and @b must not be empty.
+ */
+int range_compare(Range *a, Range *b);
+
GList *range_list_insert(GList *list, Range *data);
+/*
+ * Invert an array of sorted ranges over the [low, high] span, i.e. the
+ * original ranges become holes in the newly allocated @out_ranges
+ */
+void range_inverse_array(GList *in_ranges,
+ GList **out_ranges,
+ uint64_t low, uint64_t high);
+
#endif
diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h
index 1b38291823..48bf59e857 100644
--- a/include/qemu/ratelimit.h
+++ b/include/qemu/ratelimit.h
@@ -14,7 +14,11 @@
#ifndef QEMU_RATELIMIT_H
#define QEMU_RATELIMIT_H
+#include "qemu/lockable.h"
+#include "qemu/timer.h"
+
typedef struct {
+ QemuMutex lock;
int64_t slice_start_time;
int64_t slice_end_time;
uint64_t slice_quota;
@@ -38,7 +42,12 @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
double delay_slices;
- assert(limit->slice_quota && limit->slice_ns);
+ QEMU_LOCK_GUARD(&limit->lock);
+ if (!limit->slice_quota) {
+ /* Throttling disabled. */
+ return 0;
+ }
+ assert(limit->slice_ns);
if (limit->slice_end_time < now) {
/* Previous, possibly extended, time slice finished; reset the
@@ -63,11 +72,26 @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
return limit->slice_end_time - now;
}
+static inline void ratelimit_init(RateLimit *limit)
+{
+ qemu_mutex_init(&limit->lock);
+}
+
+static inline void ratelimit_destroy(RateLimit *limit)
+{
+ qemu_mutex_destroy(&limit->lock);
+}
+
static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed,
uint64_t slice_ns)
{
+ QEMU_LOCK_GUARD(&limit->lock);
limit->slice_ns = slice_ns;
- limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
+ if (speed == 0) {
+ limit->slice_quota = 0;
+ } else {
+ limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
+ }
}
#endif
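A sketch of how a job might use the new init/destroy pair together with the zero-speed (unthrottled) case; the 10 MiB/s rate, 100 ms slice, and loop shape are assumptions.

    static void example_throttled_loop(RateLimit *limit, uint64_t chunk)
    {
        ratelimit_init(limit);
        ratelimit_set_speed(limit, 10 * 1024 * 1024, 100 * 1000 * 1000ULL);

        for (int i = 0; i < 8; i++) {
            int64_t delay_ns = ratelimit_calculate_delay(limit, chunk);

            if (delay_ns) {
                /* sleep for delay_ns before doing more work, e.g. with
                 * qemu_co_sleep_ns() in a coroutine context */
            }
            /* ... process one chunk ... */
        }

        ratelimit_destroy(limit);
    }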
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 22876d1428..fea058aa9f 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -27,11 +27,9 @@
#include "qemu/thread.h"
#include "qemu/queue.h"
#include "qemu/atomic.h"
+#include "qemu/notify.h"
#include "qemu/sys_membarrier.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include "qemu/coroutine-tls.h"
/*
* Important !
@@ -66,29 +64,39 @@ struct rcu_reader_data {
/* Data used for registry, protected by rcu_registry_lock */
QLIST_ENTRY(rcu_reader_data) node;
+
+ /*
+ * NotifierList used to force an RCU grace period. Accessed under
+ * rcu_registry_lock. Note that the notifier is called _outside_
+ * the thread!
+ */
+ NotifierList force_rcu;
};
-extern __thread struct rcu_reader_data rcu_reader;
+QEMU_DECLARE_CO_TLS(struct rcu_reader_data, rcu_reader)
static inline void rcu_read_lock(void)
{
- struct rcu_reader_data *p_rcu_reader = &rcu_reader;
+ struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
unsigned ctr;
if (p_rcu_reader->depth++ > 0) {
return;
}
- ctr = atomic_read(&rcu_gp_ctr);
- atomic_set(&p_rcu_reader->ctr, ctr);
+ ctr = qatomic_read(&rcu_gp_ctr);
+ qatomic_set(&p_rcu_reader->ctr, ctr);
- /* Write p_rcu_reader->ctr before reading RCU-protected pointers. */
+ /*
+ * Read rcu_gp_ctr and write p_rcu_reader->ctr before reading
+ * RCU-protected pointers.
+ */
smp_mb_placeholder();
}
static inline void rcu_read_unlock(void)
{
- struct rcu_reader_data *p_rcu_reader = &rcu_reader;
+ struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
assert(p_rcu_reader->depth != 0);
if (--p_rcu_reader->depth > 0) {
@@ -100,29 +108,29 @@ static inline void rcu_read_unlock(void)
* smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
* are sequentially consistent.
*/
- atomic_store_release(&p_rcu_reader->ctr, 0);
+ qatomic_store_release(&p_rcu_reader->ctr, 0);
/* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
smp_mb_placeholder();
- if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
- atomic_set(&p_rcu_reader->waiting, false);
+ if (unlikely(qatomic_read(&p_rcu_reader->waiting))) {
+ qatomic_set(&p_rcu_reader->waiting, false);
qemu_event_set(&rcu_gp_event);
}
}
-extern void synchronize_rcu(void);
+void synchronize_rcu(void);
/*
* Reader thread registration.
*/
-extern void rcu_register_thread(void);
-extern void rcu_unregister_thread(void);
+void rcu_register_thread(void);
+void rcu_unregister_thread(void);
/*
* Support for fork(). fork() support is enabled at startup.
*/
-extern void rcu_enable_atfork(void);
-extern void rcu_disable_atfork(void);
+void rcu_enable_atfork(void);
+void rcu_disable_atfork(void);
struct rcu_head;
typedef void RCUCBFunc(struct rcu_head *head);
@@ -132,7 +140,8 @@ struct rcu_head {
RCUCBFunc *func;
};
-extern void call_rcu1(struct rcu_head *head, RCUCBFunc *func);
+void call_rcu1(struct rcu_head *head, RCUCBFunc *func);
+void drain_call_rcu(void);
/* The operands of the minus operator must have the same type,
* which must be the one that we specify in the cast.
@@ -154,8 +163,36 @@ extern void call_rcu1(struct rcu_head *head, RCUCBFunc *func);
}), \
(RCUCBFunc *)g_free);
-#ifdef __cplusplus
+typedef void RCUReadAuto;
+static inline RCUReadAuto *rcu_read_auto_lock(void)
+{
+ rcu_read_lock();
+ /* Anything non-NULL causes the cleanup function to be called */
+ return (void *)(uintptr_t)0x1;
}
-#endif
+
+static inline void rcu_read_auto_unlock(RCUReadAuto *r)
+{
+ rcu_read_unlock();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
+
+#define WITH_RCU_READ_LOCK_GUARD() \
+ WITH_RCU_READ_LOCK_GUARD_(glue(_rcu_read_auto, __COUNTER__))
+
+#define WITH_RCU_READ_LOCK_GUARD_(var) \
+ for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock(); \
+ (var); rcu_read_auto_unlock(var), (var) = NULL)
+
+#define RCU_READ_LOCK_GUARD() \
+ g_autoptr(RCUReadAuto) _rcu_read_auto __attribute__((unused)) = rcu_read_auto_lock()
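A sketch of the guard forms with an illustrative RCU-protected list; Node and NodeList are not real QEMU types, and the list macros come from qemu/queue.h and qemu/rcu_queue.h.

    typedef struct Node {
        int key;
        int value;
        QLIST_ENTRY(Node) next;
    } Node;

    typedef QLIST_HEAD(, Node) NodeList;

    static int lookup_value(NodeList *head, int key)
    {
        Node *n;

        /* RCU_READ_LOCK_GUARD() would instead cover the whole scope */
        WITH_RCU_READ_LOCK_GUARD() {
            QLIST_FOREACH_RCU(n, head, next) {
                if (n->key == key) {
                    return n->value;   /* the guard is released on return */
                }
            }
        }
        return -1;
    }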
+
+/*
+ * Force-RCU notifiers tell readers that they should exit their
+ * read-side critical section.
+ */
+void rcu_add_force_rcu_notifier(Notifier *n);
+void rcu_remove_force_rcu_notifier(Notifier *n);
#endif /* QEMU_RCU_H */
diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
index 2d386f303e..4e6298d473 100644
--- a/include/qemu/rcu_queue.h
+++ b/include/qemu/rcu_queue.h
@@ -28,17 +28,12 @@
#include "qemu/queue.h"
#include "qemu/atomic.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
/*
* List access methods.
*/
-#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL)
-#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
-#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
+#define QLIST_EMPTY_RCU(head) (qatomic_read(&(head)->lh_first) == NULL)
+#define QLIST_FIRST_RCU(head) (qatomic_rcu_read(&(head)->lh_first))
+#define QLIST_NEXT_RCU(elm, field) (qatomic_rcu_read(&(elm)->field.le_next))
/*
* List functions.
@@ -46,7 +41,7 @@ extern "C" {
/*
- * The difference between atomic_read/set and atomic_rcu_read/set
+ * The difference between qatomic_read/set and qatomic_rcu_read/set
* is in the including of a read/write memory barrier to the volatile
* access. atomic_rcu_* macros include the memory barrier, the
* plain atomic macros do not. Therefore, it should be correct to
@@ -66,7 +61,7 @@ extern "C" {
#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \
(elm)->field.le_next = (listelm)->field.le_next; \
(elm)->field.le_prev = &(listelm)->field.le_next; \
- atomic_rcu_set(&(listelm)->field.le_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.le_next, (elm)); \
if ((elm)->field.le_next != NULL) { \
(elm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
@@ -82,7 +77,7 @@ extern "C" {
#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
- atomic_rcu_set((listelm)->field.le_prev, (elm)); \
+ qatomic_rcu_set((listelm)->field.le_prev, (elm)); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
@@ -95,7 +90,7 @@ extern "C" {
#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
(elm)->field.le_prev = &(head)->lh_first; \
(elm)->field.le_next = (head)->lh_first; \
- atomic_rcu_set((&(head)->lh_first), (elm)); \
+ qatomic_rcu_set((&(head)->lh_first), (elm)); \
if ((elm)->field.le_next != NULL) { \
(elm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
@@ -112,20 +107,20 @@ extern "C" {
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
} \
- atomic_set((elm)->field.le_prev, (elm)->field.le_next); \
+ qatomic_set((elm)->field.le_prev, (elm)->field.le_next); \
} while (/*CONSTCOND*/0)
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->lh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->lh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.le_next))
+ (var) = qatomic_rcu_read(&(var)->field.le_next))
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \
- for ((var) = (atomic_rcu_read(&(head)->lh_first)); \
+ for ((var) = (qatomic_rcu_read(&(head)->lh_first)); \
(var) && \
- ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
+ ((next_var) = qatomic_rcu_read(&(var)->field.le_next), 1); \
(var) = (next_var))
/*
@@ -133,9 +128,10 @@ extern "C" {
*/
/* Simple queue access methods */
-#define QSIMPLEQ_EMPTY_RCU(head) (atomic_read(&(head)->sqh_first) == NULL)
-#define QSIMPLEQ_FIRST_RCU(head) atomic_rcu_read(&(head)->sqh_first)
-#define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next)
+#define QSIMPLEQ_EMPTY_RCU(head) \
+ (qatomic_read(&(head)->sqh_first) == NULL)
+#define QSIMPLEQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->sqh_first)
+#define QSIMPLEQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sqe_next)
/* Simple queue functions */
#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do { \
@@ -143,12 +139,12 @@ extern "C" {
if ((elm)->field.sqe_next == NULL) { \
(head)->sqh_last = &(elm)->field.sqe_next; \
} \
- atomic_rcu_set(&(head)->sqh_first, (elm)); \
+ qatomic_rcu_set(&(head)->sqh_first, (elm)); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
- atomic_rcu_set((head)->sqh_last, (elm)); \
+ qatomic_rcu_set((head)->sqh_last, (elm)); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)
@@ -157,11 +153,11 @@ extern "C" {
if ((elm)->field.sqe_next == NULL) { \
(head)->sqh_last = &(elm)->field.sqe_next; \
} \
- atomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do { \
- atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \
+ qatomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next);\
if ((head)->sqh_first == NULL) { \
(head)->sqh_last = &(head)->sqh_first; \
} \
@@ -175,7 +171,7 @@ extern "C" {
while (curr->field.sqe_next != (elm)) { \
curr = curr->field.sqe_next; \
} \
- atomic_set(&curr->field.sqe_next, \
+ qatomic_set(&curr->field.sqe_next, \
curr->field.sqe_next->field.sqe_next); \
if (curr->field.sqe_next == NULL) { \
(head)->sqh_last = &(curr)->field.sqe_next; \
@@ -184,13 +180,13 @@ extern "C" {
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->sqh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->sqh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.sqe_next))
+ (var) = qatomic_rcu_read(&(var)->field.sqe_next))
#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->sqh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \
+ for ((var) = qatomic_rcu_read(&(head)->sqh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.sqe_next), 1);\
(var) = (next))
/*
@@ -198,9 +194,9 @@ extern "C" {
*/
/* Tail queue access methods */
-#define QTAILQ_EMPTY_RCU(head) (atomic_read(&(head)->tqh_first) == NULL)
-#define QTAILQ_FIRST_RCU(head) atomic_rcu_read(&(head)->tqh_first)
-#define QTAILQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.tqe_next)
+#define QTAILQ_EMPTY_RCU(head) (qatomic_read(&(head)->tqh_first) == NULL)
+#define QTAILQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->tqh_first)
+#define QTAILQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.tqe_next)
/* Tail queue functions */
#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do { \
@@ -211,14 +207,14 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} \
- atomic_rcu_set(&(head)->tqh_first, (elm)); \
+ qatomic_rcu_set(&(head)->tqh_first, (elm)); \
(elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
- atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
+ qatomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
@@ -230,14 +226,14 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} \
- atomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \
(elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do { \
(elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
(elm)->field.tqe_next = (listelm); \
- atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm));\
(listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
@@ -248,21 +244,66 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
} \
- atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, (elm)->field.tqe_next); \
+ qatomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, \
+ (elm)->field.tqe_next); \
(elm)->field.tqe_circ.tql_prev = NULL; \
} while (/*CONSTCOND*/0)
#define QTAILQ_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->tqh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->tqh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.tqe_next))
+ (var) = qatomic_rcu_read(&(var)->field.tqe_next))
#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->tqh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \
+ for ((var) = qatomic_rcu_read(&(head)->tqh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.tqe_next), 1);\
+ (var) = (next))
+
+/*
+ * RCU singly-linked list
+ */
+
+/* Singly-linked list access methods */
+#define QSLIST_EMPTY_RCU(head) (qatomic_read(&(head)->slh_first) == NULL)
+#define QSLIST_FIRST_RCU(head) qatomic_rcu_read(&(head)->slh_first)
+#define QSLIST_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sle_next)
+
+/* Singly-linked list functions */
+#define QSLIST_INSERT_HEAD_RCU(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ qatomic_rcu_set(&(head)->slh_first, (elm)); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do { \
+ (elm)->field.sle_next = (listelm)->field.sle_next; \
+ qatomic_rcu_set(&(listelm)->field.sle_next, (elm)); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_HEAD_RCU(head, field) do { \
+ qatomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next);\
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_RCU(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ QSLIST_REMOVE_HEAD_RCU((head), field); \
+ } else { \
+ struct type *curr = (head)->slh_first; \
+ while (curr->field.sle_next != (elm)) { \
+ curr = curr->field.sle_next; \
+ } \
+ qatomic_set(&curr->field.sle_next, \
+ curr->field.sle_next->field.sle_next); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_FOREACH_RCU(var, head, field) \
+ for ((var) = qatomic_rcu_read(&(head)->slh_first); \
+ (var); \
+ (var) = qatomic_rcu_read(&(var)->field.sle_next))
+
+#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \
+ for ((var) = qatomic_rcu_read(&(head)->slh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.sle_next), 1); \
(var) = (next))
-#ifdef __cplusplus
-}
-#endif
#endif /* QEMU_RCU_QUEUE_H */
diff --git a/include/qemu/readline.h b/include/qemu/readline.h
index e81258322b..b05e4782da 100644
--- a/include/qemu/readline.h
+++ b/include/qemu/readline.h
@@ -5,7 +5,7 @@
#define READLINE_MAX_CMDS 64
#define READLINE_MAX_COMPLETIONS 256
-typedef void GCC_FMT_ATTR(2, 3) ReadLinePrintfFunc(void *opaque,
+typedef void G_GNUC_PRINTF(2, 3) ReadLinePrintfFunc(void *opaque,
const char *fmt, ...);
typedef void ReadLineFlushFunc(void *opaque);
typedef void ReadLineFunc(void *opaque, const char *str,
@@ -44,6 +44,8 @@ typedef struct ReadLineState {
} ReadLineState;
void readline_add_completion(ReadLineState *rs, const char *str);
+void readline_add_completion_of(ReadLineState *rs,
+ const char *pfx, const char *str);
void readline_set_completion_index(ReadLineState *rs, int completion_index);
const char *readline_get_history(ReadLineState *rs, unsigned int index);
diff --git a/include/qemu/reserved-region.h b/include/qemu/reserved-region.h
new file mode 100644
index 0000000000..8e6f0a97e2
--- /dev/null
+++ b/include/qemu/reserved-region.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU ReservedRegion helpers
+ *
+ * Copyright (c) 2023 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_RESERVED_REGION_H
+#define QEMU_RESERVED_REGION_H
+
+#include "exec/memory.h"
+
+/*
+ * Insert a new region into a sorted list of reserved regions. If the
+ * new region overlaps existing regions, it takes priority and replaces
+ * the overlapped segments.
+ */
+GList *resv_region_list_insert(GList *list, ReservedRegion *reg);
+
+#endif
diff --git a/include/qemu/selfmap.h b/include/qemu/selfmap.h
new file mode 100644
index 0000000000..1690a74f4b
--- /dev/null
+++ b/include/qemu/selfmap.h
@@ -0,0 +1,44 @@
+/*
+ * Utility functions to read our own memory map
+ *
+ * Copyright (c) 2020 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SELFMAP_H
+#define SELFMAP_H
+
+#include "qemu/interval-tree.h"
+
+typedef struct {
+ IntervalTreeNode itree;
+
+ /* flags */
+ bool is_read;
+ bool is_write;
+ bool is_exec;
+ bool is_priv;
+
+ dev_t dev;
+ ino_t inode;
+ uint64_t offset;
+ const char *path;
+} MapInfo;
+
+/**
+ * read_self_maps:
+ *
+ * Read /proc/self/maps and return a tree of MapInfo structures.
+ */
+IntervalTreeRoot *read_self_maps(void);
+
+/**
+ * free_self_maps:
+ * @info: an interval tree
+ *
+ * Free a tree of MapInfo structures.
+ */
+void free_self_maps(IntervalTreeRoot *root);
+
+#endif /* SELFMAP_H */
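A rough sketch of walking the returned tree, assuming the interval_tree_iter_first()/interval_tree_iter_next() helpers from qemu/interval-tree.h; the filtering on executable mappings is just an example.

/* Print every executable mapping recorded in /proc/self/maps. */
static void dump_exec_maps(void)
{
    IntervalTreeRoot *root = read_self_maps();
    IntervalTreeNode *node;

    for (node = interval_tree_iter_first(root, 0, UINT64_MAX);
         node;
         node = interval_tree_iter_next(node, 0, UINT64_MAX)) {
        MapInfo *mi = container_of(node, MapInfo, itree);

        if (mi->is_exec) {
            printf("%" PRIx64 "-%" PRIx64 " %s\n",
                   node->start, node->last + 1,
                   mi->path ? mi->path : "[anon]");
        }
    }
    free_self_maps(root);
}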
diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h
index fd408b7ec5..ecb7d2c864 100644
--- a/include/qemu/seqlock.h
+++ b/include/qemu/seqlock.h
@@ -32,7 +32,7 @@ static inline void seqlock_init(QemuSeqLock *sl)
/* Lock out other writers and update the count. */
static inline void seqlock_write_begin(QemuSeqLock *sl)
{
- atomic_set(&sl->sequence, sl->sequence + 1);
+ qatomic_set(&sl->sequence, sl->sequence + 1);
/* Write sequence before updating other fields. */
smp_wmb();
@@ -43,7 +43,7 @@ static inline void seqlock_write_end(QemuSeqLock *sl)
/* Write other fields before finalizing sequence. */
smp_wmb();
- atomic_set(&sl->sequence, sl->sequence + 1);
+ qatomic_set(&sl->sequence, sl->sequence + 1);
}
/* Lock out other writers and update the count. */
@@ -55,11 +55,11 @@ static inline void seqlock_write_lock_impl(QemuSeqLock *sl, QemuLockable *lock)
#define seqlock_write_lock(sl, lock) \
seqlock_write_lock_impl(sl, QEMU_MAKE_LOCKABLE(lock))
-/* Lock out other writers and update the count. */
+/* Update the count and release the lock. */
static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock)
{
+ seqlock_write_end(sl);
qemu_lockable_unlock(lock);
- seqlock_write_begin(sl);
}
#define seqlock_write_unlock(sl, lock) \
seqlock_write_unlock_impl(sl, QEMU_MAKE_LOCKABLE(lock))
@@ -68,7 +68,7 @@ static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock
static inline unsigned seqlock_read_begin(const QemuSeqLock *sl)
{
/* Always fail if a write is in progress. */
- unsigned ret = atomic_read(&sl->sequence);
+ unsigned ret = qatomic_read(&sl->sequence);
/* Read sequence before reading other fields. */
smp_rmb();
@@ -79,7 +79,7 @@ static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
{
/* Read other fields before reading final sequence. */
smp_rmb();
- return unlikely(atomic_read(&sl->sequence) != start);
+ return unlikely(qatomic_read(&sl->sequence) != start);
}
#endif
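A minimal sketch of the read-retry pattern, including the corrected write-side pairing above; the SampleState structure and its writer mutex are invented for illustration.

typedef struct {
    QemuSeqLock sl;
    QemuMutex lock;     /* serializes writers */
    int64_t a, b;       /* fields protected by the seqlock */
} SampleState;

/* Writer: the sequence count is bumped around the update. */
static void sample_update(SampleState *s, int64_t a, int64_t b)
{
    seqlock_write_lock(&s->sl, &s->lock);
    s->a = a;
    s->b = b;
    seqlock_write_unlock(&s->sl, &s->lock);  /* ends the write section */
}

/* Reader: retry until a consistent snapshot is observed. */
static int64_t sample_sum(SampleState *s)
{
    unsigned start;
    int64_t sum;

    do {
        start = seqlock_read_begin(&s->sl);
        sum = s->a + s->b;
    } while (seqlock_read_retry(&s->sl, start));
    return sum;
}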
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index 8140fea685..d935fd80da 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -14,11 +14,41 @@ int inet_aton(const char *cp, struct in_addr *ia);
/* misc helpers */
bool fd_is_socket(int fd);
int qemu_socket(int domain, int type, int protocol);
+
+/**
+ * qemu_socketpair:
+ * @domain: specifies a communication domain, such as PF_UNIX
+ * @type: specifies the socket type.
+ * @protocol: specifies a particular protocol to be used with the socket
+ * @sv: an array to store the pair of sockets created
+ *
+ * Creates an unnamed pair of connected sockets in the specified domain,
+ * of the specified type, and using the optionally specified protocol.
+ * The close-on-exec flag is set automatically on both returned sockets.
+ *
+ * Return 0 on success.
+ */
+int qemu_socketpair(int domain, int type, int protocol, int sv[2]);
+
int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen);
+/*
+ * A variant of send(2) which handles partial sends.
+ *
+ * Return the number of bytes transferred over the socket.
+ * Set errno if fewer than `count' bytes are sent.
+ *
+ * This function does not work with non-blocking sockets.
+ * Any way of handling a non-blocking socket here would be bad:
+ * - returning a short write (then the name would be wrong)
+ * - busy-waiting on (errno == EAGAIN) inside the loop
+ */
+ssize_t qemu_send_full(int s, const void *buf, size_t count)
+ G_GNUC_WARN_UNUSED_RESULT;
int socket_set_cork(int fd, int v);
int socket_set_nodelay(int fd);
-void qemu_set_block(int fd);
-void qemu_set_nonblock(int fd);
+void qemu_socket_set_block(int fd);
+int qemu_socket_try_set_nonblock(int fd);
+void qemu_socket_set_nonblock(int fd);
int socket_set_fast_reuse(int fd);
#ifdef WIN32
@@ -39,13 +69,16 @@ NetworkAddressFamily inet_netfamily(int family);
int unix_listen(const char *path, Error **errp);
int unix_connect(const char *path, Error **errp);
+char *socket_uri(SocketAddress *addr);
SocketAddress *socket_parse(const char *str, Error **errp);
int socket_connect(SocketAddress *addr, Error **errp);
-int socket_listen(SocketAddress *addr, Error **errp);
+int socket_listen(SocketAddress *addr, int num, Error **errp);
void socket_listen_cleanup(int fd, Error **errp);
int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp);
/* Old, ipv4 only bits. Don't use for new code. */
+int convert_host_port(struct sockaddr_in *saddr, const char *host,
+ const char *port, Error **errp);
int parse_host_port(struct sockaddr_in *saddr, const char *str,
Error **errp);
int socket_init(void);
@@ -110,4 +143,14 @@ SocketAddress *socket_remote_address(int fd, Error **errp);
*/
SocketAddress *socket_address_flatten(SocketAddressLegacy *addr);
+/**
+ * socket_address_parse_named_fd:
+ *
+ * Modify @addr, replacing a named fd by its corresponding number.
+ * Needed for callers that plan to pass @addr to a context where the
+ * current monitor is not available.
+ *
+ * Return 0 on success.
+ */
+int socket_address_parse_named_fd(SocketAddress *addr, Error **errp);
#endif /* QEMU_SOCKETS_H */
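A hedged sketch of the new qemu_socketpair() helper together with qemu_send_full(); error handling is abbreviated and the payload is arbitrary.

static void socketpair_demo(void)
{
    int sv[2];
    const char msg[] = "hello";

    /* Both ends come back with close-on-exec already set. */
    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) < 0) {
        perror("qemu_socketpair");
        return;
    }

    /* Blocking socket: qemu_send_full() loops until everything is sent. */
    if (qemu_send_full(sv[0], msg, sizeof(msg)) < (ssize_t)sizeof(msg)) {
        perror("qemu_send_full");
    }

    close(sv[0]);
    close(sv[1]);
}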
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
index 4a357b3e9d..99b5cb724a 100644
--- a/include/qemu/stats64.h
+++ b/include/qemu/stats64.h
@@ -10,7 +10,7 @@
*/
#ifndef QEMU_STATS64_H
-#define QEMU_STATS64_H 1
+#define QEMU_STATS64_H
#include "qemu/atomic.h"
@@ -21,7 +21,7 @@
typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
- uint64_t value;
+ aligned_uint64_t value;
#else
uint32_t low, high;
uint32_t lock;
@@ -37,31 +37,37 @@ static inline void stat64_init(Stat64 *s, uint64_t value)
static inline uint64_t stat64_get(const Stat64 *s)
{
- return atomic_read__nocheck(&s->value);
+ return qatomic_read__nocheck(&s->value);
+}
+
+static inline void stat64_set(Stat64 *s, uint64_t value)
+{
+ qatomic_set__nocheck(&s->value, value);
}
static inline void stat64_add(Stat64 *s, uint64_t value)
{
- atomic_add(&s->value, value);
+ qatomic_add(&s->value, value);
}
static inline void stat64_min(Stat64 *s, uint64_t value)
{
- uint64_t orig = atomic_read__nocheck(&s->value);
+ uint64_t orig = qatomic_read__nocheck(&s->value);
while (orig > value) {
- orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+ orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
}
}
static inline void stat64_max(Stat64 *s, uint64_t value)
{
- uint64_t orig = atomic_read__nocheck(&s->value);
+ uint64_t orig = qatomic_read__nocheck(&s->value);
while (orig < value) {
- orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+ orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
}
}
#else
uint64_t stat64_get(const Stat64 *s);
+void stat64_set(Stat64 *s, uint64_t value);
bool stat64_min_slow(Stat64 *s, uint64_t value);
bool stat64_max_slow(Stat64 *s, uint64_t value);
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high);
@@ -79,7 +85,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
low = (uint32_t) value;
if (!low) {
if (high) {
- atomic_add(&s->high, high);
+ qatomic_add(&s->high, high);
}
return;
}
@@ -101,7 +107,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
* the high 32 bits, so it can race just fine with stat64_add32_carry
* and even stat64_get!
*/
- old = atomic_cmpxchg(&s->low, orig, result);
+ old = qatomic_cmpxchg(&s->low, orig, result);
if (orig == old) {
return;
}
@@ -116,7 +122,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
high = value >> 32;
low = (uint32_t) value;
do {
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high < high) {
return;
}
@@ -128,7 +134,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
* the write barrier in stat64_min_slow.
*/
smp_rmb();
- orig_low = atomic_read(&s->low);
+ orig_low = qatomic_read(&s->low);
if (orig_low <= low) {
return;
}
@@ -138,7 +144,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
* we may miss being lucky.
*/
smp_rmb();
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high < high) {
return;
}
@@ -156,7 +162,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
high = value >> 32;
low = (uint32_t) value;
do {
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high > high) {
return;
}
@@ -168,7 +174,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
* the write barrier in stat64_max_slow.
*/
smp_rmb();
- orig_low = atomic_read(&s->low);
+ orig_low = qatomic_read(&s->low);
if (orig_low >= low) {
return;
}
@@ -178,7 +184,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
* we may miss being lucky.
*/
smp_rmb();
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high > high) {
return;
}
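An illustrative use of the Stat64 counters above, including the newly added stat64_set(); the XferStats structure is hypothetical.

typedef struct {
    Stat64 bytes;      /* total bytes transferred */
    Stat64 worst_ns;   /* worst-case latency observed */
} XferStats;

static void xfer_account(XferStats *st, uint64_t len, uint64_t latency_ns)
{
    stat64_add(&st->bytes, len);          /* lock-free with CONFIG_ATOMIC64 */
    stat64_max(&st->worst_ns, latency_ns);
}

static void xfer_reset(XferStats *st)
{
    stat64_set(&st->bytes, 0);
    stat64_set(&st->worst_ns, 0);
}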
diff --git a/include/qemu/sys_membarrier.h b/include/qemu/sys_membarrier.h
index 316e3dc4a2..e7774891f8 100644
--- a/include/qemu/sys_membarrier.h
+++ b/include/qemu/sys_membarrier.h
@@ -7,15 +7,15 @@
*/
#ifndef QEMU_SYS_MEMBARRIER_H
-#define QEMU_SYS_MEMBARRIER_H 1
+#define QEMU_SYS_MEMBARRIER_H
#ifdef CONFIG_MEMBARRIER
/* Only block reordering at the compiler level in the performance-critical
* side. The slow side forces processor-level ordering on all other cores
* through a system call.
*/
-extern void smp_mb_global_init(void);
-extern void smp_mb_global(void);
+void smp_mb_global_init(void);
+void smp_mb_global(void);
#define smp_mb_placeholder() barrier()
#else
/* Keep it simple, execute a real memory barrier on both sides. */
diff --git a/include/qemu/systemd.h b/include/qemu/systemd.h
index e6a877e5c6..f0ea1266d5 100644
--- a/include/qemu/systemd.h
+++ b/include/qemu/systemd.h
@@ -11,7 +11,7 @@
*/
#ifndef QEMU_SYSTEMD_H
-#define QEMU_SYSTEMD_H 1
+#define QEMU_SYSTEMD_H
#define FIRST_SOCKET_ACTIVATION_FD 3 /* defined by systemd ABI */
diff --git a/include/qemu/thread-context.h b/include/qemu/thread-context.h
new file mode 100644
index 0000000000..2ebd6b7fe1
--- /dev/null
+++ b/include/qemu/thread-context.h
@@ -0,0 +1,57 @@
+/*
+ * QEMU Thread Context
+ *
+ * Copyright Red Hat Inc., 2022
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSEMU_THREAD_CONTEXT_H
+#define SYSEMU_THREAD_CONTEXT_H
+
+#include "qapi/qapi-types-machine.h"
+#include "qemu/thread.h"
+#include "qom/object.h"
+
+#define TYPE_THREAD_CONTEXT "thread-context"
+OBJECT_DECLARE_TYPE(ThreadContext, ThreadContextClass,
+ THREAD_CONTEXT)
+
+struct ThreadContextClass {
+ ObjectClass parent_class;
+};
+
+struct ThreadContext {
+ /* private */
+ Object parent;
+
+ /* private */
+ unsigned int thread_id;
+ QemuThread thread;
+
+ /* Semaphore to wait for context thread action. */
+ QemuSemaphore sem;
+ /* Semaphore to wait for action in context thread. */
+ QemuSemaphore sem_thread;
+ /* Mutex to synchronize requests. */
+ QemuMutex mutex;
+
+ /* Commands for the thread to execute. */
+ int thread_cmd;
+ void *thread_cmd_data;
+
+ /* CPU affinity bitmap used for initialization. */
+ unsigned long *init_cpu_bitmap;
+ int init_cpu_nbits;
+};
+
+void thread_context_create_thread(ThreadContext *tc, QemuThread *thread,
+ const char *name,
+ void *(*start_routine)(void *), void *arg,
+ int mode);
+
+#endif /* SYSEMU_THREAD_CONTEXT_H */
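A brief sketch of spawning a thread through a ThreadContext so it inherits the context's CPU affinity; it assumes @tc was created elsewhere (for instance via -object thread-context on the command line), and the worker itself is a stand-in.

static void *worker_fn(void *opaque)
{
    /* Runs with the CPU affinity inherited from the thread context. */
    return NULL;
}

static void spawn_pinned_worker(ThreadContext *tc, QemuThread *thread)
{
    thread_context_create_thread(tc, thread, "pinned-worker",
                                 worker_fn, NULL, QEMU_THREAD_JOINABLE);
}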
diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h
index c903525062..5f2f3d1386 100644
--- a/include/qemu/thread-posix.h
+++ b/include/qemu/thread-posix.h
@@ -4,12 +4,6 @@
#include <pthread.h>
#include <semaphore.h>
-typedef QemuMutex QemuRecMutex;
-#define qemu_rec_mutex_destroy qemu_mutex_destroy
-#define qemu_rec_mutex_lock_impl qemu_mutex_lock_impl
-#define qemu_rec_mutex_trylock_impl qemu_mutex_trylock_impl
-#define qemu_rec_mutex_unlock qemu_mutex_unlock
-
struct QemuMutex {
pthread_mutex_t lock;
#ifdef CONFIG_DEBUG_MUTEX
@@ -19,20 +13,23 @@ struct QemuMutex {
bool initialized;
};
+/*
+ * QemuRecMutex cannot be a typedef of QemuMutex lest we have two
+ * compatible cases in _Generic. See qemu/lockable.h.
+ */
+typedef struct QemuRecMutex {
+ QemuMutex m;
+} QemuRecMutex;
+
struct QemuCond {
pthread_cond_t cond;
bool initialized;
};
struct QemuSemaphore {
-#ifndef CONFIG_SEM_TIMEDWAIT
- pthread_mutex_t lock;
- pthread_cond_t cond;
+ QemuMutex mutex;
+ QemuCond cond;
unsigned int count;
-#else
- sem_t sem;
-#endif
- bool initialized;
};
struct QemuEvent {
diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h
index 50af5dd7ab..d95af4498f 100644
--- a/include/qemu/thread-win32.h
+++ b/include/qemu/thread-win32.h
@@ -18,12 +18,6 @@ struct QemuRecMutex {
bool initialized;
};
-void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
-void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
-int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file,
- int line);
-void qemu_rec_mutex_unlock(QemuRecMutex *mutex);
-
struct QemuCond {
CONDITION_VARIABLE var;
bool initialized;
@@ -47,6 +41,6 @@ struct QemuThread {
};
/* Only valid for joinable threads. */
-HANDLE qemu_thread_get_handle(QemuThread *thread);
+HANDLE qemu_thread_get_handle(struct QemuThread *thread);
#endif
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index 55d83a907c..fb74e21c08 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -3,6 +3,7 @@
#include "qemu/processor.h"
#include "qemu/atomic.h"
+#include "qemu/clang-tsa.h"
typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
@@ -24,9 +25,18 @@ typedef struct QemuThread QemuThread;
void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
-int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
-void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
-void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);
+int TSA_NO_TSA qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file,
+ const int line);
+void TSA_NO_TSA qemu_mutex_lock_impl(QemuMutex *mutex, const char *file,
+ const int line);
+void TSA_NO_TSA qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file,
+ const int line);
+
+void qemu_rec_mutex_init(QemuRecMutex *mutex);
+void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
+void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
+int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
+void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);
typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
@@ -34,13 +44,16 @@ typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
int l);
+typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
+ const char *f, int l);
-extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
+extern QemuMutexLockFunc bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
+extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m) \
@@ -54,46 +67,56 @@ extern QemuCondWaitFunc qemu_cond_wait_func;
* hide them.
*/
#define qemu_mutex_lock(m) \
- qemu_mutex_lock_impl(m, __FILE__, __LINE__);
+ qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m) \
- qemu_mutex_trylock_impl(m, __FILE__, __LINE__);
+ qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m) \
- qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__);
+ qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m) \
- qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__);
+ qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m) \
- qemu_cond_wait_impl(c, m, __FILE__, __LINE__);
+ qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
+#define qemu_cond_timedwait(c, m, ms) \
+ qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({ \
- QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \
+ QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
_f(m, __FILE__, __LINE__); \
})
-#define qemu_mutex_trylock(m) ({ \
- QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
- _f(m, __FILE__, __LINE__); \
+#define qemu_mutex_trylock(m) ({ \
+ QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
+ _f(m, __FILE__, __LINE__); \
})
-#define qemu_rec_mutex_lock(m) ({ \
- QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
- _f(m, __FILE__, __LINE__); \
+#define qemu_rec_mutex_lock(m) ({ \
+ QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
+ _f(m, __FILE__, __LINE__); \
})
#define qemu_rec_mutex_trylock(m) ({ \
QemuRecMutexTrylockFunc _f; \
- _f = atomic_read(&qemu_rec_mutex_trylock_func); \
+ _f = qatomic_read(&qemu_rec_mutex_trylock_func); \
_f(m, __FILE__, __LINE__); \
})
#define qemu_cond_wait(c, m) ({ \
- QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func); \
+ QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func); \
_f(c, m, __FILE__, __LINE__); \
})
+
+#define qemu_cond_timedwait(c, m, ms) ({ \
+ QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
+ _f(c, m, ms, __FILE__, __LINE__); \
+ })
#endif
#define qemu_mutex_unlock(mutex) \
qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)
+#define qemu_rec_mutex_unlock(mutex) \
+ qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)
+
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
qemu_mutex_lock(mutex);
@@ -119,8 +142,10 @@ static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
return qemu_rec_mutex_trylock(mutex);
}
-/* Prototypes for other functions are in thread-posix.h/thread-win32.h. */
-void qemu_rec_mutex_init(QemuRecMutex *mutex);
+static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
+{
+ qemu_rec_mutex_unlock(mutex);
+}
void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);
@@ -132,14 +157,23 @@ void qemu_cond_destroy(QemuCond *cond);
*/
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
-void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
- const char *file, const int line);
+void TSA_NO_TSA qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
+ const char *file, const int line);
+bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
+ const char *file, const int line);
static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
qemu_cond_wait(cond, mutex);
}
+/* Returns true if timeout has not expired, and false otherwise */
+static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
+ int ms)
+{
+ return qemu_cond_timedwait(cond, mutex, ms);
+}
+
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
@@ -155,10 +189,14 @@ void qemu_event_destroy(QemuEvent *ev);
void qemu_thread_create(QemuThread *thread, const char *name,
void *(*start_routine)(void *),
void *arg, int mode);
+int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
+ unsigned long nbits);
+int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
+ unsigned long *nbits);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
-void qemu_thread_exit(void *retval);
+G_NORETURN void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);
struct Notifier;
@@ -187,37 +225,72 @@ void qemu_thread_atexit_add(struct Notifier *notifier);
*/
void qemu_thread_atexit_remove(struct Notifier *notifier);
+#ifdef CONFIG_TSAN
+#include <sanitizer/tsan_interface.h>
+#endif
+
struct QemuSpin {
int value;
};
static inline void qemu_spin_init(QemuSpin *spin)
{
- __sync_lock_release(&spin->value);
+ qatomic_set(&spin->value, 0);
+#ifdef CONFIG_TSAN
+ __tsan_mutex_create(spin, __tsan_mutex_not_static);
+#endif
+}
+
+static inline void qemu_spin_destroy(QemuSpin *spin)
+{
+#ifdef CONFIG_TSAN
+ __tsan_mutex_destroy(spin, __tsan_mutex_not_static);
+#endif
}
static inline void qemu_spin_lock(QemuSpin *spin)
{
- while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
- while (atomic_read(&spin->value)) {
+#ifdef CONFIG_TSAN
+ __tsan_mutex_pre_lock(spin, 0);
+#endif
+ while (unlikely(qatomic_xchg(&spin->value, 1))) {
+ while (qatomic_read(&spin->value)) {
cpu_relax();
}
}
+#ifdef CONFIG_TSAN
+ __tsan_mutex_post_lock(spin, 0, 0);
+#endif
}
static inline bool qemu_spin_trylock(QemuSpin *spin)
{
- return __sync_lock_test_and_set(&spin->value, true);
+#ifdef CONFIG_TSAN
+ __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
+#endif
+ bool busy = qatomic_xchg(&spin->value, true);
+#ifdef CONFIG_TSAN
+ unsigned flags = __tsan_mutex_try_lock;
+ flags |= busy ? __tsan_mutex_try_lock_failed : 0;
+ __tsan_mutex_post_lock(spin, flags, 0);
+#endif
+ return busy;
}
static inline bool qemu_spin_locked(QemuSpin *spin)
{
- return atomic_read(&spin->value);
+ return qatomic_read(&spin->value);
}
static inline void qemu_spin_unlock(QemuSpin *spin)
{
- __sync_lock_release(&spin->value);
+#ifdef CONFIG_TSAN
+ __tsan_mutex_pre_unlock(spin, 0);
+#endif
+ qatomic_store_release(&spin->value, 0);
+#ifdef CONFIG_TSAN
+ __tsan_mutex_post_unlock(spin, 0);
+#endif
}
struct QemuLockCnt {
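Two small sketches of the additions above: the qemu_cond_timedwait() wrapper and the TSan-aware QemuSpin; the flag and counter being protected are invented.

/* Wait up to 100 ms for @flag to become true. */
static bool wait_for_flag(QemuMutex *lock, QemuCond *cond, bool *flag)
{
    bool ok = true;

    qemu_mutex_lock(lock);
    while (!*flag && ok) {
        /* Returns false once the timeout has expired. */
        ok = qemu_cond_timedwait(cond, lock, 100);
    }
    qemu_mutex_unlock(lock);
    return *flag;
}

/* Protect a tiny critical section with a spinlock. */
static void bump_counter(QemuSpin *spin, unsigned *counter)
{
    qemu_spin_lock(spin);
    (*counter)++;
    qemu_spin_unlock(spin);
}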
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
index abeb886d93..181245d29b 100644
--- a/include/qemu/throttle.h
+++ b/include/qemu/throttle.h
@@ -25,7 +25,6 @@
#ifndef THROTTLE_H
#define THROTTLE_H
-#include "qemu-common.h"
#include "qapi/qapi-types-block-core.h"
#include "qemu/timer.h"
@@ -100,13 +99,18 @@ typedef struct ThrottleState {
int64_t previous_leak; /* timestamp of the last leak done */
} ThrottleState;
+typedef enum {
+ THROTTLE_READ = 0,
+ THROTTLE_WRITE,
+ THROTTLE_MAX
+} ThrottleDirection;
+
typedef struct ThrottleTimers {
- QEMUTimer *timers[2]; /* timers used to do the throttling */
+ QEMUTimer *timers[THROTTLE_MAX]; /* timers used to do the throttling */
QEMUClockType clock_type; /* the clock used */
/* Callbacks */
- QEMUTimerCB *read_timer_cb;
- QEMUTimerCB *write_timer_cb;
+ QEMUTimerCB *timer_cb[THROTTLE_MAX];
void *timer_opaque;
} ThrottleTimers;
@@ -150,9 +154,10 @@ void throttle_config_init(ThrottleConfig *cfg);
/* usage */
bool throttle_schedule_timer(ThrottleState *ts,
ThrottleTimers *tt,
- bool is_write);
+ ThrottleDirection direction);
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size);
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
+ uint64_t size);
void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
Error **errp);
void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var);
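A short sketch of how the new ThrottleDirection enum replaces the old is_write booleans when accounting completed I/O; initialization of the throttle state and timers is assumed to happen elsewhere.

/* Account a completed request and reschedule the matching timer if needed. */
static void account_io(ThrottleState *ts, ThrottleTimers *tt,
                       uint64_t bytes, bool is_read)
{
    ThrottleDirection dir = is_read ? THROTTLE_READ : THROTTLE_WRITE;

    throttle_account(ts, dir, bytes);
    if (throttle_schedule_timer(ts, tt, dir)) {
        /* A timer was armed: further requests in @dir must wait for it. */
    }
}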
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index a86330c987..9a366e551f 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -1,7 +1,6 @@
#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H
-#include "qemu-common.h"
#include "qemu/bitops.h"
#include "qemu/notify.h"
#include "qemu/host-utils.h"
@@ -63,13 +62,15 @@ typedef enum {
* The following attributes are available:
*
* QEMU_TIMER_ATTR_EXTERNAL: drives external subsystem
+ * QEMU_TIMER_ATTR_ALL: mask for all existing attributes
*
 * Timers with this attribute are not recorded in rr mode, so they can be
 * used for subsystems that operate outside the guest core. Applicable only
 * to the virtual clock type.
*/
-#define QEMU_TIMER_ATTR_EXTERNAL BIT(0)
+#define QEMU_TIMER_ATTR_EXTERNAL ((int)BIT(0))
+#define QEMU_TIMER_ATTR_ALL 0xffffffff
typedef struct QEMUTimerList QEMUTimerList;
@@ -165,8 +166,8 @@ bool qemu_clock_expired(QEMUClockType type);
*
* Determine whether a clock should be used for deadline
* calculations. Some clocks, for instance vm_clock with
- * use_icount set, do not count in nanoseconds. Such clocks
- * are not used for deadline calculations, and are presumed
+ * icount_enabled() set, do not count in nanoseconds.
+ * Such clocks are not used for deadline calculations, and are presumed
* to interrupt any poll using qemu_notify/aio_notify
* etc.
*
@@ -178,6 +179,8 @@ bool qemu_clock_use_for_deadline(QEMUClockType type);
/**
* qemu_clock_deadline_ns_all:
* @type: the clock type
+ * @attr_mask: mask for the timer attributes that are included
+ * in deadline calculation
*
* Calculate the deadline across all timer lists associated
* with a clock (as opposed to just the default one)
@@ -185,7 +188,7 @@ bool qemu_clock_use_for_deadline(QEMUClockType type);
*
* Returns: time until expiry in nanoseconds or -1
*/
-int64_t qemu_clock_deadline_ns_all(QEMUClockType type);
+int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask);
/**
* qemu_clock_get_main_loop_timerlist:
@@ -222,35 +225,6 @@ void qemu_clock_notify(QEMUClockType type);
void qemu_clock_enable(QEMUClockType type, bool enabled);
/**
- * qemu_start_warp_timer:
- *
- * Starts a timer for virtual clock update
- */
-void qemu_start_warp_timer(void);
-
-/**
- * qemu_clock_register_reset_notifier:
- * @type: the clock type
- * @notifier: the notifier function
- *
- * Register a notifier function to call when the clock
- * concerned is reset.
- */
-void qemu_clock_register_reset_notifier(QEMUClockType type,
- Notifier *notifier);
-
-/**
- * qemu_clock_unregister_reset_notifier:
- * @type: the clock type
- * @notifier: the notifier function
- *
- * Unregister a notifier function to call when the clock
- * concerned is reset.
- */
-void qemu_clock_unregister_reset_notifier(QEMUClockType type,
- Notifier *notifier);
-
-/**
* qemu_clock_run_timers:
* @type: clock on which to operate
*
@@ -271,19 +245,6 @@ bool qemu_clock_run_timers(QEMUClockType type);
*/
bool qemu_clock_run_all_timers(void);
-/**
- * qemu_clock_get_last:
- *
- * Returns last clock query time.
- */
-uint64_t qemu_clock_get_last(QEMUClockType type);
-/**
- * qemu_clock_set_last:
- *
- * Sets last clock query time.
- */
-void qemu_clock_set_last(QEMUClockType type, uint64_t last);
-
/*
* QEMUTimerList
@@ -559,7 +520,7 @@ static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group,
int scale, int attributes,
QEMUTimerCB *cb, void *opaque)
{
- QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
+ QEMUTimer *ts = g_new0(QEMUTimer, 1);
timer_init_full(ts, timer_list_group, type, scale, attributes, cb, opaque);
return ts;
}
@@ -649,17 +610,6 @@ static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
void timer_deinit(QEMUTimer *ts);
/**
- * timer_free:
- * @ts: the timer
- *
- * Free a timer (it must not be on the active list)
- */
-static inline void timer_free(QEMUTimer *ts)
-{
- g_free(ts);
-}
-
-/**
* timer_del:
* @ts: the timer
*
@@ -671,6 +621,21 @@ static inline void timer_free(QEMUTimer *ts)
void timer_del(QEMUTimer *ts);
/**
+ * timer_free:
+ * @ts: the timer
+ *
+ * Free a timer. This will call timer_del() for you to remove
+ * the timer from the active list if it was still active.
+ */
+static inline void timer_free(QEMUTimer *ts)
+{
+ if (ts) {
+ timer_del(ts);
+ g_free(ts);
+ }
+}
+
+/**
* timer_mod_ns:
* @ts: the timer
* @expire_time: the expiry time in nanoseconds
@@ -711,7 +676,7 @@ void timer_mod(QEMUTimer *ts, int64_t expire_timer);
/**
* timer_mod_anticipate:
* @ts: the timer
- * @expire_time: the expiry time in nanoseconds
+ * @expire_time: the expire time in the units associated with the timer
*
* Modify a timer to expire at @expire_time or the current time, whichever
* comes earlier, taking into account the scale associated with the timer.
@@ -823,12 +788,6 @@ static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
*/
void init_clocks(QEMUTimerListNotifyCB *notify_cb);
-int64_t cpu_get_ticks(void);
-/* Caller must hold BQL */
-void cpu_enable_ticks(void);
-/* Caller must hold BQL */
-void cpu_disable_ticks(void);
-
static inline int64_t get_max_clock_jump(void)
{
/* This should be small enough to prevent excessive interrupts from being
@@ -851,6 +810,8 @@ static inline int64_t get_clock_realtime(void)
return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}
+extern int64_t clock_start;
+
/* Warning: don't insert tracepoints into these functions, they are
also used by simpletrace backend and tracepoints would cause
an infinite recursion! */
@@ -870,14 +831,11 @@ extern int use_rt_clock;
static inline int64_t get_clock(void)
{
-#ifdef CLOCK_MONOTONIC
if (use_rt_clock) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * 1000000000LL + ts.tv_nsec;
- } else
-#endif
- {
+ } else {
/* XXX: using gettimeofday leads to problems if the date
changes, so it should be avoided. */
return get_clock_realtime();
@@ -885,13 +843,6 @@ static inline int64_t get_clock(void)
}
#endif
-/* icount */
-int64_t cpu_get_icount_raw(void);
-int64_t cpu_get_icount(void);
-int64_t cpu_get_clock(void);
-int64_t cpu_icount_to_ns(int64_t icount);
-void cpu_update_icount(CPUState *cpu);
-
/*******************************************/
/* host CPU ticks (if available) */
@@ -1028,6 +979,28 @@ static inline int64_t cpu_get_host_ticks(void)
return cur - ofs;
}
+#elif defined(__riscv) && __riscv_xlen == 32
+static inline int64_t cpu_get_host_ticks(void)
+{
+ uint32_t lo, hi, tmph;
+ do {
+ asm volatile("RDTIMEH %0\n\t"
+ "RDTIME %1\n\t"
+ "RDTIMEH %2"
+ : "=r"(hi), "=r"(lo), "=r"(tmph));
+ } while (unlikely(tmph != hi));
+ return lo | (uint64_t)hi << 32;
+}
+
+#elif defined(__riscv) && __riscv_xlen > 32
+static inline int64_t cpu_get_host_ticks(void)
+{
+ int64_t val;
+
+ asm volatile("RDTIME %0" : "=r"(val));
+ return val;
+}
+
#else
/* The host CPU doesn't have an easily accessible cycle counter.
Just return a monotonically increasing value. This will be
@@ -1038,13 +1011,4 @@ static inline int64_t cpu_get_host_ticks(void)
}
#endif
-#ifdef CONFIG_PROFILER
-static inline int64_t profile_getclock(void)
-{
- return get_clock();
-}
-
-extern int64_t dev_time;
-#endif
-
#endif
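An illustrative timer lifecycle for the APIs above, showing that the reworked timer_free() now deletes a still-active timer itself; the callback is a stand-in.

static void tick_cb(void *opaque)
{
    /* ... periodic work ... */
}

static QEMUTimer *start_tick(void)
{
    QEMUTimer *t = timer_new_ms(QEMU_CLOCK_VIRTUAL, tick_cb, NULL);

    /* Fire 10 ms of guest virtual time from now. */
    timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    return t;
}

static void stop_tick(QEMUTimer *t)
{
    /* timer_free() now calls timer_del() for us, even if still pending. */
    timer_free(t);
}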
diff --git a/include/qemu/transactions.h b/include/qemu/transactions.h
new file mode 100644
index 0000000000..2f2060acd9
--- /dev/null
+++ b/include/qemu/transactions.h
@@ -0,0 +1,66 @@
+/*
+ * Simple transactions API
+ *
+ * Copyright (c) 2021 Virtuozzo International GmbH.
+ *
+ * Author:
+ * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * = Generic transaction API =
+ *
+ * The intended usage is the following: you create "prepare" functions, which
+ * represent the actions. They will usually take a Transaction * argument and
+ * call tran_add() to register finalization callbacks. For the finalization
+ * callbacks, prepare corresponding TransactionActionDrv structures.
+ *
+ * Then, when you need to make a transaction, create an empty Transaction with
+ * tran_new(), call your "prepare" functions on it, and finally call
+ * tran_abort() or tran_commit() to finalize the transaction by running the
+ * corresponding finalization actions in reverse order.
+ *
+ * The clean() functions registered by the drivers in a transaction are called
+ * last, after all abort() or commit() functions have been called.
+ */
+
+#ifndef QEMU_TRANSACTIONS_H
+#define QEMU_TRANSACTIONS_H
+
+#include <gmodule.h>
+
+typedef struct TransactionActionDrv {
+ void (*abort)(void *opaque);
+ void (*commit)(void *opaque);
+ void (*clean)(void *opaque);
+} TransactionActionDrv;
+
+typedef struct Transaction Transaction;
+
+Transaction *tran_new(void);
+void tran_add(Transaction *tran, TransactionActionDrv *drv, void *opaque);
+void tran_abort(Transaction *tran);
+void tran_commit(Transaction *tran);
+
+static inline void tran_finalize(Transaction *tran, int ret)
+{
+ if (ret < 0) {
+ tran_abort(tran);
+ } else {
+ tran_commit(tran);
+ }
+}
+
+#endif /* QEMU_TRANSACTIONS_H */
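A minimal end-to-end sketch of the transaction API documented above; the "create a file" action, its state, and the file names are hypothetical.

typedef struct {
    char *filename;
} CreateFileState;

static void create_file_abort(void *opaque)
{
    CreateFileState *s = opaque;

    unlink(s->filename);              /* roll back the prepared action */
}

static void create_file_clean(void *opaque)
{
    CreateFileState *s = opaque;

    g_free(s->filename);
    g_free(s);
}

static TransactionActionDrv create_file_drv = {
    .abort = create_file_abort,
    .clean = create_file_clean,
};

/* "prepare" function: perform the action and register how to undo it. */
static int create_file_prepare(const char *name, Transaction *tran)
{
    CreateFileState *s;
    int fd = open(name, O_CREAT | O_EXCL | O_WRONLY, 0644);

    if (fd < 0) {
        return -errno;
    }
    close(fd);

    s = g_new0(CreateFileState, 1);
    s->filename = g_strdup(name);
    tran_add(tran, &create_file_drv, s);
    return 0;
}

static int do_transaction(void)
{
    Transaction *tran = tran_new();
    int ret = create_file_prepare("/tmp/example-a", tran);

    if (ret == 0) {
        ret = create_file_prepare("/tmp/example-b", tran);
    }
    tran_finalize(tran, ret);         /* commit on success, abort otherwise */
    return ret;
}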
diff --git a/include/qemu/tsan.h b/include/qemu/tsan.h
new file mode 100644
index 0000000000..09cc665f91
--- /dev/null
+++ b/include/qemu/tsan.h
@@ -0,0 +1,71 @@
+#ifndef QEMU_TSAN_H
+#define QEMU_TSAN_H
+/*
+ * tsan.h
+ *
+ * This file defines macros used to give ThreadSanitizer
+ * additional information to help suppress warnings.
+ * This is necessary since TSan does not provide a header file
+ * for these annotations. The standard way to include these
+ * is via the below macros.
+ *
+ * Annotation examples can be found here:
+ * https://github.com/llvm/llvm-project/tree/master/compiler-rt/test/tsan
+ * annotate_happens_before.cpp or ignore_race.cpp are good places to start.
+ *
+ * The full set of annotations can be found here in tsan_interface_ann.cpp.
+ * https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/tsan/rtl/
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifdef CONFIG_TSAN
+/*
+ * Informs TSan of a happens before/after relationship.
+ */
+#define QEMU_TSAN_ANNOTATE_HAPPENS_BEFORE(addr) \
+ AnnotateHappensBefore(__FILE__, __LINE__, (void *)(addr))
+#define QEMU_TSAN_ANNOTATE_HAPPENS_AFTER(addr) \
+ AnnotateHappensAfter(__FILE__, __LINE__, (void *)(addr))
+/*
+ * Gives TSan more information about thread names so it can report the
+ * name of the thread in the warning report.
+ */
+#define QEMU_TSAN_ANNOTATE_THREAD_NAME(name) \
+ AnnotateThreadName(__FILE__, __LINE__, (void *)(name))
+/*
+ * Allows defining a region of code on which TSan will not record memory READS.
+ * This has the effect of disabling race detection for this section of code.
+ */
+#define QEMU_TSAN_ANNOTATE_IGNORE_READS_BEGIN() \
+ AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
+#define QEMU_TSAN_ANNOTATE_IGNORE_READS_END() \
+ AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
+/*
+ * Allows defining a region of code on which TSan will not record memory
+ * WRITES. This has the effect of disabling race detection for this
+ * section of code.
+ */
+#define QEMU_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN() \
+ AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+#define QEMU_TSAN_ANNOTATE_IGNORE_WRITES_END() \
+ AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+#else
+#define QEMU_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
+#define QEMU_TSAN_ANNOTATE_HAPPENS_AFTER(addr)
+#define QEMU_TSAN_ANNOTATE_THREAD_NAME(name)
+#define QEMU_TSAN_ANNOTATE_IGNORE_READS_BEGIN()
+#define QEMU_TSAN_ANNOTATE_IGNORE_READS_END()
+#define QEMU_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN()
+#define QEMU_TSAN_ANNOTATE_IGNORE_WRITES_END()
+#endif
+
+void AnnotateHappensBefore(const char *f, int l, void *addr);
+void AnnotateHappensAfter(const char *f, int l, void *addr);
+void AnnotateThreadName(const char *f, int l, char *name);
+void AnnotateIgnoreReadsBegin(const char *f, int l);
+void AnnotateIgnoreReadsEnd(const char *f, int l);
+void AnnotateIgnoreWritesBegin(const char *f, int l);
+void AnnotateIgnoreWritesEnd(const char *f, int l);
+#endif
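A small sketch of two of the annotations above: naming a thread for clearer reports and hiding a deliberately racy, statistics-only update; the counter is invented.

static void *stats_thread(void *opaque)
{
    unsigned long *counter = opaque;

    /* The name shows up in any TSan report involving this thread. */
    QEMU_TSAN_ANNOTATE_THREAD_NAME("stats-thread");

    /* Approximate statistics: hide this benign racy update from TSan. */
    QEMU_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN();
    (*counter)++;
    QEMU_TSAN_ANNOTATE_IGNORE_WRITES_END();
    return NULL;
}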
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 741935fe36..50c277cf0b 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -1,15 +1,34 @@
#ifndef QEMU_TYPEDEFS_H
#define QEMU_TYPEDEFS_H
-/* A load of opaque types so that device init declarations don't have to
- pull in all the real definitions. */
+/*
+ * This header is for selectively avoiding #include just to get a
+ * typedef name.
+ *
+ * Declaring a typedef name in its "obvious" place can result in
+ * inclusion cycles, in particular for complete struct and union
+ * types that need more types for their members. It can also result
+ * in headers pulling in many more headers, slowing down builds.
+ *
+ * You can break such cycles and unwanted dependencies by declaring
+ * the typedef name here.
+ *
+ * For struct types used in only a few headers, judicious use of the
+ * struct tag instead of the typedef name is commonly preferable.
+ */
-/* Please keep this list in case-insensitive alphabetical order */
+/*
+ * Incomplete struct types
+ * Please keep this list in case-insensitive alphabetical order.
+ */
+typedef struct AccelCPUState AccelCPUState;
+typedef struct AccelState AccelState;
typedef struct AdapterInfo AdapterInfo;
typedef struct AddressSpace AddressSpace;
typedef struct AioContext AioContext;
-typedef struct AllwinnerAHCIState AllwinnerAHCIState;
-typedef struct AudioState AudioState;
+typedef struct Aml Aml;
+typedef struct AnnounceTimer AnnounceTimer;
+typedef struct ArchCPU ArchCPU;
typedef struct BdrvDirtyBitmap BdrvDirtyBitmap;
typedef struct BdrvDirtyBitmapIter BdrvDirtyBitmapIter;
typedef struct BlockBackend BlockBackend;
@@ -18,17 +37,22 @@ typedef struct BlockDriverState BlockDriverState;
typedef struct BusClass BusClass;
typedef struct BusState BusState;
typedef struct Chardev Chardev;
+typedef struct Clock Clock;
typedef struct CompatProperty CompatProperty;
-typedef struct CoMutex CoMutex;
+typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
typedef struct CPUAddressSpace CPUAddressSpace;
+typedef struct CPUArchState CPUArchState;
+typedef struct CPUPluginState CPUPluginState;
+typedef struct CpuInfoFast CpuInfoFast;
+typedef struct CPUJumpCache CPUJumpCache;
typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
typedef struct DeviceListener DeviceListener;
typedef struct DeviceState DeviceState;
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
typedef struct DisplayChangeListener DisplayChangeListener;
-typedef struct DisplayState DisplayState;
-typedef struct DisplaySurface DisplaySurface;
typedef struct DriveInfo DriveInfo;
+typedef struct DumpState DumpState;
typedef struct Error Error;
typedef struct EventNotifier EventNotifier;
typedef struct FlatView FlatView;
@@ -36,14 +60,15 @@ typedef struct FWCfgEntry FWCfgEntry;
typedef struct FWCfgIoState FWCfgIoState;
typedef struct FWCfgMemState FWCfgMemState;
typedef struct FWCfgState FWCfgState;
-typedef struct HCIInfo HCIInfo;
-typedef struct HVFX86EmulatorState HVFX86EmulatorState;
+typedef struct GraphicHwOps GraphicHwOps;
+typedef struct HostMemoryBackend HostMemoryBackend;
typedef struct I2CBus I2CBus;
typedef struct I2SCodec I2SCodec;
typedef struct IOMMUMemoryRegion IOMMUMemoryRegion;
typedef struct ISABus ISABus;
typedef struct ISADevice ISADevice;
typedef struct IsaDma IsaDma;
+typedef struct JSONWriter JSONWriter;
typedef struct MACAddr MACAddr;
typedef struct MachineClass MachineClass;
typedef struct MachineState MachineState;
@@ -56,13 +81,13 @@ typedef struct MigrationIncomingState MigrationIncomingState;
typedef struct MigrationState MigrationState;
typedef struct Monitor Monitor;
typedef struct MonitorDef MonitorDef;
-typedef struct MouseTransformInfo MouseTransformInfo;
typedef struct MSIMessage MSIMessage;
typedef struct NetClientState NetClientState;
typedef struct NetFilterState NetFilterState;
typedef struct NICInfo NICInfo;
typedef struct NodeInfo NodeInfo;
typedef struct NumaNodeMem NumaNodeMem;
+typedef struct Object Object;
typedef struct ObjectClass ObjectClass;
typedef struct PCIBridge PCIBridge;
typedef struct PCIBus PCIBus;
@@ -72,23 +97,21 @@ typedef struct PCIEAERLog PCIEAERLog;
typedef struct PCIEAERMsg PCIEAERMsg;
typedef struct PCIEPort PCIEPort;
typedef struct PCIESlot PCIESlot;
+typedef struct PCIESriovPF PCIESriovPF;
+typedef struct PCIESriovVF PCIESriovVF;
typedef struct PCIExpressDevice PCIExpressDevice;
typedef struct PCIExpressHost PCIExpressHost;
typedef struct PCIHostDeviceAddress PCIHostDeviceAddress;
typedef struct PCIHostState PCIHostState;
-typedef struct PCMachineClass PCMachineClass;
-typedef struct PCMachineState PCMachineState;
-typedef struct PCMCIACardState PCMCIACardState;
-typedef struct PixelFormat PixelFormat;
+typedef struct PICCommonState PICCommonState;
typedef struct PostcopyDiscardState PostcopyDiscardState;
typedef struct Property Property;
typedef struct PropertyInfo PropertyInfo;
-typedef struct PS2State PS2State;
typedef struct QBool QBool;
typedef struct QDict QDict;
typedef struct QEMUBH QEMUBH;
typedef struct QemuConsole QemuConsole;
-typedef struct QemuDmaBuf QemuDmaBuf;
+typedef struct QEMUCursor QEMUCursor;
typedef struct QEMUFile QEMUFile;
typedef struct QemuLockable QemuLockable;
typedef struct QemuMutex QemuMutex;
@@ -99,7 +122,6 @@ typedef struct QEMUSGList QEMUSGList;
typedef struct QemuSpin QemuSpin;
typedef struct QEMUTimer QEMUTimer;
typedef struct QEMUTimerListGroup QEMUTimerListGroup;
-typedef struct QJSON QJSON;
typedef struct QList QList;
typedef struct QNull QNull;
typedef struct QNum QNum;
@@ -107,14 +129,28 @@ typedef struct QObject QObject;
typedef struct QString QString;
typedef struct RAMBlock RAMBlock;
typedef struct Range Range;
-typedef struct SerialState SerialState;
+typedef struct ReservedRegion ReservedRegion;
typedef struct SHPCDevice SHPCDevice;
-typedef struct SMBusDevice SMBusDevice;
typedef struct SSIBus SSIBus;
-typedef struct uWireSlave uWireSlave;
+typedef struct TCGCPUOps TCGCPUOps;
+typedef struct TCGHelperInfo TCGHelperInfo;
+typedef struct TranslationBlock TranslationBlock;
typedef struct VirtIODevice VirtIODevice;
typedef struct Visitor Visitor;
-typedef void SaveStateHandler(QEMUFile *f, void *opaque);
-typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
+typedef struct VMChangeStateEntry VMChangeStateEntry;
+typedef struct VMStateDescription VMStateDescription;
+
+/*
+ * Pointer types
+ * Such typedefs should be limited to cases where the typedef's users
+ * are oblivious of its "pointer-ness".
+ * Please keep this list in case-insensitive alphabetical order.
+ */
+typedef struct IRQState *qemu_irq;
+
+/*
+ * Function types
+ */
+typedef void (*qemu_irq_handler)(void *opaque, int n, int level);
#endif /* QEMU_TYPEDEFS_H */
diff --git a/include/qemu/units.h b/include/qemu/units.h
index 1c959d182e..692db3fbb2 100644
--- a/include/qemu/units.h
+++ b/include/qemu/units.h
@@ -17,77 +17,4 @@
#define PiB (INT64_C(1) << 50)
#define EiB (INT64_C(1) << 60)
-/*
- * The following lookup table is intended to be used when a literal string of
- * the number of bytes is required (for example if it needs to be stringified).
- * It can also be used for generic shortcuts of power-of-two sizes.
- * This table is generated using the AWK script below:
- *
- * BEGIN {
- * suffix="KMGTPE";
- * for(i=10; i<64; i++) {
- * val=2**i;
- * s=substr(suffix, int(i/10), 1);
- * n=2**(i%10);
- * pad=21-int(log(n)/log(10));
- * printf("#define S_%d%siB %*d\n", n, s, pad, val);
- * }
- * }
- */
-
-#define S_1KiB 1024
-#define S_2KiB 2048
-#define S_4KiB 4096
-#define S_8KiB 8192
-#define S_16KiB 16384
-#define S_32KiB 32768
-#define S_64KiB 65536
-#define S_128KiB 131072
-#define S_256KiB 262144
-#define S_512KiB 524288
-#define S_1MiB 1048576
-#define S_2MiB 2097152
-#define S_4MiB 4194304
-#define S_8MiB 8388608
-#define S_16MiB 16777216
-#define S_32MiB 33554432
-#define S_64MiB 67108864
-#define S_128MiB 134217728
-#define S_256MiB 268435456
-#define S_512MiB 536870912
-#define S_1GiB 1073741824
-#define S_2GiB 2147483648
-#define S_4GiB 4294967296
-#define S_8GiB 8589934592
-#define S_16GiB 17179869184
-#define S_32GiB 34359738368
-#define S_64GiB 68719476736
-#define S_128GiB 137438953472
-#define S_256GiB 274877906944
-#define S_512GiB 549755813888
-#define S_1TiB 1099511627776
-#define S_2TiB 2199023255552
-#define S_4TiB 4398046511104
-#define S_8TiB 8796093022208
-#define S_16TiB 17592186044416
-#define S_32TiB 35184372088832
-#define S_64TiB 70368744177664
-#define S_128TiB 140737488355328
-#define S_256TiB 281474976710656
-#define S_512TiB 562949953421312
-#define S_1PiB 1125899906842624
-#define S_2PiB 2251799813685248
-#define S_4PiB 4503599627370496
-#define S_8PiB 9007199254740992
-#define S_16PiB 18014398509481984
-#define S_32PiB 36028797018963968
-#define S_64PiB 72057594037927936
-#define S_128PiB 144115188075855872
-#define S_256PiB 288230376151711744
-#define S_512PiB 576460752303423488
-#define S_1EiB 1152921504606846976
-#define S_2EiB 2305843009213693952
-#define S_4EiB 4611686018427387904
-#define S_8EiB 9223372036854775808
-
#endif
diff --git a/include/qemu/uri.h b/include/qemu/uri.h
index d201c61260..255e61f452 100644
--- a/include/qemu/uri.h
+++ b/include/qemu/uri.h
@@ -41,8 +41,7 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library. If not, see <https://www.gnu.org/licenses/>.
*
* Authors:
* Richard W.M. Jones <rjones@redhat.com>
@@ -53,10 +52,6 @@
#ifndef QEMU_URI_H
#define QEMU_URI_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/**
* URI:
*
@@ -64,48 +59,41 @@ extern "C" {
* as described in RFC 2396 but separated for further processing.
*/
typedef struct URI {
- char *scheme; /* the URI scheme */
- char *opaque; /* opaque part */
- char *authority; /* the authority part */
- char *server; /* the server part */
- char *user; /* the user part */
- int port; /* the port number */
- char *path; /* the path string */
- char *fragment; /* the fragment identifier */
- int cleanup; /* parsing potentially unclean URI */
- char *query; /* the query string (as it appears in the URI) */
+ char *scheme; /* the URI scheme */
+ char *opaque; /* opaque part */
+ char *authority; /* the authority part */
+ char *server; /* the server part */
+ char *user; /* the user part */
+ int port; /* the port number */
+ char *path; /* the path string */
+ char *fragment; /* the fragment identifier */
+ int cleanup; /* parsing potentially unclean URI */
+ char *query; /* the query string (as it appears in the URI) */
} URI;
URI *uri_new(void);
-char *uri_resolve(const char *URI, const char *base);
-char *uri_resolve_relative(const char *URI, const char *base);
URI *uri_parse(const char *str);
URI *uri_parse_raw(const char *str, int raw);
int uri_parse_into(URI *uri, const char *str);
char *uri_to_string(URI *uri);
-char *uri_string_escape(const char *str, const char *list);
-char *uri_string_unescape(const char *str, int len, char *target);
void uri_free(URI *uri);
/* Single web service query parameter 'name=value'. */
typedef struct QueryParam {
- char *name; /* Name (unescaped). */
- char *value; /* Value (unescaped). */
- int ignore; /* Ignore this field in qparam_get_query */
+ char *name; /* Name (unescaped). */
+ char *value; /* Value (unescaped). */
+ int ignore; /* Ignore this field in qparam_get_query */
} QueryParam;
/* Set of parameters. */
typedef struct QueryParams {
- int n; /* number of parameters used */
- int alloc; /* allocated space */
- QueryParam *p; /* array of parameters */
+ int n; /* number of parameters used */
+ int alloc; /* allocated space */
+ QueryParam *p; /* array of parameters */
} QueryParams;
-struct QueryParams *query_params_new (int init_alloc);
-extern QueryParams *query_params_parse (const char *query);
-extern void query_params_free (QueryParams *ps);
+QueryParams *query_params_new(int init_alloc);
+QueryParams *query_params_parse(const char *query);
+void query_params_free(QueryParams *ps);
-#ifdef __cplusplus
-}
-#endif
#endif /* QEMU_URI_H */
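A short usage sketch for the cleaned-up URI helpers; the example URI is arbitrary.

static void uri_demo(void)
{
    URI *uri = uri_parse("nbd://example.org:10809/disk0?tls=on");

    if (!uri) {
        return;
    }
    printf("scheme=%s server=%s port=%d path=%s\n",
           uri->scheme, uri->server, uri->port, uri->path);

    if (uri->query) {
        QueryParams *qp = query_params_parse(uri->query);

        for (int i = 0; i < qp->n; i++) {
            printf("  %s=%s\n", qp->p[i].name, qp->p[i].value);
        }
        query_params_free(qp);
    }
    uri_free(uri);
}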
diff --git a/include/qemu/userfaultfd.h b/include/qemu/userfaultfd.h
new file mode 100644
index 0000000000..18a4314212
--- /dev/null
+++ b/include/qemu/userfaultfd.h
@@ -0,0 +1,46 @@
+/*
+ * Linux UFFD-WP support
+ *
+ * Copyright Virtuozzo GmbH, 2020
+ *
+ * Authors:
+ * Andrey Gruzdev <andrey.gruzdev@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef USERFAULTFD_H
+#define USERFAULTFD_H
+
+#ifdef CONFIG_LINUX
+
+#include "exec/hwaddr.h"
+#include <linux/userfaultfd.h>
+
+/**
+ * uffd_open(): Open a userfaultfd handle for the current context.
+ *
+ * @flags: The flags we want to pass in when creating the handle.
+ *
+ * Returns: the uffd handle (>= 0) on success, or a negative value on error.
+ */
+int uffd_open(int flags);
+int uffd_query_features(uint64_t *features);
+int uffd_create_fd(uint64_t features, bool non_blocking);
+void uffd_close_fd(int uffd_fd);
+int uffd_register_memory(int uffd_fd, void *addr, uint64_t length,
+ uint64_t mode, uint64_t *ioctls);
+int uffd_unregister_memory(int uffd_fd, void *addr, uint64_t length);
+int uffd_change_protection(int uffd_fd, void *addr, uint64_t length,
+ bool wp, bool dont_wake);
+int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
+ uint64_t length, bool dont_wake);
+int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake);
+int uffd_wakeup(int uffd_fd, void *addr, uint64_t length);
+int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count);
+bool uffd_poll_events(int uffd_fd, int tmo);
+
+#endif /* CONFIG_LINUX */
+
+#endif /* USERFAULTFD_H */
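A rough, Linux-only sketch of the write-protect flow these helpers support; the UFFDIO_REGISTER_MODE_WP flag comes from linux/userfaultfd.h, success/failure is assumed to follow the usual 0/-1 convention, and fault handling is reduced to a comment.

/* Write-protect a registered range, then later unprotect one faulting page. */
static int wp_range(int uffd_fd, void *addr, uint64_t len, size_t page_size)
{
    uint64_t ioctls;

    if (uffd_register_memory(uffd_fd, addr, len,
                             UFFDIO_REGISTER_MODE_WP, &ioctls)) {
        return -1;
    }
    /* Arm write protection on the whole range. */
    if (uffd_change_protection(uffd_fd, addr, len, true, false)) {
        return -1;
    }
    /* ... on a write fault for @addr, drop protection for that page ... */
    return uffd_change_protection(uffd_fd, addr, page_size, false, false);
}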
diff --git a/include/qemu/uuid.h b/include/qemu/uuid.h
index 09489ce5c5..869f84af09 100644
--- a/include/qemu/uuid.h
+++ b/include/qemu/uuid.h
@@ -16,7 +16,6 @@
#ifndef QEMU_UUID_H
#define QEMU_UUID_H
-#include "qemu-common.h"
/* Version 4 UUID (pseudo random numbers), RFC4122 4.4. */
@@ -35,14 +34,54 @@ typedef struct {
};
} QemuUUID;
+/**
+ * UUID_LE - converts the fields of a UUID to a little-endian array;
+ * each parameter is a field of the UUID.
+ *
+ * @time_low: The low field of the timestamp
+ * @time_mid: The middle field of the timestamp
+ * @time_hi_and_version: The high field of the timestamp
+ * multiplexed with the version number
+ * @clock_seq_hi_and_reserved: The high field of the clock
+ * sequence multiplexed with the variant
+ * @clock_seq_low: The low field of the clock sequence
+ * @node0: The spatially unique node0 identifier
+ * @node1: The spatially unique node1 identifier
+ * @node2: The spatially unique node2 identifier
+ * @node3: The spatially unique node3 identifier
+ * @node4: The spatially unique node4 identifier
+ * @node5: The spatially unique node5 identifier
+ */
+#define UUID_LE(time_low, time_mid, time_hi_and_version, \
+ clock_seq_hi_and_reserved, clock_seq_low, node0, node1, node2, \
+ node3, node4, node5) \
+ { (time_low) & 0xff, ((time_low) >> 8) & 0xff, ((time_low) >> 16) & 0xff, \
+ ((time_low) >> 24) & 0xff, (time_mid) & 0xff, ((time_mid) >> 8) & 0xff, \
+ (time_hi_and_version) & 0xff, ((time_hi_and_version) >> 8) & 0xff, \
+ (clock_seq_hi_and_reserved), (clock_seq_low), (node0), (node1), (node2),\
+ (node3), (node4), (node5) }
+
+/* Normal (network byte order) UUID */
+#define UUID(time_low, time_mid, time_hi_and_version, \
+ clock_seq_hi_and_reserved, clock_seq_low, node0, node1, node2, \
+ node3, node4, node5) \
+ { ((time_low) >> 24) & 0xff, ((time_low) >> 16) & 0xff, \
+ ((time_low) >> 8) & 0xff, (time_low) & 0xff, \
+ ((time_mid) >> 8) & 0xff, (time_mid) & 0xff, \
+ ((time_hi_and_version) >> 8) & 0xff, (time_hi_and_version) & 0xff, \
+ (clock_seq_hi_and_reserved), (clock_seq_low), \
+ (node0), (node1), (node2), (node3), (node4), (node5) \
+ }
+
#define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-" \
"%02hhx%02hhx-%02hhx%02hhx-" \
"%02hhx%02hhx-" \
"%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
-#define UUID_FMT_LEN 36
-
#define UUID_NONE "00000000-0000-0000-0000-000000000000"
+QEMU_BUILD_BUG_ON(sizeof(UUID_NONE) - 1 != 36);
+
+#define UUID_STR_LEN sizeof(UUID_NONE)
void qemu_uuid_generate(QemuUUID *out);
@@ -56,6 +95,8 @@ char *qemu_uuid_unparse_strdup(const QemuUUID *uuid);
int qemu_uuid_parse(const char *str, QemuUUID *uuid);
-void qemu_uuid_bswap(QemuUUID *uuid);
+QemuUUID qemu_uuid_bswap(QemuUUID uuid);
+
+uint32_t qemu_uuid_hash(const void *uuid);
#endif
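A short sketch exercising the UUID helpers shown above; the printed strings are just for illustration.

static void uuid_demo(void)
{
    QemuUUID u;
    g_autofree char *str = NULL;

    qemu_uuid_generate(&u);
    str = qemu_uuid_unparse_strdup(&u);   /* 36 chars + NUL == UUID_STR_LEN */
    printf("generated %s\n", str);

    if (qemu_uuid_parse(UUID_NONE, &u) == 0) {
        printf("parsed the all-zero UUID\n");
    }
}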
diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
index 1f057c2b9e..bde9495b25 100644
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -18,11 +18,11 @@ typedef struct QEMUVFIOState QEMUVFIOState;
QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp);
void qemu_vfio_close(QEMUVFIOState *s);
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova_list);
+ bool temporary, uint64_t *iova_list, Error **errp);
int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
- uint64_t offset, uint64_t size,
+ uint64_t offset, uint64_t size, int prot,
Error **errp);
void qemu_vfio_pci_unmap_bar(QEMUVFIOState *s, int index, void *bar,
uint64_t offset, uint64_t size);
diff --git a/include/qemu/vhost-user-server.h b/include/qemu/vhost-user-server.h
new file mode 100644
index 0000000000..0417ec0533
--- /dev/null
+++ b/include/qemu/vhost-user-server.h
@@ -0,0 +1,73 @@
+/*
+ * Sharing QEMU devices via vhost-user protocol
+ *
+ * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef VHOST_USER_SERVER_H
+#define VHOST_USER_SERVER_H
+
+#include "subprojects/libvhost-user/libvhost-user.h" /* only for the type definitions */
+#include "io/channel-socket.h"
+#include "io/channel-file.h"
+#include "io/net-listener.h"
+#include "qapi/error.h"
+#include "standard-headers/linux/virtio_blk.h"
+
+/* A kick fd that we monitor on behalf of libvhost-user */
+typedef struct VuFdWatch {
+ VuDev *vu_dev;
+ int fd; /*kick fd*/
+ void *pvt;
+ vu_watch_cb cb;
+ QTAILQ_ENTRY(VuFdWatch) next;
+} VuFdWatch;
+
+/**
+ * VuServer:
+ * A vhost-user server instance with user-defined VuDevIface callbacks.
+ * Vhost-user device backends can be implemented using VuServer. VuDevIface
+ * callbacks and virtqueue kicks run in the given AioContext.
+ */
+typedef struct {
+ QIONetListener *listener;
+ QEMUBH *restart_listener_bh;
+ AioContext *ctx;
+ int max_queues;
+ const VuDevIface *vu_iface;
+
+ unsigned int in_flight; /* atomic */
+
+ /* Protected by ctx lock */
+ bool in_qio_channel_yield;
+ bool wait_idle;
+ bool quiescing;
+ VuDev vu_dev;
+ QIOChannel *ioc; /* The I/O channel with the client */
+ QIOChannelSocket *sioc; /* The underlying data channel with the client */
+ QTAILQ_HEAD(, VuFdWatch) vu_fd_watches;
+
+ Coroutine *co_trip; /* coroutine for processing VhostUserMsg */
+} VuServer;
+
+bool vhost_user_server_start(VuServer *server,
+ SocketAddress *unix_socket,
+ AioContext *ctx,
+ uint16_t max_queues,
+ const VuDevIface *vu_iface,
+ Error **errp);
+
+void vhost_user_server_stop(VuServer *server);
+
+void vhost_user_server_inc_in_flight(VuServer *server);
+void vhost_user_server_dec_in_flight(VuServer *server);
+bool vhost_user_server_has_in_flight(VuServer *server);
+
+void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx);
+void vhost_user_server_detach_aio_context(VuServer *server);
+
+#endif /* VHOST_USER_SERVER_H */
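
Editor's note: a rough sketch of the intended call pattern for the new header, using only the declarations above; the callback table is left empty and the export helper names are hypothetical:

/*
 * Rough sketch of a backend wiring up VuServer.  A real backend fills
 * in its VuDevIface handlers; here the table is a zeroed placeholder.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/vhost-user-server.h"

static const VuDevIface example_iface;   /* zero-initialized placeholder */

static bool example_export_start(VuServer *server, SocketAddress *addr,
                                 AioContext *ctx, Error **errp)
{
    /* One queue; VuDevIface callbacks and kicks will run in @ctx. */
    return vhost_user_server_start(server, addr, ctx, 1,
                                   &example_iface, errp);
}

static void example_export_stop(VuServer *server)
{
    vhost_user_server_stop(server);
}
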
diff --git a/include/qemu/win_dump_defs.h b/include/qemu/win_dump_defs.h
index 145096e8ee..73a44e2408 100644
--- a/include/qemu/win_dump_defs.h
+++ b/include/qemu/win_dump_defs.h
@@ -11,11 +11,22 @@
#ifndef QEMU_WIN_DUMP_DEFS_H
#define QEMU_WIN_DUMP_DEFS_H
+typedef struct WinDumpPhyMemRun32 {
+ uint32_t BasePage;
+ uint32_t PageCount;
+} QEMU_PACKED WinDumpPhyMemRun32;
+
typedef struct WinDumpPhyMemRun64 {
uint64_t BasePage;
uint64_t PageCount;
} QEMU_PACKED WinDumpPhyMemRun64;
+typedef struct WinDumpPhyMemDesc32 {
+ uint32_t NumberOfRuns;
+ uint32_t NumberOfPages;
+ WinDumpPhyMemRun32 Run[86];
+} QEMU_PACKED WinDumpPhyMemDesc32;
+
typedef struct WinDumpPhyMemDesc64 {
uint32_t NumberOfRuns;
uint32_t unused;
@@ -33,6 +44,39 @@ typedef struct WinDumpExceptionRecord {
uint64_t ExceptionInformation[15];
} QEMU_PACKED WinDumpExceptionRecord;
+typedef struct WinDumpHeader32 {
+ char Signature[4];
+ char ValidDump[4];
+ uint32_t MajorVersion;
+ uint32_t MinorVersion;
+ uint32_t DirectoryTableBase;
+ uint32_t PfnDatabase;
+ uint32_t PsLoadedModuleList;
+ uint32_t PsActiveProcessHead;
+ uint32_t MachineImageType;
+ uint32_t NumberProcessors;
+ union {
+ struct {
+ uint32_t BugcheckCode;
+ uint32_t BugcheckParameter1;
+ uint32_t BugcheckParameter2;
+ uint32_t BugcheckParameter3;
+ uint32_t BugcheckParameter4;
+ };
+ uint8_t BugcheckData[20];
+ };
+ uint8_t VersionUser[32];
+ uint32_t reserved0;
+ uint32_t KdDebuggerDataBlock;
+ union {
+ WinDumpPhyMemDesc32 PhysicalMemoryBlock;
+ uint8_t PhysicalMemoryBlockBuffer[700];
+ };
+ uint8_t reserved1[3200];
+ uint32_t RequiredDumpSpace;
+ uint8_t reserved2[92];
+} QEMU_PACKED WinDumpHeader32;
+
typedef struct WinDumpHeader64 {
char Signature[4];
char ValidDump[4];
@@ -81,24 +125,48 @@ typedef struct WinDumpHeader64 {
uint8_t reserved[4018];
} QEMU_PACKED WinDumpHeader64;
+typedef union WinDumpHeader {
+ struct {
+ char Signature[4];
+ char ValidDump[4];
+ };
+ WinDumpHeader32 x32;
+ WinDumpHeader64 x64;
+} WinDumpHeader;
+
#define KDBG_OWNER_TAG_OFFSET64 0x10
#define KDBG_MM_PFN_DATABASE_OFFSET64 0xC0
#define KDBG_KI_BUGCHECK_DATA_OFFSET64 0x88
#define KDBG_KI_PROCESSOR_BLOCK_OFFSET64 0x218
#define KDBG_OFFSET_PRCB_CONTEXT_OFFSET64 0x338
+#define KDBG_OWNER_TAG_OFFSET KDBG_OWNER_TAG_OFFSET64
+#define KDBG_MM_PFN_DATABASE_OFFSET KDBG_MM_PFN_DATABASE_OFFSET64
+#define KDBG_KI_BUGCHECK_DATA_OFFSET KDBG_KI_BUGCHECK_DATA_OFFSET64
+#define KDBG_KI_PROCESSOR_BLOCK_OFFSET KDBG_KI_PROCESSOR_BLOCK_OFFSET64
+#define KDBG_OFFSET_PRCB_CONTEXT_OFFSET KDBG_OFFSET_PRCB_CONTEXT_OFFSET64
+
#define VMCOREINFO_ELF_NOTE_HDR_SIZE 24
+#define VMCOREINFO_WIN_DUMP_NOTE_SIZE64 (sizeof(WinDumpHeader64) + \
+ VMCOREINFO_ELF_NOTE_HDR_SIZE)
+#define VMCOREINFO_WIN_DUMP_NOTE_SIZE32 (sizeof(WinDumpHeader32) + \
+ VMCOREINFO_ELF_NOTE_HDR_SIZE)
#define WIN_CTX_X64 0x00100000L
+#define WIN_CTX_X86 0x00010000L
#define WIN_CTX_CTL 0x00000001L
#define WIN_CTX_INT 0x00000002L
#define WIN_CTX_SEG 0x00000004L
#define WIN_CTX_FP 0x00000008L
#define WIN_CTX_DBG 0x00000010L
+#define WIN_CTX_EXT 0x00000020L
+
+#define WIN_CTX64_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_FP)
+#define WIN_CTX64_ALL (WIN_CTX64_FULL | WIN_CTX_SEG | WIN_CTX_DBG)
-#define WIN_CTX_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_FP)
-#define WIN_CTX_ALL (WIN_CTX_FULL | WIN_CTX_SEG | WIN_CTX_DBG)
+#define WIN_CTX32_FULL (WIN_CTX_X86 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_SEG)
+#define WIN_CTX32_ALL (WIN_CTX32_FULL | WIN_CTX_FP | WIN_CTX_DBG | WIN_CTX_EXT)
#define LIVE_SYSTEM_DUMP 0x00000161
@@ -107,7 +175,41 @@ typedef struct WinM128A {
int64_t high;
} QEMU_ALIGNED(16) WinM128A;
-typedef struct WinContext {
+typedef struct WinContext32 {
+ uint32_t ContextFlags;
+
+ uint32_t Dr0;
+ uint32_t Dr1;
+ uint32_t Dr2;
+ uint32_t Dr3;
+ uint32_t Dr6;
+ uint32_t Dr7;
+
+ uint8_t FloatSave[112];
+
+ uint32_t SegGs;
+ uint32_t SegFs;
+ uint32_t SegEs;
+ uint32_t SegDs;
+
+ uint32_t Edi;
+ uint32_t Esi;
+ uint32_t Ebx;
+ uint32_t Edx;
+ uint32_t Ecx;
+ uint32_t Eax;
+
+ uint32_t Ebp;
+ uint32_t Eip;
+ uint32_t SegCs;
+ uint32_t EFlags;
+ uint32_t Esp;
+ uint32_t SegSs;
+
+ uint8_t ExtendedRegisters[512];
+} QEMU_ALIGNED(16) WinContext32;
+
+typedef struct WinContext64 {
uint64_t PHome[6];
uint32_t ContextFlags;
@@ -174,6 +276,11 @@ typedef struct WinContext {
uint64_t LastBranchFromRip;
uint64_t LastExceptionToRip;
uint64_t LastExceptionFromRip;
-} QEMU_ALIGNED(16) WinContext;
+} QEMU_ALIGNED(16) WinContext64;
+
+typedef union WinContext {
+ WinContext32 x32;
+ WinContext64 x64;
+} WinContext;
#endif /* QEMU_WIN_DUMP_DEFS_H */
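
Editor's note: the new WinDumpHeader union lets dump-processing code inspect the shared Signature/ValidDump prefix before committing to the 32- or 64-bit layout. A sketch under the assumption that the usual Windows crash-dump markers ("DUMP" vs "DU64" in ValidDump) apply and that WinDumpHeader64 carries the same PhysicalMemoryBlock member as the 32-bit header shown above:

/*
 * Sketch of why WinDumpHeader is a union: a reader checks the shared
 * prefix, then picks the 32- or 64-bit layout.  Marker strings and the
 * 64-bit field names are assumptions, not taken from this hunk.
 */
#include "qemu/osdep.h"
#include "qemu/win_dump_defs.h"

static bool win_dump_is_64bit(const WinDumpHeader *h)
{
    /* 64-bit dumps are assumed to use "DU64" here, 32-bit dumps "DUMP". */
    return memcmp(h->ValidDump, "DU64", sizeof(h->ValidDump)) == 0;
}

static uint32_t win_dump_nr_runs(const WinDumpHeader *h)
{
    return win_dump_is_64bit(h) ? h->x64.PhysicalMemoryBlock.NumberOfRuns
                                : h->x32.PhysicalMemoryBlock.NumberOfRuns;
}
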
diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h
index a83fe8e749..b08a934acc 100644
--- a/include/qemu/xattr.h
+++ b/include/qemu/xattr.h
@@ -22,8 +22,12 @@
#ifdef CONFIG_LIBATTR
# include <attr/xattr.h>
#else
-# define ENOATTR ENODATA
-# include <sys/xattr.h>
+# if !defined(ENOATTR)
+# define ENOATTR ENODATA
+# endif
+# ifndef CONFIG_WIN32
+# include <sys/xattr.h>
+# endif
#endif
#endif
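
Editor's note: the portability point of this hunk is that ENOATTR stays defined on every configuration, so callers need only one errno check whichever xattr header is in use. A small sketch, assuming a Linux-like host where getxattr() with a zero-length buffer probes for the attribute:

/*
 * Sketch only; getxattr() availability and semantics are assumptions
 * about the host, not something this header guarantees.
 */
#include "qemu/osdep.h"
#include "qemu/xattr.h"

static bool file_has_xattr(const char *path, const char *name)
{
    ssize_t len = getxattr(path, name, NULL, 0);

    if (len < 0 && errno == ENOATTR) {
        return false;               /* attribute simply not present */
    }
    return len >= 0;
}
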
diff --git a/include/qemu/xxhash.h b/include/qemu/xxhash.h
index 076f1f6054..0259bbef18 100644
--- a/include/qemu/xxhash.h
+++ b/include/qemu/xxhash.h
@@ -48,8 +48,8 @@
* xxhash32, customized for input variables that are not guaranteed to be
* contiguous in memory.
*/
-static inline uint32_t
-qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
+static inline uint32_t qemu_xxhash8(uint64_t ab, uint64_t cd, uint64_t ef,
+ uint32_t g, uint32_t h)
{
uint32_t v1 = QEMU_XXHASH_SEED + PRIME32_1 + PRIME32_2;
uint32_t v2 = QEMU_XXHASH_SEED + PRIME32_2;
@@ -59,6 +59,8 @@ qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
uint32_t b = ab >> 32;
uint32_t c = cd;
uint32_t d = cd >> 32;
+ uint32_t e = ef;
+ uint32_t f = ef >> 32;
uint32_t h32;
v1 += a * PRIME32_2;
@@ -89,6 +91,9 @@ qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
h32 += g * PRIME32_3;
h32 = rol32(h32, 17) * PRIME32_4;
+ h32 += h * PRIME32_3;
+ h32 = rol32(h32, 17) * PRIME32_4;
+
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
@@ -100,23 +105,127 @@ qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
static inline uint32_t qemu_xxhash2(uint64_t ab)
{
- return qemu_xxhash7(ab, 0, 0, 0, 0);
+ return qemu_xxhash8(ab, 0, 0, 0, 0);
}
static inline uint32_t qemu_xxhash4(uint64_t ab, uint64_t cd)
{
- return qemu_xxhash7(ab, cd, 0, 0, 0);
+ return qemu_xxhash8(ab, cd, 0, 0, 0);
}
static inline uint32_t qemu_xxhash5(uint64_t ab, uint64_t cd, uint32_t e)
{
- return qemu_xxhash7(ab, cd, e, 0, 0);
+ return qemu_xxhash8(ab, cd, 0, e, 0);
}
static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e,
uint32_t f)
{
- return qemu_xxhash7(ab, cd, e, f, 0);
+ return qemu_xxhash8(ab, cd, 0, e, f);
+}
+
+static inline uint32_t qemu_xxhash7(uint64_t ab, uint64_t cd, uint64_t ef,
+ uint32_t g)
+{
+ return qemu_xxhash8(ab, cd, ef, g, 0);
+}
+
+/*
+ * Component parts of the XXH64 algorithm from
+ * https://github.com/Cyan4973/xxHash/blob/v0.8.0/xxhash.h
+ *
+ * The complete algorithm looks like
+ *
+ * i = 0;
+ * if (len >= 32) {
+ * v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ * v2 = seed + XXH_PRIME64_2;
+ * v3 = seed + 0;
+ * v4 = seed - XXH_PRIME64_1;
+ * do {
+ * v1 = XXH64_round(v1, get64bits(input + i));
+ * v2 = XXH64_round(v2, get64bits(input + i + 8));
+ * v3 = XXH64_round(v3, get64bits(input + i + 16));
+ * v4 = XXH64_round(v4, get64bits(input + i + 24));
+ * } while ((i += 32) <= len);
+ * h64 = XXH64_mergerounds(v1, v2, v3, v4);
+ * } else {
+ * h64 = seed + XXH_PRIME64_5;
+ * }
+ * h64 += len;
+ *
+ * for (; i + 8 <= len; i += 8) {
+ * h64 ^= XXH64_round(0, get64bits(input + i));
+ * h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+ * }
+ * for (; i + 4 <= len; i += 4) {
+ * h64 ^= get32bits(input + i) * PRIME64_1;
+ * h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+ * }
+ * for (; i < len; i += 1) {
+ * h64 ^= get8bits(input + i) * XXH_PRIME64_5;
+ * h64 = rol64(h64, 11) * XXH_PRIME64_1;
+ * }
+ *
+ * return XXH64_avalanche(h64)
+ *
+ * Exposing the pieces instead allows for simplified usage when
+ * the length is a known constant and the inputs are in registers.
+ */
+#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL
+#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL
+#define XXH_PRIME64_3 0x165667B19E3779F9ULL
+#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL
+#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL
+
+static inline uint64_t XXH64_round(uint64_t acc, uint64_t input)
+{
+ return rol64(acc + input * XXH_PRIME64_2, 31) * XXH_PRIME64_1;
+}
+
+static inline uint64_t XXH64_mergeround(uint64_t acc, uint64_t val)
+{
+ return (acc ^ XXH64_round(0, val)) * XXH_PRIME64_1 + XXH_PRIME64_4;
+}
+
+static inline uint64_t XXH64_mergerounds(uint64_t v1, uint64_t v2,
+ uint64_t v3, uint64_t v4)
+{
+ uint64_t h64;
+
+ h64 = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18);
+ h64 = XXH64_mergeround(h64, v1);
+ h64 = XXH64_mergeround(h64, v2);
+ h64 = XXH64_mergeround(h64, v3);
+ h64 = XXH64_mergeround(h64, v4);
+
+ return h64;
+}
+
+static inline uint64_t XXH64_avalanche(uint64_t h64)
+{
+ h64 ^= h64 >> 33;
+ h64 *= XXH_PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= XXH_PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+}
+
+static inline uint64_t qemu_xxhash64_4(uint64_t a, uint64_t b,
+ uint64_t c, uint64_t d)
+{
+ uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
+ uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
+ uint64_t v3 = QEMU_XXHASH_SEED + 0;
+ uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
+
+ v1 = XXH64_round(v1, a);
+ v2 = XXH64_round(v2, b);
+ v3 = XXH64_round(v3, c);
+ v4 = XXH64_round(v4, d);
+
+ return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4));
}
#endif /* QEMU_XXHASH_H */
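
Editor's note: qemu_xxhash64_4() above shows the exposed XXH64 pieces composed for a fixed 32-byte input. The same approach works for other known-constant lengths; a sketch for a 16-byte input, which takes the short-input path of the commented algorithm (the function name is hypothetical):

/*
 * Sketch only: the commented algorithm specialized for a fixed 16-byte
 * input (len < 32, so the four-lane loop is skipped).  qemu/xxhash.h
 * supplies the helpers and QEMU_XXHASH_SEED.
 */
#include "qemu/osdep.h"
#include "qemu/xxhash.h"

static inline uint64_t example_xxhash64_2(uint64_t a, uint64_t b)
{
    /* h64 = seed + XXH_PRIME64_5, then h64 += len with len == 16. */
    uint64_t h64 = QEMU_XXHASH_SEED + XXH_PRIME64_5 + 16;

    /* Two iterations of the 8-byte tail loop from the comment above. */
    h64 ^= XXH64_round(0, a);
    h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
    h64 ^= XXH64_round(0, b);
    h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;

    return XXH64_avalanche(h64);
}
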
diff --git a/include/qemu/yank.h b/include/qemu/yank.h
new file mode 100644
index 0000000000..3d88af6996
--- /dev/null
+++ b/include/qemu/yank.h
@@ -0,0 +1,87 @@
+/*
+ * QEMU yank feature
+ *
+ * Copyright (c) Lukas Straub <lukasstraub2@web.de>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef YANK_H
+#define YANK_H
+
+#include "qapi/qapi-types-yank.h"
+
+typedef void (YankFn)(void *opaque);
+
+/**
+ * yank_register_instance: Register a new instance.
+ *
+ * This registers a new instance for yanking. Must be called before any yank
+ * function is registered for this instance.
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ * @errp: Error object.
+ *
+ * Returns true on success or false if an error occurred.
+ */
+bool yank_register_instance(const YankInstance *instance, Error **errp);
+
+/**
+ * yank_unregister_instance: Unregister an instance.
+ *
+ * This unregisters an instance. Must be called only after every yank function
+ * of the instance has been unregistered.
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ */
+void yank_unregister_instance(const YankInstance *instance);
+
+/**
+ * yank_register_function: Register a yank function
+ *
+ * This registers a yank function. All limitations of QMP out-of-band (OOB)
+ * commands apply to the yank function as well. See docs/devel/qapi-code-gen.rst
+ * under "An OOB-capable command handler must satisfy the following conditions".
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ * @func: The yank function.
+ * @opaque: Will be passed to the yank function.
+ */
+void yank_register_function(const YankInstance *instance,
+ YankFn *func,
+ void *opaque);
+
+/**
+ * yank_unregister_function: Unregister a yank function
+ *
+ * This unregisters a yank function.
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ * @func: func that was passed to yank_register_function.
+ * @opaque: opaque that was passed to yank_register_function.
+ */
+void yank_unregister_function(const YankInstance *instance,
+ YankFn *func,
+ void *opaque);
+
+#define BLOCKDEV_YANK_INSTANCE(the_node_name) (&(YankInstance) { \
+ .type = YANK_INSTANCE_TYPE_BLOCK_NODE, \
+ .u.block_node.node_name = (the_node_name) })
+
+#define CHARDEV_YANK_INSTANCE(the_id) (&(YankInstance) { \
+ .type = YANK_INSTANCE_TYPE_CHARDEV, \
+ .u.chardev.id = (the_id) })
+
+#define MIGRATION_YANK_INSTANCE (&(YankInstance) { \
+ .type = YANK_INSTANCE_TYPE_MIGRATION })
+
+#endif
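
Editor's note: a sketch of the intended registration/unregistration sequence, using only the declarations above; the chardev id "serial0" and the opaque pointer are placeholders:

/*
 * Sketch only; the instance id and what the callback actually does are
 * placeholders, not part of the yank API itself.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/yank.h"

static void example_yank_cb(void *opaque)
{
    /*
     * Runs under the same constraints as an OOB command handler, e.g.
     * shut down a socket so that pending I/O fails promptly.
     */
}

static bool example_setup_yank(void *ioc, Error **errp)
{
    if (!yank_register_instance(CHARDEV_YANK_INSTANCE("serial0"), errp)) {
        return false;
    }
    yank_register_function(CHARDEV_YANK_INSTANCE("serial0"),
                           example_yank_cb, ioc);
    return true;
}

static void example_teardown_yank(void *ioc)
{
    yank_unregister_function(CHARDEV_YANK_INSTANCE("serial0"),
                             example_yank_cb, ioc);
    yank_unregister_instance(CHARDEV_YANK_INSTANCE("serial0"));
}
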
diff --git a/include/qom/object.h b/include/qom/object.h
index e0262962b5..13d3a655dd 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -15,13 +15,11 @@
#define QEMU_OBJECT_H
#include "qapi/qapi-builtin-types.h"
-#include "qemu/queue.h"
+#include "qemu/module.h"
struct TypeImpl;
typedef struct TypeImpl *Type;
-typedef struct Object Object;
-
typedef struct TypeInfo TypeInfo;
typedef struct InterfaceClass InterfaceClass;
@@ -29,282 +27,10 @@ typedef struct InterfaceInfo InterfaceInfo;
#define TYPE_OBJECT "object"
-/**
- * SECTION:object.h
- * @title:Base Object Type System
- * @short_description: interfaces for creating new types and objects
- *
- * The QEMU Object Model provides a framework for registering user creatable
- * types and instantiating objects from those types. QOM provides the following
- * features:
- *
- * - System for dynamically registering types
- * - Support for single-inheritance of types
- * - Multiple inheritance of stateless interfaces
- *
- * <example>
- * <title>Creating a minimal type</title>
- * <programlisting>
- * #include "qdev.h"
- *
- * #define TYPE_MY_DEVICE "my-device"
- *
- * // No new virtual functions: we can reuse the typedef for the
- * // superclass.
- * typedef DeviceClass MyDeviceClass;
- * typedef struct MyDevice
- * {
- * DeviceState parent;
- *
- * int reg0, reg1, reg2;
- * } MyDevice;
- *
- * static const TypeInfo my_device_info = {
- * .name = TYPE_MY_DEVICE,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDevice),
- * };
- *
- * static void my_device_register_types(void)
- * {
- * type_register_static(&my_device_info);
- * }
- *
- * type_init(my_device_register_types)
- * </programlisting>
- * </example>
- *
- * In the above example, we create a simple type that is described by #TypeInfo.
- * #TypeInfo describes information about the type including what it inherits
- * from, the instance and class size, and constructor/destructor hooks.
- *
- * Alternatively several static types could be registered using helper macro
- * DEFINE_TYPES()
- *
- * <example>
- * <programlisting>
- * static const TypeInfo device_types_info[] = {
- * {
- * .name = TYPE_MY_DEVICE_A,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDeviceA),
- * },
- * {
- * .name = TYPE_MY_DEVICE_B,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDeviceB),
- * },
- * };
- *
- * DEFINE_TYPES(device_types_info)
- * </programlisting>
- * </example>
- *
- * Every type has an #ObjectClass associated with it. #ObjectClass derivatives
- * are instantiated dynamically but there is only ever one instance for any
- * given type. The #ObjectClass typically holds a table of function pointers
- * for the virtual methods implemented by this type.
- *
- * Using object_new(), a new #Object derivative will be instantiated. You can
- * cast an #Object to a subclass (or base-class) type using
- * object_dynamic_cast(). You typically want to define macro wrappers around
- * OBJECT_CHECK() and OBJECT_CLASS_CHECK() to make it easier to convert to a
- * specific type:
- *
- * <example>
- * <title>Typecasting macros</title>
- * <programlisting>
- * #define MY_DEVICE_GET_CLASS(obj) \
- * OBJECT_GET_CLASS(MyDeviceClass, obj, TYPE_MY_DEVICE)
- * #define MY_DEVICE_CLASS(klass) \
- * OBJECT_CLASS_CHECK(MyDeviceClass, klass, TYPE_MY_DEVICE)
- * #define MY_DEVICE(obj) \
- * OBJECT_CHECK(MyDevice, obj, TYPE_MY_DEVICE)
- * </programlisting>
- * </example>
- *
- * # Class Initialization #
- *
- * Before an object is initialized, the class for the object must be
- * initialized. There is only one class object for all instance objects
- * that is created lazily.
- *
- * Classes are initialized by first initializing any parent classes (if
- * necessary). After the parent class object has initialized, it will be
- * copied into the current class object and any additional storage in the
- * class object is zero filled.
- *
- * The effect of this is that classes automatically inherit any virtual
- * function pointers that the parent class has already initialized. All
- * other fields will be zero filled.
- *
- * Once all of the parent classes have been initialized, #TypeInfo::class_init
- * is called to let the class being instantiated provide default initialize for
- * its virtual functions. Here is how the above example might be modified
- * to introduce an overridden virtual function:
- *
- * <example>
- * <title>Overriding a virtual function</title>
- * <programlisting>
- * #include "qdev.h"
- *
- * void my_device_class_init(ObjectClass *klass, void *class_data)
- * {
- * DeviceClass *dc = DEVICE_CLASS(klass);
- * dc->reset = my_device_reset;
- * }
- *
- * static const TypeInfo my_device_info = {
- * .name = TYPE_MY_DEVICE,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDevice),
- * .class_init = my_device_class_init,
- * };
- * </programlisting>
- * </example>
- *
- * Introducing new virtual methods requires a class to define its own
- * struct and to add a .class_size member to the #TypeInfo. Each method
- * will also have a wrapper function to call it easily:
- *
- * <example>
- * <title>Defining an abstract class</title>
- * <programlisting>
- * #include "qdev.h"
- *
- * typedef struct MyDeviceClass
- * {
- * DeviceClass parent;
- *
- * void (*frobnicate) (MyDevice *obj);
- * } MyDeviceClass;
- *
- * static const TypeInfo my_device_info = {
- * .name = TYPE_MY_DEVICE,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDevice),
- * .abstract = true, // or set a default in my_device_class_init
- * .class_size = sizeof(MyDeviceClass),
- * };
- *
- * void my_device_frobnicate(MyDevice *obj)
- * {
- * MyDeviceClass *klass = MY_DEVICE_GET_CLASS(obj);
- *
- * klass->frobnicate(obj);
- * }
- * </programlisting>
- * </example>
- *
- * # Interfaces #
- *
- * Interfaces allow a limited form of multiple inheritance. Instances are
- * similar to normal types except for the fact that are only defined by
- * their classes and never carry any state. You can dynamically cast an object
- * to one of its #Interface types and vice versa.
- *
- * # Methods #
- *
- * A <emphasis>method</emphasis> is a function within the namespace scope of
- * a class. It usually operates on the object instance by passing it as a
- * strongly-typed first argument.
- * If it does not operate on an object instance, it is dubbed
- * <emphasis>class method</emphasis>.
- *
- * Methods cannot be overloaded. That is, the #ObjectClass and method name
- * uniquely identity the function to be called; the signature does not vary
- * except for trailing varargs.
- *
- * Methods are always <emphasis>virtual</emphasis>. Overriding a method in
- * #TypeInfo.class_init of a subclass leads to any user of the class obtained
- * via OBJECT_GET_CLASS() accessing the overridden function.
- * The original function is not automatically invoked. It is the responsibility
- * of the overriding class to determine whether and when to invoke the method
- * being overridden.
- *
- * To invoke the method being overridden, the preferred solution is to store
- * the original value in the overriding class before overriding the method.
- * This corresponds to |[ {super,base}.method(...) ]| in Java and C#
- * respectively; this frees the overriding class from hardcoding its parent
- * class, which someone might choose to change at some point.
- *
- * <example>
- * <title>Overriding a virtual method</title>
- * <programlisting>
- * typedef struct MyState MyState;
- *
- * typedef void (*MyDoSomething)(MyState *obj);
- *
- * typedef struct MyClass {
- * ObjectClass parent_class;
- *
- * MyDoSomething do_something;
- * } MyClass;
- *
- * static void my_do_something(MyState *obj)
- * {
- * // do something
- * }
- *
- * static void my_class_init(ObjectClass *oc, void *data)
- * {
- * MyClass *mc = MY_CLASS(oc);
- *
- * mc->do_something = my_do_something;
- * }
- *
- * static const TypeInfo my_type_info = {
- * .name = TYPE_MY,
- * .parent = TYPE_OBJECT,
- * .instance_size = sizeof(MyState),
- * .class_size = sizeof(MyClass),
- * .class_init = my_class_init,
- * };
- *
- * typedef struct DerivedClass {
- * MyClass parent_class;
- *
- * MyDoSomething parent_do_something;
- * } DerivedClass;
- *
- * static void derived_do_something(MyState *obj)
- * {
- * DerivedClass *dc = DERIVED_GET_CLASS(obj);
- *
- * // do something here
- * dc->parent_do_something(obj);
- * // do something else here
- * }
- *
- * static void derived_class_init(ObjectClass *oc, void *data)
- * {
- * MyClass *mc = MY_CLASS(oc);
- * DerivedClass *dc = DERIVED_CLASS(oc);
- *
- * dc->parent_do_something = mc->do_something;
- * mc->do_something = derived_do_something;
- * }
- *
- * static const TypeInfo derived_type_info = {
- * .name = TYPE_DERIVED,
- * .parent = TYPE_MY,
- * .class_size = sizeof(DerivedClass),
- * .class_init = derived_class_init,
- * };
- * </programlisting>
- * </example>
- *
- * Alternatively, object_class_by_name() can be used to obtain the class and
- * its non-overridden methods for a specific type. This would correspond to
- * |[ MyClass::method(...) ]| in C++.
- *
- * The first example of such a QOM method was #CPUClass.reset,
- * another example is #DeviceClass.realize.
- */
-
+typedef struct ObjectProperty ObjectProperty;
/**
- * ObjectPropertyAccessor:
+ * typedef ObjectPropertyAccessor:
* @obj: the object that owns the property
* @v: the visitor that contains the property data
* @name: the name of the property
@@ -320,7 +46,7 @@ typedef void (ObjectPropertyAccessor)(Object *obj,
Error **errp);
/**
- * ObjectPropertyResolve:
+ * typedef ObjectPropertyResolve:
* @obj: the object that owns the property
* @opaque: the opaque registered with the property
* @part: the name of the property
@@ -339,7 +65,7 @@ typedef Object *(ObjectPropertyResolve)(Object *obj,
const char *part);
/**
- * ObjectPropertyRelease:
+ * typedef ObjectPropertyRelease:
* @obj: the object that owns the property
* @name: the name of the property
* @opaque: the opaque registered with the property
@@ -350,20 +76,31 @@ typedef void (ObjectPropertyRelease)(Object *obj,
const char *name,
void *opaque);
-typedef struct ObjectProperty
+/**
+ * typedef ObjectPropertyInit:
+ * @obj: the object that owns the property
+ * @prop: the property to set
+ *
+ * Called when a property is initialized.
+ */
+typedef void (ObjectPropertyInit)(Object *obj, ObjectProperty *prop);
+
+struct ObjectProperty
{
- gchar *name;
- gchar *type;
- gchar *description;
+ char *name;
+ char *type;
+ char *description;
ObjectPropertyAccessor *get;
ObjectPropertyAccessor *set;
ObjectPropertyResolve *resolve;
ObjectPropertyRelease *release;
+ ObjectPropertyInit *init;
void *opaque;
-} ObjectProperty;
+ QObject *defval;
+};
/**
- * ObjectUnparent:
+ * typedef ObjectUnparent:
* @obj: the object that is being removed from the composition tree
*
* Called when an object is being removed from the QOM composition tree.
@@ -372,7 +109,7 @@ typedef struct ObjectProperty
typedef void (ObjectUnparent)(Object *obj);
/**
- * ObjectFree:
+ * typedef ObjectFree:
* @obj: the object being freed
*
* Called when an object's last reference is removed.
@@ -382,14 +119,14 @@ typedef void (ObjectFree)(void *obj);
#define OBJECT_CLASS_CAST_CACHE 4
/**
- * ObjectClass:
+ * struct ObjectClass:
*
* The base for all classes. The only thing that #ObjectClass contains is an
* integer type handle.
*/
struct ObjectClass
{
- /*< private >*/
+ /* private: */
Type type;
GSList *interfaces;
@@ -402,7 +139,7 @@ struct ObjectClass
};
/**
- * Object:
+ * struct Object:
*
* The base for all objects. The first member of this object is a pointer to
* a #ObjectClass. Since C guarantees that the first member of a structure
@@ -415,7 +152,7 @@ struct ObjectClass
*/
struct Object
{
- /*< private >*/
+ /* private: */
ObjectClass *class;
ObjectFree *free;
GHashTable *properties;
@@ -424,12 +161,285 @@ struct Object
};
/**
- * TypeInfo:
+ * DECLARE_INSTANCE_CHECKER:
+ * @InstanceType: instance struct name
+ * @OBJ_NAME: the object name in uppercase with underscore separators
+ * @TYPENAME: type name
+ *
+ * Direct usage of this macro should be avoided, and the complete
+ * OBJECT_DECLARE_TYPE macro is recommended instead.
+ *
+ * This macro will provide the instance type cast functions for a
+ * QOM type.
+ */
+#define DECLARE_INSTANCE_CHECKER(InstanceType, OBJ_NAME, TYPENAME) \
+ static inline G_GNUC_UNUSED InstanceType * \
+ OBJ_NAME(const void *obj) \
+ { return OBJECT_CHECK(InstanceType, obj, TYPENAME); }
+
+/**
+ * DECLARE_CLASS_CHECKERS:
+ * @ClassType: class struct name
+ * @OBJ_NAME: the object name in uppercase with underscore separators
+ * @TYPENAME: type name
+ *
+ * Direct usage of this macro should be avoided, and the complete
+ * OBJECT_DECLARE_TYPE macro is recommended instead.
+ *
+ * This macro will provide the class type cast functions for a
+ * QOM type.
+ */
+#define DECLARE_CLASS_CHECKERS(ClassType, OBJ_NAME, TYPENAME) \
+ static inline G_GNUC_UNUSED ClassType * \
+ OBJ_NAME##_GET_CLASS(const void *obj) \
+ { return OBJECT_GET_CLASS(ClassType, obj, TYPENAME); } \
+ \
+ static inline G_GNUC_UNUSED ClassType * \
+ OBJ_NAME##_CLASS(const void *klass) \
+ { return OBJECT_CLASS_CHECK(ClassType, klass, TYPENAME); }
+
+/**
+ * DECLARE_OBJ_CHECKERS:
+ * @InstanceType: instance struct name
+ * @ClassType: class struct name
+ * @OBJ_NAME: the object name in uppercase with underscore separators
+ * @TYPENAME: type name
+ *
+ * Direct usage of this macro should be avoided, and the complete
+ * OBJECT_DECLARE_TYPE macro is recommended instead.
+ *
+ * This macro will provide the three standard type cast functions for a
+ * QOM type.
+ */
+#define DECLARE_OBJ_CHECKERS(InstanceType, ClassType, OBJ_NAME, TYPENAME) \
+ DECLARE_INSTANCE_CHECKER(InstanceType, OBJ_NAME, TYPENAME) \
+ \
+ DECLARE_CLASS_CHECKERS(ClassType, OBJ_NAME, TYPENAME)
+
+/**
+ * OBJECT_DECLARE_TYPE:
+ * @InstanceType: instance struct name
+ * @ClassType: class struct name
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ *
+ * This macro is typically used in a header file, and will:
+ *
+ * - create the typedefs for the object and class structs
+ * - register the type for use with g_autoptr
+ * - provide three standard type cast functions
+ *
+ * The object struct and class struct need to be declared manually.
+ */
+#define OBJECT_DECLARE_TYPE(InstanceType, ClassType, MODULE_OBJ_NAME) \
+ typedef struct InstanceType InstanceType; \
+ typedef struct ClassType ClassType; \
+ \
+ G_DEFINE_AUTOPTR_CLEANUP_FUNC(InstanceType, object_unref) \
+ \
+ DECLARE_OBJ_CHECKERS(InstanceType, ClassType, \
+ MODULE_OBJ_NAME, TYPE_##MODULE_OBJ_NAME)
+
+/**
+ * OBJECT_DECLARE_SIMPLE_TYPE:
+ * @InstanceType: instance struct name
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ *
+ * This does the same as OBJECT_DECLARE_TYPE(), but with no class struct
+ * declared.
+ *
+ * This macro should be used unless the class struct needs to have
+ * virtual methods declared.
+ */
+#define OBJECT_DECLARE_SIMPLE_TYPE(InstanceType, MODULE_OBJ_NAME) \
+ typedef struct InstanceType InstanceType; \
+ \
+ G_DEFINE_AUTOPTR_CLEANUP_FUNC(InstanceType, object_unref) \
+ \
+ DECLARE_INSTANCE_CHECKER(InstanceType, MODULE_OBJ_NAME, TYPE_##MODULE_OBJ_NAME)
+
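
Editor's note: a header-side sketch of how the declare macros above are meant to be used; the widget type, struct and member are hypothetical:

/*
 * Header-side sketch; everything named here is invented for the example.
 */
#include "qom/object.h"

#define TYPE_MY_WIDGET "my-widget"
OBJECT_DECLARE_SIMPLE_TYPE(MyWidget, MY_WIDGET)

struct MyWidget {
    Object parent_obj;
    int frob_count;
};

/*
 * The macro has now provided "typedef struct MyWidget MyWidget", the
 * checked cast MY_WIDGET(obj), and g_autoptr(MyWidget) support via
 * object_unref().
 */
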
+
+/**
+ * DO_OBJECT_DEFINE_TYPE_EXTENDED:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ * @ABSTRACT: boolean flag to indicate whether the object can be instantiated
+ * @CLASS_SIZE: size of the type's class
+ * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces
+ *
+ * This is the base macro used to implement all the OBJECT_DEFINE_*
+ * macros. It should never be used directly in a source file.
+ */
+#define DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME, \
+ ABSTRACT, CLASS_SIZE, ...) \
+ static void \
+ module_obj_name##_finalize(Object *obj); \
+ static void \
+ module_obj_name##_class_init(ObjectClass *oc, void *data); \
+ static void \
+ module_obj_name##_init(Object *obj); \
+ \
+ static const TypeInfo module_obj_name##_info = { \
+ .parent = TYPE_##PARENT_MODULE_OBJ_NAME, \
+ .name = TYPE_##MODULE_OBJ_NAME, \
+ .instance_size = sizeof(ModuleObjName), \
+ .instance_align = __alignof__(ModuleObjName), \
+ .instance_init = module_obj_name##_init, \
+ .instance_finalize = module_obj_name##_finalize, \
+ .class_size = CLASS_SIZE, \
+ .class_init = module_obj_name##_class_init, \
+ .abstract = ABSTRACT, \
+ .interfaces = (InterfaceInfo[]) { __VA_ARGS__ } , \
+ }; \
+ \
+ static void \
+ module_obj_name##_register_types(void) \
+ { \
+ type_register_static(&module_obj_name##_info); \
+ } \
+ type_init(module_obj_name##_register_types);
+
+/**
+ * OBJECT_DEFINE_TYPE_EXTENDED:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ * @ABSTRACT: boolean flag to indicate whether the object can be instantiated
+ * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces
+ *
+ * This macro is typically used in a source file, and will:
+ *
+ * - declare prototypes for _finalize, _class_init and _init methods
+ * - declare the TypeInfo struct instance
+ * - provide the constructor to register the type
+ *
+ * After using this macro, implementations of the _finalize, _class_init,
+ * and _init methods need to be written. Any of these can be zero-line
+ * no-op implementations if no special logic is required for a given type.
+ *
+ * This macro should rarely be used, instead one of the more specialized
+ * macros is usually a better choice.
+ */
+#define OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ ABSTRACT, ...) \
+ DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ ABSTRACT, sizeof(ModuleObjName##Class), \
+ __VA_ARGS__)
+
+/**
+ * OBJECT_DEFINE_TYPE:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a specialization of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable
+ * for the common case of a non-abstract type, without any interfaces.
+ */
+#define OBJECT_DEFINE_TYPE(ModuleObjName, module_obj_name, MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME) \
+ OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ false, { NULL })
+
+/**
+ * OBJECT_DEFINE_TYPE_WITH_INTERFACES:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces
+ *
+ * This is a specialization of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable
+ * for the common case of a non-abstract type, with one or more implemented
+ * interfaces.
+ *
+ * Note when passing the list of interfaces, be sure to include the final
+ * NULL entry, e.g. { TYPE_USER_CREATABLE }, { NULL }
+ */
+#define OBJECT_DEFINE_TYPE_WITH_INTERFACES(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME, ...) \
+ OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ false, __VA_ARGS__)
+
+/**
+ * OBJECT_DEFINE_ABSTRACT_TYPE:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a specialization of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable
+ * for defining an abstract type, without any interfaces.
+ */
+#define OBJECT_DEFINE_ABSTRACT_TYPE(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME) \
+ OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ true, { NULL })
+
+/**
+ * OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a variant of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable for
+ * the case of a non-abstract type, with interfaces, and with no requirement
+ * for a class struct.
+ */
+#define OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(ModuleObjName, \
+ module_obj_name, \
+ MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME, ...) \
+ DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ false, 0, __VA_ARGS__)
+
+/**
+ * OBJECT_DEFINE_SIMPLE_TYPE:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a variant of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable for
+ * the common case of a non-abstract type, without any interfaces, and with
+ * no requirement for a class struct. If you declared your type with
+ * OBJECT_DECLARE_SIMPLE_TYPE then this is probably the right choice for
+ * defining it.
+ */
+#define OBJECT_DEFINE_SIMPLE_TYPE(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME) \
+ OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, { NULL })
+
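
Editor's note: the matching source-side sketch for the hypothetical type declared in the earlier sketch, assuming its struct definition is in scope so that the generated TypeInfo can take sizeof()/__alignof__() of it; the three callbacks may be empty no-ops:

/*
 * Source-side sketch only; MyWidget and its member are the invented
 * examples from the header sketch above.
 */
#include "qemu/osdep.h"
#include "qom/object.h"

OBJECT_DEFINE_SIMPLE_TYPE(MyWidget, my_widget, MY_WIDGET, OBJECT)

static void my_widget_init(Object *obj)
{
    MyWidget *w = MY_WIDGET(obj);

    w->frob_count = 0;
}

static void my_widget_class_init(ObjectClass *oc, void *data)
{
    /* nothing to override for a simple type */
}

static void my_widget_finalize(Object *obj)
{
    /* nothing to release */
}
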
+/**
+ * struct TypeInfo:
* @name: The name of the type.
* @parent: The name of the parent type.
* @instance_size: The size of the object (derivative of #Object). If
* @instance_size is 0, then the size of the object will be the size of the
* parent object.
+ * @instance_align: The required alignment of the object. If @instance_align
+ * is 0, then normal malloc alignment is sufficient; if non-zero, then we
+ * must use qemu_memalign for allocation.
* @instance_init: This function is called to initialize an object. The parent
* class will have already been initialized so the type is only responsible
* for initializing its own members.
@@ -467,6 +477,7 @@ struct TypeInfo
const char *parent;
size_t instance_size;
+ size_t instance_align;
void (*instance_init)(Object *obj);
void (*instance_post_init)(Object *obj);
void (*instance_finalize)(Object *obj);
@@ -546,7 +557,7 @@ struct TypeInfo
OBJECT_CLASS_CHECK(class, object_get_class(OBJECT(obj)), name)
/**
- * InterfaceInfo:
+ * struct InterfaceInfo:
* @type: The name of the interface.
*
* The information associated with an interface.
@@ -556,7 +567,7 @@ struct InterfaceInfo {
};
/**
- * InterfaceClass:
+ * struct InterfaceClass:
* @parent_class: the base class
*
* The class for all interfaces. Subclasses of this class should only add
@@ -565,7 +576,7 @@ struct InterfaceInfo {
struct InterfaceClass
{
ObjectClass parent_class;
- /*< private >*/
+ /* private: */
ObjectClass *concrete_class;
Type interface_type;
};
@@ -593,6 +604,18 @@ struct InterfaceClass
__FILE__, __LINE__, __func__))
/**
+ * object_new_with_class:
+ * @klass: The class to instantiate.
+ *
+ * This function will initialize a new object using heap allocated memory.
+ * The returned object has a reference count of 1, and will be freed when
+ * the last reference is dropped.
+ *
+ * Returns: The newly allocated and instantiated object.
+ */
+Object *object_new_with_class(ObjectClass *klass);
+
+/**
* object_new:
* @typename: The name of the type of the object to instantiate.
*
@@ -625,28 +648,25 @@ Object *object_new(const char *typename);
* object will be marked complete once all the properties have been
* processed.
*
- * <example>
- * <title>Creating an object with properties</title>
- * <programlisting>
- * Error *err = NULL;
- * Object *obj;
- *
- * obj = object_new_with_props(TYPE_MEMORY_BACKEND_FILE,
- * object_get_objects_root(),
- * "hostmem0",
- * &err,
- * "share", "yes",
- * "mem-path", "/dev/shm/somefile",
- * "prealloc", "yes",
- * "size", "1048576",
- * NULL);
- *
- * if (!obj) {
- * g_printerr("Cannot create memory backend: %s\n",
- * error_get_pretty(err));
- * }
- * </programlisting>
- * </example>
+ * .. code-block:: c
+ * :caption: Creating an object with properties
+ *
+ * Error *err = NULL;
+ * Object *obj;
+ *
+ * obj = object_new_with_props(TYPE_MEMORY_BACKEND_FILE,
+ * object_get_objects_root(),
+ * "hostmem0",
+ * &err,
+ * "share", "yes",
+ * "mem-path", "/dev/shm/somefile",
+ * "prealloc", "yes",
+ * "size", "1048576",
+ * NULL);
+ *
+ * if (!obj) {
+ * error_reportf_err(err, "Cannot create memory backend: ");
+ * }
*
* The returned object will have one stable reference maintained
* for as long as it is present in the object hierarchy.
@@ -657,7 +677,7 @@ Object *object_new_with_props(const char *typename,
Object *parent,
const char *id,
Error **errp,
- ...) QEMU_SENTINEL;
+ ...) G_GNUC_NULL_TERMINATED;
/**
* object_new_with_propv:
@@ -675,8 +695,13 @@ Object *object_new_with_propv(const char *typename,
Error **errp,
va_list vargs);
-void object_apply_global_props(Object *obj, const GPtrArray *props,
+bool object_apply_global_props(Object *obj, const GPtrArray *props,
Error **errp);
+void object_set_machine_compat_props(GPtrArray *compat_props);
+void object_set_accelerator_compat_props(GPtrArray *compat_props);
+void object_register_sugar_prop(const char *driver, const char *prop,
+ const char *value, bool optional);
+void object_apply_compat_props(Object *obj);
/**
* object_set_props:
@@ -691,35 +716,28 @@ void object_apply_global_props(Object *obj, const GPtrArray *props,
* strings. The propname of %NULL indicates the end of the property
* list.
*
- * <example>
- * <title>Update an object's properties</title>
- * <programlisting>
- * Error *err = NULL;
- * Object *obj = ...get / create object...;
- *
- * obj = object_set_props(obj,
- * &err,
- * "share", "yes",
- * "mem-path", "/dev/shm/somefile",
- * "prealloc", "yes",
- * "size", "1048576",
- * NULL);
- *
- * if (!obj) {
- * g_printerr("Cannot set properties: %s\n",
- * error_get_pretty(err));
- * }
- * </programlisting>
- * </example>
+ * .. code-block:: c
+ * :caption: Update an object's properties
+ *
+ * Error *err = NULL;
+ * Object *obj = ...get / create object...;
+ *
+ * if (!object_set_props(obj,
+ * &err,
+ * "share", "yes",
+ * "mem-path", "/dev/shm/somefile",
+ * "prealloc", "yes",
+ * "size", "1048576",
+ * NULL)) {
+ * error_reportf_err(err, "Cannot set properties: ");
+ * }
*
* The returned object will have one stable reference maintained
* for as long as it is present in the object hierarchy.
*
- * Returns: -1 on error, 0 on success
+ * Returns: %true on success, %false on error.
*/
-int object_set_props(Object *obj,
- Error **errp,
- ...) QEMU_SENTINEL;
+bool object_set_props(Object *obj, Error **errp, ...) G_GNUC_NULL_TERMINATED;
/**
* object_set_propv:
@@ -729,11 +747,9 @@ int object_set_props(Object *obj,
*
* See object_set_props() for documentation.
*
- * Returns: -1 on error, 0 on success
+ * Returns: %true on success, %false on error.
*/
-int object_set_propv(Object *obj,
- Error **errp,
- va_list vargs);
+bool object_set_propv(Object *obj, Error **errp, va_list vargs);
/**
* object_initialize:
@@ -748,7 +764,7 @@ int object_set_propv(Object *obj,
void object_initialize(void *obj, size_t size, const char *typename);
/**
- * object_initialize_child:
+ * object_initialize_child_with_props:
* @parentobj: The parent object to add a property to
* @propname: The name of the property
* @childobj: A pointer to the memory to be used for the object.
@@ -767,13 +783,16 @@ void object_initialize(void *obj, size_t size, const char *typename);
* strings. The propname of %NULL indicates the end of the property list.
* If the object implements the user creatable interface, the object will
* be marked complete once all the properties have been processed.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_initialize_child(Object *parentobj, const char *propname,
+bool object_initialize_child_with_props(Object *parentobj,
+ const char *propname,
void *childobj, size_t size, const char *type,
- Error **errp, ...) QEMU_SENTINEL;
+ Error **errp, ...) G_GNUC_NULL_TERMINATED;
/**
- * object_initialize_childv:
+ * object_initialize_child_with_propsv:
* @parentobj: The parent object to add a property to
* @propname: The name of the property
* @childobj: A pointer to the memory to be used for the object.
@@ -783,12 +802,36 @@ void object_initialize_child(Object *parentobj, const char *propname,
* @vargs: list of property names and values
*
* See object_initialize_child() for documentation.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_initialize_childv(Object *parentobj, const char *propname,
+bool object_initialize_child_with_propsv(Object *parentobj,
+ const char *propname,
void *childobj, size_t size, const char *type,
Error **errp, va_list vargs);
/**
+ * object_initialize_child:
+ * @parent: The parent object to add a property to
+ * @propname: The name of the property
+ * @child: A precisely typed pointer to the memory to be used for the
+ * object.
+ * @type: The name of the type of the object to instantiate.
+ *
+ * This is like::
+ *
+ * object_initialize_child_with_props(parent, propname,
+ * child, sizeof(*child), type,
+ * &error_abort, NULL)
+ */
+#define object_initialize_child(parent, propname, child, type) \
+ object_initialize_child_internal((parent), (propname), \
+ (child), sizeof(*(child)), (type))
+void object_initialize_child_internal(Object *parent, const char *propname,
+ void *child, size_t size,
+ const char *type);
+
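
Editor's note: a sketch of the new four-argument object_initialize_child() inside an instance_init hook; the board type is hypothetical and embeds the MyWidget from the sketches above:

/*
 * Sketch only; MyBoardState is invented for the example.
 */
typedef struct MyBoardState {
    Object parent_obj;
    MyWidget widget;            /* embedded child object */
} MyBoardState;

static void my_board_instance_init(Object *obj)
{
    MyBoardState *s = (MyBoardState *)obj;

    /* Size is taken from sizeof(*(&s->widget)); failures abort. */
    object_initialize_child(obj, "widget", &s->widget, TYPE_MY_WIDGET);
}
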
+/**
* object_dynamic_cast:
* @obj: The object to cast.
* @typename: The @typename to cast to.
@@ -802,6 +845,11 @@ Object *object_dynamic_cast(Object *obj, const char *typename);
/**
* object_dynamic_cast_assert:
+ * @obj: The object to cast.
+ * @typename: The @typename to cast to.
+ * @file: Source code file where function was called
+ * @line: Source code line where function was called
+ * @func: Name of function where this function was called
*
* See object_dynamic_cast() for a description of the parameters of this
* function. The only difference in behavior is that this function asserts
@@ -875,15 +923,41 @@ static void do_qemu_init_ ## type_array(void) \
type_init(do_qemu_init_ ## type_array)
/**
+ * type_print_class_properties:
+ * @type: a QOM class name
+ *
+ * Print the object's class properties to stdout or the monitor.
+ * Return whether an object was found.
+ */
+bool type_print_class_properties(const char *type);
+
+/**
+ * object_set_properties_from_keyval:
+ * @obj: a QOM object
+ * @qdict: a dictionary with the properties to be set
+ * @from_json: true if leaf values of @qdict are typed, false if they
+ * are strings
+ * @errp: pointer to error object
+ *
+ * For each key in the dictionary, parse the value string if needed,
+ * then set the corresponding property in @obj.
+ */
+void object_set_properties_from_keyval(Object *obj, const QDict *qdict,
+ bool from_json, Error **errp);
+
+/**
* object_class_dynamic_cast_assert:
* @klass: The #ObjectClass to attempt to cast.
* @typename: The QOM typename of the class to cast to.
+ * @file: Source code file where function was called
+ * @line: Source code line where function was called
+ * @func: Name of function where this function was called
*
* See object_class_dynamic_cast() for a description of the parameters
* of this function. The only difference in behavior is that this function
* asserts instead of returning #NULL on failure if QOM cast debugging is
* enabled. This function is not meant to be called directly, but only through
- * the wrapper macros OBJECT_CLASS_CHECK and INTERFACE_CHECK.
+ * the wrapper macro OBJECT_CLASS_CHECK.
*/
ObjectClass *object_class_dynamic_cast_assert(ObjectClass *klass,
const char *typename,
@@ -939,6 +1013,18 @@ bool object_class_is_abstract(ObjectClass *klass);
*/
ObjectClass *object_class_by_name(const char *typename);
+/**
+ * module_object_class_by_name:
+ * @typename: The QOM typename to obtain the class for.
+ *
+ * For objects which might be provided by a module. Behaves like
+ * object_class_by_name(), but additionally tries to load the module
+ * that provides the class in case it is not yet available.
+ *
+ * Returns: The class for @typename or %NULL if not found.
+ */
+ObjectClass *module_object_class_by_name(const char *typename);
+
void object_class_foreach(void (*fn)(ObjectClass *klass, void *opaque),
const char *implements_type, bool include_abstract,
void *opaque);
@@ -970,8 +1056,9 @@ GSList *object_class_get_list_sorted(const char *implements_type,
*
* Increase the reference count of a object. A object cannot be freed as long
* as its reference count is greater than zero.
+ * Returns: @obj
*/
-void object_ref(Object *obj);
+Object *object_ref(void *obj);
/**
* object_unref:
@@ -980,10 +1067,10 @@ void object_ref(Object *obj);
* Decrease the reference count of a object. A object cannot be freed as long
* as its reference count is greater than zero.
*/
-void object_unref(Object *obj);
+void object_unref(void *obj);
/**
- * object_property_add:
+ * object_property_try_add:
* @obj: the object to add a property to
* @name: the name of the property. This can contain any character except for
* a forward slash. In general, you should use hyphens '-' instead of
@@ -1000,39 +1087,150 @@ void object_unref(Object *obj);
* meant to allow a property to free its opaque upon object
* destruction. This may be NULL.
* @opaque: an opaque pointer to pass to the callbacks for the property
- * @errp: returns an error if this function fails
+ * @errp: pointer to error object
*
* Returns: The #ObjectProperty; this can be used to set the @resolve
* callback for child and link properties.
*/
+ObjectProperty *object_property_try_add(Object *obj, const char *name,
+ const char *type,
+ ObjectPropertyAccessor *get,
+ ObjectPropertyAccessor *set,
+ ObjectPropertyRelease *release,
+ void *opaque, Error **errp);
+
+/**
+ * object_property_add:
+ * Same as object_property_try_add() with @errp hardcoded to
+ * &error_abort.
+ *
+ * @obj: the object to add a property to
+ * @name: the name of the property. This can contain any character except for
+ * a forward slash. In general, you should use hyphens '-' instead of
+ * underscores '_' when naming properties.
+ * @type: the type name of the property. This namespace is pretty loosely
+ * defined. Sub namespaces are constructed by using a prefix and then
+ * angle brackets. For instance, the type 'virtio-net-pci' in the
+ * 'link' namespace would be 'link<virtio-net-pci>'.
+ * @get: The getter to be called to read a property. If this is NULL, then
+ * the property cannot be read.
+ * @set: the setter to be called to write a property. If this is NULL,
+ * then the property cannot be written.
+ * @release: called when the property is removed from the object. This is
+ * meant to allow a property to free its opaque upon object
+ * destruction. This may be NULL.
+ * @opaque: an opaque pointer to pass to the callbacks for the property
+ */
ObjectProperty *object_property_add(Object *obj, const char *name,
const char *type,
ObjectPropertyAccessor *get,
ObjectPropertyAccessor *set,
ObjectPropertyRelease *release,
- void *opaque, Error **errp);
+ void *opaque);
-void object_property_del(Object *obj, const char *name, Error **errp);
+void object_property_del(Object *obj, const char *name);
ObjectProperty *object_class_property_add(ObjectClass *klass, const char *name,
const char *type,
ObjectPropertyAccessor *get,
ObjectPropertyAccessor *set,
ObjectPropertyRelease *release,
- void *opaque, Error **errp);
+ void *opaque);
+
+/**
+ * object_property_set_default_bool:
+ * @prop: the property to set
+ * @value: the value to be written to the property
+ *
+ * Set the property default value.
+ */
+void object_property_set_default_bool(ObjectProperty *prop, bool value);
+
+/**
+ * object_property_set_default_str:
+ * @prop: the property to set
+ * @value: the value to be written to the property
+ *
+ * Set the property default value.
+ */
+void object_property_set_default_str(ObjectProperty *prop, const char *value);
+
+/**
+ * object_property_set_default_list:
+ * @prop: the property to set
+ *
+ * Set the property default value to be an empty list.
+ */
+void object_property_set_default_list(ObjectProperty *prop);
+
+/**
+ * object_property_set_default_int:
+ * @prop: the property to set
+ * @value: the value to be written to the property
+ *
+ * Set the property default value.
+ */
+void object_property_set_default_int(ObjectProperty *prop, int64_t value);
+
+/**
+ * object_property_set_default_uint:
+ * @prop: the property to set
+ * @value: the value to be written to the property
+ *
+ * Set the property default value.
+ */
+void object_property_set_default_uint(ObjectProperty *prop, uint64_t value);
/**
* object_property_find:
* @obj: the object
* @name: the name of the property
+ *
+ * Look up a property for an object.
+ *
+ * Return its #ObjectProperty if found, or NULL.
+ */
+ObjectProperty *object_property_find(Object *obj, const char *name);
+
+/**
+ * object_property_find_err:
+ * @obj: the object
+ * @name: the name of the property
* @errp: returns an error if this function fails
*
- * Look up a property for an object and return its #ObjectProperty if found.
+ * Look up a property for an object.
+ *
+ * Return its #ObjectProperty if found, or NULL.
*/
-ObjectProperty *object_property_find(Object *obj, const char *name,
- Error **errp);
-ObjectProperty *object_class_property_find(ObjectClass *klass, const char *name,
- Error **errp);
+ObjectProperty *object_property_find_err(Object *obj,
+ const char *name,
+ Error **errp);
+
+/**
+ * object_class_property_find:
+ * @klass: the object class
+ * @name: the name of the property
+ *
+ * Look up a property for an object class.
+ *
+ * Return its #ObjectProperty if found, or NULL.
+ */
+ObjectProperty *object_class_property_find(ObjectClass *klass,
+ const char *name);
+
+/**
+ * object_class_property_find_err:
+ * @klass: the object class
+ * @name: the name of the property
+ * @errp: returns an error if this function fails
+ *
+ * Look up a property for an object class.
+ *
+ * Return its #ObjectProperty if found, or NULL.
+ */
+ObjectProperty *object_class_property_find_err(ObjectClass *klass,
+ const char *name,
+ Error **errp);
typedef struct ObjectPropertyIterator {
ObjectClass *nextclass;
@@ -1041,6 +1239,7 @@ typedef struct ObjectPropertyIterator {
/**
* object_property_iter_init:
+ * @iter: the iterator instance
* @obj: the object
*
* Initializes an iterator for traversing all properties
@@ -1051,24 +1250,23 @@ typedef struct ObjectPropertyIterator {
*
* Typical usage pattern would be
*
- * <example>
- * <title>Using object property iterators</title>
- * <programlisting>
- * ObjectProperty *prop;
- * ObjectPropertyIterator iter;
+ * .. code-block:: c
+ * :caption: Using object property iterators
*
- * object_property_iter_init(&iter, obj);
- * while ((prop = object_property_iter_next(&iter))) {
- * ... do something with prop ...
- * }
- * </programlisting>
- * </example>
+ * ObjectProperty *prop;
+ * ObjectPropertyIterator iter;
+ *
+ * object_property_iter_init(&iter, obj);
+ * while ((prop = object_property_iter_next(&iter))) {
+ * ... do something with prop ...
+ * }
*/
void object_property_iter_init(ObjectPropertyIterator *iter,
Object *obj);
/**
* object_class_property_iter_init:
+ * @iter: the iterator instance
* @klass: the class
*
* Initializes an iterator for traversing all properties
@@ -1102,26 +1300,31 @@ void object_unparent(Object *obj);
/**
* object_property_get:
* @obj: the object
+ * @name: the name of the property
* @v: the visitor that will receive the property value. This should be an
* Output visitor and the data will be written with @name as the name.
- * @name: the name of the property
* @errp: returns an error if this function fails
*
* Reads a property from a object.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_get(Object *obj, Visitor *v, const char *name,
+bool object_property_get(Object *obj, const char *name, Visitor *v,
Error **errp);
/**
* object_property_set_str:
- * @value: the value to be written to the property
+ * @obj: the object
* @name: the name of the property
+ * @value: the value to be written to the property
* @errp: returns an error if this function fails
*
* Writes a string value to a property.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_set_str(Object *obj, const char *value,
- const char *name, Error **errp);
+bool object_property_set_str(Object *obj, const char *name,
+ const char *value, Error **errp);
/**
* object_property_get_str:
@@ -1138,19 +1341,21 @@ char *object_property_get_str(Object *obj, const char *name,
/**
* object_property_set_link:
- * @value: the value to be written to the property
+ * @obj: the object
* @name: the name of the property
+ * @value: the value to be written to the property
* @errp: returns an error if this function fails
*
* Writes an object's canonical path to a property.
*
* If the link property was created with
- * <code>OBJ_PROP_LINK_STRONG</code> bit, the old target object is
+ * %OBJ_PROP_LINK_STRONG bit, the old target object is
* unreferenced, and a reference is added to the new target object.
*
+ * Returns: %true on success, %false on failure.
*/
-void object_property_set_link(Object *obj, Object *value,
- const char *name, Error **errp);
+bool object_property_set_link(Object *obj, const char *name,
+ Object *value, Error **errp);
/**
* object_property_get_link:
@@ -1167,14 +1372,17 @@ Object *object_property_get_link(Object *obj, const char *name,
/**
* object_property_set_bool:
- * @value: the value to be written to the property
+ * @obj: the object
* @name: the name of the property
+ * @value: the value to be written to the property
* @errp: returns an error if this function fails
*
* Writes a bool value to a property.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_set_bool(Object *obj, bool value,
- const char *name, Error **errp);
+bool object_property_set_bool(Object *obj, const char *name,
+ bool value, Error **errp);
/**
* object_property_get_bool:
@@ -1182,7 +1390,7 @@ void object_property_set_bool(Object *obj, bool value,
* @name: the name of the property
* @errp: returns an error if this function fails
*
- * Returns: the value of the property, converted to a boolean, or NULL if
+ * Returns: the value of the property, converted to a boolean, or false if
* an error occurs (including when the property value is not a bool).
*/
bool object_property_get_bool(Object *obj, const char *name,
@@ -1190,14 +1398,17 @@ bool object_property_get_bool(Object *obj, const char *name,
/**
* object_property_set_int:
- * @value: the value to be written to the property
+ * @obj: the object
* @name: the name of the property
+ * @value: the value to be written to the property
* @errp: returns an error if this function fails
*
* Writes an integer value to a property.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_set_int(Object *obj, int64_t value,
- const char *name, Error **errp);
+bool object_property_set_int(Object *obj, const char *name,
+ int64_t value, Error **errp);
/**
* object_property_get_int:
@@ -1205,7 +1416,7 @@ void object_property_set_int(Object *obj, int64_t value,
* @name: the name of the property
* @errp: returns an error if this function fails
*
- * Returns: the value of the property, converted to an integer, or negative if
+ * Returns: the value of the property, converted to an integer, or -1 if
* an error occurs (including when the property value is not an integer).
*/
int64_t object_property_get_int(Object *obj, const char *name,
@@ -1213,14 +1424,17 @@ int64_t object_property_get_int(Object *obj, const char *name,
/**
* object_property_set_uint:
- * @value: the value to be written to the property
+ * @obj: the object
* @name: the name of the property
+ * @value: the value to be written to the property
* @errp: returns an error if this function fails
*
* Writes an unsigned integer value to a property.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_set_uint(Object *obj, uint64_t value,
- const char *name, Error **errp);
+bool object_property_set_uint(Object *obj, const char *name,
+ uint64_t value, Error **errp);
/**
* object_property_get_uint:
@@ -1241,52 +1455,42 @@ uint64_t object_property_get_uint(Object *obj, const char *name,
* @typename: the name of the enum data type
* @errp: returns an error if this function fails
*
- * Returns: the value of the property, converted to an integer, or
- * undefined if an error occurs (including when the property value is not
- * an enum).
+ * Returns: the value of the property, converted to an integer (which
+ * can't be negative), or -1 on error (including when the property
+ * value is not an enum).
*/
int object_property_get_enum(Object *obj, const char *name,
const char *typename, Error **errp);
/**
- * object_property_get_uint16List:
- * @obj: the object
- * @name: the name of the property
- * @list: the returned int list
- * @errp: returns an error if this function fails
- *
- * Returns: the value of the property, converted to integers, or
- * undefined if an error occurs (including when the property value is not
- * an list of integers).
- */
-void object_property_get_uint16List(Object *obj, const char *name,
- uint16List **list, Error **errp);
-
-/**
* object_property_set:
* @obj: the object
+ * @name: the name of the property
* @v: the visitor that will be used to write the property value. This should
* be an Input visitor and the data will be first read with @name as the
* name and then written as the property value.
- * @name: the name of the property
* @errp: returns an error if this function fails
*
 * Writes a property to an object.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_set(Object *obj, Visitor *v, const char *name,
+bool object_property_set(Object *obj, const char *name, Visitor *v,
Error **errp);
/**
* object_property_parse:
* @obj: the object
- * @string: the string that will be used to parse the property value.
* @name: the name of the property
+ * @string: the string that will be used to parse the property value.
* @errp: returns an error if this function fails
*
* Parses a string and writes the result into a property of an object.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_parse(Object *obj, const char *string,
- const char *name, Error **errp);
+bool object_property_parse(Object *obj, const char *name,
+ const char *string, Error **errp);
/**
* object_property_print:
@@ -1344,20 +1548,23 @@ Object *object_get_internal_root(void);
/**
* object_get_canonical_path_component:
+ * @obj: the object
*
* Returns: The final component in the object's canonical path. The canonical
* path is the path within the composition tree starting from the root.
* %NULL if the object doesn't have a parent (and thus a canonical path).
*/
-gchar *object_get_canonical_path_component(Object *obj);
+const char *object_get_canonical_path_component(const Object *obj);
/**
* object_get_canonical_path:
+ * @obj: the object
*
- * Returns: The canonical path for a object. This is the path within the
- * composition tree starting from the root.
+ * Returns: The canonical path for an object, newly allocated. This is
+ * the path within the composition tree starting from the root. Use
+ * g_free() to free it.
*/
-gchar *object_get_canonical_path(Object *obj);
+char *object_get_canonical_path(const Object *obj);
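
A short sketch of the adjusted ownership rules: the per-component name is borrowed from the object, while the full path is a fresh allocation, released here with g_autofree. The object `obj` is assumed to sit somewhere in the composition tree.

#include "qemu/osdep.h"
#include "qom/object.h"

static void report_location(Object *obj)
{
    const char *name = object_get_canonical_path_component(obj); /* not freed */
    g_autofree char *path = object_get_canonical_path(obj);      /* freed automatically */

    if (path) {
        printf("%s lives at %s\n", name, path);
    }
}
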
/**
* object_resolve_path:
@@ -1406,6 +1613,31 @@ Object *object_resolve_path_type(const char *path, const char *typename,
bool *ambiguous);
/**
+ * object_resolve_type_unambiguous:
+ * @typename: the type to look for
+ * @errp: pointer to error object
+ *
+ * Return the only object in the QOM tree of type @typename.
+ * If no match or more than one match is found, an error is
+ * returned.
+ *
+ * Returns: The matched object or NULL on path lookup failure.
+ */
+Object *object_resolve_type_unambiguous(const char *typename, Error **errp);
+
+/**
+ * object_resolve_path_at:
+ * @parent: the object in which to resolve the path
+ * @path: the path to resolve
+ *
+ * This is like object_resolve_path(), except paths not starting with
+ * a slash are relative to @parent.
+ *
+ * Returns: The resolved object or NULL on path lookup failure.
+ */
+Object *object_resolve_path_at(Object *parent, const char *path);
+
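
A brief sketch contrasting the two new lookup helpers; the type name "my-singleton-type" and the relative path "child0" are made-up placeholders.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"

static Object *find_things(Object *parent)
{
    /* the only object of the given type anywhere in the tree */
    Object *one = object_resolve_type_unambiguous("my-singleton-type",
                                                  &error_abort);

    /* a path without a leading slash is resolved relative to @parent */
    Object *child = object_resolve_path_at(parent, "child0");

    return child ? child : one;
}
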
+/**
* object_resolve_path_component:
* @parent: the object in which to resolve the path
* @part: the component to resolve.
@@ -1415,14 +1647,14 @@ Object *object_resolve_path_type(const char *path, const char *typename,
*
* Returns: The resolved object or NULL on path lookup failure.
*/
-Object *object_resolve_path_component(Object *parent, const gchar *part);
+Object *object_resolve_path_component(Object *parent, const char *part);
/**
- * object_property_add_child:
+ * object_property_try_add_child:
* @obj: the object to add a property to
* @name: the name of the property
* @child: the child object
- * @errp: if an error occurs, a pointer to an area to store the error
+ * @errp: pointer to error object
*
* Child properties form the composition tree. All objects need to be a child
* of another object. Objects can only be a child of one object.
@@ -1433,34 +1665,55 @@ Object *object_resolve_path_component(Object *parent, const gchar *part);
* The value of a child property as a C string will be the child object's
* canonical path. It can be retrieved using object_property_get_str().
* The child object itself can be retrieved using object_property_get_link().
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
+ */
+ObjectProperty *object_property_try_add_child(Object *obj, const char *name,
+ Object *child, Error **errp);
+
+/**
+ * object_property_add_child:
+ * @obj: the object to add a property to
+ * @name: the name of the property
+ * @child: the child object
+ *
+ * Same as object_property_try_add_child() with @errp hardcoded to
+ * &error_abort
*/
-void object_property_add_child(Object *obj, const char *name,
- Object *child, Error **errp);
+ObjectProperty *object_property_add_child(Object *obj, const char *name,
+ Object *child);
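
A sketch of attaching a freshly created object to the composition tree with the errp-less variant; the property name "charlie" is arbitrary and the "container" type is used only as a convenient placeholder.

#include "qemu/osdep.h"
#include "qom/object.h"

static void attach_container(Object *parent)
{
    Object *child = object_new("container");

    /* aborts on failure; use object_property_try_add_child() to report
     * the error to a caller instead */
    object_property_add_child(parent, "charlie", child);

    /* the child property now holds its own reference */
    object_unref(child);
}
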
typedef enum {
/* Unref the link pointer when the property is deleted */
OBJ_PROP_LINK_STRONG = 0x1,
+
+ /* private */
+ OBJ_PROP_LINK_DIRECT = 0x2,
+ OBJ_PROP_LINK_CLASS = 0x4,
} ObjectPropertyLinkFlags;
/**
* object_property_allow_set_link:
+ * @obj: the object to add a property to
+ * @name: the name of the property
+ * @child: the child object
+ * @errp: pointer to error object
*
* The default implementation of the object_property_add_link() check()
* callback function. It allows the link property to be set and never returns
* an error.
*/
-void object_property_allow_set_link(const Object *, const char *,
- Object *, Error **);
+void object_property_allow_set_link(const Object *obj, const char *name,
+ Object *child, Error **errp);
/**
* object_property_add_link:
* @obj: the object to add a property to
* @name: the name of the property
* @type: the qobj type of the link
- * @child: a pointer to where the link object reference is stored
+ * @targetp: a pointer to where the link object reference is stored
* @check: callback to veto setting or NULL if the property is read-only
* @flags: additional options for the link
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Links establish relationships between objects. Links are unidirectional
* although two links can be combined to form a bidirectional relationship
@@ -1468,25 +1721,33 @@ void object_property_allow_set_link(const Object *, const char *,
*
* Links form the graph in the object model.
*
- * The <code>@check()</code> callback is invoked when
+ * The @check() callback is invoked when
* object_property_set_link() is called and can raise an error to prevent the
- * link being set. If <code>@check</code> is NULL, the property is read-only
+ * link being set. If @check is NULL, the property is read-only
* and cannot be set.
*
 * Ownership of the pointer that @targetp points to is transferred to the
- * link property. The reference count for <code>*@child</code> is
+ * link property. The reference count for *@targetp is
* managed by the property from after the function returns till the
* property is deleted with object_property_del(). If the
- * <code>@flags</code> <code>OBJ_PROP_LINK_STRONG</code> bit is set,
+ * @flags %OBJ_PROP_LINK_STRONG bit is set,
* the reference count is decremented when the property is deleted or
* modified.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_link(Object *obj, const char *name,
- const char *type, Object **child,
+ObjectProperty *object_property_add_link(Object *obj, const char *name,
+ const char *type, Object **targetp,
void (*check)(const Object *obj, const char *name,
Object *val, Error **errp),
- ObjectPropertyLinkFlags flags,
- Error **errp);
+ ObjectPropertyLinkFlags flags);
+
+ObjectProperty *object_class_property_add_link(ObjectClass *oc,
+ const char *name,
+ const char *type, ptrdiff_t offset,
+ void (*check)(const Object *obj, const char *name,
+ Object *val, Error **errp),
+ ObjectPropertyLinkFlags flags);
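
A sketch of the errp-less link creation; `MyDevice`, its `backend` field and the "some-backend" type name are hypothetical.

#include "qemu/osdep.h"
#include "qom/object.h"

typedef struct MyDevice {
    Object parent_obj;
    Object *backend;        /* filled in when the link property is set */
} MyDevice;

static void my_device_init(Object *obj)
{
    MyDevice *s = (MyDevice *)obj;

    /* writable link; OBJ_PROP_LINK_STRONG makes the property manage the
     * reference count of the target stored in s->backend */
    object_property_add_link(obj, "backend", "some-backend",
                             &s->backend,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG);
}
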
/**
* object_property_add_str:
@@ -1495,21 +1756,21 @@ void object_property_add_link(Object *obj, const char *name,
* @get: the getter or NULL if the property is write-only. This function must
* return a string to be freed by g_free().
* @set: the setter or NULL if the property is read-only
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Add a string property using getters/setters. This function will add a
* property of type 'string'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_str(Object *obj, const char *name,
+ObjectProperty *object_property_add_str(Object *obj, const char *name,
char *(*get)(Object *, Error **),
- void (*set)(Object *, const char *, Error **),
- Error **errp);
+ void (*set)(Object *, const char *, Error **));
-void object_class_property_add_str(ObjectClass *klass, const char *name,
+ObjectProperty *object_class_property_add_str(ObjectClass *klass,
+ const char *name,
char *(*get)(Object *, Error **),
void (*set)(Object *, const char *,
- Error **),
- Error **errp);
+ Error **));
/**
* object_property_add_bool:
@@ -1517,124 +1778,158 @@ void object_class_property_add_str(ObjectClass *klass, const char *name,
* @name: the name of the property
* @get: the getter or NULL if the property is write-only.
* @set: the setter or NULL if the property is read-only
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Add a bool property using getters/setters. This function will add a
* property of type 'bool'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_bool(Object *obj, const char *name,
+ObjectProperty *object_property_add_bool(Object *obj, const char *name,
bool (*get)(Object *, Error **),
- void (*set)(Object *, bool, Error **),
- Error **errp);
+ void (*set)(Object *, bool, Error **));
-void object_class_property_add_bool(ObjectClass *klass, const char *name,
+ObjectProperty *object_class_property_add_bool(ObjectClass *klass,
+ const char *name,
bool (*get)(Object *, Error **),
- void (*set)(Object *, bool, Error **),
- Error **errp);
+ void (*set)(Object *, bool, Error **));
/**
* object_property_add_enum:
* @obj: the object to add a property to
* @name: the name of the property
* @typename: the name of the enum data type
+ * @lookup: enum value name lookup table
* @get: the getter or %NULL if the property is write-only.
* @set: the setter or %NULL if the property is read-only
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Add an enum property using getters/setters. This function will add a
* property of type '@typename'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_enum(Object *obj, const char *name,
+ObjectProperty *object_property_add_enum(Object *obj, const char *name,
const char *typename,
const QEnumLookup *lookup,
int (*get)(Object *, Error **),
- void (*set)(Object *, int, Error **),
- Error **errp);
+ void (*set)(Object *, int, Error **));
-void object_class_property_add_enum(ObjectClass *klass, const char *name,
+ObjectProperty *object_class_property_add_enum(ObjectClass *klass,
+ const char *name,
const char *typename,
const QEnumLookup *lookup,
int (*get)(Object *, Error **),
- void (*set)(Object *, int, Error **),
- Error **errp);
+ void (*set)(Object *, int, Error **));
/**
* object_property_add_tm:
* @obj: the object to add a property to
* @name: the name of the property
* @get: the getter or NULL if the property is write-only.
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Add a read-only struct tm valued property using a getter function.
* This function will add a property of type 'struct tm'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_tm(Object *obj, const char *name,
- void (*get)(Object *, struct tm *, Error **),
- Error **errp);
+ObjectProperty *object_property_add_tm(Object *obj, const char *name,
+ void (*get)(Object *, struct tm *, Error **));
-void object_class_property_add_tm(ObjectClass *klass, const char *name,
- void (*get)(Object *, struct tm *, Error **),
- Error **errp);
+ObjectProperty *object_class_property_add_tm(ObjectClass *klass,
+ const char *name,
+ void (*get)(Object *, struct tm *, Error **));
+
+typedef enum {
+ /* Automatically add a getter to the property */
+ OBJ_PROP_FLAG_READ = 1 << 0,
+ /* Automatically add a setter to the property */
+ OBJ_PROP_FLAG_WRITE = 1 << 1,
+ /* Automatically add a getter and a setter to the property */
+ OBJ_PROP_FLAG_READWRITE = (OBJ_PROP_FLAG_READ | OBJ_PROP_FLAG_WRITE),
+} ObjectPropertyFlags;
/**
* object_property_add_uint8_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
- * @errp: if an error occurs, a pointer to an area to store the error
+ * @flags: bitwise-or'd ObjectPropertyFlags
*
* Add an integer property in memory. This function will add a
* property of type 'uint8'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_uint8_ptr(Object *obj, const char *name,
- const uint8_t *v, Error **errp);
-void object_class_property_add_uint8_ptr(ObjectClass *klass, const char *name,
- const uint8_t *v, Error **errp);
+ObjectProperty *object_property_add_uint8_ptr(Object *obj, const char *name,
+ const uint8_t *v,
+ ObjectPropertyFlags flags);
+
+ObjectProperty *object_class_property_add_uint8_ptr(ObjectClass *klass,
+ const char *name,
+ const uint8_t *v,
+ ObjectPropertyFlags flags);
/**
* object_property_add_uint16_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
- * @errp: if an error occurs, a pointer to an area to store the error
+ * @flags: bitwise-or'd ObjectPropertyFlags
*
* Add an integer property in memory. This function will add a
* property of type 'uint16'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_uint16_ptr(Object *obj, const char *name,
- const uint16_t *v, Error **errp);
-void object_class_property_add_uint16_ptr(ObjectClass *klass, const char *name,
- const uint16_t *v, Error **errp);
+ObjectProperty *object_property_add_uint16_ptr(Object *obj, const char *name,
+ const uint16_t *v,
+ ObjectPropertyFlags flags);
+
+ObjectProperty *object_class_property_add_uint16_ptr(ObjectClass *klass,
+ const char *name,
+ const uint16_t *v,
+ ObjectPropertyFlags flags);
/**
* object_property_add_uint32_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
- * @errp: if an error occurs, a pointer to an area to store the error
+ * @flags: bitwise-or'd ObjectPropertyFlags
*
* Add an integer property in memory. This function will add a
* property of type 'uint32'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_uint32_ptr(Object *obj, const char *name,
- const uint32_t *v, Error **errp);
-void object_class_property_add_uint32_ptr(ObjectClass *klass, const char *name,
- const uint32_t *v, Error **errp);
+ObjectProperty *object_property_add_uint32_ptr(Object *obj, const char *name,
+ const uint32_t *v,
+ ObjectPropertyFlags flags);
+
+ObjectProperty *object_class_property_add_uint32_ptr(ObjectClass *klass,
+ const char *name,
+ const uint32_t *v,
+ ObjectPropertyFlags flags);
/**
* object_property_add_uint64_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
- * @errp: if an error occurs, a pointer to an area to store the error
+ * @flags: bitwise-or'd ObjectPropertyFlags
*
* Add an integer property in memory. This function will add a
* property of type 'uint64'.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_uint64_ptr(Object *obj, const char *name,
- const uint64_t *v, Error **Errp);
-void object_class_property_add_uint64_ptr(ObjectClass *klass, const char *name,
- const uint64_t *v, Error **Errp);
+ObjectProperty *object_property_add_uint64_ptr(Object *obj, const char *name,
+ const uint64_t *v,
+ ObjectPropertyFlags flags);
+
+ObjectProperty *object_class_property_add_uint64_ptr(ObjectClass *klass,
+ const char *name,
+ const uint64_t *v,
+ ObjectPropertyFlags flags);
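
A sketch of the pointer-backed properties with the new flags argument; `MyCounter` and its field are hypothetical.

#include "qemu/osdep.h"
#include "qom/object.h"

typedef struct MyCounter {
    Object parent_obj;
    uint32_t frames;
} MyCounter;

static void my_counter_init(Object *obj)
{
    MyCounter *s = (MyCounter *)obj;

    /* readable from the outside, but only the device updates the field */
    object_property_add_uint32_ptr(obj, "frames",
                                   &s->frames, OBJ_PROP_FLAG_READ);
}
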
/**
* object_property_add_alias:
@@ -1642,26 +1937,25 @@ void object_class_property_add_uint64_ptr(ObjectClass *klass, const char *name,
* @name: the name of the property
* @target_obj: the object to forward property access to
* @target_name: the name of the property on the forwarded object
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Add an alias for a property on an object. This function will add a property
* of the same type as the forwarded property.
*
- * The caller must ensure that <code>@target_obj</code> stays alive as long as
+ * The caller must ensure that @target_obj stays alive as long as
* this property exists. In the case of a child object or an alias on the same
* object this will be the case. For aliases to other objects the caller is
* responsible for taking a reference.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_alias(Object *obj, const char *name,
- Object *target_obj, const char *target_name,
- Error **errp);
+ObjectProperty *object_property_add_alias(Object *obj, const char *name,
+ Object *target_obj, const char *target_name);
/**
* object_property_add_const_link:
* @obj: the object to add a property to
* @name: the name of the property
* @target: the object to be referred by the link
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Add an unmodifiable link for a property on an object. This function will
* add a property of type link<TYPE> where TYPE is the type of @target.
@@ -1670,25 +1964,26 @@ void object_property_add_alias(Object *obj, const char *name,
* this property exists. In the case @target is a child of @obj,
* this will be the case. Otherwise, the caller is responsible for
* taking a reference.
+ *
+ * Returns: The newly added property on success, or %NULL on failure.
*/
-void object_property_add_const_link(Object *obj, const char *name,
- Object *target, Error **errp);
+ObjectProperty *object_property_add_const_link(Object *obj, const char *name,
+ Object *target);
/**
* object_property_set_description:
* @obj: the object owning the property
* @name: the name of the property
* @description: the description of the property on the object
- * @errp: if an error occurs, a pointer to an area to store the error
*
* Set an object property's description.
*
*/
void object_property_set_description(Object *obj, const char *name,
- const char *description, Error **errp);
+ const char *description);
void object_class_property_set_description(ObjectClass *klass, const char *name,
- const char *description,
- Error **errp);
+ const char *description);
/**
* object_child_foreach:
@@ -1744,4 +2039,20 @@ Object *container_get(Object *root, const char *path);
* Returns the instance_size of the given @typename.
*/
size_t object_type_get_instance_size(const char *typename);
+
+/**
+ * object_property_help:
+ * @name: the name of the property
+ * @type: the type of the property
+ * @defval: the default value
+ * @description: description of the property
+ *
+ * Returns: a user-friendly formatted string describing the property
+ * for help purposes.
+ */
+char *object_property_help(const char *name, const char *type,
+ QObject *defval, const char *description);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(Object, object_unref)
+
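
With the cleanup function registered, a temporary reference can be dropped automatically when it goes out of scope; the type and property names passed in below are placeholders.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"

static int64_t peek_default(const char *typename, const char *prop)
{
    g_autoptr(Object) tmp = object_new(typename);

    /* object_unref(tmp) runs automatically when tmp leaves scope */
    return object_property_get_int(tmp, prop, &error_abort);
}
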
#endif
diff --git a/include/qom/object_interfaces.h b/include/qom/object_interfaces.h
index 682ba1d9b0..02b11a7ef0 100644
--- a/include/qom/object_interfaces.h
+++ b/include/qom/object_interfaces.h
@@ -2,16 +2,14 @@
#define OBJECT_INTERFACES_H
#include "qom/object.h"
+#include "qapi/qapi-types-qom.h"
#include "qapi/visitor.h"
#define TYPE_USER_CREATABLE "user-creatable"
-#define USER_CREATABLE_CLASS(klass) \
- OBJECT_CLASS_CHECK(UserCreatableClass, (klass), \
- TYPE_USER_CREATABLE)
-#define USER_CREATABLE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(UserCreatableClass, (obj), \
- TYPE_USER_CREATABLE)
+typedef struct UserCreatableClass UserCreatableClass;
+DECLARE_CLASS_CHECKERS(UserCreatableClass, USER_CREATABLE,
+ TYPE_USER_CREATABLE)
#define USER_CREATABLE(obj) \
INTERFACE_CHECK(UserCreatable, (obj), \
TYPE_USER_CREATABLE)
@@ -40,14 +38,14 @@ typedef struct UserCreatable UserCreatable;
* object's type implements USER_CREATABLE interface and needs
* complete() callback to be called.
*/
-typedef struct UserCreatableClass {
+struct UserCreatableClass {
/* <private> */
InterfaceClass parent_class;
/* <public> */
void (*complete)(UserCreatable *uc, Error **errp);
bool (*can_be_deleted)(UserCreatable *uc);
-} UserCreatableClass;
+};
/**
* user_creatable_complete:
@@ -57,8 +55,10 @@ typedef struct UserCreatableClass {
 * Wrapper that calls the complete() method if one of the types it is
 * inherited from implements the USER_CREATABLE interface; otherwise the
 * call does nothing.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void user_creatable_complete(UserCreatable *uc, Error **errp);
+bool user_creatable_complete(UserCreatable *uc, Error **errp);
/**
* user_creatable_can_be_deleted:
@@ -88,49 +88,74 @@ Object *user_creatable_add_type(const char *type, const char *id,
Visitor *v, Error **errp);
/**
- * user_creatable_add_opts:
- * @opts: the object definition
+ * user_creatable_add_qapi:
+ * @options: the object definition
+ * @errp: if an error occurs, a pointer to an area to store the error
+ *
+ * Create an instance of the user creatable object according to the
+ * options passed in @options as described in the QAPI schema documentation.
+ */
+void user_creatable_add_qapi(ObjectOptions *options, Error **errp);
+
+/**
+ * user_creatable_parse_str:
+ * @str: the object definition string as passed on the command line
* @errp: if an error occurs, a pointer to an area to store the error
*
- * Create an instance of the user creatable object whose type
- * is defined in @opts by the 'qom-type' option, placing it
- * in the object composition tree with name provided by the
- * 'id' field. The remaining options in @opts are used to
- * initialize the object properties.
+ * Parses the option for the user creatable object with a keyval parser and
+ * implicit key 'qom-type', converting the result to ObjectOptions.
*
- * Returns: the newly created object or NULL on error
+ * If a help option is given, print help instead.
+ *
+ * Returns: ObjectOptions on success, NULL when an error occurred (*errp is set
+ * then) or help was printed (*errp is not set).
*/
-Object *user_creatable_add_opts(QemuOpts *opts, Error **errp);
+ObjectOptions *user_creatable_parse_str(const char *str, Error **errp);
+/**
+ * user_creatable_add_from_str:
+ * @str: the object definition string as passed on the command line
+ * @errp: if an error occurs, a pointer to an area to store the error
+ *
+ * Create an instance of the user creatable object by parsing @str
+ * with a keyval parser and implicit key 'qom-type', converting the
+ * result to ObjectOptions and calling into qmp_object_add().
+ *
+ * If a help option is given, print help instead.
+ *
+ * Returns: true when an object was successfully created, false when an error
+ * occurred (*errp is set then) or help was printed (*errp is not set).
+ */
+bool user_creatable_add_from_str(const char *str, Error **errp);
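
A sketch of the string-based entry point; the option string is only an example and assumes a memory backend type is compiled in.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object_interfaces.h"

static void create_backend(void)
{
    Error *err = NULL;

    if (!user_creatable_add_from_str("memory-backend-ram,id=mem0,size=256M",
                                     &err)) {
        if (err) {
            error_report_err(err);  /* creation failed */
        }
        /* err == NULL here means a help option was given and printed */
    }
}
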
/**
- * user_creatable_add_opts_predicate:
- * @type: the QOM type to be added
+ * user_creatable_process_cmdline:
+ * @cmdline: the object definition string as passed on the command line
*
- * A callback function to determine whether an object
- * of type @type should be created. Instances of this
- * callback should be passed to user_creatable_add_opts_foreach
+ * Create an instance of the user creatable object by parsing @cmdline
+ * with a keyval parser and implicit key 'qom-type', converting the
+ * result to ObjectOptions and calling into qmp_object_add().
+ *
+ * If a help option is given, print help instead and exit.
+ *
+ * This function is only meant to be called during command line parsing.
+ * It exits the process on failure or after printing help.
*/
-typedef bool (*user_creatable_add_opts_predicate)(const char *type);
+void user_creatable_process_cmdline(const char *cmdline);
/**
- * user_creatable_add_opts_foreach:
- * @opaque: a user_creatable_add_opts_predicate callback or NULL
+ * user_creatable_print_help:
+ * @type: the QOM type to be added
* @opts: options to create
- * @errp: unused
*
- * An iterator callback to be used in conjunction with
- * the qemu_opts_foreach() method for creating a list of
- * objects from a set of QemuOpts
+ * Prints help if requested in @type or @opts. Note that if @type is neither
+ * "help"/"?" nor a valid user creatable type, no help will be printed
+ * regardless of @opts.
*
- * The @opaque parameter can be passed a user_creatable_add_opts_predicate
- * callback to filter which types of object are created during iteration.
- * When it fails, report the error.
- *
- * Returns: 0 on success, -1 when an error was reported.
+ * Returns: true if a help option was found and help was printed, false
+ * otherwise.
*/
-int user_creatable_add_opts_foreach(void *opaque,
- QemuOpts *opts, Error **errp);
+bool user_creatable_print_help(const char *type, QemuOpts *opts);
/**
* user_creatable_del:
@@ -139,8 +164,10 @@ int user_creatable_add_opts_foreach(void *opaque,
*
* Delete an instance of the user creatable object identified
* by @id.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void user_creatable_del(const char *id, Error **errp);
+bool user_creatable_del(const char *id, Error **errp);
/**
* user_creatable_cleanup:
diff --git a/include/qom/qom-qobject.h b/include/qom/qom-qobject.h
index 77cd717e3f..73e4e0e474 100644
--- a/include/qom/qom-qobject.h
+++ b/include/qom/qom-qobject.h
@@ -13,8 +13,6 @@
#ifndef QEMU_QOM_QOBJECT_H
#define QEMU_QOM_QOBJECT_H
-#include "qom/object.h"
-
/*
* object_property_get_qobject:
* @obj: the object
@@ -30,13 +28,16 @@ struct QObject *object_property_get_qobject(Object *obj, const char *name,
/**
* object_property_set_qobject:
* @obj: the object
- * @ret: The value that will be written to the property.
* @name: the name of the property
+ * @value: The value that will be written to the property.
* @errp: returns an error if this function fails
*
 * Writes a property to an object.
+ *
+ * Returns: %true on success, %false on failure.
*/
-void object_property_set_qobject(Object *obj, struct QObject *qobj,
- const char *name, struct Error **errp);
+bool object_property_set_qobject(Object *obj,
+ const char *name, struct QObject *value,
+ struct Error **errp);
#endif
diff --git a/include/scsi/constants.h b/include/scsi/constants.h
index 0dc550732d..9b98451912 100644
--- a/include/scsi/constants.h
+++ b/include/scsi/constants.h
@@ -20,8 +20,8 @@
* the scsi code for linux.
*/
-#ifndef BLOCK_SCSI_H
-#define BLOCK_SCSI_H
+#ifndef SCSI_CONSTANTS_H
+#define SCSI_CONSTANTS_H
/*
* SCSI opcodes
@@ -218,21 +218,25 @@
#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
#define TYPE_RBC 0x0e /* Simplified Direct-Access Device */
#define TYPE_OSD 0x11 /* Object-storage Device */
+#define TYPE_ZBC 0x14 /* Host-managed Zoned SCSI Device */
#define TYPE_WLUN 0x1e /* Well known LUN */
#define TYPE_NOT_PRESENT 0x1f
#define TYPE_INACTIVE 0x20
#define TYPE_NO_LUN 0x7f
/* Mode page codes for mode sense/set */
+#define MODE_PAGE_VENDOR_SPECIFIC 0x00
#define MODE_PAGE_R_W_ERROR 0x01
#define MODE_PAGE_HD_GEOMETRY 0x04
#define MODE_PAGE_FLEXIBLE_DISK_GEOMETRY 0x05
#define MODE_PAGE_CACHING 0x08
#define MODE_PAGE_AUDIO_CTL 0x0e
+#define MODE_PAGE_CONTROL 0x0a
#define MODE_PAGE_POWER 0x1a
#define MODE_PAGE_FAULT_FAIL 0x1c
#define MODE_PAGE_TO_PROTECT 0x1d
#define MODE_PAGE_CAPABILITIES 0x2a
+#define MODE_PAGE_APPLE_VENDOR 0x30
#define MODE_PAGE_ALLS 0x3f
/* Not in Mt. Fuji, but in ATAPI 2.6 -- deprecated now in favor
* of MODE_PAGE_SENSE_POWER */
diff --git a/include/scsi/pr-manager.h b/include/scsi/pr-manager.h
index 6ad5fd1ff7..45de28d354 100644
--- a/include/scsi/pr-manager.h
+++ b/include/scsi/pr-manager.h
@@ -5,37 +5,32 @@
#include "qapi/visitor.h"
#include "qom/object_interfaces.h"
#include "block/aio.h"
-#include "qemu/coroutine.h"
#define TYPE_PR_MANAGER "pr-manager"
-#define PR_MANAGER_CLASS(klass) \
- OBJECT_CLASS_CHECK(PRManagerClass, (klass), TYPE_PR_MANAGER)
-#define PR_MANAGER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PRManagerClass, (obj), TYPE_PR_MANAGER)
-#define PR_MANAGER(obj) \
- OBJECT_CHECK(PRManager, (obj), TYPE_PR_MANAGER)
+OBJECT_DECLARE_TYPE(PRManager, PRManagerClass,
+ PR_MANAGER)
struct sg_io_hdr;
-typedef struct PRManager {
+struct PRManager {
/* <private> */
Object parent;
-} PRManager;
+};
/**
* PRManagerClass:
* @parent_class: the base class
* @run: callback invoked in thread pool context
*/
-typedef struct PRManagerClass {
+struct PRManagerClass {
/* <private> */
ObjectClass parent_class;
/* <public> */
int (*run)(PRManager *pr_mgr, int fd, struct sg_io_hdr *hdr);
bool (*is_connected)(PRManager *pr_mgr);
-} PRManagerClass;
+};
bool pr_manager_is_connected(PRManager *pr_mgr);
int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd,
diff --git a/include/scsi/utils.h b/include/scsi/utils.h
index 4b705f5e0f..d5c8efa16e 100644
--- a/include/scsi/utils.h
+++ b/include/scsi/utils.h
@@ -1,5 +1,5 @@
#ifndef SCSI_UTILS_H
-#define SCSI_UTILS_H 1
+#define SCSI_UTILS_H
#ifdef CONFIG_LINUX
#include <scsi/sg.h>
@@ -16,6 +16,22 @@ enum SCSIXferMode {
SCSI_XFER_TO_DEV, /* WRITE, MODE_SELECT, ... */
};
+enum SCSIHostStatus {
+ SCSI_HOST_OK,
+ SCSI_HOST_NO_LUN,
+ SCSI_HOST_BUSY,
+ SCSI_HOST_TIME_OUT,
+ SCSI_HOST_BAD_RESPONSE,
+ SCSI_HOST_ABORTED,
+ SCSI_HOST_ERROR = 0x07,
+ SCSI_HOST_RESET = 0x08,
+ SCSI_HOST_TRANSPORT_DISRUPTED = 0xe,
+ SCSI_HOST_TARGET_FAILURE = 0x10,
+ SCSI_HOST_RESERVATION_ERROR = 0x11,
+ SCSI_HOST_ALLOCATION_FAILURE = 0x12,
+ SCSI_HOST_MEDIUM_ERROR = 0x13,
+};
+
typedef struct SCSICommand {
uint8_t buf[SCSI_CMD_BUF_SIZE];
int len;
@@ -57,6 +73,8 @@ extern const struct SCSISense sense_code_LBA_OUT_OF_RANGE;
extern const struct SCSISense sense_code_INVALID_FIELD;
/* Illegal request, Invalid field in parameter list */
extern const struct SCSISense sense_code_INVALID_PARAM;
+/* Illegal request, Invalid value in parameter list */
+extern const struct SCSISense sense_code_INVALID_PARAM_VALUE;
/* Illegal request, Parameter list length error */
extern const struct SCSISense sense_code_INVALID_PARAM_LEN;
/* Illegal request, LUN not supported */
@@ -106,6 +124,7 @@ extern const struct SCSISense sense_code_SPACE_ALLOC_FAILED;
int scsi_sense_to_errno(int key, int asc, int ascq);
int scsi_sense_buf_to_errno(const uint8_t *sense, size_t sense_size);
+bool scsi_sense_buf_is_guest_recoverable(const uint8_t *sense, size_t sense_size);
int scsi_convert_sense(uint8_t *in_buf, int in_len,
uint8_t *buf, int len, bool fixed);
@@ -120,16 +139,9 @@ int scsi_cdb_length(uint8_t *buf);
#ifdef CONFIG_LINUX
#define SG_ERR_DRIVER_TIMEOUT 0x06
#define SG_ERR_DRIVER_SENSE 0x08
-
-#define SG_ERR_DID_OK 0x00
-#define SG_ERR_DID_NO_CONNECT 0x01
-#define SG_ERR_DID_BUS_BUSY 0x02
-#define SG_ERR_DID_TIME_OUT 0x03
-
-#define SG_ERR_DRIVER_SENSE 0x08
-
-int sg_io_sense_from_errno(int errno_value, struct sg_io_hdr *io_hdr,
- SCSISense *sense);
#endif
+int scsi_sense_from_errno(int errno_value, SCSISense *sense);
+int scsi_sense_from_host_status(uint8_t host_status, SCSISense *sense);
+
#endif
diff --git a/include/semihosting/common-semi.h b/include/semihosting/common-semi.h
new file mode 100644
index 0000000000..0a91db7c41
--- /dev/null
+++ b/include/semihosting/common-semi.h
@@ -0,0 +1,39 @@
+/*
+ * Semihosting support for systems modeled on the Arm "Angel"
+ * semihosting syscalls design. This includes Arm and RISC-V processors
+ *
+ * Copyright (c) 2005, 2007 CodeSourcery.
+ * Copyright (c) 2019 Linaro
+ * Written by Paul Brook.
+ *
+ * Copyright © 2020 by Keith Packard <keithp@keithp.com>
+ * Adapted for systems other than ARM, including RISC-V, by Keith Packard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * ARM Semihosting is documented in:
+ * Semihosting for AArch32 and AArch64 Release 2.0
+ * https://static.docs.arm.com/100863/0200/semihosting.pdf
+ *
+ * RISC-V Semihosting is documented in:
+ * RISC-V Semihosting
+ * https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc
+ */
+
+#ifndef COMMON_SEMI_H
+#define COMMON_SEMI_H
+
+void do_common_semihosting(CPUState *cs);
+
+#endif /* COMMON_SEMI_H */
diff --git a/include/semihosting/console.h b/include/semihosting/console.h
new file mode 100644
index 0000000000..bd78e5f03f
--- /dev/null
+++ b/include/semihosting/console.h
@@ -0,0 +1,59 @@
+/*
+ * Semihosting Console
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SEMIHOST_CONSOLE_H
+#define SEMIHOST_CONSOLE_H
+
+#include "cpu.h"
+
+/**
+ * qemu_semihosting_console_read:
+ * @cs: CPUState
+ * @buf: host buffer
+ * @len: buffer size
+ *
+ * Receive at least one character from the debug console. As this call may
+ * block if no data is available, we suspend the CPU and re-execute the
+ * instruction when data arrives. Therefore two conditions must be met:
+ *
+ * - CPUState is synchronized before calling this function
+ * - pc is only updated once the character is successfully returned
+ *
+ * Returns: number of characters read, OR cpu_loop_exit!
+ */
+int qemu_semihosting_console_read(CPUState *cs, void *buf, int len);
+
+/**
+ * qemu_semihosting_console_write:
+ * @buf: host buffer
+ * @len: buffer size
+ *
+ * Write len bytes from buf to the debug console.
+ *
+ * Returns: number of bytes written -- this should only ever be short
+ * on some sort of i/o error.
+ */
+int qemu_semihosting_console_write(void *buf, int len);
+
+/*
+ * qemu_semihosting_console_block_until_ready:
+ * @cs: CPUState
+ *
+ * If no data is available we suspend the CPU and will re-execute the
+ * instruction when data is available.
+ */
+void qemu_semihosting_console_block_until_ready(CPUState *cs);
+
+/**
+ * qemu_semihosting_console_ready:
+ *
+ * Return true if characters are available for read; does not block.
+ */
+bool qemu_semihosting_console_ready(void);
+
+#endif /* SEMIHOST_CONSOLE_H */
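
A sketch of a tiny helper built on the write call; `semihost_puts` is a hypothetical name and the cast only mirrors the non-const buffer parameter.

#include "qemu/osdep.h"
#include "semihosting/console.h"

static void semihost_puts(const char *s)
{
    int len = strlen(s);

    if (qemu_semihosting_console_write((void *)s, len) != len) {
        /* short write: some kind of I/O error on the console chardev */
    }
}
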
diff --git a/include/semihosting/guestfd.h b/include/semihosting/guestfd.h
new file mode 100644
index 0000000000..3d426fedab
--- /dev/null
+++ b/include/semihosting/guestfd.h
@@ -0,0 +1,91 @@
+/*
+ * Hosted file support for semihosting syscalls.
+ *
+ * Copyright (c) 2005, 2007 CodeSourcery.
+ * Copyright (c) 2019 Linaro
+ * Copyright © 2020 by Keith Packard <keithp@keithp.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SEMIHOSTING_GUESTFD_H
+#define SEMIHOSTING_GUESTFD_H
+
+typedef enum GuestFDType {
+ GuestFDUnused = 0,
+ GuestFDHost,
+ GuestFDGDB,
+ GuestFDStatic,
+ GuestFDConsole,
+} GuestFDType;
+
+/*
+ * Guest file descriptors are integer indexes into an array of
+ * these structures (we will dynamically resize as necessary).
+ */
+typedef struct GuestFD {
+ GuestFDType type;
+ union {
+ int hostfd;
+ struct {
+ const uint8_t *data;
+ size_t len;
+ size_t off;
+ } staticfile;
+ };
+} GuestFD;
+
+/*
+ * For ARM semihosting, we have a separate structure for routing
+ * data for the console which is outside the guest fd address space.
+ */
+extern GuestFD console_in_gf;
+extern GuestFD console_out_gf;
+
+/**
+ * alloc_guestfd:
+ *
+ * Allocate an unused GuestFD index. The associated GuestFD stays in the
+ * GuestFDUnused state until it is initialized.
+ */
+int alloc_guestfd(void);
+
+/**
+ * dealloc_guestfd:
+ * @guestfd: GuestFD index
+ *
+ * Deallocate a GuestFD index. The associated GuestFD structure
+ * will be recycled for a subsequent allocation.
+ */
+void dealloc_guestfd(int guestfd);
+
+/**
+ * get_guestfd:
+ * @guestfd: GuestFD index
+ *
+ * Return the GuestFD structure associated with an initialized @guestfd,
+ * or NULL if it has not been allocated, or hasn't been initialized.
+ */
+GuestFD *get_guestfd(int guestfd);
+
+/**
+ * associate_guestfd:
+ * @guestfd: GuestFD index
+ * @hostfd: host file descriptor
+ *
+ * Initialize the GuestFD for @guestfd to GuestFDHost using @hostfd.
+ */
+void associate_guestfd(int guestfd, int hostfd);
+
+/**
+ * staticfile_guestfd:
+ * @guestfd: GuestFD index
+ * @data: data to be read
+ * @len: length of @data
+ *
+ * Initialize the GuestFD for @guestfd to GuestFDStatic.
+ * The @len bytes at @data will be returned to the guest on reads.
+ */
+void staticfile_guestfd(int guestfd, const uint8_t *data, size_t len);
+
+#endif /* SEMIHOSTING_GUESTFD_H */
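
A sketch of the intended allocate/associate/lookup/deallocate cycle for a host-backed descriptor; `open_for_guest` and `close_for_guest` are made-up helpers.

#include "qemu/osdep.h"
#include "semihosting/guestfd.h"

static int open_for_guest(const char *host_path, int host_flags)
{
    int hostfd = open(host_path, host_flags);

    if (hostfd < 0) {
        return -1;
    }
    int guestfd = alloc_guestfd();        /* reserve an index ... */
    associate_guestfd(guestfd, hostfd);   /* ... and back it with the host fd */
    return guestfd;
}

static void close_for_guest(int guestfd)
{
    GuestFD *gf = get_guestfd(guestfd);

    if (gf && gf->type == GuestFDHost) {
        close(gf->hostfd);
    }
    dealloc_guestfd(guestfd);
}
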
diff --git a/include/exec/semihost.h b/include/semihosting/semihost.h
index 5980939c7b..97d2a2ba99 100644
--- a/include/exec/semihost.h
+++ b/include/semihosting/semihost.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -27,7 +27,7 @@ typedef enum SemihostingTarget {
} SemihostingTarget;
#ifdef CONFIG_USER_ONLY
-static inline bool semihosting_enabled(void)
+static inline bool semihosting_enabled(bool is_user)
{
return true;
}
@@ -51,12 +51,25 @@ static inline const char *semihosting_get_cmdline(void)
{
return NULL;
}
-#else
-bool semihosting_enabled(void);
+#else /* !CONFIG_USER_ONLY */
+/**
+ * semihosting_enabled:
+ * @is_user: true if guest code is in usermode (i.e. not privileged)
+ *
+ * Return true if guest code is allowed to make semihosting calls.
+ */
+bool semihosting_enabled(bool is_user);
SemihostingTarget semihosting_get_target(void);
const char *semihosting_get_arg(int i);
int semihosting_get_argc(void);
const char *semihosting_get_cmdline(void);
-#endif
+void semihosting_arg_fallback(const char *file, const char *cmd);
+/* for vl.c hooks */
+void qemu_semihosting_enable(void);
+int qemu_semihosting_config_options(const char *optstr);
+void qemu_semihosting_chardev_init(void);
+void qemu_semihosting_console_init(Chardev *);
+#endif /* CONFIG_USER_ONLY */
+void qemu_semihosting_guestfd_init(void);
-#endif
+#endif /* SEMIHOST_H */
diff --git a/include/semihosting/syscalls.h b/include/semihosting/syscalls.h
new file mode 100644
index 0000000000..3a5ec229eb
--- /dev/null
+++ b/include/semihosting/syscalls.h
@@ -0,0 +1,75 @@
+/*
+ * Syscall implementations for semihosting.
+ *
+ * Copyright (c) 2022 Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SEMIHOSTING_SYSCALLS_H
+#define SEMIHOSTING_SYSCALLS_H
+
+/*
+ * Argument loading from the guest is performed by the caller;
+ * results are returned via the 'complete' callback.
+ *
+ * String operands are in address/len pairs. The len argument may be 0
+ * (when the semihosting abi does not already provide the length),
+ * or non-zero (where it should include the terminating zero).
+ */
+
+typedef struct GuestFD GuestFD;
+
+void semihost_sys_open(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong fname, target_ulong fname_len,
+ int gdb_flags, int mode);
+
+void semihost_sys_close(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd);
+
+void semihost_sys_read(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, target_ulong buf, target_ulong len);
+
+void semihost_sys_read_gf(CPUState *cs, gdb_syscall_complete_cb complete,
+ GuestFD *gf, target_ulong buf, target_ulong len);
+
+void semihost_sys_write(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, target_ulong buf, target_ulong len);
+
+void semihost_sys_write_gf(CPUState *cs, gdb_syscall_complete_cb complete,
+ GuestFD *gf, target_ulong buf, target_ulong len);
+
+void semihost_sys_lseek(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, int64_t off, int gdb_whence);
+
+void semihost_sys_isatty(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd);
+
+void semihost_sys_flen(CPUState *cs, gdb_syscall_complete_cb fstat_cb,
+ gdb_syscall_complete_cb flen_cb,
+ int fd, target_ulong fstat_addr);
+
+void semihost_sys_fstat(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, target_ulong addr);
+
+void semihost_sys_stat(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong fname, target_ulong fname_len,
+ target_ulong addr);
+
+void semihost_sys_remove(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong fname, target_ulong fname_len);
+
+void semihost_sys_rename(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong oname, target_ulong oname_len,
+ target_ulong nname, target_ulong nname_len);
+
+void semihost_sys_system(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong cmd, target_ulong cmd_len);
+
+void semihost_sys_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong tv_addr, target_ulong tz_addr);
+
+void semihost_sys_poll_one(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, GIOCondition cond, int timeout);
+
+#endif /* SEMIHOSTING_SYSCALLS_H */
diff --git a/include/semihosting/uaccess.h b/include/semihosting/uaccess.h
new file mode 100644
index 0000000000..3963eafc3e
--- /dev/null
+++ b/include/semihosting/uaccess.h
@@ -0,0 +1,63 @@
+/*
+ * Helper routines to provide target memory access for semihosting
+ * syscalls in system emulation mode.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+
+#ifndef SEMIHOSTING_UACCESS_H
+#define SEMIHOSTING_UACCESS_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include semihosting/uaccess.h from user emulation
+#endif
+
+#include "cpu.h"
+
+#define get_user_u64(val, addr) \
+ ({ uint64_t val_ = 0; \
+ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
+ &val_, sizeof(val_), 0); \
+ (val) = tswap64(val_); ret_; })
+
+#define get_user_u32(val, addr) \
+ ({ uint32_t val_ = 0; \
+ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
+ &val_, sizeof(val_), 0); \
+ (val) = tswap32(val_); ret_; })
+
+#define get_user_u8(val, addr) \
+ ({ uint8_t val_ = 0; \
+ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
+ &val_, sizeof(val_), 0); \
+ (val) = val_; ret_; })
+
+#define get_user_ual(arg, p) get_user_u32(arg, p)
+
+#define put_user_u64(val, addr) \
+ ({ uint64_t val_ = tswap64(val); \
+ cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); })
+
+#define put_user_u32(val, addr) \
+ ({ uint32_t val_ = tswap32(val); \
+ cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); })
+
+#define put_user_ual(arg, p) put_user_u32(arg, p)
+
+void *uaccess_lock_user(CPUArchState *env, target_ulong addr,
+ target_ulong len, bool copy);
+#define lock_user(type, p, len, copy) uaccess_lock_user(env, p, len, copy)
+
+char *uaccess_lock_user_string(CPUArchState *env, target_ulong addr);
+#define lock_user_string(p) uaccess_lock_user_string(env, p)
+
+void uaccess_unlock_user(CPUArchState *env, void *p,
+ target_ulong addr, target_ulong len);
+#define unlock_user(s, args, len) uaccess_unlock_user(env, s, args, len)
+
+ssize_t uaccess_strlen_user(CPUArchState *env, target_ulong addr);
+#define target_strlen(p) uaccess_strlen_user(env, p)
+
+#endif /* SEMIHOSTING_UACCESS_H */
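
A sketch of a system-mode handler pulling one pointer argument out of guest memory and borrowing the string it points to. The function name and argument layout are invented, and `env` must be in scope because the macros reference it.

#include "qemu/osdep.h"
#include "semihosting/uaccess.h"

static void handle_write0(CPUArchState *env, target_ulong args)
{
    target_ulong str_addr;
    char *s;

    if (get_user_ual(str_addr, args)) {
        return;                     /* guest memory was not readable */
    }
    s = lock_user_string(str_addr);
    if (s) {
        /* ... consume the NUL-terminated copy ... */
        unlock_user(s, str_addr, 0);
    }
}
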
diff --git a/include/standard-headers/asm-m68k/bootinfo-mac.h b/include/standard-headers/asm-m68k/bootinfo-mac.h
new file mode 100644
index 0000000000..449928cfcb
--- /dev/null
+++ b/include/standard-headers/asm-m68k/bootinfo-mac.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+** asm/bootinfo-mac.h -- Macintosh-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_MAC_H
+#define _UAPI_ASM_M68K_BOOTINFO_MAC_H
+
+
+ /*
+ * Macintosh-specific tags (all __be32)
+ */
+
+#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */
+#define BI_MAC_VADDR 0x8001 /* Mac video base address */
+#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */
+#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */
+#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */
+#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */
+#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */
+#define BI_MAC_BTIME 0x8007 /* Mac boot time */
+#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */
+#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */
+#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */
+#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */
+
+
+ /*
+ * Macintosh hardware profile data - unused, see macintosh.h for
+ * reasonable type values
+ */
+
+#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */
+#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */
+#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */
+#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */
+#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */
+#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */
+#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */
+#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */
+#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */
+#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */
+#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */
+#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */
+#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */
+#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE) */
+#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */
+#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */
+#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */
+#define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */
+
+
+ /*
+ * Macintosh Gestalt numbers (BI_MAC_MODEL)
+ */
+
+#define MAC_MODEL_II 6
+#define MAC_MODEL_IIX 7
+#define MAC_MODEL_IICX 8
+#define MAC_MODEL_SE30 9
+#define MAC_MODEL_IICI 11
+#define MAC_MODEL_IIFX 13 /* And well numbered it is too */
+#define MAC_MODEL_IISI 18
+#define MAC_MODEL_LC 19
+#define MAC_MODEL_Q900 20
+#define MAC_MODEL_PB170 21
+#define MAC_MODEL_Q700 22
+#define MAC_MODEL_CLII 23 /* aka: P200 */
+#define MAC_MODEL_PB140 25
+#define MAC_MODEL_Q950 26 /* aka: WGS95 */
+#define MAC_MODEL_LCIII 27 /* aka: P450 */
+#define MAC_MODEL_PB210 29
+#define MAC_MODEL_C650 30
+#define MAC_MODEL_PB230 32
+#define MAC_MODEL_PB180 33
+#define MAC_MODEL_PB160 34
+#define MAC_MODEL_Q800 35 /* aka: WGS80 */
+#define MAC_MODEL_Q650 36
+#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */
+#define MAC_MODEL_PB250 38
+#define MAC_MODEL_IIVI 44
+#define MAC_MODEL_P600 45 /* aka: P600CD */
+#define MAC_MODEL_IIVX 48
+#define MAC_MODEL_CCL 49 /* aka: P250 */
+#define MAC_MODEL_PB165C 50
+#define MAC_MODEL_C610 52 /* aka: WGS60 */
+#define MAC_MODEL_Q610 53
+#define MAC_MODEL_PB145 54 /* aka: PB145B */
+#define MAC_MODEL_P520 56 /* aka: LC520 */
+#define MAC_MODEL_C660 60
+#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */
+#define MAC_MODEL_PB180C 71
+#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */
+#define MAC_MODEL_PB270C 77
+#define MAC_MODEL_Q840 78
+#define MAC_MODEL_P550 80 /* aka: LC550, P560 */
+#define MAC_MODEL_CCLII 83 /* aka: P275 */
+#define MAC_MODEL_PB165 84
+#define MAC_MODEL_PB190 85 /* aka: PB190CS */
+#define MAC_MODEL_TV 88
+#define MAC_MODEL_P475 89 /* aka: LC475, P476 */
+#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */
+#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */
+#define MAC_MODEL_Q605 94
+#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */
+#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */
+#define MAC_MODEL_P588 99 /* aka: LC580, P580 */
+#define MAC_MODEL_PB280 102
+#define MAC_MODEL_PB280C 103
+#define MAC_MODEL_PB150 115
+
+
+ /*
+ * Latest Macintosh bootinfo version
+ */
+
+#define MAC_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */
diff --git a/include/standard-headers/asm-m68k/bootinfo-virt.h b/include/standard-headers/asm-m68k/bootinfo-virt.h
new file mode 100644
index 0000000000..75ac6bbd7d
--- /dev/null
+++ b/include/standard-headers/asm-m68k/bootinfo-virt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+** asm/bootinfo-virt.h -- Virtual-m68k-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_VIRT_H
+#define _UAPI_ASM_M68K_BOOTINFO_VIRT_H
+
+#define BI_VIRT_QEMU_VERSION 0x8000
+#define BI_VIRT_GF_PIC_BASE 0x8001
+#define BI_VIRT_GF_RTC_BASE 0x8002
+#define BI_VIRT_GF_TTY_BASE 0x8003
+#define BI_VIRT_VIRTIO_BASE 0x8004
+#define BI_VIRT_CTRL_BASE 0x8005
+
+/* No longer used -- replaced with BI_RNG_SEED -- but don't reuse this index:
+ * #define BI_VIRT_RNG_SEED 0x8006 */
+
+#define VIRT_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_VIRT_H */
diff --git a/include/standard-headers/asm-m68k/bootinfo.h b/include/standard-headers/asm-m68k/bootinfo.h
new file mode 100644
index 0000000000..b7a8dd2514
--- /dev/null
+++ b/include/standard-headers/asm-m68k/bootinfo.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure
+ *
+ * Copyright 1992 by Greg Harp
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_H
+#define _UAPI_ASM_M68K_BOOTINFO_H
+
+
+ /*
+ * Bootinfo definitions
+ *
+ * This is an easily parsable and extendable structure containing all
+ * information to be passed from the bootstrap to the kernel.
+ *
+ * This way I hope to keep all future changes back/forwards compatible.
+ * Thus, keep your fingers crossed...
+ *
+ * This structure is copied right after the kernel by the bootstrap
+ * routine.
+ */
+
+struct bi_record {
+ uint16_t tag; /* tag ID */
+ uint16_t size; /* size of record (in bytes) */
+ uint32_t data[0]; /* data */
+};
+
+
+struct mem_info {
+ uint32_t addr; /* physical address of memory chunk */
+ uint32_t size; /* length of memory chunk (in bytes) */
+};
+
+
+ /*
+ * Tag Definitions
+ *
+ * Machine independent tags start counting from 0x0000
+ * Machine dependent tags start counting from 0x8000
+ */
+
+#define BI_LAST 0x0000 /* last record (sentinel) */
+#define BI_MACHTYPE 0x0001 /* machine type (uint32_t) */
+#define BI_CPUTYPE 0x0002 /* cpu type (uint32_t) */
+#define BI_FPUTYPE 0x0003 /* fpu type (uint32_t) */
+#define BI_MMUTYPE 0x0004 /* mmu type (uint32_t) */
+#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */
+ /* (struct mem_info) */
+#define BI_RAMDISK 0x0006 /* ramdisk address and size */
+ /* (struct mem_info) */
+#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */
+ /* (string) */
+/*
+ * A random seed used to initialize the RNG. Record format:
+ *
+ * - length [ 2 bytes, 16-bit big endian ]
+ * - seed data [ `length` bytes, padded to preserve 4-byte struct alignment ]
+ */
+#define BI_RNG_SEED 0x0008
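
A sketch of walking the record list that the bootstrap places after the kernel image. The fields are stored big-endian by the bootstrap, so a real consumer would byte-swap `tag` and `size`; that step is omitted here.

#include "qemu/osdep.h"
#include "standard-headers/asm-m68k/bootinfo.h"

static void walk_bootinfo(const void *bootinfo)
{
    const struct bi_record *record = bootinfo;

    while (record->tag != BI_LAST) {
        /* record->size covers the header plus the payload in data[] */
        record = (const void *)((const char *)record + record->size);
    }
}
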
+
+ /*
+ * Linux/m68k Architectures (BI_MACHTYPE)
+ */
+
+#define MACH_AMIGA 1
+#define MACH_ATARI 2
+#define MACH_MAC 3
+#define MACH_APOLLO 4
+#define MACH_SUN3 5
+#define MACH_MVME147 6
+#define MACH_MVME16x 7
+#define MACH_BVME6000 8
+#define MACH_HP300 9
+#define MACH_Q40 10
+#define MACH_SUN3X 11
+#define MACH_M54XX 12
+#define MACH_M5441X 13
+#define MACH_VIRT 14
+
+
+ /*
+ * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE)
+ *
+ * Note: we may rely on the following equalities:
+ *
+ * CPU_68020 == MMU_68851
+ * CPU_68030 == MMU_68030
+ * CPU_68040 == FPU_68040 == MMU_68040
+ * CPU_68060 == FPU_68060 == MMU_68060
+ */
+
+#define CPUB_68020 0
+#define CPUB_68030 1
+#define CPUB_68040 2
+#define CPUB_68060 3
+#define CPUB_COLDFIRE 4
+
+#define CPU_68020 (1 << CPUB_68020)
+#define CPU_68030 (1 << CPUB_68030)
+#define CPU_68040 (1 << CPUB_68040)
+#define CPU_68060 (1 << CPUB_68060)
+#define CPU_COLDFIRE (1 << CPUB_COLDFIRE)
+
+#define FPUB_68881 0
+#define FPUB_68882 1
+#define FPUB_68040 2 /* Internal FPU */
+#define FPUB_68060 3 /* Internal FPU */
+#define FPUB_SUNFPA 4 /* Sun-3 FPA */
+#define FPUB_COLDFIRE 5 /* ColdFire FPU */
+
+#define FPU_68881 (1 << FPUB_68881)
+#define FPU_68882 (1 << FPUB_68882)
+#define FPU_68040 (1 << FPUB_68040)
+#define FPU_68060 (1 << FPUB_68060)
+#define FPU_SUNFPA (1 << FPUB_SUNFPA)
+#define FPU_COLDFIRE (1 << FPUB_COLDFIRE)
+
+#define MMUB_68851 0
+#define MMUB_68030 1 /* Internal MMU */
+#define MMUB_68040 2 /* Internal MMU */
+#define MMUB_68060 3 /* Internal MMU */
+#define MMUB_APOLLO 4 /* Custom Apollo */
+#define MMUB_SUN3 5 /* Custom Sun-3 */
+#define MMUB_COLDFIRE 6 /* Internal MMU */
+
+#define MMU_68851 (1 << MMUB_68851)
+#define MMU_68030 (1 << MMUB_68030)
+#define MMU_68040 (1 << MMUB_68040)
+#define MMU_68060 (1 << MMUB_68060)
+#define MMU_SUN3 (1 << MMUB_SUN3)
+#define MMU_APOLLO (1 << MMUB_APOLLO)
+#define MMU_COLDFIRE (1 << MMUB_COLDFIRE)
+
+
+ /*
+ * Stuff for bootinfo interface versioning
+ *
+ * At the start of kernel code, a 'struct bootversion' is located.
+ * bootstrap checks for a matching version of the interface before booting
+ * a kernel, to avoid user confusion if kernel and bootstrap don't work
+ * together :-)
+ *
+ * If incompatible changes are made to the bootinfo interface, the major
+ * number below should be stepped (and the minor reset to 0) for the
+ * appropriate machine. If a change is backward-compatible, the minor
+ * should be stepped. "Backwards-compatible" means that booting will work,
+ * but certain features may not.
+ */
+
+#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */
+#define MK_BI_VERSION(major, minor) (((major) << 16) + (minor))
+#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff)
+#define BI_VERSION_MINOR(v) ((v) & 0xffff)
+
+struct bootversion {
+ uint16_t branch;
+ uint32_t magic;
+ struct {
+ uint32_t machtype;
+ uint32_t version;
+ } machversions[0];
+} QEMU_PACKED;
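As a small, assumed usage sketch of the versioning macros above (not taken from QEMU), this composes and decomposes a version word and applies the major/minor compatibility rule described in the comment:

    #include <assert.h>
    #include <stdint.h>

    /* Major mismatch => incompatible; a differing minor only means some
     * features may be unavailable, per the comment above. */
    static int bootinfo_compatible(uint32_t have, uint32_t want)
    {
        return BI_VERSION_MAJOR(have) == BI_VERSION_MAJOR(want);
    }

    int main(void)
    {
        uint32_t v = MK_BI_VERSION(2, 1);

        assert(BI_VERSION_MAJOR(v) == 2);
        assert(BI_VERSION_MINOR(v) == 1);
        assert(bootinfo_compatible(v, MK_BI_VERSION(2, 0)));
        assert(!bootinfo_compatible(v, MK_BI_VERSION(3, 0)));
        return 0;
    }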
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_H */
diff --git a/include/standard-headers/asm-x86/bootparam.h b/include/standard-headers/asm-x86/bootparam.h
new file mode 100644
index 0000000000..b582a105c0
--- /dev/null
+++ b/include/standard-headers/asm-x86/bootparam.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_X86_BOOTPARAM_H
+#define _ASM_X86_BOOTPARAM_H
+
+#include "standard-headers/asm-x86/setup_data.h"
+
+/* ram_size flags */
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+/* loadflags */
+#define LOADED_HIGH (1<<0)
+#define KASLR_FLAG (1<<1)
+#define QUIET_FLAG (1<<5)
+#define KEEP_SEGMENTS (1<<6)
+#define CAN_USE_HEAP (1<<7)
+
+/* xloadflags */
+#define XLF_KERNEL_64 (1<<0)
+#define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1)
+#define XLF_EFI_HANDOVER_32 (1<<2)
+#define XLF_EFI_HANDOVER_64 (1<<3)
+#define XLF_EFI_KEXEC (1<<4)
+#define XLF_5LEVEL (1<<5)
+#define XLF_5LEVEL_ENABLED (1<<6)
+#define XLF_MEM_ENCRYPTION (1<<7)
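For illustration only (the xloadflags value below is hypothetical, not read from a real boot header), the load/xload flags are plain bit tests:

    #include <stdint.h>
    #include <stdio.h>

    #include "standard-headers/asm-x86/bootparam.h"

    int main(void)
    {
        /* Hypothetical xloadflags a loader might find in a setup header. */
        uint16_t xloadflags = XLF_KERNEL_64 | XLF_CAN_BE_LOADED_ABOVE_4G;

        if (xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G)
            printf("kernel may be placed above 4 GiB\n");
        if (!(xloadflags & XLF_5LEVEL))
            printf("no 5-level paging support advertised\n");
        return 0;
    }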
+
+
+#endif /* _ASM_X86_BOOTPARAM_H */
diff --git a/include/standard-headers/asm-x86/kvm_para.h b/include/standard-headers/asm-x86/kvm_para.h
index 35cd8d651f..9a011d20f0 100644
--- a/include/standard-headers/asm-x86/kvm_para.h
+++ b/include/standard-headers/asm-x86/kvm_para.h
@@ -8,6 +8,7 @@
* should be used to determine that a VM is running under KVM.
*/
#define KVM_CPUID_SIGNATURE 0x40000000
+#define KVM_SIGNATURE "KVMKVMKVM\0\0\0"
/* This CPUID returns two feature bitmaps in eax, edx. Before enabling
* a particular paravirtualization, the appropriate feature bit should
@@ -29,6 +30,12 @@
#define KVM_FEATURE_PV_TLB_FLUSH 9
#define KVM_FEATURE_ASYNC_PF_VMEXIT 10
#define KVM_FEATURE_PV_SEND_IPI 11
+#define KVM_FEATURE_POLL_CONTROL 12
+#define KVM_FEATURE_PV_SCHED_YIELD 13
+#define KVM_FEATURE_ASYNC_PF_INT 14
+#define KVM_FEATURE_MSI_EXT_DEST_ID 15
+#define KVM_FEATURE_HC_MAP_GPA_RANGE 16
+#define KVM_FEATURE_MIGRATION_CONTROL 17
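A hedged, x86-only sketch of how a guest might use the signature and feature bits above; it relies on the compiler-provided <cpuid.h>, assumes the signature is returned in ebx/ecx/edx as is conventional for hypervisor leaves, and assumes the leaf following KVM_CPUID_SIGNATURE reports the feature bitmap in eax, as the comment above suggests:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    #include "standard-headers/asm-x86/kvm_para.h"

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char sig[13];

        /* Hypervisor signature is conventionally returned in ebx/ecx/edx. */
        __cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
        memcpy(sig + 0, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        sig[12] = '\0';

        if (memcmp(sig, KVM_SIGNATURE, 12) == 0) {
            /* Assumed: the next leaf carries the feature bitmap in eax. */
            __cpuid(KVM_CPUID_SIGNATURE + 1, eax, ebx, ecx, edx);
            if (eax & (1u << KVM_FEATURE_ASYNC_PF_INT))
                printf("async page faults can be delivered as interrupts\n");
        }
        return 0;
    }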
#define KVM_HINTS_REALTIME 0
@@ -47,6 +54,10 @@
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
+#define MSR_KVM_POLL_CONTROL 0x4b564d05
+#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
+#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
+#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08
struct kvm_steal_time {
uint64_t steal;
@@ -78,6 +89,21 @@ struct kvm_clock_pairing {
#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
#define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1 << 2)
+#define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3)
+
+/* MSR_KVM_ASYNC_PF_INT */
+#define KVM_ASYNC_PF_VEC_MASK __GENMASK(7, 0)
+
+/* MSR_KVM_MIGRATION_CONTROL */
+#define KVM_MIGRATION_READY (1 << 0)
+
+/* KVM_HC_MAP_GPA_RANGE */
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K 0
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_2M (1 << 0)
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_1G (1 << 1)
+#define KVM_MAP_GPA_RANGE_ENC_STAT(n) (n << 4)
+#define KVM_MAP_GPA_RANGE_ENCRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(1)
+#define KVM_MAP_GPA_RANGE_DECRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(0)
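A tiny illustration (the hypercall itself is not shown) of how the page-size and encryption-status bits above compose into a flags word for KVM_HC_MAP_GPA_RANGE:

    #include <stdint.h>
    #include <stdio.h>

    #include "standard-headers/asm-x86/kvm_para.h"

    int main(void)
    {
        /* 2M pages, marked encrypted: bit 0 selects the page size,
         * the encryption status sits at bit 4 via _ENC_STAT(). */
        uint64_t flags = KVM_MAP_GPA_RANGE_PAGE_SZ_2M |
                         KVM_MAP_GPA_RANGE_ENCRYPTED;

        printf("flags = 0x%llx\n", (unsigned long long)flags); /* 0x11 */
        return 0;
    }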
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
@@ -109,9 +135,13 @@ struct kvm_mmu_op_release_pt {
#define KVM_PV_REASON_PAGE_READY 2
struct kvm_vcpu_pv_apf_data {
- uint32_t reason;
- uint8_t pad[60];
- uint32_t enabled;
+ /* Used for 'page not present' events delivered via #PF */
+ uint32_t flags;
+
+ /* Used for 'page ready' events delivered via interrupt notification */
+ uint32_t token;
+
+ uint8_t pad[56];
};
#define KVM_PV_EOI_BIT 0
diff --git a/include/standard-headers/asm-x86/setup_data.h b/include/standard-headers/asm-x86/setup_data.h
new file mode 100644
index 0000000000..09355f54c5
--- /dev/null
+++ b/include/standard-headers/asm-x86/setup_data.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_X86_SETUP_DATA_H
+#define _ASM_X86_SETUP_DATA_H
+
+/* setup_data/setup_indirect types */
+#define SETUP_NONE 0
+#define SETUP_E820_EXT 1
+#define SETUP_DTB 2
+#define SETUP_PCI 3
+#define SETUP_EFI 4
+#define SETUP_APPLE_PROPERTIES 5
+#define SETUP_JAILHOUSE 6
+#define SETUP_CC_BLOB 7
+#define SETUP_IMA 8
+#define SETUP_RNG_SEED 9
+#define SETUP_ENUM_MAX SETUP_RNG_SEED
+
+#define SETUP_INDIRECT (1<<31)
+#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT)
+
+#ifndef __ASSEMBLY__
+
+#include "standard-headers/linux/types.h"
+
+/* extensible setup data list node */
+struct setup_data {
+ uint64_t next;
+ uint32_t type;
+ uint32_t len;
+ uint8_t data[];
+};
+
+/* extensible setup indirect data node */
+struct setup_indirect {
+ uint32_t type;
+ uint32_t reserved; /* Reserved, must be set to zero. */
+ uint64_t len;
+ uint64_t addr;
+};
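To show the list shape these two structures form, here is a hedged walking sketch; map_phys() is purely hypothetical, standing in for whatever a loader uses to turn the 64-bit physical 'next' pointer into something dereferenceable, and the indirect-type handling is an assumption rather than a quote of kernel behaviour:

    #include <stdint.h>
    #include <stdio.h>

    #include "standard-headers/asm-x86/setup_data.h"

    /* Hypothetical helper: 'next' holds a physical address, not a pointer. */
    extern const struct setup_data *map_phys(uint64_t phys);

    static void walk_setup_data(uint64_t first)
    {
        for (uint64_t phys = first; phys; ) {
            const struct setup_data *sd = map_phys(phys);

            if (sd->type & SETUP_INDIRECT) {
                /* Assumed: payload is a setup_indirect describing where
                 * the real data lives. */
                const struct setup_indirect *ind =
                    (const struct setup_indirect *)sd->data;
                printf("indirect: %llu bytes at 0x%llx\n",
                       (unsigned long long)ind->len,
                       (unsigned long long)ind->addr);
            } else {
                printf("type %u, %u bytes inline\n", sd->type, sd->len);
            }
            phys = sd->next;
        }
    }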
+
+/*
+ * The E820 memory region entry of the boot protocol ABI:
+ */
+struct boot_e820_entry {
+ uint64_t addr;
+ uint64_t size;
+ uint32_t type;
+} QEMU_PACKED;
+
+/*
+ * The boot loader is passing platform information via this Jailhouse-specific
+ * setup data structure.
+ */
+struct jailhouse_setup_data {
+ struct {
+ uint16_t version;
+ uint16_t compatible_version;
+ } QEMU_PACKED hdr;
+ struct {
+ uint16_t pm_timer_address;
+ uint16_t num_cpus;
+ uint64_t pci_mmconfig_base;
+ uint32_t tsc_khz;
+ uint32_t apic_khz;
+ uint8_t standard_ioapic;
+ uint8_t cpu_ids[255];
+ } QEMU_PACKED v1;
+ struct {
+ uint32_t flags;
+ } QEMU_PACKED v2;
+} QEMU_PACKED;
+
+/*
+ * IMA buffer setup data information from the previous kernel during kexec
+ */
+struct ima_setup_data {
+ uint64_t addr;
+ uint64_t size;
+} QEMU_PACKED;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_SETUP_DATA_H */
diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
deleted file mode 100644
index 422eb3f4c1..0000000000
--- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ /dev/null
@@ -1,667 +0,0 @@
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PVRDMA_DEV_API_H__
-#define __PVRDMA_DEV_API_H__
-
-#include "standard-headers/linux/types.h"
-
-#include "pvrdma_verbs.h"
-
-/*
- * PVRDMA version macros. Some new features require updates to PVRDMA_VERSION.
- * These macros allow us to check for different features if necessary.
- */
-
-#define PVRDMA_ROCEV1_VERSION 17
-#define PVRDMA_ROCEV2_VERSION 18
-#define PVRDMA_VERSION PVRDMA_ROCEV2_VERSION
-
-#define PVRDMA_BOARD_ID 1
-#define PVRDMA_REV_ID 1
-
-/*
- * Masks and accessors for page directory, which is a two-level lookup:
- * page directory -> page table -> page. Only one directory for now, but we
- * could expand that easily. 9 bits for tables, 9 bits for pages, gives one
- * gigabyte for memory regions and so forth.
- */
-
-#define PVRDMA_PDIR_SHIFT 18
-#define PVRDMA_PTABLE_SHIFT 9
-#define PVRDMA_PAGE_DIR_DIR(x) (((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
-#define PVRDMA_PAGE_DIR_TABLE(x) (((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
-#define PVRDMA_PAGE_DIR_PAGE(x) ((x) & 0x1ff)
-#define PVRDMA_PAGE_DIR_MAX_PAGES (1 * 512 * 512)
-#define PVRDMA_MAX_FAST_REG_PAGES 128
-
-/*
- * Max MSI-X vectors.
- */
-
-#define PVRDMA_MAX_INTERRUPTS 3
-
-/* Register offsets within PCI resource on BAR1. */
-#define PVRDMA_REG_VERSION 0x00 /* R: Version of device. */
-#define PVRDMA_REG_DSRLOW 0x04 /* W: Device shared region low PA. */
-#define PVRDMA_REG_DSRHIGH 0x08 /* W: Device shared region high PA. */
-#define PVRDMA_REG_CTL 0x0c /* W: PVRDMA_DEVICE_CTL */
-#define PVRDMA_REG_REQUEST 0x10 /* W: Indicate device request. */
-#define PVRDMA_REG_ERR 0x14 /* R: Device error. */
-#define PVRDMA_REG_ICR 0x18 /* R: Interrupt cause. */
-#define PVRDMA_REG_IMR 0x1c /* R/W: Interrupt mask. */
-#define PVRDMA_REG_MACL 0x20 /* R/W: MAC address low. */
-#define PVRDMA_REG_MACH 0x24 /* R/W: MAC address high. */
-
-/* Object flags. */
-#define PVRDMA_CQ_FLAG_ARMED_SOL BIT(0) /* Armed for solicited-only. */
-#define PVRDMA_CQ_FLAG_ARMED BIT(1) /* Armed. */
-#define PVRDMA_MR_FLAG_DMA BIT(0) /* DMA region. */
-#define PVRDMA_MR_FLAG_FRMR BIT(1) /* Fast reg memory region. */
-
-/*
- * Atomic operation capability (masked versions are extended atomic
- * operations.
- */
-
-#define PVRDMA_ATOMIC_OP_COMP_SWAP BIT(0) /* Compare and swap. */
-#define PVRDMA_ATOMIC_OP_FETCH_ADD BIT(1) /* Fetch and add. */
-#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP BIT(2) /* Masked compare and swap. */
-#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD BIT(3) /* Masked fetch and add. */
-
-/*
- * Base Memory Management Extension flags to support Fast Reg Memory Regions
- * and Fast Reg Work Requests. Each flag represents a verb operation and we
- * must support all of them to qualify for the BMME device cap.
- */
-
-#define PVRDMA_BMME_FLAG_LOCAL_INV BIT(0) /* Local Invalidate. */
-#define PVRDMA_BMME_FLAG_REMOTE_INV BIT(1) /* Remote Invalidate. */
-#define PVRDMA_BMME_FLAG_FAST_REG_WR BIT(2) /* Fast Reg Work Request. */
-
-/*
- * GID types. The interpretation of the gid_types bit field in the device
- * capabilities will depend on the device mode. For now, the device only
- * supports RoCE as mode, so only the different GID types for RoCE are
- * defined.
- */
-
-#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0)
-#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1)
-
-/*
- * Version checks. This checks whether each version supports specific
- * capabilities from the device.
- */
-
-#define PVRDMA_IS_VERSION17(_dev) \
- (_dev->dsr_version == PVRDMA_ROCEV1_VERSION && \
- _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
-
-#define PVRDMA_IS_VERSION18(_dev) \
- (_dev->dsr_version >= PVRDMA_ROCEV2_VERSION && \
- (_dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1 || \
- _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)) \
-
-#define PVRDMA_SUPPORTED(_dev) \
- ((_dev->dsr->caps.mode == PVRDMA_DEVICE_MODE_ROCE) && \
- (PVRDMA_IS_VERSION17(_dev) || PVRDMA_IS_VERSION18(_dev)))
-
-/*
- * Get capability values based on device version.
- */
-
-#define PVRDMA_GET_CAP(_dev, _old_val, _val) \
- ((PVRDMA_IS_VERSION18(_dev)) ? _val : _old_val)
-
-enum pvrdma_pci_resource {
- PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */
- PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */
- PVRDMA_PCI_RESOURCE_UAR, /* BAR2: UAR pages, MMIO, 64-bit. */
- PVRDMA_PCI_RESOURCE_LAST, /* Last. */
-};
-
-enum pvrdma_device_ctl {
- PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
- PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
- PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
-};
-
-enum pvrdma_intr_vector {
- PVRDMA_INTR_VECTOR_RESPONSE, /* Command response. */
- PVRDMA_INTR_VECTOR_ASYNC, /* Async events. */
- PVRDMA_INTR_VECTOR_CQ, /* CQ notification. */
- /* Additional CQ notification vectors. */
-};
-
-enum pvrdma_intr_cause {
- PVRDMA_INTR_CAUSE_RESPONSE = (1 << PVRDMA_INTR_VECTOR_RESPONSE),
- PVRDMA_INTR_CAUSE_ASYNC = (1 << PVRDMA_INTR_VECTOR_ASYNC),
- PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ),
-};
-
-enum pvrdma_gos_bits {
- PVRDMA_GOS_BITS_UNK, /* Unknown. */
- PVRDMA_GOS_BITS_32, /* 32-bit. */
- PVRDMA_GOS_BITS_64, /* 64-bit. */
-};
-
-enum pvrdma_gos_type {
- PVRDMA_GOS_TYPE_UNK, /* Unknown. */
- PVRDMA_GOS_TYPE_LINUX, /* Linux. */
-};
-
-enum pvrdma_device_mode {
- PVRDMA_DEVICE_MODE_ROCE, /* RoCE. */
- PVRDMA_DEVICE_MODE_IWARP, /* iWarp. */
- PVRDMA_DEVICE_MODE_IB, /* InfiniBand. */
-};
-
-struct pvrdma_gos_info {
- uint32_t gos_bits:2; /* W: PVRDMA_GOS_BITS_ */
- uint32_t gos_type:4; /* W: PVRDMA_GOS_TYPE_ */
- uint32_t gos_ver:16; /* W: Guest OS version. */
- uint32_t gos_misc:10; /* W: Other. */
- uint32_t pad; /* Pad to 8-byte alignment. */
-};
-
-struct pvrdma_device_caps {
- uint64_t fw_ver; /* R: Query device. */
- uint64_t node_guid;
- uint64_t sys_image_guid;
- uint64_t max_mr_size;
- uint64_t page_size_cap;
- uint64_t atomic_arg_sizes; /* EX verbs. */
- uint32_t ex_comp_mask; /* EX verbs. */
- uint32_t device_cap_flags2; /* EX verbs. */
- uint32_t max_fa_bit_boundary; /* EX verbs. */
- uint32_t log_max_atomic_inline_arg; /* EX verbs. */
- uint32_t vendor_id;
- uint32_t vendor_part_id;
- uint32_t hw_ver;
- uint32_t max_qp;
- uint32_t max_qp_wr;
- uint32_t device_cap_flags;
- uint32_t max_sge;
- uint32_t max_sge_rd;
- uint32_t max_cq;
- uint32_t max_cqe;
- uint32_t max_mr;
- uint32_t max_pd;
- uint32_t max_qp_rd_atom;
- uint32_t max_ee_rd_atom;
- uint32_t max_res_rd_atom;
- uint32_t max_qp_init_rd_atom;
- uint32_t max_ee_init_rd_atom;
- uint32_t max_ee;
- uint32_t max_rdd;
- uint32_t max_mw;
- uint32_t max_raw_ipv6_qp;
- uint32_t max_raw_ethy_qp;
- uint32_t max_mcast_grp;
- uint32_t max_mcast_qp_attach;
- uint32_t max_total_mcast_qp_attach;
- uint32_t max_ah;
- uint32_t max_fmr;
- uint32_t max_map_per_fmr;
- uint32_t max_srq;
- uint32_t max_srq_wr;
- uint32_t max_srq_sge;
- uint32_t max_uar;
- uint32_t gid_tbl_len;
- uint16_t max_pkeys;
- uint8_t local_ca_ack_delay;
- uint8_t phys_port_cnt;
- uint8_t mode; /* PVRDMA_DEVICE_MODE_ */
- uint8_t atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */
- uint8_t bmme_flags; /* FRWR Mem Mgmt Extensions */
- uint8_t gid_types; /* PVRDMA_GID_TYPE_FLAG_ */
- uint32_t max_fast_reg_page_list_len;
-};
-
-struct pvrdma_ring_page_info {
- uint32_t num_pages; /* Num pages incl. header. */
- uint32_t reserved; /* Reserved. */
- uint64_t pdir_dma; /* Page directory PA. */
-};
-
-#pragma pack(push, 1)
-
-struct pvrdma_device_shared_region {
- uint32_t driver_version; /* W: Driver version. */
- uint32_t pad; /* Pad to 8-byte align. */
- struct pvrdma_gos_info gos_info; /* W: Guest OS information. */
- uint64_t cmd_slot_dma; /* W: Command slot address. */
- uint64_t resp_slot_dma; /* W: Response slot address. */
- struct pvrdma_ring_page_info async_ring_pages;
- /* W: Async ring page info. */
- struct pvrdma_ring_page_info cq_ring_pages;
- /* W: CQ ring page info. */
- uint32_t uar_pfn; /* W: UAR pageframe. */
- uint32_t pad2; /* Pad to 8-byte align. */
- struct pvrdma_device_caps caps; /* R: Device capabilities. */
-};
-
-#pragma pack(pop)
-
-/* Event types. Currently a 1:1 mapping with enum ib_event. */
-enum pvrdma_eqe_type {
- PVRDMA_EVENT_CQ_ERR,
- PVRDMA_EVENT_QP_FATAL,
- PVRDMA_EVENT_QP_REQ_ERR,
- PVRDMA_EVENT_QP_ACCESS_ERR,
- PVRDMA_EVENT_COMM_EST,
- PVRDMA_EVENT_SQ_DRAINED,
- PVRDMA_EVENT_PATH_MIG,
- PVRDMA_EVENT_PATH_MIG_ERR,
- PVRDMA_EVENT_DEVICE_FATAL,
- PVRDMA_EVENT_PORT_ACTIVE,
- PVRDMA_EVENT_PORT_ERR,
- PVRDMA_EVENT_LID_CHANGE,
- PVRDMA_EVENT_PKEY_CHANGE,
- PVRDMA_EVENT_SM_CHANGE,
- PVRDMA_EVENT_SRQ_ERR,
- PVRDMA_EVENT_SRQ_LIMIT_REACHED,
- PVRDMA_EVENT_QP_LAST_WQE_REACHED,
- PVRDMA_EVENT_CLIENT_REREGISTER,
- PVRDMA_EVENT_GID_CHANGE,
-};
-
-/* Event queue element. */
-struct pvrdma_eqe {
- uint32_t type; /* Event type. */
- uint32_t info; /* Handle, other. */
-};
-
-/* CQ notification queue element. */
-struct pvrdma_cqne {
- uint32_t info; /* Handle */
-};
-
-enum {
- PVRDMA_CMD_FIRST,
- PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
- PVRDMA_CMD_QUERY_PKEY,
- PVRDMA_CMD_CREATE_PD,
- PVRDMA_CMD_DESTROY_PD,
- PVRDMA_CMD_CREATE_MR,
- PVRDMA_CMD_DESTROY_MR,
- PVRDMA_CMD_CREATE_CQ,
- PVRDMA_CMD_RESIZE_CQ,
- PVRDMA_CMD_DESTROY_CQ,
- PVRDMA_CMD_CREATE_QP,
- PVRDMA_CMD_MODIFY_QP,
- PVRDMA_CMD_QUERY_QP,
- PVRDMA_CMD_DESTROY_QP,
- PVRDMA_CMD_CREATE_UC,
- PVRDMA_CMD_DESTROY_UC,
- PVRDMA_CMD_CREATE_BIND,
- PVRDMA_CMD_DESTROY_BIND,
- PVRDMA_CMD_CREATE_SRQ,
- PVRDMA_CMD_MODIFY_SRQ,
- PVRDMA_CMD_QUERY_SRQ,
- PVRDMA_CMD_DESTROY_SRQ,
- PVRDMA_CMD_MAX,
-};
-
-enum {
- PVRDMA_CMD_FIRST_RESP = (1 << 31),
- PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
- PVRDMA_CMD_QUERY_PKEY_RESP,
- PVRDMA_CMD_CREATE_PD_RESP,
- PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
- PVRDMA_CMD_CREATE_MR_RESP,
- PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
- PVRDMA_CMD_CREATE_CQ_RESP,
- PVRDMA_CMD_RESIZE_CQ_RESP,
- PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
- PVRDMA_CMD_CREATE_QP_RESP,
- PVRDMA_CMD_MODIFY_QP_RESP,
- PVRDMA_CMD_QUERY_QP_RESP,
- PVRDMA_CMD_DESTROY_QP_RESP,
- PVRDMA_CMD_CREATE_UC_RESP,
- PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
- PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
- PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
- PVRDMA_CMD_CREATE_SRQ_RESP,
- PVRDMA_CMD_MODIFY_SRQ_RESP,
- PVRDMA_CMD_QUERY_SRQ_RESP,
- PVRDMA_CMD_DESTROY_SRQ_RESP,
- PVRDMA_CMD_MAX_RESP,
-};
-
-struct pvrdma_cmd_hdr {
- uint64_t response; /* Key for response lookup. */
- uint32_t cmd; /* PVRDMA_CMD_ */
- uint32_t reserved; /* Reserved. */
-};
-
-struct pvrdma_cmd_resp_hdr {
- uint64_t response; /* From cmd hdr. */
- uint32_t ack; /* PVRDMA_CMD_XXX_RESP */
- uint8_t err; /* Error. */
- uint8_t reserved[3]; /* Reserved. */
-};
-
-struct pvrdma_cmd_query_port {
- struct pvrdma_cmd_hdr hdr;
- uint8_t port_num;
- uint8_t reserved[7];
-};
-
-struct pvrdma_cmd_query_port_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_port_attr attrs;
-};
-
-struct pvrdma_cmd_query_pkey {
- struct pvrdma_cmd_hdr hdr;
- uint8_t port_num;
- uint8_t index;
- uint8_t reserved[6];
-};
-
-struct pvrdma_cmd_query_pkey_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint16_t pkey;
- uint8_t reserved[6];
-};
-
-struct pvrdma_cmd_create_uc {
- struct pvrdma_cmd_hdr hdr;
- uint32_t pfn; /* UAR page frame number */
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_uc_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t ctx_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_uc {
- struct pvrdma_cmd_hdr hdr;
- uint32_t ctx_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_pd {
- struct pvrdma_cmd_hdr hdr;
- uint32_t ctx_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_pd_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t pd_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_pd {
- struct pvrdma_cmd_hdr hdr;
- uint32_t pd_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_mr {
- struct pvrdma_cmd_hdr hdr;
- uint64_t start;
- uint64_t length;
- uint64_t pdir_dma;
- uint32_t pd_handle;
- uint32_t access_flags;
- uint32_t flags;
- uint32_t nchunks;
-};
-
-struct pvrdma_cmd_create_mr_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t mr_handle;
- uint32_t lkey;
- uint32_t rkey;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_mr {
- struct pvrdma_cmd_hdr hdr;
- uint32_t mr_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_cq {
- struct pvrdma_cmd_hdr hdr;
- uint64_t pdir_dma;
- uint32_t ctx_handle;
- uint32_t cqe;
- uint32_t nchunks;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_cq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t cq_handle;
- uint32_t cqe;
-};
-
-struct pvrdma_cmd_resize_cq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t cq_handle;
- uint32_t cqe;
-};
-
-struct pvrdma_cmd_resize_cq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t cqe;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_cq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t cq_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_srq {
- struct pvrdma_cmd_hdr hdr;
- uint64_t pdir_dma;
- uint32_t pd_handle;
- uint32_t nchunks;
- struct pvrdma_srq_attr attrs;
- uint8_t srq_type;
- uint8_t reserved[7];
-};
-
-struct pvrdma_cmd_create_srq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t srqn;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_modify_srq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t srq_handle;
- uint32_t attr_mask;
- struct pvrdma_srq_attr attrs;
-};
-
-struct pvrdma_cmd_query_srq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t srq_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_query_srq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_srq_attr attrs;
-};
-
-struct pvrdma_cmd_destroy_srq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t srq_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_qp {
- struct pvrdma_cmd_hdr hdr;
- uint64_t pdir_dma;
- uint32_t pd_handle;
- uint32_t send_cq_handle;
- uint32_t recv_cq_handle;
- uint32_t srq_handle;
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
- uint32_t lkey;
- uint32_t access_flags;
- uint16_t total_chunks;
- uint16_t send_chunks;
- uint16_t max_atomic_arg;
- uint8_t sq_sig_all;
- uint8_t qp_type;
- uint8_t is_srq;
- uint8_t reserved[3];
-};
-
-struct pvrdma_cmd_create_qp_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t qpn;
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
-};
-
-struct pvrdma_cmd_modify_qp {
- struct pvrdma_cmd_hdr hdr;
- uint32_t qp_handle;
- uint32_t attr_mask;
- struct pvrdma_qp_attr attrs;
-};
-
-struct pvrdma_cmd_query_qp {
- struct pvrdma_cmd_hdr hdr;
- uint32_t qp_handle;
- uint32_t attr_mask;
-};
-
-struct pvrdma_cmd_query_qp_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_qp_attr attrs;
-};
-
-struct pvrdma_cmd_destroy_qp {
- struct pvrdma_cmd_hdr hdr;
- uint32_t qp_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_qp_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t events_reported;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_bind {
- struct pvrdma_cmd_hdr hdr;
- uint32_t mtu;
- uint32_t vlan;
- uint32_t index;
- uint8_t new_gid[16];
- uint8_t gid_type;
- uint8_t reserved[3];
-};
-
-struct pvrdma_cmd_destroy_bind {
- struct pvrdma_cmd_hdr hdr;
- uint32_t index;
- uint8_t dest_gid[16];
- uint8_t reserved[4];
-};
-
-union pvrdma_cmd_req {
- struct pvrdma_cmd_hdr hdr;
- struct pvrdma_cmd_query_port query_port;
- struct pvrdma_cmd_query_pkey query_pkey;
- struct pvrdma_cmd_create_uc create_uc;
- struct pvrdma_cmd_destroy_uc destroy_uc;
- struct pvrdma_cmd_create_pd create_pd;
- struct pvrdma_cmd_destroy_pd destroy_pd;
- struct pvrdma_cmd_create_mr create_mr;
- struct pvrdma_cmd_destroy_mr destroy_mr;
- struct pvrdma_cmd_create_cq create_cq;
- struct pvrdma_cmd_resize_cq resize_cq;
- struct pvrdma_cmd_destroy_cq destroy_cq;
- struct pvrdma_cmd_create_qp create_qp;
- struct pvrdma_cmd_modify_qp modify_qp;
- struct pvrdma_cmd_query_qp query_qp;
- struct pvrdma_cmd_destroy_qp destroy_qp;
- struct pvrdma_cmd_create_bind create_bind;
- struct pvrdma_cmd_destroy_bind destroy_bind;
- struct pvrdma_cmd_create_srq create_srq;
- struct pvrdma_cmd_modify_srq modify_srq;
- struct pvrdma_cmd_query_srq query_srq;
- struct pvrdma_cmd_destroy_srq destroy_srq;
-};
-
-union pvrdma_cmd_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_cmd_query_port_resp query_port_resp;
- struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
- struct pvrdma_cmd_create_uc_resp create_uc_resp;
- struct pvrdma_cmd_create_pd_resp create_pd_resp;
- struct pvrdma_cmd_create_mr_resp create_mr_resp;
- struct pvrdma_cmd_create_cq_resp create_cq_resp;
- struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
- struct pvrdma_cmd_create_qp_resp create_qp_resp;
- struct pvrdma_cmd_query_qp_resp query_qp_resp;
- struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
- struct pvrdma_cmd_create_srq_resp create_srq_resp;
- struct pvrdma_cmd_query_srq_resp query_srq_resp;
-};
-
-#endif /* __PVRDMA_DEV_API_H__ */
diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
deleted file mode 100644
index acd4c8346d..0000000000
--- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PVRDMA_RING_H__
-#define __PVRDMA_RING_H__
-
-#include "standard-headers/linux/types.h"
-
-#define PVRDMA_INVALID_IDX -1 /* Invalid index. */
-
-struct pvrdma_ring {
- int prod_tail; /* Producer tail. */
- int cons_head; /* Consumer head. */
-};
-
-struct pvrdma_ring_state {
- struct pvrdma_ring tx; /* Tx ring. */
- struct pvrdma_ring rx; /* Rx ring. */
-};
-
-static inline int pvrdma_idx_valid(uint32_t idx, uint32_t max_elems)
-{
- /* Generates fewer instructions than a less-than. */
- return (idx & ~((max_elems << 1) - 1)) == 0;
-}
-
-static inline int32_t pvrdma_idx(int *var, uint32_t max_elems)
-{
- const unsigned int idx = atomic_read(var);
-
- if (pvrdma_idx_valid(idx, max_elems))
- return idx & (max_elems - 1);
- return PVRDMA_INVALID_IDX;
-}
-
-static inline void pvrdma_idx_ring_inc(int *var, uint32_t max_elems)
-{
- uint32_t idx = atomic_read(var) + 1; /* Increment. */
-
- idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. */
- atomic_set(var, idx);
-}
-
-static inline int32_t pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
- uint32_t max_elems, uint32_t *out_tail)
-{
- const uint32_t tail = atomic_read(&r->prod_tail);
- const uint32_t head = atomic_read(&r->cons_head);
-
- if (pvrdma_idx_valid(tail, max_elems) &&
- pvrdma_idx_valid(head, max_elems)) {
- *out_tail = tail & (max_elems - 1);
- return tail != (head ^ max_elems);
- }
- return PVRDMA_INVALID_IDX;
-}
-
-static inline int32_t pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
- uint32_t max_elems, uint32_t *out_head)
-{
- const uint32_t tail = atomic_read(&r->prod_tail);
- const uint32_t head = atomic_read(&r->cons_head);
-
- if (pvrdma_idx_valid(tail, max_elems) &&
- pvrdma_idx_valid(head, max_elems)) {
- *out_head = head & (max_elems - 1);
- return tail != head;
- }
- return PVRDMA_INVALID_IDX;
-}
-
-#endif /* __PVRDMA_RING_H__ */
diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
deleted file mode 100644
index 1677208a41..0000000000
--- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PVRDMA_VERBS_H__
-#define __PVRDMA_VERBS_H__
-
-#include "standard-headers/linux/types.h"
-
-union pvrdma_gid {
- uint8_t raw[16];
- struct {
- uint64_t subnet_prefix;
- uint64_t interface_id;
- } global;
-};
-
-enum pvrdma_link_layer {
- PVRDMA_LINK_LAYER_UNSPECIFIED,
- PVRDMA_LINK_LAYER_INFINIBAND,
- PVRDMA_LINK_LAYER_ETHERNET,
-};
-
-enum pvrdma_mtu {
- PVRDMA_MTU_256 = 1,
- PVRDMA_MTU_512 = 2,
- PVRDMA_MTU_1024 = 3,
- PVRDMA_MTU_2048 = 4,
- PVRDMA_MTU_4096 = 5,
-};
-
-static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
-{
- switch (mtu) {
- case PVRDMA_MTU_256: return 256;
- case PVRDMA_MTU_512: return 512;
- case PVRDMA_MTU_1024: return 1024;
- case PVRDMA_MTU_2048: return 2048;
- case PVRDMA_MTU_4096: return 4096;
- default: return -1;
- }
-}
-
-static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
-{
- switch (mtu) {
- case 256: return PVRDMA_MTU_256;
- case 512: return PVRDMA_MTU_512;
- case 1024: return PVRDMA_MTU_1024;
- case 2048: return PVRDMA_MTU_2048;
- case 4096:
- default: return PVRDMA_MTU_4096;
- }
-}
-
-enum pvrdma_port_state {
- PVRDMA_PORT_NOP = 0,
- PVRDMA_PORT_DOWN = 1,
- PVRDMA_PORT_INIT = 2,
- PVRDMA_PORT_ARMED = 3,
- PVRDMA_PORT_ACTIVE = 4,
- PVRDMA_PORT_ACTIVE_DEFER = 5,
-};
-
-enum pvrdma_port_cap_flags {
- PVRDMA_PORT_SM = 1 << 1,
- PVRDMA_PORT_NOTICE_SUP = 1 << 2,
- PVRDMA_PORT_TRAP_SUP = 1 << 3,
- PVRDMA_PORT_OPT_IPD_SUP = 1 << 4,
- PVRDMA_PORT_AUTO_MIGR_SUP = 1 << 5,
- PVRDMA_PORT_SL_MAP_SUP = 1 << 6,
- PVRDMA_PORT_MKEY_NVRAM = 1 << 7,
- PVRDMA_PORT_PKEY_NVRAM = 1 << 8,
- PVRDMA_PORT_LED_INFO_SUP = 1 << 9,
- PVRDMA_PORT_SM_DISABLED = 1 << 10,
- PVRDMA_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
- PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
- PVRDMA_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
- PVRDMA_PORT_CM_SUP = 1 << 16,
- PVRDMA_PORT_SNMP_TUNNEL_SUP = 1 << 17,
- PVRDMA_PORT_REINIT_SUP = 1 << 18,
- PVRDMA_PORT_DEVICE_MGMT_SUP = 1 << 19,
- PVRDMA_PORT_VENDOR_CLASS_SUP = 1 << 20,
- PVRDMA_PORT_DR_NOTICE_SUP = 1 << 21,
- PVRDMA_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
- PVRDMA_PORT_BOOT_MGMT_SUP = 1 << 23,
- PVRDMA_PORT_LINK_LATENCY_SUP = 1 << 24,
- PVRDMA_PORT_CLIENT_REG_SUP = 1 << 25,
- PVRDMA_PORT_IP_BASED_GIDS = 1 << 26,
- PVRDMA_PORT_CAP_FLAGS_MAX = PVRDMA_PORT_IP_BASED_GIDS,
-};
-
-enum pvrdma_port_width {
- PVRDMA_WIDTH_1X = 1,
- PVRDMA_WIDTH_4X = 2,
- PVRDMA_WIDTH_8X = 4,
- PVRDMA_WIDTH_12X = 8,
-};
-
-static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
-{
- switch (width) {
- case PVRDMA_WIDTH_1X: return 1;
- case PVRDMA_WIDTH_4X: return 4;
- case PVRDMA_WIDTH_8X: return 8;
- case PVRDMA_WIDTH_12X: return 12;
- default: return -1;
- }
-}
-
-enum pvrdma_port_speed {
- PVRDMA_SPEED_SDR = 1,
- PVRDMA_SPEED_DDR = 2,
- PVRDMA_SPEED_QDR = 4,
- PVRDMA_SPEED_FDR10 = 8,
- PVRDMA_SPEED_FDR = 16,
- PVRDMA_SPEED_EDR = 32,
-};
-
-struct pvrdma_port_attr {
- enum pvrdma_port_state state;
- enum pvrdma_mtu max_mtu;
- enum pvrdma_mtu active_mtu;
- uint32_t gid_tbl_len;
- uint32_t port_cap_flags;
- uint32_t max_msg_sz;
- uint32_t bad_pkey_cntr;
- uint32_t qkey_viol_cntr;
- uint16_t pkey_tbl_len;
- uint16_t lid;
- uint16_t sm_lid;
- uint8_t lmc;
- uint8_t max_vl_num;
- uint8_t sm_sl;
- uint8_t subnet_timeout;
- uint8_t init_type_reply;
- uint8_t active_width;
- uint8_t active_speed;
- uint8_t phys_state;
- uint8_t reserved[2];
-};
-
-struct pvrdma_global_route {
- union pvrdma_gid dgid;
- uint32_t flow_label;
- uint8_t sgid_index;
- uint8_t hop_limit;
- uint8_t traffic_class;
- uint8_t reserved;
-};
-
-struct pvrdma_grh {
- uint32_t version_tclass_flow;
- uint16_t paylen;
- uint8_t next_hdr;
- uint8_t hop_limit;
- union pvrdma_gid sgid;
- union pvrdma_gid dgid;
-};
-
-enum pvrdma_ah_flags {
- PVRDMA_AH_GRH = 1,
-};
-
-enum pvrdma_rate {
- PVRDMA_RATE_PORT_CURRENT = 0,
- PVRDMA_RATE_2_5_GBPS = 2,
- PVRDMA_RATE_5_GBPS = 5,
- PVRDMA_RATE_10_GBPS = 3,
- PVRDMA_RATE_20_GBPS = 6,
- PVRDMA_RATE_30_GBPS = 4,
- PVRDMA_RATE_40_GBPS = 7,
- PVRDMA_RATE_60_GBPS = 8,
- PVRDMA_RATE_80_GBPS = 9,
- PVRDMA_RATE_120_GBPS = 10,
- PVRDMA_RATE_14_GBPS = 11,
- PVRDMA_RATE_56_GBPS = 12,
- PVRDMA_RATE_112_GBPS = 13,
- PVRDMA_RATE_168_GBPS = 14,
- PVRDMA_RATE_25_GBPS = 15,
- PVRDMA_RATE_100_GBPS = 16,
- PVRDMA_RATE_200_GBPS = 17,
- PVRDMA_RATE_300_GBPS = 18,
-};
-
-struct pvrdma_ah_attr {
- struct pvrdma_global_route grh;
- uint16_t dlid;
- uint16_t vlan_id;
- uint8_t sl;
- uint8_t src_path_bits;
- uint8_t static_rate;
- uint8_t ah_flags;
- uint8_t port_num;
- uint8_t dmac[6];
- uint8_t reserved;
-};
-
-enum pvrdma_cq_notify_flags {
- PVRDMA_CQ_SOLICITED = 1 << 0,
- PVRDMA_CQ_NEXT_COMP = 1 << 1,
- PVRDMA_CQ_SOLICITED_MASK = PVRDMA_CQ_SOLICITED |
- PVRDMA_CQ_NEXT_COMP,
- PVRDMA_CQ_REPORT_MISSED_EVENTS = 1 << 2,
-};
-
-struct pvrdma_qp_cap {
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
- uint32_t reserved;
-};
-
-enum pvrdma_sig_type {
- PVRDMA_SIGNAL_ALL_WR,
- PVRDMA_SIGNAL_REQ_WR,
-};
-
-enum pvrdma_qp_type {
- PVRDMA_QPT_SMI,
- PVRDMA_QPT_GSI,
- PVRDMA_QPT_RC,
- PVRDMA_QPT_UC,
- PVRDMA_QPT_UD,
- PVRDMA_QPT_RAW_IPV6,
- PVRDMA_QPT_RAW_ETHERTYPE,
- PVRDMA_QPT_RAW_PACKET = 8,
- PVRDMA_QPT_XRC_INI = 9,
- PVRDMA_QPT_XRC_TGT,
- PVRDMA_QPT_MAX,
-};
-
-enum pvrdma_qp_create_flags {
- PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO = 1 << 0,
- PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
-};
-
-enum pvrdma_qp_attr_mask {
- PVRDMA_QP_STATE = 1 << 0,
- PVRDMA_QP_CUR_STATE = 1 << 1,
- PVRDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
- PVRDMA_QP_ACCESS_FLAGS = 1 << 3,
- PVRDMA_QP_PKEY_INDEX = 1 << 4,
- PVRDMA_QP_PORT = 1 << 5,
- PVRDMA_QP_QKEY = 1 << 6,
- PVRDMA_QP_AV = 1 << 7,
- PVRDMA_QP_PATH_MTU = 1 << 8,
- PVRDMA_QP_TIMEOUT = 1 << 9,
- PVRDMA_QP_RETRY_CNT = 1 << 10,
- PVRDMA_QP_RNR_RETRY = 1 << 11,
- PVRDMA_QP_RQ_PSN = 1 << 12,
- PVRDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13,
- PVRDMA_QP_ALT_PATH = 1 << 14,
- PVRDMA_QP_MIN_RNR_TIMER = 1 << 15,
- PVRDMA_QP_SQ_PSN = 1 << 16,
- PVRDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
- PVRDMA_QP_PATH_MIG_STATE = 1 << 18,
- PVRDMA_QP_CAP = 1 << 19,
- PVRDMA_QP_DEST_QPN = 1 << 20,
- PVRDMA_QP_ATTR_MASK_MAX = PVRDMA_QP_DEST_QPN,
-};
-
-enum pvrdma_qp_state {
- PVRDMA_QPS_RESET,
- PVRDMA_QPS_INIT,
- PVRDMA_QPS_RTR,
- PVRDMA_QPS_RTS,
- PVRDMA_QPS_SQD,
- PVRDMA_QPS_SQE,
- PVRDMA_QPS_ERR,
-};
-
-enum pvrdma_mig_state {
- PVRDMA_MIG_MIGRATED,
- PVRDMA_MIG_REARM,
- PVRDMA_MIG_ARMED,
-};
-
-enum pvrdma_mw_type {
- PVRDMA_MW_TYPE_1 = 1,
- PVRDMA_MW_TYPE_2 = 2,
-};
-
-struct pvrdma_srq_attr {
- uint32_t max_wr;
- uint32_t max_sge;
- uint32_t srq_limit;
- uint32_t reserved;
-};
-
-struct pvrdma_qp_attr {
- enum pvrdma_qp_state qp_state;
- enum pvrdma_qp_state cur_qp_state;
- enum pvrdma_mtu path_mtu;
- enum pvrdma_mig_state path_mig_state;
- uint32_t qkey;
- uint32_t rq_psn;
- uint32_t sq_psn;
- uint32_t dest_qp_num;
- uint32_t qp_access_flags;
- uint16_t pkey_index;
- uint16_t alt_pkey_index;
- uint8_t en_sqd_async_notify;
- uint8_t sq_draining;
- uint8_t max_rd_atomic;
- uint8_t max_dest_rd_atomic;
- uint8_t min_rnr_timer;
- uint8_t port_num;
- uint8_t timeout;
- uint8_t retry_cnt;
- uint8_t rnr_retry;
- uint8_t alt_port_num;
- uint8_t alt_timeout;
- uint8_t reserved[5];
- struct pvrdma_qp_cap cap;
- struct pvrdma_ah_attr ah_attr;
- struct pvrdma_ah_attr alt_ah_attr;
-};
-
-enum pvrdma_send_flags {
- PVRDMA_SEND_FENCE = 1 << 0,
- PVRDMA_SEND_SIGNALED = 1 << 1,
- PVRDMA_SEND_SOLICITED = 1 << 2,
- PVRDMA_SEND_INLINE = 1 << 3,
- PVRDMA_SEND_IP_CSUM = 1 << 4,
- PVRDMA_SEND_FLAGS_MAX = PVRDMA_SEND_IP_CSUM,
-};
-
-enum pvrdma_access_flags {
- PVRDMA_ACCESS_LOCAL_WRITE = 1 << 0,
- PVRDMA_ACCESS_REMOTE_WRITE = 1 << 1,
- PVRDMA_ACCESS_REMOTE_READ = 1 << 2,
- PVRDMA_ACCESS_REMOTE_ATOMIC = 1 << 3,
- PVRDMA_ACCESS_MW_BIND = 1 << 4,
- PVRDMA_ZERO_BASED = 1 << 5,
- PVRDMA_ACCESS_ON_DEMAND = 1 << 6,
- PVRDMA_ACCESS_FLAGS_MAX = PVRDMA_ACCESS_ON_DEMAND,
-};
-
-#endif /* __PVRDMA_VERBS_H__ */
diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h
index b53f8d7c8c..b72917073d 100644
--- a/include/standard-headers/drm/drm_fourcc.h
+++ b/include/standard-headers/drm/drm_fourcc.h
@@ -29,18 +29,123 @@
extern "C" {
#endif
+/**
+ * DOC: overview
+ *
+ * In the DRM subsystem, framebuffer pixel formats are described using the
+ * fourcc codes defined in `include/uapi/drm/drm_fourcc.h`. In addition to the
+ * fourcc code, a Format Modifier may optionally be provided, in order to
+ * further describe the buffer's format - for example tiling or compression.
+ *
+ * Format Modifiers
+ * ----------------
+ *
+ * Format modifiers are used in conjunction with a fourcc code, forming a
+ * unique fourcc:modifier pair. This format:modifier pair must fully define the
+ * format and data layout of the buffer, and should be the only way to describe
+ * that particular buffer.
+ *
+ * Having multiple fourcc:modifier pairs which describe the same layout should
+ * be avoided, as such aliases run the risk of different drivers exposing
+ * different names for the same data format, forcing userspace to understand
+ * that they are aliases.
+ *
+ * Format modifiers may change any property of the buffer, including the number
+ * of planes and/or the required allocation size. Format modifiers are
+ * vendor-namespaced, and as such the relationship between a fourcc code and a
+ * modifier is specific to the modifier being used. For example, some modifiers
+ * may preserve meaning - such as number of planes - from the fourcc code,
+ * whereas others may not.
+ *
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ * don't alias, otherwise two drivers might support the same format but use
+ * different aliases, preventing them from sharing buffers in an efficient
+ * format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ * see modifiers as opaque tokens they can check for equality and intersect.
+ * These users mustn't need to reason about the modifier value
+ * (i.e. they are not expected to extract information out of the modifier).
+ *
+ * Vendors should document their modifier usage in as much detail as
+ * possible, to ensure maximum compatibility across devices, drivers and
+ * applications.
+ *
+ * The authoritative list of format modifier codes is found in
+ * `include/uapi/drm/drm_fourcc.h`
+ *
+ * Open Source User Waiver
+ * -----------------------
+ *
+ * Because this is the authoritative source for pixel formats and modifiers
+ * referenced by GL, Vulkan extensions and other standards and hence used both
+ * by open source and closed source driver stacks, the usual requirement for an
+ * upstream in-kernel or open source userspace user does not apply.
+ *
+ * To ensure, as much as feasible, compatibility across stacks and avoid
+ * confusion with incompatible enumerations, stakeholders for all relevant
+ * driver stacks should approve additions.
+ */
+
#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
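As a quick, editorial sanity check of the packing performed by fourcc_code() (assuming this header is on the include path), the four ASCII characters land in successive bytes from least to most significant:

    #include <assert.h>
    #include <stdint.h>

    #include "standard-headers/drm/drm_fourcc.h"

    int main(void)
    {
        /* 'R' '1' '6' ' ' packs to 0x20363152, i.e. DRM_FORMAT_R16
         * (defined later in this header). */
        uint32_t code = fourcc_code('R', '1', '6', ' ');

        assert(code == 0x20363152);
        assert(code == DRM_FORMAT_R16);
        return 0;
    }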
-#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
+
+/* Reserve 0 for the invalid format specifier */
+#define DRM_FORMAT_INVALID 0
/* color index */
+#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
+#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
+#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
-/* 8 bpp Red */
+/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
+
+/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
+
+/* 1 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
+
+/* 8 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
-/* 16 bpp Red */
+/* 10 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
+
+/* 12 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
+
+/* 16 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
@@ -104,6 +209,30 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
+/*
+ * Floating point 64bpp RGB
+ * IEEE 754-2008 binary16 half-precision float
+ * [15:0] sign:exponent:mantissa 1:5:10
+ */
+#define DRM_FORMAT_XRGB16161616F fourcc_code('X', 'R', '4', 'H') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616F fourcc_code('X', 'B', '4', 'H') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
+/*
+ * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
+ * of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
+
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -111,6 +240,54 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
+#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
+#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
+
+/*
+ * In the packed Y2xx formats, the xx valid data bits of each component
+ * occupy the MSBs and the remaining 16-xx padding bits occupy the LSBs.
+ */
+#define DRM_FORMAT_Y210 fourcc_code('Y', '2', '1', '0') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 10:6:10:6:10:6:10:6 little endian per 2 Y pixels */
+#define DRM_FORMAT_Y212 fourcc_code('Y', '2', '1', '2') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 12:4:12:4:12:4:12:4 little endian per 2 Y pixels */
+#define DRM_FORMAT_Y216 fourcc_code('Y', '2', '1', '6') /* [63:0] Cr0:Y1:Cb0:Y0 16:16:16:16 little endian per 2 Y pixels */
+
+/*
+ * In the packed Y4xx formats, the xx valid data bits of each component
+ * occupy the MSBs and the remaining 16-xx padding bits occupy the LSBs,
+ * except for Y410.
+ */
+#define DRM_FORMAT_Y410 fourcc_code('Y', '4', '1', '0') /* [31:0] A:Cr:Y:Cb 2:10:10:10 little endian */
+#define DRM_FORMAT_Y412 fourcc_code('Y', '4', '1', '2') /* [63:0] A:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
+#define DRM_FORMAT_Y416 fourcc_code('Y', '4', '1', '6') /* [63:0] A:Cr:Y:Cb 16:16:16:16 little endian */
+
+#define DRM_FORMAT_XVYU2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] X:Cr:Y:Cb 2:10:10:10 little endian */
+#define DRM_FORMAT_XVYU12_16161616 fourcc_code('X', 'V', '3', '6') /* [63:0] X:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
+#define DRM_FORMAT_XVYU16161616 fourcc_code('X', 'V', '4', '8') /* [63:0] X:Cr:Y:Cb 16:16:16:16 little endian */
+
+/*
+ * packed YCbCr420 2x2 tiled formats
+ * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile
+ */
+/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0')
+/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0')
+
+/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2')
+/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
+
+/*
+ * 1-plane YUV 4:2:0
+ * In these formats, the component ordering is specified (Y, followed by U
+ * then V), but the exact Linear layout is undefined.
+ * These formats can only be used with a non-Linear modifier.
+ */
+#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8')
+#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0')
/*
* 2 plane RGB + A
@@ -139,6 +316,65 @@ extern "C" {
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian
+ * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
+ */
+#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y:x [10:6] little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
+ */
+#define DRM_FORMAT_P210 fourcc_code('P', '2', '1', '0') /* 2x1 subsampled Cr:Cb plane, 10 bit per channel */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y:x [10:6] little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
+ */
+#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y:x [12:4] little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
+ */
+#define DRM_FORMAT_P012 fourcc_code('P', '0', '1', '2') /* 2x2 subsampled Cr:Cb plane 12 bits per channel */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:Cb [16:16] little endian
+ */
+#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+
+/* 2 plane YCbCr420.
+ * 3 10 bit components and 2 padding bits packed into 4 bytes.
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
+ */
+#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
+
+/* 3 plane non-subsampled (444) YCbCr
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cb plane, [15:0] Cb:x [10:6] little endian
+ * index 2: Cr plane, [15:0] Cr:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0')
+
+/* 3 plane non-subsampled (444) YCrCb
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cr plane, [15:0] Cr:x [10:6] little endian
+ * index 2: Cb plane, [15:0] Cb:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
/*
* 3 plane YCbCr
@@ -173,7 +409,6 @@ extern "C" {
*/
/* Vendor Ids: */
-#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
@@ -183,10 +418,19 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
+#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
+#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
+
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+ (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
#define fourcc_mod_code(vendor, val) \
((((uint64_t)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
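A brief sketch (again assuming this header is included; not driver code) showing how the new vendor helpers round-trip with fourcc_mod_code():

    #include <assert.h>
    #include <stdint.h>

    #include "standard-headers/drm/drm_fourcc.h"

    int main(void)
    {
        /* Build an Intel-namespaced modifier and take it apart again;
         * fourcc_mod_code(INTEL, 1) is I915_FORMAT_MOD_X_TILED. */
        uint64_t mod = fourcc_mod_code(INTEL, 1);

        assert(fourcc_mod_get_vendor(mod) == DRM_FORMAT_MOD_VENDOR_INTEL);
        assert(fourcc_mod_is_vendor(mod, INTEL));
        assert(!fourcc_mod_is_vendor(mod, ARM));
        assert((mod & 0x00ffffffffffffffULL) == 1);
        return 0;
    }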
@@ -196,8 +440,33 @@ extern "C" {
* When adding a new token please document the layout with a code comment,
* similar to the fourcc codes above. drm_fourcc.h is considered the
* authoritative source for all of these.
+ *
+ * Generic modifier names:
+ *
+ * DRM_FORMAT_MOD_GENERIC_* definitions are used to provide vendor-neutral names
+ * for layouts which are common across multiple vendors. To preserve
+ * compatibility, in cases where a vendor-specific definition already exists and
+ * a generic name for it is desired, the common name is a purely symbolic alias
+ * and must use the same numerical value as the original definition.
+ *
+ * Note that generic names should only be used for modifiers which describe
+ * generic layouts (such as pixel re-ordering), which may have
+ * independently-developed support across multiple vendors.
+ *
+ * In future cases where a generic layout is identified before merging with a
+ * vendor-specific modifier, a new 'GENERIC' vendor or modifier using vendor
+ * 'NONE' could be considered. This should only be for obvious, exceptional
+ * cases to avoid polluting the 'GENERIC' namespace with modifiers which only
+ * apply to a single vendor.
+ *
+ * Generic names should not be used for cases where multiple hardware vendors
+ * have implementations of the same standardised compression scheme (such as
+ * AFBC). In those cases, all implementations should use the same format
+ * modifier(s), reflecting the vendor of the standard.
*/
+#define DRM_FORMAT_MOD_GENERIC_16_16_TILE DRM_FORMAT_MOD_SAMSUNG_16_16_TILE
+
/*
* Invalid Modifier
*
@@ -217,6 +486,16 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit, instead it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE 0
+
/* Intel framebuffer modifiers */
/*
@@ -227,9 +506,12 @@ extern "C" {
* a platform-dependent stride. On top of that the memory can apply
* platform-depending swizzling of some higher address bits into bit6.
*
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms this layout is highly platform specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
@@ -242,9 +524,12 @@ extern "C" {
* memory can apply platform-depending swizzling of some higher address bits
* into bit6.
*
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms this layout is highly platform specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
@@ -254,7 +539,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consits out of four 256 byte units, which are also laid
+ * Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -284,6 +569,139 @@ extern "C" {
#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
/*
+ * Intel color control surfaces (CCS) for Gen-12 render compression.
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
+
+/*
+ * Intel color control surfaces (CCS) for Gen-12 media compression
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be 64 bytes aligned. The clear color structure is 256 bits. The first 128 bits
+ * represent the Raw Clear Color Red, Green, Blue and Alpha components, each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
+/*
+ * Intel Tile 4 layout
+ *
+ * This is a tiled layout using 4KB tiles in a row-major layout. It has the same
+ * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
+ * only differs from Tile Y at the 256B granularity in between. At this
+ * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
+ * of 64B x 8 rows.
+ */
+#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 media compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
+ * pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths. The
+ * clear color is stored at plane index 1 and the pitch should be 64 bytes
+ * aligned. The format of the 256 bits of clear color data matches the one used
+ * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
+ * for details.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 media compression
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
+ * compression.
+ *
+ * The main surface is tile4 and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represent the Raw Clear Color Red, Green, Blue and Alpha components, each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -299,6 +717,15 @@ extern "C" {
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
/*
+ * Tiled, 16 (pixels) x 16 (lines) - sized macroblocks
+ *
+ * This is a simple tiled layout using tiles of 16x16 pixels in a row-major
+ * layout. For YCbCr formats Cb/Cr components are taken in such a way that
+ * they correspond to their 16x16 luma block.
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_16_16_TILE fourcc_mod_code(SAMSUNG, 2)
+
+/*
* Qualcomm Compressed Format
*
* Refers to a compressed variant of the base format that is compressed.
@@ -311,6 +738,28 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+/*
+ * Qualcomm Tiled Format
+ *
+ * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * Entire pixel data buffer is aligned with 4k(bytes).
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
+
+/*
+ * Qualcomm Alternate Tiled Format
+ *
+ * Alternate tiled format typically only used within GMEM.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
+
+
/* Vivante framebuffer modifiers */
/*
@@ -351,6 +800,35 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+/*
+ * Vivante TS (tile-status) buffer modifiers. They can be combined with all of
+ * the color buffer tiling modifiers defined above. When TS is present it's a
+ * separate buffer containing the clear/compression status of each tile. The
+ * modifiers are defined as VIVANTE_MOD_TS_c_s, where c is the color buffer
+ * tile size in bytes covered by one entry in the status buffer and s is the
+ * number of status bits per entry.
+ * We reserve the top 8 bits of the Vivante modifier space for tile status
+ * clear/compression modifiers, as future cores might add some more TS layout
+ * variations.
+ */
+#define VIVANTE_MOD_TS_64_4 (1ULL << 48)
+#define VIVANTE_MOD_TS_64_2 (2ULL << 48)
+#define VIVANTE_MOD_TS_128_4 (3ULL << 48)
+#define VIVANTE_MOD_TS_256_4 (4ULL << 48)
+#define VIVANTE_MOD_TS_MASK (0xfULL << 48)
+
+/*
+ * Vivante compression modifiers. Those depend on a TS modifier being present
+ * as the TS bits get reinterpreted as compression tags instead of simple
+ * clear markers when compression is enabled.
+ */
+#define VIVANTE_MOD_COMP_DEC400 (1ULL << 52)
+#define VIVANTE_MOD_COMP_MASK (0xfULL << 52)
+
+/* Masking out the extension bits will yield the base modifier. */
+#define VIVANTE_MOD_EXT_MASK (VIVANTE_MOD_TS_MASK | \
+ VIVANTE_MOD_COMP_MASK)
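For illustration, a sketch (not part of the header) of how an allocator might split a Vivante modifier into its base tiling layout and the TS/compression extension bits defined above; the helper names are hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helpers built on the masks above. */
static inline uint64_t vivante_mod_base_layout(uint64_t modifier)
{
    return modifier & ~VIVANTE_MOD_EXT_MASK;   /* drop TS + compression bits */
}

static inline bool vivante_mod_has_ts(uint64_t modifier)
{
    return (modifier & VIVANTE_MOD_TS_MASK) != 0;
}

static inline bool vivante_mod_is_dec400(uint64_t modifier)
{
    return (modifier & VIVANTE_MOD_COMP_MASK) == VIVANTE_MOD_COMP_DEC400;
}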
+
/* NVIDIA frame buffer modifiers */
/*
@@ -361,7 +839,113 @@ extern "C" {
#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
/*
- * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
+ * Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80,
+ * and Tegra GPUs starting with Tegra K1.
+ *
+ * Pixels are arranged in Groups of Bytes (GOBs). GOB size and layout varies
+ * based on the architecture generation. GOBs themselves are then arranged in
+ * 3D blocks, with the block dimensions (in terms of GOBs) always being a power
+ * of two, and hence expressible as their log2 equivalent (E.g., "2" represents
+ * a block depth or height of "4").
+ *
+ * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
+ * in full detail.
+ *
+ * Macro
+ * Bits Param Description
+ * ---- ----- -----------------------------------------------------------------
+ *
+ * 3:0 h log2(height) of each block, in GOBs. Placed here for
+ * compatibility with the existing
+ * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ * 4:4 - Must be 1, to indicate block-linear layout. Necessary for
+ * compatibility with the existing
+ * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ * 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block
+ * size). Must be zero.
+ *
+ * Note there is no log2(width) parameter. Some portions of the
+ * hardware support a block width of two gobs, but it is impractical
+ * to use due to lack of support elsewhere, and has no known
+ * benefits.
+ *
+ * 11:9 - Reserved (To support 2D-array textures with variable array stride
+ * in blocks, specified via log2(tile width in blocks)). Must be
+ * zero.
+ *
+ * 19:12 k Page Kind. This value directly maps to a field in the page
+ * tables of all GPUs >= NV50. It affects the exact layout of bits
+ * in memory and can be derived from the tuple
+ *
+ * (format, GPU model, compression type, samples per pixel)
+ *
+ * Where compression type is defined below. If GPU model were
+ * implied by the format modifier, format, or memory buffer, page
+ * kind would not need to be included in the modifier itself, but
+ * since the modifier should define the layout of the associated
+ * memory buffer independent from any device or other context, it
+ * must be included here.
+ *
+ * 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed
+ * starting with Fermi GPUs. Additionally, the mapping between page
+ * kind and bit layout has changed at various points.
+ *
+ * 0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping
+ * 1 = Gob Height 4, G80 - GT2XX Page Kind mapping
+ * 2 = Gob Height 8, Turing+ Page Kind mapping
+ * 3 = Reserved for future use.
+ *
+ * 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further
+ * bit remapping step that occurs at an even lower level than the
+ * page kind and block linear swizzles. This causes the layout of
+ * surfaces mapped in those SOC's GPUs to be incompatible with the
+ * equivalent mapping on other GPUs in the same system.
+ *
+ * 0 = Tegra K1 - Tegra Parker/TX2 Layout.
+ * 1 = Desktop GPU and Tegra Xavier+ Layout
+ *
+ * 25:23 c Lossless Framebuffer Compression type.
+ *
+ * 0 = none
+ * 1 = ROP/3D, layout 1, exact compression format implied by Page
+ * Kind field
+ * 2 = ROP/3D, layout 2, exact compression format implied by Page
+ * Kind field
+ * 3 = CDE horizontal
+ * 4 = CDE vertical
+ * 5 = Reserved for future use
+ * 6 = Reserved for future use
+ * 7 = Reserved for future use
+ *
+ * 55:26 - Reserved for future use. Must be zero.
+ */
+#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
+ fourcc_mod_code(NVIDIA, (0x10 | \
+ ((h) & 0xf) | \
+ (((k) & 0xff) << 12) | \
+ (((g) & 0x3) << 20) | \
+ (((s) & 0x1) << 22) | \
+ (((c) & 0x7) << 23)))
+
+/* To grandfather in prior block linear format modifiers to the above layout,
+ * the page kind "0", which corresponds to "pitch/linear" and hence is unusable
+ * with block-linear layouts, is remapped within drivers to the value 0xfe,
+ * which corresponds to the "generic" kind used for simple single-sample
+ * uncompressed color formats on Fermi - Volta GPUs.
+ */
+static inline uint64_t
+drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
+{
+ if (!(modifier & 0x10) || (modifier & (0xff << 12)))
+ return modifier;
+ else
+ return modifier | (0xfe << 12);
+}
+
+/*
+ * 16Bx2 Block Linear layout, used by Tegra K1 and later
*
* Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
* vertically by a power of 2 (1 to 32 GOBs) to form a block.
@@ -382,20 +966,20 @@ extern "C" {
* in full detail.
*/
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
- fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v))
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
- fourcc_mod_code(NVIDIA, 0x10)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
- fourcc_mod_code(NVIDIA, 0x11)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
- fourcc_mod_code(NVIDIA, 0x12)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
- fourcc_mod_code(NVIDIA, 0x13)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
- fourcc_mod_code(NVIDIA, 0x14)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
- fourcc_mod_code(NVIDIA, 0x15)
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5)
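As a sketch of the relationship between the legacy 16Bx2 names above and the generalized macro, the fragment below builds a modifier directly and shows how drm_fourcc_canonicalize_nvidia_format_mod() rewrites the zero page kind of the legacy aliases; the parameter values are illustrative only, not a recommendation for any particular GPU.

static void nvidia_modifier_example(void)
{
    /* Illustrative values: no compression (c=0), desktop/Xavier+ sector
     * layout (s=1), Turing+ page-kind generation (g=2), "generic" page kind
     * 0xfe, blocks 16 GOBs tall (h=4, i.e. log2(16)). */
    uint64_t turing_mod = DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0xfe, 4);

    /* A legacy alias carries page kind 0; canonicalizing maps it to 0xfe, so
     * canon == DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0xfe, 2). */
    uint64_t legacy = DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB;
    uint64_t canon  = drm_fourcc_canonicalize_nvidia_format_mod(legacy);

    (void)turing_mod;
    (void)canon;
}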
/*
* Some Broadcom modifiers take parameters, for example the number of
@@ -457,6 +1041,10 @@ extern "C" {
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
+ *
+ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
+ * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
+ * wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
@@ -508,8 +1096,25 @@ extern "C" {
* AFBC has several features which may be supported and/or used, which are
* represented using bits in the modifier. Not all combinations are valid,
* and different devices or use-cases may support different combinations.
+ *
+ * Further information on the use of AFBC modifiers can be found in
+ * Documentation/gpu/afbc.rst
*/
-#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
+
+/*
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
+ * modifiers) denote the category for modifiers. Currently we have three
+ * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
+ * sixteen different categories.
+ */
+#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
+ fourcc_mod_code(ARM, ((uint64_t)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFBC 0x00
+#define DRM_FORMAT_MOD_ARM_TYPE_MISC 0x01
+
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFBC, __afbc_mode)
/*
* AFBC superblock size
@@ -517,10 +1122,18 @@ extern "C" {
* Indicates the superblock size(s) used for the AFBC buffer. The buffer
* size (in pixels) must be aligned to a multiple of the superblock size.
* Four lowest significant bits(LSBs) are reserved for block size.
+ *
+ * Where one superblock size is specified, it applies to all planes of the
+ * buffer (e.g. 16x16, 32x8). When multiple superblock sizes are specified,
+ * the first applies to the Luma plane and the second applies to the Chroma
+ * plane(s). e.g. (32x8_64x4 means 32x8 Luma, with 64x4 Chroma).
+ * Multiple superblock sizes are only valid for multi-plane YCbCr formats.
*/
#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_64x4 (3ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4 (4ULL)
/*
* AFBC lossless colorspace transform
@@ -580,6 +1193,376 @@ extern "C" {
*/
#define AFBC_FORMAT_MOD_SC (1ULL << 9)
+/*
+ * AFBC double-buffer
+ *
+ * Indicates that the buffer is allocated in a layout safe for front-buffer
+ * rendering.
+ */
+#define AFBC_FORMAT_MOD_DB (1ULL << 10)
+
+/*
+ * AFBC buffer content hints
+ *
+ * Indicates that the buffer includes per-superblock content hints.
+ */
+#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
+
+/* AFBC uncompressed storage mode
+ *
+ * Indicates that the buffer is using AFBC uncompressed storage mode.
+ * In this mode all superblock payloads in the buffer use the uncompressed
+ * storage mode, which is usually only used for data which cannot be compressed.
+ * The buffer layout is the same as for AFBC buffers without USM set, this only
+ * affects the storage mode of the individual superblocks. Note that even a
+ * buffer without USM set may use uncompressed storage mode for some or all
+ * superblocks, USM just guarantees it for all.
+ */
+#define AFBC_FORMAT_MOD_USM (1ULL << 12)
+
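A short sketch, for illustration only, of how the pieces above combine into one complete AFBC modifier; which feature bits are actually valid together is device-dependent, as noted in the introduction above.

/* 16x16 superblocks plus two of the optional feature bits defined above;
 * hardware support for this particular combination must be checked separately. */
static const uint64_t example_afbc_modifier =
    DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
                            AFBC_FORMAT_MOD_SC |
                            AFBC_FORMAT_MOD_USM);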
+/*
+ * Arm Fixed-Rate Compression (AFRC) modifiers
+ *
+ * AFRC is a proprietary fixed rate image compression protocol and format,
+ * designed to provide guaranteed bandwidth and memory footprint
+ * reductions in graphics and media use-cases.
+ *
+ * AFRC buffers consist of one or more planes, with the same components
+ * and meaning as an uncompressed buffer using the same pixel format.
+ *
+ * Within each plane, the pixel/luma/chroma values are grouped into
+ * "coding unit" blocks which are individually compressed to a
+ * fixed size (in bytes). All coding units within a given plane of a buffer
+ * store the same number of values, and have the same compressed size.
+ *
+ * The coding unit size is configurable, allowing different rates of compression.
+ *
+ * The start of each AFRC buffer plane must be aligned to an alignment granule which
+ * depends on the coding unit size.
+ *
+ * Coding Unit Size Plane Alignment
+ * ---------------- ---------------
+ * 16 bytes 1024 bytes
+ * 24 bytes 512 bytes
+ * 32 bytes 2048 bytes
+ *
+ * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
+ * to a multiple of the paging tile dimensions.
+ * The dimensions of each paging tile depend on whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Layout Paging Tile Width Paging Tile Height
+ * ------ ----------------- ------------------
+ * SCAN 16 coding units 4 coding units
+ * ROT 8 coding units 8 coding units
+ *
+ * The dimensions of each coding unit depend on the number of components
+ * in the compressed plane and whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Number of Components in Plane Layout Coding Unit Width Coding Unit Height
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 SCAN 16 samples 4 samples
+ * Example: 16x4 luma samples in a 'Y' plane
+ * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 ROT 8 samples 8 samples
+ * Example: 8x8 luma samples in a 'Y' plane
+ * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 2 DONT CARE 8 samples 4 samples
+ * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 3 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer without alpha
+ * ----------------------------- --------- ----------------- ------------------
+ * 4 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer with alpha
+ */
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
+
+#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
+
+/*
+ * AFRC coding unit size modifier.
+ *
+ * Indicates the number of bytes used to store each compressed coding unit for
+ * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
+ * is the same for both Cb and Cr, which may be stored in separate planes.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
+ * each compressed coding unit in the first plane of the buffer. For RGBA buffers
+ * this is the only plane, while for semi-planar and fully-planar YUV buffers,
+ * this corresponds to the luma plane.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
+ * each compressed coding unit in the second and third planes in the buffer.
+ * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
+ *
+ * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
+ * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
+ * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
+ */
+#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
+#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
+
+#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
+#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
+
+/*
+ * AFRC scanline memory layout.
+ *
+ * Indicates if the buffer uses the scanline-optimised layout
+ * for an AFRC encoded buffer, otherwise, it uses the rotation-optimised layout.
+ * The memory layout is the same for all planes.
+ */
+#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
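For illustration, the following sketch combines the fields above into one AFRC modifier for a semi-planar YUV buffer; the particular coding unit sizes are an arbitrary example, not a recommendation.

/* Scan-optimised layout, 16-byte luma coding units, 24-byte chroma coding
 * units (Cb and Cr share one plane in a semi-planar buffer). */
static const uint64_t example_afrc_modifier =
    DRM_FORMAT_MOD_ARM_AFRC(AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_16) |
                            AFRC_FORMAT_MOD_CU_SIZE_P12(AFRC_FORMAT_MOD_CU_SIZE_24) |
                            AFRC_FORMAT_MOD_LAYOUT_SCAN);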
+
+/*
+ * Arm 16x16 Block U-Interleaved modifier
+ *
+ * This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
+ * into 16x16 pixel blocks. Blocks are stored linearly in order, but pixels
+ * in the block are reordered.
+ */
+#define DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL)
+
+/*
+ * Allwinner tiled modifier
+ *
+ * This tiling mode is implemented by the VPU found on all Allwinner platforms,
+ * codenamed sunxi. It is associated with a YUV format that uses either 2 or 3
+ * planes.
+ *
+ * With this tiling, the luminance samples are disposed in tiles representing
+ * 32x32 pixels and the chrominance samples in tiles representing 32x64 pixels.
+ * The pixel order in each tile is linear and the tiles are disposed linearly,
+ * both in row-major order.
+ */
+#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1)
+
+/*
+ * Amlogic Video Framebuffer Compression modifiers
+ *
+ * Amlogic uses a proprietary lossless image compression protocol and format
+ * for their hardware video codec accelerators, either video decoders or
+ * video input encoders.
+ *
+ * It considerably reduces memory bandwidth while writing and reading
+ * frames in memory.
+ *
+ * The underlying storage is considered to be 3 components, 8-bit or 10-bit
+ * per component YCbCr 420, single plane:
+ * - DRM_FORMAT_YUV420_8BIT
+ * - DRM_FORMAT_YUV420_10BIT
+ *
+ * The first 8 bits of the mode define the layout, then the following 8 bits
+ * define the options changing the layout.
+ *
+ * Not all combinations are valid, and different SoCs may support different
+ * combinations of layout and options.
+ */
+#define __fourcc_mod_amlogic_layout_mask 0xff
+#define __fourcc_mod_amlogic_options_shift 8
+#define __fourcc_mod_amlogic_options_mask 0xff
+
+#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
+ fourcc_mod_code(AMLOGIC, \
+ ((__layout) & __fourcc_mod_amlogic_layout_mask) | \
+ (((__options) & __fourcc_mod_amlogic_options_mask) \
+ << __fourcc_mod_amlogic_options_shift))
+
+/* Amlogic FBC Layouts */
+
+/*
+ * Amlogic FBC Basic Layout
+ *
+ * The basic layout is composed of:
+ * - a body content organized in 64x32 superblocks with 4096 bytes per
+ * superblock in default mode.
+ * - a 32 bytes per 128x64 header block
+ *
+ * This layout is transferable between Amlogic SoCs supporting this modifier.
+ */
+#define AMLOGIC_FBC_LAYOUT_BASIC (1ULL)
+
+/*
+ * Amlogic FBC Scatter Memory layout
+ *
+ * Indicates the header contains IOMMU references to the compressed
+ * frames content to optimize memory access and layout.
+ *
+ * In this mode, only the header memory address is needed, thus the
+ * content memory organization is tied to the current producer
+ * execution and cannot be saved, dumped or transferred between
+ * Amlogic SoCs supporting this modifier.
+ *
+ * Due to the nature of the layout, these buffers are not expected to
+ * be accessible by the user-space clients, but only accessible by the
+ * hardware producers and consumers.
+ *
+ * The user-space clients should expect a failure while trying to mmap
+ * the DMA-BUF handle returned by the producer.
+ */
+#define AMLOGIC_FBC_LAYOUT_SCATTER (2ULL)
+
+/* Amlogic FBC Layout Options Bit Mask */
+
+/*
+ * Amlogic FBC Memory Saving mode
+ *
+ * Indicates that the storage is packed when the pixel size is a multiple of
+ * word boundaries, i.e. 8-bit content should be stored in this mode to save
+ * allocation memory.
+ *
+ * This mode reduces body layout to 3072 bytes per 64x32 superblock with
+ * the basic layout and 3200 bytes per 64x32 superblock combined with
+ * the scatter layout.
+ */
+#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
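Sketch of a complete Amlogic FBC modifier built from the layout and option values above; whether a given SoC accepts this combination is device-specific, as the introduction notes.

/* Basic (transferable) layout with the memory-saving option, as would suit an
 * 8-bit DRM_FORMAT_YUV420_8BIT frame. */
static const uint64_t example_amlogic_modifier =
    DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC,
                               AMLOGIC_FBC_OPTION_MEM_SAVING);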
+
+/*
+ * AMD modifiers
+ *
+ * Memory layout:
+ *
+ * without DCC:
+ * - main surface
+ *
+ * with DCC & without DCC_RETILE:
+ * - main surface in plane 0
+ * - DCC surface in plane 1 (RB-aligned, pipe-aligned if DCC_PIPE_ALIGN is set)
+ *
+ * with DCC & DCC_RETILE:
+ * - main surface in plane 0
+ * - displayable DCC surface in plane 1 (not RB-aligned & not pipe-aligned)
+ * - pipe-aligned DCC surface in plane 2 (RB-aligned & pipe-aligned)
+ *
+ * For multi-plane formats the above surfaces get merged into one plane for
+ * each format plane, based on the required alignment only.
+ *
+ * Bits Parameter Notes
+ * ----- ------------------------ ---------------------------------------------
+ *
+ * 7:0 TILE_VERSION Values are AMD_FMT_MOD_TILE_VER_*
+ * 12:8 TILE Values are AMD_FMT_MOD_TILE_<version>_*
+ * 13 DCC
+ * 14 DCC_RETILE
+ * 15 DCC_PIPE_ALIGN
+ * 16 DCC_INDEPENDENT_64B
+ * 17 DCC_INDEPENDENT_128B
+ * 19:18 DCC_MAX_COMPRESSED_BLOCK Values are AMD_FMT_MOD_DCC_BLOCK_*
+ * 20 DCC_CONSTANT_ENCODE
+ * 23:21 PIPE_XOR_BITS Only for some chips
+ * 26:24 BANK_XOR_BITS Only for some chips
+ * 29:27 PACKERS Only for some chips
+ * 32:30 RB Only for some chips
+ * 35:33 PIPE Only for some chips
+ * 55:36 - Reserved for future use, must be zero
+ */
+#define AMD_FMT_MOD fourcc_mod_code(AMD, 0)
+
+#define IS_AMD_FMT_MOD(val) (((val) >> 56) == DRM_FORMAT_MOD_VENDOR_AMD)
+
+/* Reserve 0 for GFX8 and older */
+#define AMD_FMT_MOD_TILE_VER_GFX9 1
+#define AMD_FMT_MOD_TILE_VER_GFX10 2
+#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+#define AMD_FMT_MOD_TILE_VER_GFX11 4
+
+/*
+ * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
+ * version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_S 9
+
+/*
+ * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
+ * GFX9 as canonical version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
+#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
+#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+
+#define AMD_FMT_MOD_DCC_BLOCK_64B 0
+#define AMD_FMT_MOD_DCC_BLOCK_128B 1
+#define AMD_FMT_MOD_DCC_BLOCK_256B 2
+
+#define AMD_FMT_MOD_TILE_VERSION_SHIFT 0
+#define AMD_FMT_MOD_TILE_VERSION_MASK 0xFF
+#define AMD_FMT_MOD_TILE_SHIFT 8
+#define AMD_FMT_MOD_TILE_MASK 0x1F
+
+/* Whether DCC compression is enabled. */
+#define AMD_FMT_MOD_DCC_SHIFT 13
+#define AMD_FMT_MOD_DCC_MASK 0x1
+
+/*
+ * Whether to include two DCC surfaces, one which is rb & pipe aligned, and
+ * one which is not-aligned.
+ */
+#define AMD_FMT_MOD_DCC_RETILE_SHIFT 14
+#define AMD_FMT_MOD_DCC_RETILE_MASK 0x1
+
+/* Only set if DCC_RETILE = false */
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_SHIFT 15
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_MASK 0x1
+
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_SHIFT 16
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_MASK 0x1
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_SHIFT 17
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_MASK 0x1
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_SHIFT 18
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
+
+/*
+ * DCC supports embedding some clear colors directly in the DCC surface.
+ * However, on older GPUs the rendering HW ignores the embedded clear color
+ * and prefers the driver provided color. This necessitates doing a fastclear
+ * eliminate operation before a process transfers control.
+ *
+ * If this bit is set that means the fastclear eliminate is not needed for these
+ * embeddable colors.
+ */
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_SHIFT 20
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_MASK 0x1
+
+/*
+ * The below fields are for accounting for per GPU differences. These are only
+ * relevant for GFX9 and later and if the tile field is *_X/_T.
+ *
+ * PIPE_XOR_BITS = always needed
+ * BANK_XOR_BITS = only for TILE_VER_GFX9
+ * PACKERS = only for TILE_VER_GFX10_RBPLUS
+ * RB = only for TILE_VER_GFX9 & DCC
+ * PIPE = only for TILE_VER_GFX9 & DCC & (DCC_RETILE | DCC_PIPE_ALIGN)
+ */
+#define AMD_FMT_MOD_PIPE_XOR_BITS_SHIFT 21
+#define AMD_FMT_MOD_PIPE_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_BANK_XOR_BITS_SHIFT 24
+#define AMD_FMT_MOD_BANK_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_PACKERS_SHIFT 27
+#define AMD_FMT_MOD_PACKERS_MASK 0x7
+#define AMD_FMT_MOD_RB_SHIFT 30
+#define AMD_FMT_MOD_RB_MASK 0x7
+#define AMD_FMT_MOD_PIPE_SHIFT 33
+#define AMD_FMT_MOD_PIPE_MASK 0x7
+
+#define AMD_FMT_MOD_SET(field, value) \
+ ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
+#define AMD_FMT_MOD_GET(field, value) \
+ (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
+#define AMD_FMT_MOD_CLEAR(field) \
+ (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
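The SET/GET/CLEAR helpers are easiest to see in use; the sketch below builds a hypothetical GFX9 modifier and reads fields back. The specific XOR-bit values are made up for illustration and depend on the actual GPU configuration.

static void amd_modifier_example(void)
{
    uint64_t mod = AMD_FMT_MOD |
                   AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                   AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                   AMD_FMT_MOD_SET(DCC, 1) |
                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
                   AMD_FMT_MOD_SET(PIPE_XOR_BITS, 2) |   /* example value only */
                   AMD_FMT_MOD_SET(BANK_XOR_BITS, 3);    /* example value only */

    if (IS_AMD_FMT_MOD(mod) &&
        AMD_FMT_MOD_GET(TILE_VERSION, mod) == AMD_FMT_MOD_TILE_VER_GFX9) {
        /* AMD_FMT_MOD_CLEAR() produces a mask that zeroes a single field. */
        uint64_t without_dcc = mod & AMD_FMT_MOD_CLEAR(DCC);
        (void)without_dcc;
    }
}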
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/standard-headers/linux/const.h b/include/standard-headers/linux/const.h
new file mode 100644
index 0000000000..1eb84b5087
--- /dev/null
+++ b/include/standard-headers/linux/const.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* const.h: Macros for dealing with constants. */
+
+#ifndef _LINUX_CONST_H
+#define _LINUX_CONST_H
+
+/* Some constant macros are used in both assembler and
+ * C code. Therefore we cannot annotate them always with
+ * 'UL' and other type specifiers unilaterally. We
+ * use the following macros to deal with this.
+ *
+ * Similarly, _AT() will cast an expression with a type in C, but
+ * leave it unchanged in asm.
+ */
+
+#ifdef __ASSEMBLY__
+#define _AC(X,Y) X
+#define _AT(T,X) X
+#else
+#define __AC(X,Y) (X##Y)
+#define _AC(X,Y) __AC(X,Y)
+#define _AT(T,X) ((T)(X))
+#endif
+
+#define _UL(x) (_AC(x, UL))
+#define _ULL(x) (_AC(x, ULL))
+
+#define _BITUL(x) (_UL(1) << (x))
+#define _BITULL(x) (_ULL(1) << (x))
+
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#endif /* _LINUX_CONST_H */
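A small usage sketch for the macros above; the names and values are arbitrary and only meant to show the arithmetic.

#define EXAMPLE_ALIGN    64
#define EXAMPLE_BYTES    __ALIGN_KERNEL(1000, EXAMPLE_ALIGN)      /* -> 1024  */
#define EXAMPLE_ENTRIES  __KERNEL_DIV_ROUND_UP(EXAMPLE_BYTES, 16) /* -> 64    */
#define EXAMPLE_FLAG     _BITUL(3)                                /* 1UL << 3 */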
diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h
index 57ffcb5341..01503784d2 100644
--- a/include/standard-headers/linux/ethtool.h
+++ b/include/standard-headers/linux/ethtool.h
@@ -16,7 +16,7 @@
#include "net/eth.h"
-#include "standard-headers/linux/kernel.h"
+#include "standard-headers/linux/const.h"
#include "standard-headers/linux/types.h"
#include "standard-headers/linux/if_ether.h"
@@ -26,6 +26,14 @@
* have the same layout for 32-bit and 64-bit userland.
*/
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
/**
* struct ethtool_cmd - DEPRECATED, link control and status
* This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
* and other link features that the link partner advertised
* through autonegotiation; 0 if unknown or not applicable.
* Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* The link speed in Mbps is split between @speed and @speed_hi. Use
* the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -91,10 +100,6 @@
* %ETHTOOL_GSET to get the current values before making specific
* changes and then applying them with %ETHTOOL_SSET.
*
- * Drivers that implement set_settings() should validate all fields
- * other than @cmd that are not described as read-only or deprecated,
- * and must ignore all fields described as read-only.
- *
* Deprecated fields should be ignored by both users and drivers.
*/
struct ethtool_cmd {
@@ -154,11 +159,14 @@ static inline uint32_t ethtool_cmd_speed(const struct ethtool_cmd *ep)
* in its bus driver structure (e.g. pci_driver::name). Must
* not be an empty string.
* @version: Driver version string; may be an empty string
- * @fw_version: Firmware version string; may be an empty string
- * @erom_version: Expansion ROM version string; may be an empty string
+ * @fw_version: Firmware version string; driver defined; may be an
+ * empty string
+ * @erom_version: Expansion ROM version string; driver defined; may be
+ * an empty string
* @bus_info: Device bus address. This should match the dev_name()
* string for the underlying bus device, if there is one. May be
* an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
* @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
* %ETHTOOL_SPFLAGS commands; also the number of strings in the
* %ETH_SS_PRIV_FLAGS set
@@ -173,10 +181,6 @@ static inline uint32_t ethtool_cmd_speed(const struct ethtool_cmd *ep)
*
* Users can use the %ETHTOOL_GSSET_INFO command to get the number of
* strings in any string set (from Linux 2.6.34).
- *
- * Drivers should set at most @driver, @version, @fw_version and
- * @bus_info in their get_drvinfo() implementation. The ethtool
- * core fills in the other fields using other driver operations.
*/
struct ethtool_drvinfo {
uint32_t cmd;
@@ -225,9 +229,10 @@ enum tunable_id {
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
+ ETHTOOL_TX_COPYBREAK_BUF_SIZE,
/*
* Add your fresh new tunable attribute above and remember to update
- * tunable_strings[] in net/core/ethtool.c
+ * tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_TUNABLE_COUNT,
};
@@ -250,18 +255,48 @@ struct ethtool_tunable {
uint32_t id;
uint32_t type_id;
uint32_t len;
- void *data[0];
+ void *data[];
};
#define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff
#define DOWNSHIFT_DEV_DISABLE 0
+/* Time in msecs after which link is reported as down
+ * 0 = lowest time supported by the PHY
+ * 0xff = off, link down detection according to standard
+ */
+#define ETHTOOL_PHY_FAST_LINK_DOWN_ON 0
+#define ETHTOOL_PHY_FAST_LINK_DOWN_OFF 0xff
+
+/* Energy Detect Power Down (EDPD) is a feature supported by some PHYs, where
+ * the PHY's RX & TX blocks are put into a low-power mode when there is no
+ * link detected (typically cable is un-plugged). For RX, only a minimal
+ * link-detection is available, and for TX the PHY wakes up to send link pulses
+ * to avoid any lock-ups in case the peer PHY may also be running in EDPD mode.
+ *
+ * Some PHYs may support configuration of the wake-up interval for TX pulses,
+ * and some PHYs may support only disabling TX pulses entirely. For the latter
+ * a special value is required (ETHTOOL_PHY_EDPD_NO_TX) so that this can be
+ * configured from userspace (should the user want it).
+ *
+ * The interval units for TX wake-up are in milliseconds, since this should
+ * cover a reasonable range of intervals:
+ * - from 1 millisecond, which does not sound like much of a power-saver
+ * - to ~65 seconds which is quite a lot to wait for a link to come up when
+ * plugging a cable
+ */
+#define ETHTOOL_PHY_EDPD_DFLT_TX_MSECS 0xffff
+#define ETHTOOL_PHY_EDPD_NO_TX 0xfffe
+#define ETHTOOL_PHY_EDPD_DISABLE 0
+
enum phy_tunable_id {
ETHTOOL_PHY_ID_UNSPEC,
ETHTOOL_PHY_DOWNSHIFT,
+ ETHTOOL_PHY_FAST_LINK_DOWN,
+ ETHTOOL_PHY_EDPD,
/*
* Add your fresh new phy tunable attribute above and remember to update
- * phy_tunable_strings[] in net/core/ethtool.c
+ * phy_tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_PHY_TUNABLE_COUNT,
};
@@ -285,7 +320,7 @@ struct ethtool_regs {
uint32_t cmd;
uint32_t version;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
/**
@@ -311,7 +346,7 @@ struct ethtool_eeprom {
uint32_t magic;
uint32_t offset;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
/**
@@ -330,6 +365,7 @@ struct ethtool_eeprom {
* @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
* its tx lpi (after reaching 'idle' state). Effective only when eee
* was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
*/
struct ethtool_eee {
uint32_t cmd;
@@ -348,6 +384,7 @@ struct ethtool_eee {
* @cmd: %ETHTOOL_GMODULEINFO
* @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
* @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* This structure is used to return the information to
* properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -553,6 +590,70 @@ struct ethtool_pauseparam {
uint32_t tx_pause;
};
+/* Link extended state */
+enum ethtool_link_ext_state {
+ ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_STATE_NO_CABLE,
+ ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
+ ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+ ETHTOOL_LINK_EXT_STATE_MODULE,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
+enum ethtool_link_ext_substate_autoneg {
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+ */
+enum ethtool_link_ext_substate_link_training {
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+ */
+enum ethtool_link_ext_substate_link_logical_mismatch {
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+ */
+enum ethtool_link_ext_substate_bad_signal_integrity {
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
+enum ethtool_link_ext_substate_cable_issue {
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */
+enum ethtool_link_ext_substate_module {
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1,
+};
+
#define ETH_GSTRING_LEN 32
/**
@@ -565,8 +666,23 @@ struct ethtool_pauseparam {
* now deprecated
* @ETH_SS_FEATURES: Device feature names
* @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
+ * @ETH_SS_TUNABLES: tunable names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
* @ETH_SS_PHY_TUNABLES: PHY tunable names
+ * @ETH_SS_LINK_MODES: link mode names
+ * @ETH_SS_MSG_CLASSES: debug message class names
+ * @ETH_SS_WOL_MODES: wake-on-lan modes
+ * @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags
+ * @ETH_SS_TS_TX_TYPES: timestamping Tx types
+ * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
+ * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ * @ETH_SS_STATS_STD: standardized stats
+ * @ETH_SS_STATS_ETH_PHY: names of IEEE 802.3 PHY statistics
+ * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
+ * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
+ * @ETH_SS_STATS_RMON: names of RMON statistics
+ *
+ * @ETH_SS_COUNT: number of defined string sets
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -578,6 +694,132 @@ enum ethtool_stringset {
ETH_SS_TUNABLES,
ETH_SS_PHY_STATS,
ETH_SS_PHY_TUNABLES,
+ ETH_SS_LINK_MODES,
+ ETH_SS_MSG_CLASSES,
+ ETH_SS_WOL_MODES,
+ ETH_SS_SOF_TIMESTAMPING,
+ ETH_SS_TS_TX_TYPES,
+ ETH_SS_TS_RX_FILTERS,
+ ETH_SS_UDP_TUNNEL_TYPES,
+ ETH_SS_STATS_STD,
+ ETH_SS_STATS_ETH_PHY,
+ ETH_SS_STATS_ETH_MAC,
+ ETH_SS_STATS_ETH_CTRL,
+ ETH_SS_STATS_RMON,
+
+ /* add new constants above here */
+ ETH_SS_COUNT
+};
+
+/**
+ * enum ethtool_mac_stats_src - source of ethtool MAC statistics
+ * @ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ * if device supports a MAC merge layer, this retrieves the aggregate
+ * statistics of the eMAC and pMAC. Otherwise, it retrieves just the
+ * statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_EMAC:
+ * if device supports a MM layer, this retrieves the eMAC statistics.
+ * Otherwise, it retrieves the statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_PMAC:
+ * if device supports a MM layer, this retrieves the pMAC statistics.
+ */
+enum ethtool_mac_stats_src {
+ ETHTOOL_MAC_STATS_SRC_AGGREGATE,
+ ETHTOOL_MAC_STATS_SRC_EMAC,
+ ETHTOOL_MAC_STATS_SRC_PMAC,
+};
+
+/**
+ * enum ethtool_module_power_mode_policy - plug-in module power mode policy
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
+ * to high power mode when the first port using it is put administratively
+ * up and to low power mode when the last port using it is put
+ * administratively down.
+ */
+enum ethtool_module_power_mode_policy {
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1,
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO,
+};
+
+/**
+ * enum ethtool_module_power_mode - plug-in module power mode
+ * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode.
+ */
+enum ethtool_module_power_mode {
+ ETHTOOL_MODULE_POWER_MODE_LOW = 1,
+ ETHTOOL_MODULE_POWER_MODE_HIGH,
+};
+
+/**
+ * enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN: state of PoDL PSE functions are
+ * unknown
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: PoDL PSE functions are disabled
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: PoDL PSE functions are enabled
+ */
+enum ethtool_podl_pse_admin_state {
+ ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED,
+};
+
+/**
+ * enum ethtool_podl_pse_pw_d_status - power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN: PoDL PSE
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED: "The enumeration “disabled” is
+ * asserted true when the PoDL PSE state diagram variable mr_pse_enable is
+ * false"
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING: "The enumeration “searching” is
+ * asserted true when either of the PSE state diagram variables
+ * pi_detecting or pi_classifying is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING: "The enumeration “deliveringPower”
+ * is asserted true when the PoDL PSE state diagram variable pi_powered is
+ * true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP: "The enumeration “sleep” is asserted
+ * true when the PoDL PSE state diagram variable pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE: "The enumeration “idle” is asserted true
+ * when the logical combination of the PoDL PSE state diagram variables
+ * pi_prebiased*!pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR: "The enumeration “error” is asserted
+ * true when the PoDL PSE state diagram variable overload_held is true."
+ */
+enum ethtool_podl_pse_pw_d_status {
+ ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR,
+};
+
+/**
+ * enum ethtool_mm_verify_status - status of MAC Merge Verify function
+ * @ETHTOOL_MM_VERIFY_STATUS_UNKNOWN:
+ * verification status is unknown
+ * @ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ * the 802.3 Verify State diagram is in the state INIT_VERIFICATION
+ * @ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ * the Verify State diagram is in the state VERIFICATION_IDLE,
+ * SEND_VERIFY or WAIT_FOR_RESPONSE
+ * @ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ * indicates that the Verify State diagram is in the state VERIFIED
+ * @ETHTOOL_MM_VERIFY_STATUS_FAILED:
+ * the Verify State diagram is in the state VERIFY_FAIL
+ * @ETHTOOL_MM_VERIFY_STATUS_DISABLED:
+ * verification of preemption operation is disabled
+ */
+enum ethtool_mm_verify_status {
+ ETHTOOL_MM_VERIFY_STATUS_UNKNOWN,
+ ETHTOOL_MM_VERIFY_STATUS_INITIAL,
+ ETHTOOL_MM_VERIFY_STATUS_VERIFYING,
+ ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED,
+ ETHTOOL_MM_VERIFY_STATUS_FAILED,
+ ETHTOOL_MM_VERIFY_STATUS_DISABLED,
};
/**
@@ -596,12 +838,13 @@ struct ethtool_gstrings {
uint32_t cmd;
uint32_t string_set;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
/**
* struct ethtool_sset_info - string set information
* @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
* @sset_mask: On entry, a bitmask of string sets to query, with bits
* numbered according to &enum ethtool_stringset. On return, a
* bitmask of those string sets queried that are supported.
@@ -620,7 +863,7 @@ struct ethtool_sset_info {
uint32_t cmd;
uint32_t reserved;
uint64_t sset_mask;
- uint32_t data[0];
+ uint32_t data[];
};
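As an illustration of the note on reserved space and of the flexible-array conversion above, here is a sketch of preparing an ethtool_sset_info request for one string set; the ioctl plumbing that actually submits it is omitted, and the helper name is hypothetical.

#include <stdint.h>
#include <stdlib.h>

static struct ethtool_sset_info *alloc_sset_query(void)
{
    /* Room for one uint32_t count in the flexible array; calloc() leaves the
     * reserved field zeroed, as the note on reserved space requires. */
    struct ethtool_sset_info *info = calloc(1, sizeof(*info) + sizeof(uint32_t));

    if (info) {
        info->cmd = ETHTOOL_GSSET_INFO;
        info->sset_mask = 1ULL << ETH_SS_PHY_STATS;
    }
    return info;
}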
/**
@@ -646,6 +889,7 @@ enum ethtool_test_flags {
* @flags: A bitmask of flags from &enum ethtool_test_flags. Some
* flags may be set by the user on entry; others may be set by
* the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
* @len: On return, the number of test results
* @data: Array of test results
*
@@ -659,7 +903,7 @@ struct ethtool_test {
uint32_t flags;
uint32_t reserved;
uint32_t len;
- uint64_t data[0];
+ uint64_t data[];
};
/**
@@ -676,7 +920,7 @@ struct ethtool_test {
struct ethtool_stats {
uint32_t cmd;
uint32_t n_stats;
- uint64_t data[0];
+ uint64_t data[];
};
/**
@@ -693,7 +937,7 @@ struct ethtool_stats {
struct ethtool_perm_addr {
uint32_t cmd;
uint32_t size;
- uint8_t data[0];
+ uint8_t data[];
};
/* boolean flags controlling per-interface behavior characteristics.
@@ -846,6 +1090,7 @@ union ethtool_flow_union {
* @vlan_etype: VLAN EtherType
* @vlan_tci: VLAN tag control information
* @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
*
* Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
* is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -886,7 +1131,7 @@ struct ethtool_rx_flow_spec {
uint32_t location;
};
-/* How rings are layed out when accessing virtual functions or
+/* How rings are laid out when accessing virtual functions or
* offloaded queues is device specific. To allow users to do flow
* steering and specify these queues the ring cookie is partitioned
* into a 32bit queue index with an 8 bit virtual function id.
@@ -895,7 +1140,7 @@ struct ethtool_rx_flow_spec {
* devices start supporting PCIe w/ARI. However at the moment I
* do not know of any devices that support this so I do not reserve
* space for this at this time. If a future patch consumes the next
- * byte it should be aware of this possiblity.
+ * byte it should be aware of this possibility.
*/
#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
@@ -981,7 +1226,7 @@ struct ethtool_rxnfc {
uint32_t rule_cnt;
uint32_t rss_context;
};
- uint32_t rule_locs[0];
+ uint32_t rule_locs[];
};
@@ -1001,7 +1246,7 @@ struct ethtool_rxnfc {
struct ethtool_rxfh_indir {
uint32_t cmd;
uint32_t size;
- uint32_t ring_index[0];
+ uint32_t ring_index[];
};
/**
@@ -1021,7 +1266,10 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd: Reserved for future extensions.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
* of @indir_size uint32_t elements, followed by hash key of @key_size
* bytes.
@@ -1039,9 +1287,10 @@ struct ethtool_rxfh {
uint32_t indir_size;
uint32_t key_size;
uint8_t hfunc;
- uint8_t rsvd8[3];
+ uint8_t input_xfrm;
+ uint8_t rsvd8[2];
uint32_t rsvd32;
- uint32_t rss_config[0];
+ uint32_t rss_config[];
};
#define ETH_RXFH_CONTEXT_ALLOC 0xffffffff
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
@@ -1126,7 +1375,7 @@ struct ethtool_dump {
uint32_t version;
uint32_t flag;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
#define ETH_FW_DUMP_DISABLE 0
@@ -1158,7 +1407,7 @@ struct ethtool_get_features_block {
struct ethtool_gfeatures {
uint32_t cmd;
uint32_t size;
- struct ethtool_get_features_block features[0];
+ struct ethtool_get_features_block features[];
};
/**
@@ -1180,7 +1429,7 @@ struct ethtool_set_features_block {
struct ethtool_sfeatures {
uint32_t cmd;
uint32_t size;
- struct ethtool_set_features_block features[0];
+ struct ethtool_set_features_block features[];
};
/**
@@ -1189,7 +1438,9 @@ struct ethtool_sfeatures {
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
* @phc_index: device index of the associated PHC, or -1 if there is none
* @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
* @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
*
* The bits in the 'tx_types' and 'rx_filters' fields correspond to
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1263,15 +1514,33 @@ struct ethtool_per_queue_op {
};
/**
- * struct ethtool_fecparam - Ethernet forward error correction(fec) parameters
+ * struct ethtool_fecparam - Ethernet Forward Error Correction parameters
* @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM
- * @active_fec: FEC mode which is active on porte
- * @fec: Bitmask of supported/configured FEC modes
- * @rsvd: Reserved for future extensions. i.e FEC bypass feature.
+ * @active_fec: FEC mode which is active on the port, single bit set, GET only.
+ * @fec: Bitmask of configured FEC modes.
+ * @reserved: Reserved for future extensions, ignore on GET, write 0 for SET.
*
- * Drivers should reject a non-zero setting of @autoneg when
- * autoneogotiation is disabled (or not supported) for the link.
+ * Note that @reserved was never validated on input and ethtool user space
+ * left it uninitialized when calling SET. Hence going forward it can only be
+ * used to return a value to userspace with GET.
+ *
+ * FEC modes supported by the device can be read via %ETHTOOL_GLINKSETTINGS.
+ * FEC settings are configured by link autonegotiation whenever it's enabled.
+ * With autoneg on %ETHTOOL_GFECPARAM can be used to read the current mode.
+ *
+ * When autoneg is disabled %ETHTOOL_SFECPARAM controls the FEC settings.
+ * It is recommended that drivers only accept a single bit set in @fec.
+ * When multiple bits are set in @fec, drivers may pick a mode in an
+ * implementation-dependent way. Drivers should reject mixing
+ * %ETHTOOL_FEC_AUTO_BIT with other FEC modes, because it is unclear whether
+ * in that case the other modes constrain AUTO or are independent choices.
+ * Drivers must reject SET requests if they support none of the requested modes.
+ *
+ * If the device does not support FEC, drivers may use %ETHTOOL_FEC_NONE
+ * instead of returning %EOPNOTSUPP from %ETHTOOL_GFECPARAM.
*
+ * See enum ethtool_fec_config_bits for definition of valid bits for both
+ * @fec and @active_fec.
*/
struct ethtool_fecparam {
uint32_t cmd;
@@ -1283,11 +1552,16 @@ struct ethtool_fecparam {
/**
* enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration
- * @ETHTOOL_FEC_NONE: FEC mode configuration is not supported
- * @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver
- * @ETHTOOL_FEC_OFF: No FEC Mode
- * @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Detection mode
- * @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Detection mode
+ * @ETHTOOL_FEC_NONE_BIT: FEC mode configuration is not supported. Should not
+ * be used together with other bits. GET only.
+ * @ETHTOOL_FEC_AUTO_BIT: Select default/best FEC mode automatically, usually
+ * based on the link mode and SFP parameters read from the module's
+ * EEPROM. This bit does _not_ mean autonegotiation.
+ * @ETHTOOL_FEC_OFF_BIT: No FEC Mode
+ * @ETHTOOL_FEC_RS_BIT: Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_BASER_BIT: Base-R/Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_LLRS_BIT: Low Latency Reed Solomon FEC Mode (25G/50G Ethernet
+ * Consortium)
*/
enum ethtool_fec_config_bits {
ETHTOOL_FEC_NONE_BIT,
@@ -1295,6 +1569,7 @@ enum ethtool_fec_config_bits {
ETHTOOL_FEC_OFF_BIT,
ETHTOOL_FEC_RS_BIT,
ETHTOOL_FEC_BASER_BIT,
+ ETHTOOL_FEC_LLRS_BIT,
};
#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT)
@@ -1302,6 +1577,7 @@ enum ethtool_fec_config_bits {
#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT)
#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT)
#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT)
+#define ETHTOOL_FEC_LLRS (1 << ETHTOOL_FEC_LLRS_BIT)
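To make the GET contract documented above concrete, a user-space query might look like the following sketch. It is only an outline: the helper name is invented, error handling is minimal, sock is assumed to be an ordinary AF_INET datagram socket, and the struct ethtool_fecparam layout is taken from the documentation above (@cmd, @active_fec, @fec, @reserved).

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>

/* Read the current FEC configuration with ETHTOOL_GFECPARAM; on success,
 * fp->active_fec holds the single active mode and fp->fec the configured
 * mask, as described above. */
static int fec_get(int sock, const char *ifname, struct ethtool_fecparam *fp)
{
    struct ifreq ifr;

    memset(&ifr, 0, sizeof(ifr));
    memset(fp, 0, sizeof(*fp));
    fp->cmd = ETHTOOL_GFECPARAM;
    strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    ifr.ifr_data = (void *)fp;

    return ioctl(sock, SIOCETHTOOL, &ifr);  /* 0 on success, -1 + errno on error */
}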
/* CMDs currently supported */
#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings.
@@ -1436,6 +1712,13 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
+
+ /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
+ * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
+ * macro for bits > 31. The only way to use indices > 31 is to
+ * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API.
+ */
+
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
@@ -1457,15 +1740,59 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
-
- /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
- * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
- * macro for bits > 31. The only way to use indices > 31 is to
- * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API.
- */
-
- __ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66,
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67,
+ ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
+ ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74,
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76,
+ ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77,
+ ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78,
+ ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79,
+ ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80,
+ ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81,
+ ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82,
+ ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83,
+ ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84,
+ ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85,
+ ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86,
+ ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
+ ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
+ ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
+ ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
+ ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
+ ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92,
+ ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93,
+ ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94,
+ ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95,
+ ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96,
+ ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97,
+ ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98,
+ ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99,
+ ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100,
+ ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101,
+
+ /* must be last entry */
+ __ETHTOOL_LINK_MODE_MASK_NBITS
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
@@ -1573,12 +1900,15 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_50000 50000
#define SPEED_56000 56000
#define SPEED_100000 100000
+#define SPEED_200000 200000
+#define SPEED_400000 400000
+#define SPEED_800000 800000
#define SPEED_UNKNOWN -1
static inline int ethtool_validate_speed(uint32_t speed)
{
- return speed <= INT_MAX || speed == SPEED_UNKNOWN;
+ return speed <= INT_MAX || speed == (uint32_t)SPEED_UNKNOWN;
}
/* Duplex, half or full. */
@@ -1598,6 +1928,32 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
return 0;
}
+#define MASTER_SLAVE_CFG_UNSUPPORTED 0
+#define MASTER_SLAVE_CFG_UNKNOWN 1
+#define MASTER_SLAVE_CFG_MASTER_PREFERRED 2
+#define MASTER_SLAVE_CFG_SLAVE_PREFERRED 3
+#define MASTER_SLAVE_CFG_MASTER_FORCE 4
+#define MASTER_SLAVE_CFG_SLAVE_FORCE 5
+#define MASTER_SLAVE_STATE_UNSUPPORTED 0
+#define MASTER_SLAVE_STATE_UNKNOWN 1
+#define MASTER_SLAVE_STATE_MASTER 2
+#define MASTER_SLAVE_STATE_SLAVE 3
+#define MASTER_SLAVE_STATE_ERR 4
+
+/* These are used to throttle the rate of data on the phy interface when the
+ * native speed of the interface is higher than the link speed. These should
+ * not be used for phy interfaces which natively support multiple speeds (e.g.
+ * MII or SGMII).
+ */
+/* No rate matching performed. */
+#define RATE_MATCH_NONE 0
+/* The phy sends pause frames to throttle the MAC. */
+#define RATE_MATCH_PAUSE 1
+/* The phy asserts CRS to prevent the MAC from transmitting. */
+#define RATE_MATCH_CRS 2
+/* The MAC is programmed with a sufficiently-large IPG. */
+#define RATE_MATCH_OPEN_LOOP 3
+
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
@@ -1637,6 +1993,17 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
#define WAKE_FILTER (1 << 7)
+#define WOL_MODE_COUNT 8
+
+/* RSS hash function data
+ * XOR the corresponding source and destination fields of each specified
+ * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
+ * calculation. Note that this XORing reduces the input set entropy and could
+ * be exploited to reduce the RSS queue spread.
+ */
+#define RXH_XFRM_SYM_XOR (1 << 0)
+#define RXH_XFRM_NO_CHANGE 0xff
+
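The entropy trade-off noted above is easy to see with a toy transform: XORing the source and destination fields makes both directions of a flow produce the same hash input, which is the intent of RXH_XFRM_SYM_XOR, but it also lets distinct flows collide. The snippet below is purely illustrative and is not the hardware or driver algorithm.

#include <stdint.h>

/* Toy illustration of the symmetric-XOR input transform: the (src, dst)
 * and (dst, src) directions of a flow yield identical hash inputs. */
static void sym_xor_input(uint32_t src_ip, uint32_t dst_ip,
                          uint16_t src_port, uint16_t dst_port,
                          uint32_t *ip_in, uint16_t *port_in)
{
    *ip_in = src_ip ^ dst_ip;        /* same for both directions */
    *port_in = src_port ^ dst_port;  /* ditto; input entropy is reduced */
}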
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
@@ -1656,6 +2023,53 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
+
+/* Used for GTP-U IPv4 and IPv6.
+ * The format of GTP packets only includes
+ * elements such as TEID and GTP version.
+ * It is primarily intended for the UE's data communication.
+ */
+#define GTPU_V4_FLOW 0x13 /* hash only */
+#define GTPU_V6_FLOW 0x14 /* hash only */
+
+/* Used for GTP-C IPv4 and v6.
+ * The format of these GTP packets does not include a TEID.
+ * Primarily expected to be used for the control traffic that creates
+ * sessions for UE data communication, commonly referred to as CSR
+ * (Create Session Request).
+ */
+#define GTPC_V4_FLOW 0x15 /* hash only */
+#define GTPC_V6_FLOW 0x16 /* hash only */
+
+/* Used for GTP-C IPv4 and v6.
+ * Unlike GTPC_V4_FLOW, the format of these GTP packets includes the TEID.
+ * After a session has been created, GTP-C packets take this form.
+ * This is mainly used for requests that perform UE handover.
+ */
+#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
+#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
+
+/* Used for GTP-U with extended headers for the PSC (PDU Session Container).
+ * The format of these GTP packets includes the TEID and QFI.
+ * In 5G communication using a UPF (User Plane Function), data traffic
+ * is carried with this extended header.
+ */
+#define GTPU_EH_V4_FLOW 0x19 /* hash only */
+#define GTPU_EH_V6_FLOW 0x1a /* hash only */
+
+/* Used for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers.
+ * This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by
+ * the UL/DL indication included in the PSC.
+ * The data carried differs between Downlink and Uplink, which can be
+ * used to distinguish packets.
+ * The functions described so far are useful when you want to
+ * handle communication from the mobile network in UPF, PGW, etc.
+ */
+#define GTPU_UL_V4_FLOW 0x1b /* hash only */
+#define GTPU_UL_V6_FLOW 0x1c /* hash only */
+#define GTPU_DL_V4_FLOW 0x1d /* hash only */
+#define GTPU_DL_V6_FLOW 0x1e /* hash only */
+
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000
@@ -1670,6 +2084,7 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
@@ -1691,6 +2106,9 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define ETH_MODULE_SFF_8436 0x4
#define ETH_MODULE_SFF_8436_LEN 256
+#define ETH_MODULE_SFF_8636_MAX_LEN 640
+#define ETH_MODULE_SFF_8436_MAX_LEN 640
+
/* Reset flags */
/* The reset() operation must clear the flags for the components which
* were actually reset. On successful return, the flags indicate the
@@ -1770,20 +2188,13 @@ enum ethtool_reset_flags {
* refused. For drivers: ignore this field (use kernel's
* __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
* be overwritten by kernel.
- * @supported: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features for which the interface
- * supports autonegotiation or auto-detection. Read-only.
- * @advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features that are advertised through
- * autonegotiation or enabled for auto-detection.
- * @lp_advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, and other
- * link features that the link partner advertised through
- * autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @rate_matching: Rate adaptation performed by the PHY
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple
@@ -1800,14 +2211,9 @@ enum ethtool_reset_flags {
* rejected.
*
* Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt
- * are not available in %ethtool_link_settings. Until all drivers are
- * converted to ignore them or to the new %ethtool_link_settings API,
- * for both queries and changes, users should always try
- * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick
- * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it
- * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and
- * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing
- * either %ethtool_cmd or %ethtool_link_settings).
+ * are not available in %ethtool_link_settings. These fields will always
+ * be set to zero in the %ETHTOOL_GSET reply, and %ETHTOOL_SSET will
+ * fail if any of them is set to a non-zero value.
*
* Users should assume that all fields not marked read-only are
* writable and subject to validation by the driver. They should use
@@ -1823,6 +2229,21 @@ enum ethtool_reset_flags {
* %set_link_ksettings() should validate all fields other than @cmd
* and @link_mode_masks_nwords that are not described as read-only or
* deprecated, and must ignore all fields described as read-only.
+ *
+ * @link_mode_masks is divided into three bitfields, each of length
+ * @link_mode_masks_nwords:
+ * - supported: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features for which the interface
+ * supports autonegotiation or auto-detection. Read-only.
+ * - advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features that are advertised through
+ * autonegotiation or enabled for auto-detection.
+ * - lp_advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, and other
+ * link features that the link partner advertised through
+ * autonegotiation; 0 if unknown or not applicable. Read-only.
*/
struct ethtool_link_settings {
uint32_t cmd;
@@ -1836,9 +2257,11 @@ struct ethtool_link_settings {
uint8_t eth_tp_mdix_ctrl;
int8_t link_mode_masks_nwords;
uint8_t transceiver;
- uint8_t reserved1[3];
+ uint8_t master_slave_cfg;
+ uint8_t master_slave_state;
+ uint8_t rate_matching;
uint32_t reserved[7];
- uint32_t link_mode_masks[0];
+ uint32_t link_mode_masks[];
/* layout of link_mode_masks fields:
* uint32_t map_supported[link_mode_masks_nwords];
* uint32_t map_advertising[link_mode_masks_nwords];
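Given the layout comment above, the three bitmaps can be reached with plain pointer arithmetic on @link_mode_masks. The accessors below are an illustrative sketch (names invented); a real caller would first complete the ETHTOOL_GLINKSETTINGS handshake so that @link_mode_masks_nwords holds the agreed, positive word count.

#include <stdint.h>

/* Illustrative accessors for the variable-length bitmaps: supported comes
 * first, then advertising, then lp_advertising, each nwords long. */
static inline const uint32_t *lm_supported(const struct ethtool_link_settings *ls)
{
    return ls->link_mode_masks;
}

static inline const uint32_t *lm_advertising(const struct ethtool_link_settings *ls)
{
    return ls->link_mode_masks + ls->link_mode_masks_nwords;
}

static inline const uint32_t *lm_lp_advertising(const struct ethtool_link_settings *ls)
{
    return ls->link_mode_masks + 2 * ls->link_mode_masks_nwords;
}

/* Test one ethtool_link_mode_bit_indices bit,
 * e.g. ETHTOOL_LINK_MODE_FEC_LLRS_BIT. */
static inline int lm_test_bit(const uint32_t *map, unsigned int bit)
{
    return (map[bit / 32] >> (bit % 32)) & 1;
}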
diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h
new file mode 100644
index 0000000000..bac9dbc49f
--- /dev/null
+++ b/include/standard-headers/linux/fuse.h
@@ -0,0 +1,1185 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/*
+ This file defines the kernel interface of FUSE
+ Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
+
+ This program can be distributed under the terms of the GNU GPL.
+ See the file COPYING.
+
+ This -- and only this -- header file may also be distributed under
+ the terms of the BSD Licence as follows:
+
+ Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGE.
+*/
+
+/*
+ * This file defines the kernel interface of FUSE
+ *
+ * Protocol changelog:
+ *
+ * 7.1:
+ * - add the following messages:
+ * FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK,
+ * FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE,
+ * FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR,
+ * FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR,
+ * FUSE_RELEASEDIR
+ * - add padding to messages to accommodate 32-bit servers on 64-bit kernels
+ *
+ * 7.2:
+ * - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags
+ * - add FUSE_FSYNCDIR message
+ *
+ * 7.3:
+ * - add FUSE_ACCESS message
+ * - add FUSE_CREATE message
+ * - add filehandle to fuse_setattr_in
+ *
+ * 7.4:
+ * - add frsize to fuse_kstatfs
+ * - clean up request size limit checking
+ *
+ * 7.5:
+ * - add flags and max_write to fuse_init_out
+ *
+ * 7.6:
+ * - add max_readahead to fuse_init_in and fuse_init_out
+ *
+ * 7.7:
+ * - add FUSE_INTERRUPT message
+ * - add POSIX file lock support
+ *
+ * 7.8:
+ * - add lock_owner and flags fields to fuse_release_in
+ * - add FUSE_BMAP message
+ * - add FUSE_DESTROY message
+ *
+ * 7.9:
+ * - new fuse_getattr_in input argument of GETATTR
+ * - add lk_flags in fuse_lk_in
+ * - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in
+ * - add blksize field to fuse_attr
+ * - add file flags field to fuse_read_in and fuse_write_in
+ * - Add ATIME_NOW and MTIME_NOW flags to fuse_setattr_in
+ *
+ * 7.10
+ * - add nonseekable open flag
+ *
+ * 7.11
+ * - add IOCTL message
+ * - add unsolicited notification support
+ * - add POLL message and NOTIFY_POLL notification
+ *
+ * 7.12
+ * - add umask flag to input argument of create, mknod and mkdir
+ * - add notification messages for invalidation of inodes and
+ * directory entries
+ *
+ * 7.13
+ * - make max number of background requests and congestion threshold
+ * tunables
+ *
+ * 7.14
+ * - add splice support to fuse device
+ *
+ * 7.15
+ * - add store notify
+ * - add retrieve notify
+ *
+ * 7.16
+ * - add BATCH_FORGET request
+ * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct
+ * fuse_ioctl_iovec' instead of ambiguous 'struct iovec'
+ * - add FUSE_IOCTL_32BIT flag
+ *
+ * 7.17
+ * - add FUSE_FLOCK_LOCKS and FUSE_RELEASE_FLOCK_UNLOCK
+ *
+ * 7.18
+ * - add FUSE_IOCTL_DIR flag
+ * - add FUSE_NOTIFY_DELETE
+ *
+ * 7.19
+ * - add FUSE_FALLOCATE
+ *
+ * 7.20
+ * - add FUSE_AUTO_INVAL_DATA
+ *
+ * 7.21
+ * - add FUSE_READDIRPLUS
+ * - send the requested events in POLL request
+ *
+ * 7.22
+ * - add FUSE_ASYNC_DIO
+ *
+ * 7.23
+ * - add FUSE_WRITEBACK_CACHE
+ * - add time_gran to fuse_init_out
+ * - add reserved space to fuse_init_out
+ * - add FATTR_CTIME
+ * - add ctime and ctimensec to fuse_setattr_in
+ * - add FUSE_RENAME2 request
+ * - add FUSE_NO_OPEN_SUPPORT flag
+ *
+ * 7.24
+ * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
+ *
+ * 7.25
+ * - add FUSE_PARALLEL_DIROPS
+ *
+ * 7.26
+ * - add FUSE_HANDLE_KILLPRIV
+ * - add FUSE_POSIX_ACL
+ *
+ * 7.27
+ * - add FUSE_ABORT_ERROR
+ *
+ * 7.28
+ * - add FUSE_COPY_FILE_RANGE
+ * - add FOPEN_CACHE_DIR
+ * - add FUSE_MAX_PAGES, add max_pages to init_out
+ * - add FUSE_CACHE_SYMLINKS
+ *
+ * 7.29
+ * - add FUSE_NO_OPENDIR_SUPPORT flag
+ *
+ * 7.30
+ * - add FUSE_EXPLICIT_INVAL_DATA
+ * - add FUSE_IOCTL_COMPAT_X32
+ *
+ * 7.31
+ * - add FUSE_WRITE_KILL_PRIV flag
+ * - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING
+ * - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag
+ *
+ * 7.32
+ * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS
+ *
+ * 7.33
+ * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID
+ * - add FUSE_OPEN_KILL_SUIDGID
+ * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT
+ * - add FUSE_SETXATTR_ACL_KILL_SGID
+ *
+ * 7.34
+ * - add FUSE_SYNCFS
+ *
+ * 7.35
+ * - add FOPEN_NOFLUSH
+ *
+ * 7.36
+ * - extend fuse_init_in with reserved fields, add FUSE_INIT_EXT init flag
+ * - add flags2 to fuse_init_in and fuse_init_out
+ * - add FUSE_SECURITY_CTX init flag
+ * - add security context to create, mkdir, symlink, and mknod requests
+ * - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX
+ *
+ * 7.37
+ * - add FUSE_TMPFILE
+ *
+ * 7.38
+ * - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry
+ * - add FOPEN_PARALLEL_DIRECT_WRITES
+ * - add total_extlen to fuse_in_header
+ * - add FUSE_MAX_NR_SECCTX
+ * - add extension header
+ * - add FUSE_EXT_GROUPS
+ * - add FUSE_CREATE_SUPP_GROUP
+ * - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+ * - add FUSE_DIRECT_IO_ALLOW_MMAP
+ * - add FUSE_STATX and related structures
+ *
+ * 7.40
+ * - add max_stack_depth to fuse_init_out, add FUSE_PASSTHROUGH init flag
+ * - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
+ * - add FUSE_NO_EXPORT_SUPPORT init flag
+ * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
+ */
+
+#ifndef _LINUX_FUSE_H
+#define _LINUX_FUSE_H
+
+#include <stdint.h>
+
+/*
+ * Version negotiation:
+ *
+ * Both the kernel and userspace send the version they support in the
+ * INIT request and reply respectively.
+ *
+ * If the major versions match then both shall use the smallest
+ * of the two minor versions for communication.
+ *
+ * If the kernel supports a larger major version, then userspace shall
+ * reply with the major version it supports, ignore the rest of the
+ * INIT message and expect a new INIT message from the kernel with a
+ * matching major version.
+ *
+ * If the library supports a larger major version, then it shall fall
+ * back to the major protocol version sent by the kernel for
+ * communication and reply with that major version (and an arbitrary
+ * supported minor version).
+ */
+
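A condensed sketch of the daemon side of this negotiation is shown below. It is not libfuse code, just an illustration of the rules above, and it assumes the fuse_init_in/fuse_init_out structures defined later in this header.

/* Minimal negotiation sketch: pick the common major, then the smaller minor. */
static void fuse_negotiate_version(const struct fuse_init_in *in,
                                   struct fuse_init_out *out)
{
    out->major = FUSE_KERNEL_VERSION;
    out->minor = FUSE_KERNEL_MINOR_VERSION;

    if (in->major < FUSE_KERNEL_VERSION) {
        /* Kernel is older: fall back to its major version. */
        out->major = in->major;
    } else if (in->major > FUSE_KERNEL_VERSION) {
        /* Kernel is newer: reply with our major only and expect a second
         * INIT carrying a matching major version. */
        return;
    }
    /* Majors now match (or we fell back): use the smaller minor. */
    if (in->minor < out->minor) {
        out->minor = in->minor;
    }
}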
+/** Version number of this interface */
+#define FUSE_KERNEL_VERSION 7
+
+/** Minor version number of this interface */
+#define FUSE_KERNEL_MINOR_VERSION 40
+
+/** The node ID of the root inode */
+#define FUSE_ROOT_ID 1
+
+/* Make sure all structures are padded to 64bit boundary, so 32bit
+ userspace works under 64bit kernels */
+
+struct fuse_attr {
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t atime;
+ uint64_t mtime;
+ uint64_t ctime;
+ uint32_t atimensec;
+ uint32_t mtimensec;
+ uint32_t ctimensec;
+ uint32_t mode;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint32_t rdev;
+ uint32_t blksize;
+ uint32_t flags;
+};
+
+/*
+ * The following structures are bit-for-bit compatible with the statx(2) ABI in
+ * Linux.
+ */
+struct fuse_sx_time {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t __reserved;
+};
+
+struct fuse_statx {
+ uint32_t mask;
+ uint32_t blksize;
+ uint64_t attributes;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint16_t mode;
+ uint16_t __spare0[1];
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t attributes_mask;
+ struct fuse_sx_time atime;
+ struct fuse_sx_time btime;
+ struct fuse_sx_time ctime;
+ struct fuse_sx_time mtime;
+ uint32_t rdev_major;
+ uint32_t rdev_minor;
+ uint32_t dev_major;
+ uint32_t dev_minor;
+ uint64_t __spare2[14];
+};
+
+struct fuse_kstatfs {
+ uint64_t blocks;
+ uint64_t bfree;
+ uint64_t bavail;
+ uint64_t files;
+ uint64_t ffree;
+ uint32_t bsize;
+ uint32_t namelen;
+ uint32_t frsize;
+ uint32_t padding;
+ uint32_t spare[6];
+};
+
+struct fuse_file_lock {
+ uint64_t start;
+ uint64_t end;
+ uint32_t type;
+ uint32_t pid; /* tgid */
+};
+
+/**
+ * Bitmasks for fuse_setattr_in.valid
+ */
+#define FATTR_MODE (1 << 0)
+#define FATTR_UID (1 << 1)
+#define FATTR_GID (1 << 2)
+#define FATTR_SIZE (1 << 3)
+#define FATTR_ATIME (1 << 4)
+#define FATTR_MTIME (1 << 5)
+#define FATTR_FH (1 << 6)
+#define FATTR_ATIME_NOW (1 << 7)
+#define FATTR_MTIME_NOW (1 << 8)
+#define FATTR_LOCKOWNER (1 << 9)
+#define FATTR_CTIME (1 << 10)
+#define FATTR_KILL_SUIDGID (1 << 11)
+
+/**
+ * Flags returned by the OPEN request
+ *
+ * FOPEN_DIRECT_IO: bypass page cache for this open file
+ * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
+ * FOPEN_NONSEEKABLE: the file is not seekable
+ * FOPEN_CACHE_DIR: allow caching this directory
+ * FOPEN_STREAM: the file is stream-like (no file position at all)
+ * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
+ * FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode
+ * FOPEN_PASSTHROUGH: passthrough read/write io for this open file
+ */
+#define FOPEN_DIRECT_IO (1 << 0)
+#define FOPEN_KEEP_CACHE (1 << 1)
+#define FOPEN_NONSEEKABLE (1 << 2)
+#define FOPEN_CACHE_DIR (1 << 3)
+#define FOPEN_STREAM (1 << 4)
+#define FOPEN_NOFLUSH (1 << 5)
+#define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6)
+#define FOPEN_PASSTHROUGH (1 << 7)
+
+/**
+ * INIT request/reply flags
+ *
+ * FUSE_ASYNC_READ: asynchronous read requests
+ * FUSE_POSIX_LOCKS: remote locking for POSIX file locks
+ * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported)
+ * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem
+ * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
+ * FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB
+ * FUSE_DONT_MASK: don't apply umask to file mode on create operations
+ * FUSE_SPLICE_WRITE: kernel supports splice write on the device
+ * FUSE_SPLICE_MOVE: kernel supports splice move on the device
+ * FUSE_SPLICE_READ: kernel supports splice read on the device
+ * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks
+ * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories
+ * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages
+ * FUSE_DO_READDIRPLUS: do READDIRPLUS (READDIR+LOOKUP in one)
+ * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
+ * FUSE_ASYNC_DIO: asynchronous direct I/O submission
+ * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
+ * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
+ * FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc
+ * FUSE_POSIX_ACL: filesystem supports posix acls
+ * FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED
+ * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages
+ * FUSE_CACHE_SYMLINKS: cache READLINK responses
+ * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
+ * FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request
+ * FUSE_MAP_ALIGNMENT: init_out.map_alignment contains log2(byte alignment) for
+ * foffset and moffset fields in struct
+ * fuse_setupmapping_out and fuse_removemapping_one.
+ * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts
+ * FUSE_HANDLE_KILLPRIV_V2: fs kills suid/sgid/cap on write/chown/trunc.
+ * Upon write/truncate suid/sgid is only killed if caller
+ * does not have CAP_FSETID. Additionally upon
+ * write/truncate sgid is killed only if file has group
+ * execute permission. (Same as Linux VFS behavior).
+ * FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in
+ * FUSE_INIT_EXT: extended fuse_init_in request
+ * FUSE_INIT_RESERVED: reserved, do not use
+ * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and
+ * mknod
+ * FUSE_HAS_INODE_DAX: use per inode DAX
+ * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
+ * symlink and mknod (single group that matches parent)
+ * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+ * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
+ * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
+ * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
+ * of the request ID indicates resend requests
+ */
+#define FUSE_ASYNC_READ (1 << 0)
+#define FUSE_POSIX_LOCKS (1 << 1)
+#define FUSE_FILE_OPS (1 << 2)
+#define FUSE_ATOMIC_O_TRUNC (1 << 3)
+#define FUSE_EXPORT_SUPPORT (1 << 4)
+#define FUSE_BIG_WRITES (1 << 5)
+#define FUSE_DONT_MASK (1 << 6)
+#define FUSE_SPLICE_WRITE (1 << 7)
+#define FUSE_SPLICE_MOVE (1 << 8)
+#define FUSE_SPLICE_READ (1 << 9)
+#define FUSE_FLOCK_LOCKS (1 << 10)
+#define FUSE_HAS_IOCTL_DIR (1 << 11)
+#define FUSE_AUTO_INVAL_DATA (1 << 12)
+#define FUSE_DO_READDIRPLUS (1 << 13)
+#define FUSE_READDIRPLUS_AUTO (1 << 14)
+#define FUSE_ASYNC_DIO (1 << 15)
+#define FUSE_WRITEBACK_CACHE (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT (1 << 17)
+#define FUSE_PARALLEL_DIROPS (1 << 18)
+#define FUSE_HANDLE_KILLPRIV (1 << 19)
+#define FUSE_POSIX_ACL (1 << 20)
+#define FUSE_ABORT_ERROR (1 << 21)
+#define FUSE_MAX_PAGES (1 << 22)
+#define FUSE_CACHE_SYMLINKS (1 << 23)
+#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)
+#define FUSE_EXPLICIT_INVAL_DATA (1 << 25)
+#define FUSE_MAP_ALIGNMENT (1 << 26)
+#define FUSE_SUBMOUNTS (1 << 27)
+#define FUSE_HANDLE_KILLPRIV_V2 (1 << 28)
+#define FUSE_SETXATTR_EXT (1 << 29)
+#define FUSE_INIT_EXT (1 << 30)
+#define FUSE_INIT_RESERVED (1 << 31)
+/* bits 32..63 get shifted down 32 bits into the flags2 field */
+#define FUSE_SECURITY_CTX (1ULL << 32)
+#define FUSE_HAS_INODE_DAX (1ULL << 33)
+#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
+#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
+#define FUSE_PASSTHROUGH (1ULL << 37)
+#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38)
+#define FUSE_HAS_RESEND (1ULL << 39)
+
+/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
+#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
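The "shifted down 32 bits" rule above reduces to a small helper when a daemon builds its INIT reply; the sketch below uses an invented function name and assumes the fuse_init_out layout defined later in this header. Setting FUSE_INIT_EXT alongside is what signals that flags2 carries meaningful bits, so a call such as fuse_set_init_flags(&out, FUSE_ASYNC_READ | FUSE_SECURITY_CTX) advertises both a low and a high flag.

#include <stdint.h>

/* Split a 64-bit wanted-flags word into fuse_init_out.flags/.flags2,
 * per the "bits 32..63 get shifted down 32 bits" rule above. */
static void fuse_set_init_flags(struct fuse_init_out *out, uint64_t want)
{
    want |= FUSE_INIT_EXT;                 /* mark flags2 as meaningful */
    out->flags  = (uint32_t)want;          /* bits 0..31 */
    out->flags2 = (uint32_t)(want >> 32);  /* bits 32..63 */
}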
+
+/**
+ * CUSE INIT request/reply flags
+ *
+ * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl
+ */
+#define CUSE_UNRESTRICTED_IOCTL (1 << 0)
+
+/**
+ * Release flags
+ */
+#define FUSE_RELEASE_FLUSH (1 << 0)
+#define FUSE_RELEASE_FLOCK_UNLOCK (1 << 1)
+
+/**
+ * Getattr flags
+ */
+#define FUSE_GETATTR_FH (1 << 0)
+
+/**
+ * Lock flags
+ */
+#define FUSE_LK_FLOCK (1 << 0)
+
+/**
+ * WRITE flags
+ *
+ * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
+ * FUSE_WRITE_LOCKOWNER: lock_owner field is valid
+ * FUSE_WRITE_KILL_SUIDGID: kill suid and sgid bits
+ */
+#define FUSE_WRITE_CACHE (1 << 0)
+#define FUSE_WRITE_LOCKOWNER (1 << 1)
+#define FUSE_WRITE_KILL_SUIDGID (1 << 2)
+
+/* Obsolete alias; this flag implies killing suid/sgid only. */
+#define FUSE_WRITE_KILL_PRIV FUSE_WRITE_KILL_SUIDGID
+
+/**
+ * Read flags
+ */
+#define FUSE_READ_LOCKOWNER (1 << 1)
+
+/**
+ * Ioctl flags
+ *
+ * FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine
+ * FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed
+ * FUSE_IOCTL_RETRY: retry with new iovecs
+ * FUSE_IOCTL_32BIT: 32bit ioctl
+ * FUSE_IOCTL_DIR: is a directory
+ * FUSE_IOCTL_COMPAT_X32: x32 compat ioctl on 64bit machine (64bit time_t)
+ *
+ * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs
+ */
+#define FUSE_IOCTL_COMPAT (1 << 0)
+#define FUSE_IOCTL_UNRESTRICTED (1 << 1)
+#define FUSE_IOCTL_RETRY (1 << 2)
+#define FUSE_IOCTL_32BIT (1 << 3)
+#define FUSE_IOCTL_DIR (1 << 4)
+#define FUSE_IOCTL_COMPAT_X32 (1 << 5)
+
+#define FUSE_IOCTL_MAX_IOV 256
+
+/**
+ * Poll flags
+ *
+ * FUSE_POLL_SCHEDULE_NOTIFY: request poll notify
+ */
+#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0)
+
+/**
+ * Fsync flags
+ *
+ * FUSE_FSYNC_FDATASYNC: Sync data only, not metadata
+ */
+#define FUSE_FSYNC_FDATASYNC (1 << 0)
+
+/**
+ * fuse_attr flags
+ *
+ * FUSE_ATTR_SUBMOUNT: Object is a submount root
+ * FUSE_ATTR_DAX: Enable DAX for this file in per inode DAX mode
+ */
+#define FUSE_ATTR_SUBMOUNT (1 << 0)
+#define FUSE_ATTR_DAX (1 << 1)
+
+/**
+ * Open flags
+ * FUSE_OPEN_KILL_SUIDGID: Kill suid and sgid if executable
+ */
+#define FUSE_OPEN_KILL_SUIDGID (1 << 0)
+
+/**
+ * setxattr flags
+ * FUSE_SETXATTR_ACL_KILL_SGID: Clear SGID when system.posix_acl_access is set
+ */
+#define FUSE_SETXATTR_ACL_KILL_SGID (1 << 0)
+
+/**
+ * notify_inval_entry flags
+ * FUSE_EXPIRE_ONLY
+ */
+#define FUSE_EXPIRE_ONLY (1 << 0)
+
+/**
+ * extension type
+ * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx
+ * FUSE_EXT_GROUPS: &fuse_supp_groups extension
+ */
+enum fuse_ext_type {
+ /* Types 0..31 are reserved for fuse_secctx_header */
+ FUSE_MAX_NR_SECCTX = 31,
+ FUSE_EXT_GROUPS = 32,
+};
+
+enum fuse_opcode {
+ FUSE_LOOKUP = 1,
+ FUSE_FORGET = 2, /* no reply */
+ FUSE_GETATTR = 3,
+ FUSE_SETATTR = 4,
+ FUSE_READLINK = 5,
+ FUSE_SYMLINK = 6,
+ FUSE_MKNOD = 8,
+ FUSE_MKDIR = 9,
+ FUSE_UNLINK = 10,
+ FUSE_RMDIR = 11,
+ FUSE_RENAME = 12,
+ FUSE_LINK = 13,
+ FUSE_OPEN = 14,
+ FUSE_READ = 15,
+ FUSE_WRITE = 16,
+ FUSE_STATFS = 17,
+ FUSE_RELEASE = 18,
+ FUSE_FSYNC = 20,
+ FUSE_SETXATTR = 21,
+ FUSE_GETXATTR = 22,
+ FUSE_LISTXATTR = 23,
+ FUSE_REMOVEXATTR = 24,
+ FUSE_FLUSH = 25,
+ FUSE_INIT = 26,
+ FUSE_OPENDIR = 27,
+ FUSE_READDIR = 28,
+ FUSE_RELEASEDIR = 29,
+ FUSE_FSYNCDIR = 30,
+ FUSE_GETLK = 31,
+ FUSE_SETLK = 32,
+ FUSE_SETLKW = 33,
+ FUSE_ACCESS = 34,
+ FUSE_CREATE = 35,
+ FUSE_INTERRUPT = 36,
+ FUSE_BMAP = 37,
+ FUSE_DESTROY = 38,
+ FUSE_IOCTL = 39,
+ FUSE_POLL = 40,
+ FUSE_NOTIFY_REPLY = 41,
+ FUSE_BATCH_FORGET = 42,
+ FUSE_FALLOCATE = 43,
+ FUSE_READDIRPLUS = 44,
+ FUSE_RENAME2 = 45,
+ FUSE_LSEEK = 46,
+ FUSE_COPY_FILE_RANGE = 47,
+ FUSE_SETUPMAPPING = 48,
+ FUSE_REMOVEMAPPING = 49,
+ FUSE_SYNCFS = 50,
+ FUSE_TMPFILE = 51,
+ FUSE_STATX = 52,
+
+ /* CUSE specific operations */
+ CUSE_INIT = 4096,
+
+ /* Reserved opcodes: helpful to detect structure endian-ness */
+ CUSE_INIT_BSWAP_RESERVED = 1048576, /* CUSE_INIT << 8 */
+ FUSE_INIT_BSWAP_RESERVED = 436207616, /* FUSE_INIT << 24 */
+};
+
+enum fuse_notify_code {
+ FUSE_NOTIFY_POLL = 1,
+ FUSE_NOTIFY_INVAL_INODE = 2,
+ FUSE_NOTIFY_INVAL_ENTRY = 3,
+ FUSE_NOTIFY_STORE = 4,
+ FUSE_NOTIFY_RETRIEVE = 5,
+ FUSE_NOTIFY_DELETE = 6,
+ FUSE_NOTIFY_RESEND = 7,
+ FUSE_NOTIFY_CODE_MAX,
+};
+
+/* The read buffer is required to be at least 8k, but may be much larger */
+#define FUSE_MIN_READ_BUFFER 8192
+
+#define FUSE_COMPAT_ENTRY_OUT_SIZE 120
+
+struct fuse_entry_out {
+ uint64_t nodeid; /* Inode ID */
+ uint64_t generation; /* Inode generation: nodeid:gen must
+ be unique for the fs's lifetime */
+ uint64_t entry_valid; /* Cache timeout for the name */
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t entry_valid_nsec;
+ uint32_t attr_valid_nsec;
+ struct fuse_attr attr;
+};
+
+struct fuse_forget_in {
+ uint64_t nlookup;
+};
+
+struct fuse_forget_one {
+ uint64_t nodeid;
+ uint64_t nlookup;
+};
+
+struct fuse_batch_forget_in {
+ uint32_t count;
+ uint32_t dummy;
+};
+
+struct fuse_getattr_in {
+ uint32_t getattr_flags;
+ uint32_t dummy;
+ uint64_t fh;
+};
+
+#define FUSE_COMPAT_ATTR_OUT_SIZE 96
+
+struct fuse_attr_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t dummy;
+ struct fuse_attr attr;
+};
+
+struct fuse_statx_in {
+ uint32_t getattr_flags;
+ uint32_t reserved;
+ uint64_t fh;
+ uint32_t sx_flags;
+ uint32_t sx_mask;
+};
+
+struct fuse_statx_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t flags;
+ uint64_t spare[2];
+ struct fuse_statx stat;
+};
+
+#define FUSE_COMPAT_MKNOD_IN_SIZE 8
+
+struct fuse_mknod_in {
+ uint32_t mode;
+ uint32_t rdev;
+ uint32_t umask;
+ uint32_t padding;
+};
+
+struct fuse_mkdir_in {
+ uint32_t mode;
+ uint32_t umask;
+};
+
+struct fuse_rename_in {
+ uint64_t newdir;
+};
+
+struct fuse_rename2_in {
+ uint64_t newdir;
+ uint32_t flags;
+ uint32_t padding;
+};
+
+struct fuse_link_in {
+ uint64_t oldnodeid;
+};
+
+struct fuse_setattr_in {
+ uint32_t valid;
+ uint32_t padding;
+ uint64_t fh;
+ uint64_t size;
+ uint64_t lock_owner;
+ uint64_t atime;
+ uint64_t mtime;
+ uint64_t ctime;
+ uint32_t atimensec;
+ uint32_t mtimensec;
+ uint32_t ctimensec;
+ uint32_t mode;
+ uint32_t unused4;
+ uint32_t uid;
+ uint32_t gid;
+ uint32_t unused5;
+};
+
+struct fuse_open_in {
+ uint32_t flags;
+ uint32_t open_flags; /* FUSE_OPEN_... */
+};
+
+struct fuse_create_in {
+ uint32_t flags;
+ uint32_t mode;
+ uint32_t umask;
+ uint32_t open_flags; /* FUSE_OPEN_... */
+};
+
+struct fuse_open_out {
+ uint64_t fh;
+ uint32_t open_flags;
+ int32_t backing_id;
+};
+
+struct fuse_release_in {
+ uint64_t fh;
+ uint32_t flags;
+ uint32_t release_flags;
+ uint64_t lock_owner;
+};
+
+struct fuse_flush_in {
+ uint64_t fh;
+ uint32_t unused;
+ uint32_t padding;
+ uint64_t lock_owner;
+};
+
+struct fuse_read_in {
+ uint64_t fh;
+ uint64_t offset;
+ uint32_t size;
+ uint32_t read_flags;
+ uint64_t lock_owner;
+ uint32_t flags;
+ uint32_t padding;
+};
+
+#define FUSE_COMPAT_WRITE_IN_SIZE 24
+
+struct fuse_write_in {
+ uint64_t fh;
+ uint64_t offset;
+ uint32_t size;
+ uint32_t write_flags;
+ uint64_t lock_owner;
+ uint32_t flags;
+ uint32_t padding;
+};
+
+struct fuse_write_out {
+ uint32_t size;
+ uint32_t padding;
+};
+
+#define FUSE_COMPAT_STATFS_SIZE 48
+
+struct fuse_statfs_out {
+ struct fuse_kstatfs st;
+};
+
+struct fuse_fsync_in {
+ uint64_t fh;
+ uint32_t fsync_flags;
+ uint32_t padding;
+};
+
+#define FUSE_COMPAT_SETXATTR_IN_SIZE 8
+
+struct fuse_setxattr_in {
+ uint32_t size;
+ uint32_t flags;
+ uint32_t setxattr_flags;
+ uint32_t padding;
+};
+
+struct fuse_getxattr_in {
+ uint32_t size;
+ uint32_t padding;
+};
+
+struct fuse_getxattr_out {
+ uint32_t size;
+ uint32_t padding;
+};
+
+struct fuse_lk_in {
+ uint64_t fh;
+ uint64_t owner;
+ struct fuse_file_lock lk;
+ uint32_t lk_flags;
+ uint32_t padding;
+};
+
+struct fuse_lk_out {
+ struct fuse_file_lock lk;
+};
+
+struct fuse_access_in {
+ uint32_t mask;
+ uint32_t padding;
+};
+
+struct fuse_init_in {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t max_readahead;
+ uint32_t flags;
+ uint32_t flags2;
+ uint32_t unused[11];
+};
+
+#define FUSE_COMPAT_INIT_OUT_SIZE 8
+#define FUSE_COMPAT_22_INIT_OUT_SIZE 24
+
+struct fuse_init_out {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t max_readahead;
+ uint32_t flags;
+ uint16_t max_background;
+ uint16_t congestion_threshold;
+ uint32_t max_write;
+ uint32_t time_gran;
+ uint16_t max_pages;
+ uint16_t map_alignment;
+ uint32_t flags2;
+ uint32_t max_stack_depth;
+ uint32_t unused[6];
+};
+
+#define CUSE_INIT_INFO_MAX 4096
+
+struct cuse_init_in {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t unused;
+ uint32_t flags;
+};
+
+struct cuse_init_out {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t unused;
+ uint32_t flags;
+ uint32_t max_read;
+ uint32_t max_write;
+ uint32_t dev_major; /* chardev major */
+ uint32_t dev_minor; /* chardev minor */
+ uint32_t spare[10];
+};
+
+struct fuse_interrupt_in {
+ uint64_t unique;
+};
+
+struct fuse_bmap_in {
+ uint64_t block;
+ uint32_t blocksize;
+ uint32_t padding;
+};
+
+struct fuse_bmap_out {
+ uint64_t block;
+};
+
+struct fuse_ioctl_in {
+ uint64_t fh;
+ uint32_t flags;
+ uint32_t cmd;
+ uint64_t arg;
+ uint32_t in_size;
+ uint32_t out_size;
+};
+
+struct fuse_ioctl_iovec {
+ uint64_t base;
+ uint64_t len;
+};
+
+struct fuse_ioctl_out {
+ int32_t result;
+ uint32_t flags;
+ uint32_t in_iovs;
+ uint32_t out_iovs;
+};
+
+struct fuse_poll_in {
+ uint64_t fh;
+ uint64_t kh;
+ uint32_t flags;
+ uint32_t events;
+};
+
+struct fuse_poll_out {
+ uint32_t revents;
+ uint32_t padding;
+};
+
+struct fuse_notify_poll_wakeup_out {
+ uint64_t kh;
+};
+
+struct fuse_fallocate_in {
+ uint64_t fh;
+ uint64_t offset;
+ uint64_t length;
+ uint32_t mode;
+ uint32_t padding;
+};
+
+/**
+ * FUSE request unique ID flag
+ *
+ * Indicates whether this is a resend request. The receiver should handle this
+ * request accordingly.
+ */
+#define FUSE_UNIQUE_RESEND (1ULL << 63)
+
+struct fuse_in_header {
+ uint32_t len;
+ uint32_t opcode;
+ uint64_t unique;
+ uint64_t nodeid;
+ uint32_t uid;
+ uint32_t gid;
+ uint32_t pid;
+ uint16_t total_extlen; /* length of extensions in 8byte units */
+ uint16_t padding;
+};
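Two small helpers illustrate how the header fields above are consumed: the resend marker lives in the high bit of @unique (FUSE_UNIQUE_RESEND, defined above), and @total_extlen counts 8-byte units. These are sketches with invented names, not part of the protocol definition.

#include <stdint.h>

/* True if the high bit of the request ID marks this as a resent request. */
static inline int fuse_req_is_resend(const struct fuse_in_header *in)
{
    return (in->unique & FUSE_UNIQUE_RESEND) != 0;
}

/* Size in bytes of the extension area carried by the request
 * (total_extlen counts 8-byte units). */
static inline uint32_t fuse_req_ext_bytes(const struct fuse_in_header *in)
{
    return (uint32_t)in->total_extlen * 8;
}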
+
+struct fuse_out_header {
+ uint32_t len;
+ int32_t error;
+ uint64_t unique;
+};
+
+struct fuse_dirent {
+ uint64_t ino;
+ uint64_t off;
+ uint32_t namelen;
+ uint32_t type;
+ char name[];
+};
+
+/* Align variable length records to 64bit boundary */
+#define FUSE_REC_ALIGN(x) \
+ (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
+
+#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
+#define FUSE_DIRENT_ALIGN(x) FUSE_REC_ALIGN(x)
+#define FUSE_DIRENT_SIZE(d) \
+ FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
+
+struct fuse_direntplus {
+ struct fuse_entry_out entry_out;
+ struct fuse_dirent dirent;
+};
+
+#define FUSE_NAME_OFFSET_DIRENTPLUS \
+ offsetof(struct fuse_direntplus, dirent.name)
+#define FUSE_DIRENTPLUS_SIZE(d) \
+ FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen)
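These alignment macros are what a reader of a READDIR reply uses to step through the buffer: each record occupies FUSE_DIRENT_SIZE(d) bytes, i.e. the fixed header plus the (not NUL-terminated) name, rounded up to an 8-byte boundary. The walker below is a bounds-checked sketch with an invented callback signature.

#include <stddef.h>

/* Walk a buffer of fuse_dirent records, invoking cb() for each one. */
static void fuse_walk_dirents(const char *buf, size_t len,
                              void (*cb)(const struct fuse_dirent *d))
{
    size_t off = 0;

    while (off + FUSE_NAME_OFFSET <= len) {
        const struct fuse_dirent *d = (const struct fuse_dirent *)(buf + off);
        size_t reclen = FUSE_DIRENT_SIZE(d);  /* header + name, 8-byte aligned */

        if (off + reclen > len) {
            break;                            /* truncated trailing record */
        }
        cb(d);
        off += reclen;
    }
}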
+
+struct fuse_notify_inval_inode_out {
+ uint64_t ino;
+ int64_t off;
+ int64_t len;
+};
+
+struct fuse_notify_inval_entry_out {
+ uint64_t parent;
+ uint32_t namelen;
+ uint32_t flags;
+};
+
+struct fuse_notify_delete_out {
+ uint64_t parent;
+ uint64_t child;
+ uint32_t namelen;
+ uint32_t padding;
+};
+
+struct fuse_notify_store_out {
+ uint64_t nodeid;
+ uint64_t offset;
+ uint32_t size;
+ uint32_t padding;
+};
+
+struct fuse_notify_retrieve_out {
+ uint64_t notify_unique;
+ uint64_t nodeid;
+ uint64_t offset;
+ uint32_t size;
+ uint32_t padding;
+};
+
+/* Matches the size of fuse_write_in */
+struct fuse_notify_retrieve_in {
+ uint64_t dummy1;
+ uint64_t offset;
+ uint32_t size;
+ uint32_t dummy2;
+ uint64_t dummy3;
+ uint64_t dummy4;
+};
+
+struct fuse_backing_map {
+ int32_t fd;
+ uint32_t flags;
+ uint64_t padding;
+};
+
+/* Device ioctls: */
+#define FUSE_DEV_IOC_MAGIC 229
+#define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
+#define FUSE_DEV_IOC_BACKING_OPEN _IOW(FUSE_DEV_IOC_MAGIC, 1, \
+ struct fuse_backing_map)
+#define FUSE_DEV_IOC_BACKING_CLOSE _IOW(FUSE_DEV_IOC_MAGIC, 2, uint32_t)
+
+struct fuse_lseek_in {
+ uint64_t fh;
+ uint64_t offset;
+ uint32_t whence;
+ uint32_t padding;
+};
+
+struct fuse_lseek_out {
+ uint64_t offset;
+};
+
+struct fuse_copy_file_range_in {
+ uint64_t fh_in;
+ uint64_t off_in;
+ uint64_t nodeid_out;
+ uint64_t fh_out;
+ uint64_t off_out;
+ uint64_t len;
+ uint64_t flags;
+};
+
+#define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0)
+#define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1)
+struct fuse_setupmapping_in {
+ /* An already open handle */
+ uint64_t fh;
+ /* Offset into the file to start the mapping */
+ uint64_t foffset;
+ /* Length of mapping required */
+ uint64_t len;
+ /* Flags, FUSE_SETUPMAPPING_FLAG_* */
+ uint64_t flags;
+ /* Offset in Memory Window */
+ uint64_t moffset;
+};
+
+struct fuse_removemapping_in {
+ /* number of fuse_removemapping_one follows */
+ uint32_t count;
+};
+
+struct fuse_removemapping_one {
+ /* Offset into the dax window at which to start the unmapping */
+ uint64_t moffset;
+ /* Length of mapping required */
+ uint64_t len;
+};
+
+#define FUSE_REMOVEMAPPING_MAX_ENTRY \
+ (PAGE_SIZE / sizeof(struct fuse_removemapping_one))
+
+struct fuse_syncfs_in {
+ uint64_t padding;
+};
+
+/*
+ * For each security context, send a fuse_secctx with the size of the security
+ * context. fuse_secctx is followed by the security context name, which in
+ * turn is followed by the actual context label:
+ * fuse_secctx, name, context
+ */
+struct fuse_secctx {
+ uint32_t size;
+ uint32_t padding;
+};
+
+/*
+ * Contains information about how many fuse_secctx structures are being
+ * sent and the total size of all security contexts (including the
+ * size of fuse_secctx_header).
+ */
+struct fuse_secctx_header {
+ uint32_t size;
+ uint32_t nr_secctx;
+};
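Putting the two structures together, the security-context area is laid out as a fuse_secctx_header followed by @nr_secctx records, each consisting of a fuse_secctx, a NUL-terminated name and @size bytes of context value. The parser below is a rough sketch under those assumptions; bounds checking and any inter-record padding are deliberately omitted.

#include <stdint.h>
#include <string.h>

/* Walk the security-context area: header, then per context the fixed
 * struct, a NUL-terminated name and size bytes of value. */
static void fuse_walk_secctx(const char *p,
                             void (*cb)(const char *name,
                                        const void *value, uint32_t size))
{
    const struct fuse_secctx_header *hdr = (const struct fuse_secctx_header *)p;
    const char *cur = p + sizeof(*hdr);
    uint32_t i;

    for (i = 0; i < hdr->nr_secctx; i++) {
        const struct fuse_secctx *ctx = (const struct fuse_secctx *)cur;
        const char *name = cur + sizeof(*ctx);
        const void *value = name + strlen(name) + 1;

        cb(name, value, ctx->size);
        cur = (const char *)value + ctx->size;
    }
}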
+
+/**
+ * struct fuse_ext_header - extension header
+ * @size: total size of this extension including this header
+ * @type: type of extension
+ *
+ * This is made compatible with fuse_secctx_header by using type values >
+ * FUSE_MAX_NR_SECCTX
+ */
+struct fuse_ext_header {
+ uint32_t size;
+ uint32_t type;
+};
+
+/**
+ * struct fuse_supp_groups - Supplementary group extension
+ * @nr_groups: number of supplementary groups
+ * @groups: flexible array of group IDs
+ */
+struct fuse_supp_groups {
+ uint32_t nr_groups;
+ uint32_t groups[];
+};
+
+#endif /* _LINUX_FUSE_H */
diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h
index 9e6a8ba4ce..2221b0c383 100644
--- a/include/standard-headers/linux/input-event-codes.h
+++ b/include/standard-headers/linux/input-event-codes.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Input event codes
*
@@ -278,7 +278,8 @@
#define KEY_PAUSECD 201
#define KEY_PROG3 202
#define KEY_PROG4 203
-#define KEY_DASHBOARD 204 /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD KEY_ALL_APPLICATIONS
#define KEY_SUSPEND 205
#define KEY_CLOSE 206 /* AC Close */
#define KEY_PLAY 207
@@ -439,10 +440,12 @@
#define KEY_TITLE 0x171
#define KEY_SUBTITLE 0x172
#define KEY_ANGLE 0x173
-#define KEY_ZOOM 0x174
+#define KEY_FULL_SCREEN 0x174 /* AC View Toggle */
+#define KEY_ZOOM KEY_FULL_SCREEN
#define KEY_MODE 0x175
#define KEY_KEYBOARD 0x176
-#define KEY_SCREEN 0x177
+#define KEY_ASPECT_RATIO 0x177 /* HUTRR37: Aspect */
+#define KEY_SCREEN KEY_ASPECT_RATIO
#define KEY_PC 0x178 /* Media Select Computer */
#define KEY_TV 0x179 /* Media Select TV */
#define KEY_TV2 0x17a /* Media Select Cable */
@@ -513,6 +516,9 @@
#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
#define KEY_IMAGES 0x1ba /* AL Image Browser */
+#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
+#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
+#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
@@ -540,6 +546,7 @@
#define KEY_FN_F 0x1e2
#define KEY_FN_S 0x1e3
#define KEY_FN_B 0x1e4
+#define KEY_FN_RIGHT_SHIFT 0x1e5
#define KEY_BRL_DOT1 0x1f1
#define KEY_BRL_DOT2 0x1f2
@@ -595,6 +602,7 @@
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
+#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
@@ -604,6 +612,12 @@
#define KEY_SCREENSAVER 0x245 /* AL Screen Saver */
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
+#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */
+#define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
+#define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
+#define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
@@ -646,6 +660,107 @@
*/
#define KEY_DATA 0x277
#define KEY_ONSCREEN_KEYBOARD 0x278
+/* Electronic privacy screen control */
+#define KEY_PRIVACY_SCREEN_TOGGLE 0x279
+
+/* Select an area of screen to be copied */
+#define KEY_SELECTIVE_SCREENSHOT 0x27a
+
+/* Move the focus to the next or previous user controllable element within a UI container */
+#define KEY_NEXT_ELEMENT 0x27b
+#define KEY_PREVIOUS_ELEMENT 0x27c
+
+/* Toggle Autopilot engagement */
+#define KEY_AUTOPILOT_ENGAGE_TOGGLE 0x27d
+
+/* Shortcut Keys */
+#define KEY_MARK_WAYPOINT 0x27e
+#define KEY_SOS 0x27f
+#define KEY_NAV_CHART 0x280
+#define KEY_FISHING_CHART 0x281
+#define KEY_SINGLE_RANGE_RADAR 0x282
+#define KEY_DUAL_RANGE_RADAR 0x283
+#define KEY_RADAR_OVERLAY 0x284
+#define KEY_TRADITIONAL_SONAR 0x285
+#define KEY_CLEARVU_SONAR 0x286
+#define KEY_SIDEVU_SONAR 0x287
+#define KEY_NAV_INFO 0x288
+#define KEY_BRIGHTNESS_MENU 0x289
+
+/*
+ * Some keyboards have keys which do not have a defined meaning; these keys
+ * are intended to be programmed / bound to macros by the user. For most
+ * keyboards with these macro-keys the key-sequence to inject, or action to
+ * take, is all handled by software on the host side. So from the kernel's
+ * point of view these are just normal keys.
+ *
+ * The KEY_MACRO# codes below are intended for such keys, which may be labeled
+ * e.g. G1-G18, or S1 - S30. The KEY_MACRO# codes MUST NOT be used for keys
+ * where the marking on the key does indicate a defined meaning / purpose.
+ *
+ * The KEY_MACRO# codes MUST also NOT be used as fallback for when no existing
+ * KEY_FOO define matches the marking / purpose. In this case a new KEY_FOO
+ * define MUST be added.
+ */
+#define KEY_MACRO1 0x290
+#define KEY_MACRO2 0x291
+#define KEY_MACRO3 0x292
+#define KEY_MACRO4 0x293
+#define KEY_MACRO5 0x294
+#define KEY_MACRO6 0x295
+#define KEY_MACRO7 0x296
+#define KEY_MACRO8 0x297
+#define KEY_MACRO9 0x298
+#define KEY_MACRO10 0x299
+#define KEY_MACRO11 0x29a
+#define KEY_MACRO12 0x29b
+#define KEY_MACRO13 0x29c
+#define KEY_MACRO14 0x29d
+#define KEY_MACRO15 0x29e
+#define KEY_MACRO16 0x29f
+#define KEY_MACRO17 0x2a0
+#define KEY_MACRO18 0x2a1
+#define KEY_MACRO19 0x2a2
+#define KEY_MACRO20 0x2a3
+#define KEY_MACRO21 0x2a4
+#define KEY_MACRO22 0x2a5
+#define KEY_MACRO23 0x2a6
+#define KEY_MACRO24 0x2a7
+#define KEY_MACRO25 0x2a8
+#define KEY_MACRO26 0x2a9
+#define KEY_MACRO27 0x2aa
+#define KEY_MACRO28 0x2ab
+#define KEY_MACRO29 0x2ac
+#define KEY_MACRO30 0x2ad
+
+/*
+ * Some keyboards with the macro-keys described above have some extra keys
+ * for controlling the host-side software responsible for the macro handling:
+ * -A macro recording start/stop key. Note that not all keyboards which emit
+ *  KEY_MACRO_RECORD_START will also emit KEY_MACRO_RECORD_STOP; if
+ * KEY_MACRO_RECORD_STOP is not advertised, then KEY_MACRO_RECORD_START
+ * should be interpreted as a recording start/stop toggle;
+ * -Keys for switching between different macro (pre)sets, either a key for
+ * cycling through the configured presets or keys to directly select a preset.
+ */
+#define KEY_MACRO_RECORD_START 0x2b0
+#define KEY_MACRO_RECORD_STOP 0x2b1
+#define KEY_MACRO_PRESET_CYCLE 0x2b2
+#define KEY_MACRO_PRESET1 0x2b3
+#define KEY_MACRO_PRESET2 0x2b4
+#define KEY_MACRO_PRESET3 0x2b5
+
+/*
+ * Some keyboards have a built-in LCD panel where the contents are controlled
+ * by the host. Often these have a number of keys directly below the LCD
+ * intended for controlling a menu shown on the LCD. These keys often don't
+ * have any labeling, so we just name them KEY_KBD_LCD_MENU#.
+ */
+#define KEY_KBD_LCD_MENU1 0x2b8
+#define KEY_KBD_LCD_MENU2 0x2b9
+#define KEY_KBD_LCD_MENU3 0x2ba
+#define KEY_KBD_LCD_MENU4 0x2bb
+#define KEY_KBD_LCD_MENU5 0x2bc
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
@@ -708,6 +823,16 @@
#define REL_DIAL 0x07
#define REL_WHEEL 0x08
#define REL_MISC 0x09
+/*
+ * 0x0a is reserved and should not be used in input drivers.
+ * It was used by HID as REL_MISC+1 and userspace needs to detect if
+ * the next REL_* event is correct or is just REL_MISC + n.
+ * We define here REL_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define REL_RESERVED 0x0a
+#define REL_WHEEL_HI_RES 0x0b
+#define REL_HWHEEL_HI_RES 0x0c
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
@@ -741,9 +866,19 @@
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
+#define ABS_PROFILE 0x21
#define ABS_MISC 0x28
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED 0x2e
+
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
@@ -786,7 +921,8 @@
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
-#define SW_MAX_ 0x0f
+#define SW_MACHINE_COVER 0x10 /* set = cover closed */
+#define SW_MAX_ 0x10
#define SW_CNT (SW_MAX_+1)
/*
diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h
index c0ad9fc2c3..942ea6aaa9 100644
--- a/include/standard-headers/linux/input.h
+++ b/include/standard-headers/linux/input.h
@@ -23,13 +23,18 @@
*/
struct input_event {
-#if (HOST_LONG_BITS != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
+#if (HOST_LONG_BITS != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
struct timeval time;
#define input_event_sec time.tv_sec
#define input_event_usec time.tv_usec
#else
unsigned long __sec;
+#if defined(__sparc__) && defined(__arch64__)
+ unsigned int __usec;
+ unsigned int __pad;
+#else
unsigned long __usec;
+#endif
#define input_event_sec __sec
#define input_event_usec __usec
#endif
@@ -70,13 +75,16 @@ struct input_id {
* Note that input core does not clamp reported values to the
* [minimum, maximum] limits, such task is left to userspace.
*
- * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z)
- * is reported in units per millimeter (units/mm), resolution
- * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported
- * in units per radian.
+ * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z,
+ * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units
+ * per millimeter (units/mm), resolution for rotational axes
+ * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian.
+ * The resolution for the size axes (ABS_MT_TOUCH_MAJOR,
+ * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR)
+ * is reported in units per millimeter (units/mm).
* When INPUT_PROP_ACCELEROMETER is set the resolution changes.
* The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in
- * in units per g (units/g) and in units per degree per second
+ * units per g (units/g) and in units per degree per second
* (units/deg/s) for rotational axes (ABS_RX, ABS_RY, ABS_RZ).
*/
struct input_absinfo {
@@ -263,6 +271,7 @@ struct input_mask {
#define BUS_RMI 0x1D
#define BUS_CEC 0x1E
#define BUS_INTEL_ISHTP 0x1F
+#define BUS_AMD_SFH 0x20
/*
* MT_TOOL types
diff --git a/include/standard-headers/linux/kernel.h b/include/standard-headers/linux/kernel.h
index 1eeba2ef92..7848c5ae25 100644
--- a/include/standard-headers/linux/kernel.h
+++ b/include/standard-headers/linux/kernel.h
@@ -3,13 +3,6 @@
#define _LINUX_KERNEL_H
#include "standard-headers/linux/sysinfo.h"
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#include "standard-headers/linux/const.h"
#endif /* _LINUX_KERNEL_H */
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
index ee556ccc93..a39193213f 100644
--- a/include/standard-headers/linux/pci_regs.h
+++ b/include/standard-headers/linux/pci_regs.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * pci_regs.h
- *
* PCI standard defines
* Copyright 1994, Drew Eckhardt
* Copyright 1997--1999 Martin Mares <mj@ucw.cz>
@@ -15,7 +13,7 @@
* PCI System Design Guide
*
* For HyperTransport information, please consult the following manuals
- * from http://www.hypertransport.org
+ * from http://www.hypertransport.org :
*
* The HyperTransport I/O Link Specification
*/
@@ -36,6 +34,7 @@
* of which the first 64 bytes are standardized as follows:
*/
#define PCI_STD_HEADER_SIZEOF 64
+#define PCI_STD_NUM_BARS 6 /* Number of standard BARs */
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
@@ -52,6 +51,7 @@
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_IMM_READY 0x01 /* Immediate Readiness */
#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */
#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
@@ -76,9 +76,11 @@
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_HEADER_TYPE_MASK 0x7f
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
+#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
@@ -246,7 +248,7 @@
#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
-#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
+#define PCI_PM_CAP_PME_D3hot 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
@@ -300,25 +302,25 @@
#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
-/* Message Signalled Interrupts registers */
+/* Message Signaled Interrupt registers */
-#define PCI_MSI_FLAGS 2 /* Message Control */
+#define PCI_MSI_FLAGS 0x02 /* Message Control */
#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
#define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */
#define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */
#define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */
#define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */
#define PCI_MSI_RFU 3 /* Rest of capability flags */
-#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
-#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
-#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
-#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
-#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
-#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
-#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
-
-/* MSI-X registers */
+#define PCI_MSI_ADDRESS_LO 0x04 /* Lower 32 bits */
+#define PCI_MSI_ADDRESS_HI 0x08 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
+#define PCI_MSI_DATA_32 0x08 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 0x0c /* Mask bits register for 32-bit devices */
+#define PCI_MSI_PENDING_32 0x10 /* Pending intrs for 32-bit devices */
+#define PCI_MSI_DATA_64 0x0c /* 16 bits of data for 64-bit devices */
+#define PCI_MSI_MASK_64 0x10 /* Mask bits register for 64-bit devices */
+#define PCI_MSI_PENDING_64 0x14 /* Pending intrs for 64-bit devices */
+
+/* MSI-X registers (in MSI-X capability) */
#define PCI_MSIX_FLAGS 2 /* Message Control */
#define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */
#define PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */
@@ -332,13 +334,13 @@
#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
-/* MSI-X Table entry format */
+/* MSI-X Table entry format (in memory mapped by a BAR) */
#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4
-#define PCI_MSIX_ENTRY_DATA 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
-#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0x0 /* Message Address */
+#define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */
+#define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001
/* CompactPCI Hotswap Register */
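
The MSI-X entry constants are byte offsets within the table that the MSI-X capability points at through a BAR; a hedged helper sketch (assuming the pci_regs.h definitions above and an already-mapped table pointer):

    #include <stdint.h>

    /* Address of one vector's Vector Control word inside a mapped
     * MSI-X table (sketch only; ordering/barrier concerns omitted). */
    static inline volatile uint32_t *
    msix_vector_ctrl(volatile uint8_t *table, unsigned int vector)
    {
        return (volatile uint32_t *)(table +
                                     vector * PCI_MSIX_ENTRY_SIZE +
                                     PCI_MSIX_ENTRY_VECTOR_CTRL);
    }

    /* Masking vector n, for example:
     *     *msix_vector_ctrl(table, n) |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
     */
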
@@ -371,6 +373,12 @@
#define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */
#define PCI_EA_ES 0x00000007 /* Entry Size */
#define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */
+
+/* EA fixed Secondary and Subordinate bus numbers for Bridge */
+#define PCI_EA_SEC_BUS_MASK 0xff
+#define PCI_EA_SUB_BUS_MASK 0xff00
+#define PCI_EA_SUB_BUS_SHIFT 8
+
/* 0-5 map to BARs 0-5 respectively */
#define PCI_EA_BEI_BAR0 0
#define PCI_EA_BEI_BAR5 5
@@ -463,21 +471,21 @@
/* PCI Express capability registers */
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
-#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
-#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
-#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
-#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
-#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
-#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
-#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
-#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */
-#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */
-#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
-#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
-#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
-#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_FLAGS 0x02 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */
+#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */
+#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
+#define PCI_EXP_DEVCAP 0x04 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
#define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */
@@ -490,13 +498,19 @@
#define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
-#define PCI_EXP_DEVCTL 8 /* Device Control */
+#define PCI_EXP_DEVCTL 0x08 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
@@ -509,7 +523,7 @@
#define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */
#define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
-#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define PCI_EXP_DEVSTA 0x0a /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
#define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */
#define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */
@@ -517,14 +531,18 @@
#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
-#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+#define PCI_EXP_LNKCAP_SLS_32_0GB 0x00000005 /* LNKCAP2 SLS Vector bit 4 */
+#define PCI_EXP_LNKCAP_SLS_64_0GB 0x00000006 /* LNKCAP2 SLS Vector bit 5 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#define PCI_EXP_LNKCAP_ASPM_L0S 0x00000400 /* ASPM L0s Support */
+#define PCI_EXP_LNKCAP_ASPM_L1 0x00000800 /* ASPM L1 Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */
#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */
@@ -532,7 +550,7 @@
#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */
#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
-#define PCI_EXP_LNKCTL 16 /* Link Control */
+#define PCI_EXP_LNKCTL 0x10 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */
#define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */
@@ -545,12 +563,14 @@
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
-#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_LNKSTA 0x12 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_32_0GB 0x0005 /* Current Link Speed 32.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_64_0GB 0x0006 /* Current Link Speed 64.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
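
Decoding the Current Link Speed field stays a plain mask-and-compare even with the new 32/64 GT/s values; a hedged sketch where lnksta is the 16-bit Link Status register read from config space:

    #include <stdint.h>

    /* Sketch: map PCI_EXP_LNKSTA_CLS to a human-readable speed. */
    static const char *pcie_current_link_speed(uint16_t lnksta)
    {
        switch (lnksta & PCI_EXP_LNKSTA_CLS) {
        case PCI_EXP_LNKSTA_CLS_2_5GB:  return "2.5 GT/s";
        case PCI_EXP_LNKSTA_CLS_5_0GB:  return "5.0 GT/s";
        case PCI_EXP_LNKSTA_CLS_8_0GB:  return "8.0 GT/s";
        case PCI_EXP_LNKSTA_CLS_16_0GB: return "16.0 GT/s";
        case PCI_EXP_LNKSTA_CLS_32_0GB: return "32.0 GT/s";
        case PCI_EXP_LNKSTA_CLS_64_0GB: return "64.0 GT/s";
        default:                        return "unknown";
        }
    }
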
@@ -563,7 +583,7 @@
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */
-#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCAP 0x14 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */
@@ -576,7 +596,7 @@
#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */
#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */
#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */
-#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTCTL 0x18 /* Slot Control */
#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
@@ -584,6 +604,7 @@
#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */
#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */
#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */
+#define PCI_EXP_SLTCTL_ATTN_IND_SHIFT 6 /* Attention Indicator shift */
#define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */
#define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */
#define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */
@@ -596,7 +617,9 @@
#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
-#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_SLTCTL_ASPL_DISABLE 0x2000 /* Auto Slot Power Limit Disable */
+#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */
+#define PCI_EXP_SLTSTA 0x1a /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
@@ -606,17 +629,18 @@
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
-#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCTL 0x1c /* Root Control */
#define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
-#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
-#define PCI_EXP_RTSTA 32 /* Root Status */
-#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
-#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
+#define PCI_EXP_RTSTA 0x20 /* Root Status */
+#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
+#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
+#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
* The Device Capabilities 2, Device Status 2, Device Control 2,
* Link Capabilities 2, Link Status 2, Link Control 2,
@@ -625,7 +649,7 @@
* Use pcie_capability_read_word() and similar interfaces to use them
* safely.
*/
-#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCAP2 0x24 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
@@ -636,38 +660,47 @@
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
-#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
-#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
+#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
-#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
-#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
+#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
#define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */
#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
-#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
-#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
-#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
+#define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */
+#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */
+#define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+#define PCI_EXP_LNKCAP2_SLS_32_0GB 0x00000020 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCAP2_SLS_64_0GB 0x00000040 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
-#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
-#define PCI_EXP_LNKCTL2_TLS 0x000f
-#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
-#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
-#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
-#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
-#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
-#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
-#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
-#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
+#define PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2_TLS 0x000f
+#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
+#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */
+#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
+#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
+#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
+#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+#define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
+#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
+#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
+#define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+#define PCI_EXP_SLTSTA2 0x3a /* Slot Status 2 */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -704,13 +737,18 @@
#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM
+#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
+#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
+#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
+#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */
-#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
+#define PCI_ERR_UNCOR_STATUS 0x04 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
@@ -728,11 +766,11 @@
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
-#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
+#define PCI_ERR_UNCOR_MASK 0x08 /* Uncorrectable Error Mask */
/* Same bits as above */
-#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
+#define PCI_ERR_UNCOR_SEVER 0x0c /* Uncorrectable Error Severity */
/* Same bits as above */
-#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
+#define PCI_ERR_COR_STATUS 0x10 /* Correctable Error Status */
#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
@@ -741,74 +779,81 @@
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
-#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
+#define PCI_ERR_COR_MASK 0x14 /* Correctable Error Mask */
/* Same bits as above */
-#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
-#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
+#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/
+#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
-#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
-#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
-#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
-#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_STATUS 48
-#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
-#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
-#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
-#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */
-#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */
-#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
-#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
-#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
-#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
+#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
+#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
+#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_STATUS 0x30
+#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
+#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
+#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
+#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */
+#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */
+#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
+#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
+#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
+#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
/* Virtual Channel */
-#define PCI_VC_PORT_CAP1 4
+#define PCI_VC_PORT_CAP1 0x04
#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
-#define PCI_VC_PORT_CAP2 8
+#define PCI_VC_PORT_CAP2 0x08
#define PCI_VC_CAP2_32_PHASE 0x00000002
#define PCI_VC_CAP2_64_PHASE 0x00000004
#define PCI_VC_CAP2_128_PHASE 0x00000008
#define PCI_VC_CAP2_ARB_OFF 0xff000000
-#define PCI_VC_PORT_CTRL 12
+#define PCI_VC_PORT_CTRL 0x0c
#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
-#define PCI_VC_PORT_STATUS 14
+#define PCI_VC_PORT_STATUS 0x0e
#define PCI_VC_PORT_STATUS_TABLE 0x00000001
-#define PCI_VC_RES_CAP 16
+#define PCI_VC_RES_CAP 0x10
#define PCI_VC_RES_CAP_32_PHASE 0x00000002
#define PCI_VC_RES_CAP_64_PHASE 0x00000004
#define PCI_VC_RES_CAP_128_PHASE 0x00000008
#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
#define PCI_VC_RES_CAP_256_PHASE 0x00000020
#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
-#define PCI_VC_RES_CTRL 20
+#define PCI_VC_RES_CTRL 0x14
#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
#define PCI_VC_RES_CTRL_ID 0x07000000
#define PCI_VC_RES_CTRL_ENABLE 0x80000000
-#define PCI_VC_RES_STATUS 26
+#define PCI_VC_RES_STATUS 0x1a
#define PCI_VC_RES_STATUS_TABLE 0x00000001
#define PCI_VC_RES_STATUS_NEGO 0x00000002
#define PCI_CAP_VC_BASE_SIZEOF 0x10
-#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
+#define PCI_CAP_VC_PER_VC_SIZEOF 0x0c
/* Power Budgeting */
-#define PCI_PWR_DSR 4 /* Data Select Register */
-#define PCI_PWR_DATA 8 /* Data Register */
+#define PCI_PWR_DSR 0x04 /* Data Select Register */
+#define PCI_PWR_DATA 0x08 /* Data Register */
#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
-#define PCI_PWR_CAP 12 /* Capability */
+#define PCI_PWR_CAP 0x0c /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
-#define PCI_EXT_CAP_PWR_SIZEOF 16
+#define PCI_EXT_CAP_PWR_SIZEOF 0x10
+
+/* Root Complex Event Collector Endpoint Association */
+#define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */
+#define PCI_RCEC_BUSN 8 /* RCEC Associated Bus Numbers */
+#define PCI_RCEC_BUSN_REG_VER 0x02 /* Least version with BUSN present */
+#define PCI_RCEC_BUSN_NEXT(x) (((x) >> 8) & 0xff)
+#define PCI_RCEC_BUSN_LAST(x) (((x) >> 16) & 0xff)
/* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */
#define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */
@@ -865,6 +910,7 @@
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
+#define PCI_ATS_CAP_PAGE_ALIGNED 0x0020 /* Page Aligned Request */
#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
@@ -873,38 +919,40 @@
/* Page Request Interface */
#define PCI_PRI_CTRL 0x04 /* PRI control register */
-#define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */
-#define PCI_PRI_CTRL_RESET 0x02 /* Reset */
+#define PCI_PRI_CTRL_ENABLE 0x0001 /* Enable */
+#define PCI_PRI_CTRL_RESET 0x0002 /* Reset */
#define PCI_PRI_STATUS 0x06 /* PRI status register */
-#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
-#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
-#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
+#define PCI_PRI_STATUS_RF 0x0001 /* Response Failure */
+#define PCI_PRI_STATUS_UPRGI 0x0002 /* Unexpected PRG index */
+#define PCI_PRI_STATUS_STOPPED 0x0100 /* PRI Stopped */
+#define PCI_PRI_STATUS_PASID 0x8000 /* PRG Response PASID Required */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
#define PCI_EXT_CAP_PRI_SIZEOF 16
/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
-#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
-#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */
+#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_WIDTH 0x1f00
#define PCI_PASID_CTRL 0x06 /* PASID control register */
-#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
-#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
-#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
+#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */
+#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */
+#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
-#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
+#define PCI_SRIOV_CAP_VFM 0x00000001 /* VF Migration Capable */
#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
-#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
-#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
-#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
-#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
-#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
+#define PCI_SRIOV_CTRL_VFE 0x0001 /* VF Enable */
+#define PCI_SRIOV_CTRL_VFM 0x0002 /* VF Migration Enable */
+#define PCI_SRIOV_CTRL_INTR 0x0004 /* VF Migration Interrupt Enable */
+#define PCI_SRIOV_CTRL_MSE 0x0008 /* VF Memory Space Enable */
+#define PCI_SRIOV_CTRL_ARI 0x0010 /* ARI Capable Hierarchy */
#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
-#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
+#define PCI_SRIOV_STATUS_VFM 0x0001 /* VF Migration Status */
#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
@@ -923,24 +971,26 @@
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
-#define PCI_EXT_CAP_SRIOV_SIZEOF 64
+#define PCI_EXT_CAP_SRIOV_SIZEOF 0x40
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
+#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */
+#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
-#define PCI_ACS_SV 0x01 /* Source Validation */
-#define PCI_ACS_TB 0x02 /* Translation Blocking */
-#define PCI_ACS_RR 0x04 /* P2P Request Redirect */
-#define PCI_ACS_CR 0x08 /* P2P Completion Redirect */
-#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
-#define PCI_ACS_EC 0x20 /* P2P Egress Control */
-#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
+#define PCI_ACS_SV 0x0001 /* Source Validation */
+#define PCI_ACS_TB 0x0002 /* Translation Blocking */
+#define PCI_ACS_RR 0x0004 /* P2P Request Redirect */
+#define PCI_ACS_CR 0x0008 /* P2P Completion Redirect */
+#define PCI_ACS_UF 0x0010 /* Upstream Forwarding */
+#define PCI_ACS_EC 0x0020 /* P2P Egress Control */
+#define PCI_ACS_DT 0x0040 /* Direct Translated P2P */
#define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
@@ -976,12 +1026,12 @@
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
-#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
-#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
-#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */
+#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */
/* Downstream Port Containment */
-#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */
#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
@@ -989,19 +1039,26 @@
#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
-#define PCI_EXP_DPC_CTL 6 /* DPC control */
-#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
-#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
-#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
+#define PCI_EXP_DPC_CTL 0x06 /* DPC control */
+#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
+#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
+#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
-#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
+#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */
+#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
-#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
@@ -1015,6 +1072,7 @@
/* Precision Time Measurement */
#define PCI_PTM_CAP 0x04 /* PTM Capability */
#define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */
+#define PCI_PTM_CAP_RES 0x00000002 /* Responder capable */
#define PCI_PTM_CAP_ROOT 0x00000004 /* Root capable */
#define PCI_PTM_GRANULARITY_MASK 0x0000FF00 /* Clock granularity */
#define PCI_PTM_CTRL 0x08 /* PTM Control */
@@ -1036,10 +1094,58 @@
#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */
+#define PCI_L1SS_CTL1_L1_2_MASK 0x00000005
#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f
#define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
+#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */
+#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */
+
+/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
+#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */
+#define PCI_DVSEC_HEADER1_VID(x) ((x) & 0xffff)
+#define PCI_DVSEC_HEADER1_REV(x) (((x) >> 16) & 0xf)
+#define PCI_DVSEC_HEADER1_LEN(x) (((x) >> 20) & 0xfff)
+#define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */
+#define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff)
+
+/* Data Link Feature */
+#define PCI_DLF_CAP 0x04 /* Capabilities Register */
+#define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */
+
+/* Physical Layer 16.0 GT/s */
+#define PCI_PL_16GT_LE_CTRL 0x20 /* Lane Equalization Control Register */
+#define PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK 0x0000000F
+#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
+#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+
+/* Data Object Exchange */
+#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
+#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
+#define PCI_DOE_CAP_INT_MSG_NUM 0x00000ffe /* Interrupt Message Number */
+#define PCI_DOE_CTRL 0x08 /* DOE Control Register */
+#define PCI_DOE_CTRL_ABORT 0x00000001 /* DOE Abort */
+#define PCI_DOE_CTRL_INT_EN 0x00000002 /* DOE Interrupt Enable */
+#define PCI_DOE_CTRL_GO 0x80000000 /* DOE Go */
+#define PCI_DOE_STATUS 0x0c /* DOE Status Register */
+#define PCI_DOE_STATUS_BUSY 0x00000001 /* DOE Busy */
+#define PCI_DOE_STATUS_INT_STATUS 0x00000002 /* DOE Interrupt Status */
+#define PCI_DOE_STATUS_ERROR 0x00000004 /* DOE Error */
+#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */
+#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */
+#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */
+#define PCI_DOE_CAP_SIZEOF 0x18 /* Size of DOE register block */
+
+/* DOE Data Object - note not actually registers */
+#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_HEADER_1_TYPE 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff
+
+#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000
#endif /* LINUX_PCI_REGS_H */
diff --git a/include/standard-headers/linux/pvpanic.h b/include/standard-headers/linux/pvpanic.h
new file mode 100644
index 0000000000..54b7485390
--- /dev/null
+++ b/include/standard-headers/linux/pvpanic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __PVPANIC_H__
+#define __PVPANIC_H__
+
+#define PVPANIC_PANICKED (1 << 0)
+#define PVPANIC_CRASH_LOADED (1 << 1)
+
+#endif /* __PVPANIC_H__ */
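
These two bits are the whole guest/host ABI: the guest driver writes them to the pvpanic register (an I/O port, typically 0x505 for the ISA variant, or an MMIO location) and the host reacts. A hedged host-side sketch of interpreting such a write:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch only; the real QEMU device turns these writes into
     * guest-panic events rather than printing. */
    static void pvpanic_handle_guest_write(uint8_t val)
    {
        if (val & PVPANIC_PANICKED) {
            printf("guest kernel panicked\n");
        }
        if (val & PVPANIC_CRASH_LOADED) {
            printf("guest loaded a crash (kdump) kernel\n");
        }
    }
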
diff --git a/include/standard-headers/linux/udmabuf.h b/include/standard-headers/linux/udmabuf.h
new file mode 100644
index 0000000000..e19eb5b5ce
--- /dev/null
+++ b/include/standard-headers/linux/udmabuf.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_UDMABUF_H
+#define _LINUX_UDMABUF_H
+
+#include "standard-headers/linux/types.h"
+
+#define UDMABUF_FLAGS_CLOEXEC 0x01
+
+struct udmabuf_create {
+ uint32_t memfd;
+ uint32_t flags;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct udmabuf_create_item {
+ uint32_t memfd;
+ uint32_t __pad;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct udmabuf_create_list {
+ uint32_t flags;
+ uint32_t count;
+ struct udmabuf_create_item list[];
+};
+
+#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
+#define UDMABUF_CREATE_LIST _IOW('u', 0x43, struct udmabuf_create_list)
+
+#endif /* _LINUX_UDMABUF_H */
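
A hedged user-space sketch of the intended flow: back the buffer with a sealed memfd, then ask /dev/udmabuf to wrap it as a dma-buf (error handling trimmed; the driver expects the F_SEAL_SHRINK seal on the memfd):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/udmabuf.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Returns a dma-buf fd on success, -1 on failure (checks trimmed). */
    int create_udmabuf(size_t size)
    {
        struct udmabuf_create create = { 0 };
        int memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
        int devfd, buffd;

        ftruncate(memfd, size);
        fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);      /* required seal */

        devfd = open("/dev/udmabuf", O_RDWR);
        create.memfd  = memfd;
        create.offset = 0;
        create.size   = size;
        buffd = ioctl(devfd, UDMABUF_CREATE, &create); /* dma-buf fd */

        close(devfd);
        close(memfd);
        return buffd;
    }
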
diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h
new file mode 100644
index 0000000000..fd54044936
--- /dev/null
+++ b/include/standard-headers/linux/vhost_types.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_VHOST_TYPES_H
+#define _LINUX_VHOST_TYPES_H
+/* Userspace interface for in-kernel virtio accelerators. */
+
+/* vhost is used to reduce the number of system calls involved in virtio.
+ *
+ * Existing virtio net code is used in the guest without modification.
+ *
+ * This header includes interface used by userspace hypervisor for
+ * device configuration.
+ */
+
+#include "standard-headers/linux/types.h"
+
+#include "standard-headers/linux/virtio_config.h"
+#include "standard-headers/linux/virtio_ring.h"
+
+struct vhost_vring_state {
+ unsigned int index;
+ unsigned int num;
+};
+
+struct vhost_vring_file {
+ unsigned int index;
+ int fd; /* Pass -1 to unbind from file. */
+
+};
+
+struct vhost_vring_addr {
+ unsigned int index;
+ /* Option flags. */
+ unsigned int flags;
+ /* Flag values: */
+ /* Whether log address is valid. If set enables logging. */
+#define VHOST_VRING_F_LOG 0
+
+ /* Start of array of descriptors (virtually contiguous) */
+ uint64_t desc_user_addr;
+ /* Used structure address. Must be 32 bit aligned */
+ uint64_t used_user_addr;
+ /* Available structure address. Must be 16 bit aligned */
+ uint64_t avail_user_addr;
+ /* Logging support. */
+ /* Log writes to used structure, at offset calculated from specified
+ * address. Address must be 32 bit aligned. */
+ uint64_t log_guest_addr;
+};
+
+struct vhost_worker_state {
+ /*
+ * For VHOST_NEW_WORKER the kernel will return the new vhost_worker id.
+ * For VHOST_FREE_WORKER this must be set to the id of the vhost_worker
+ * to free.
+ */
+ unsigned int worker_id;
+};
+
+struct vhost_vring_worker {
+ /* vring index */
+ unsigned int index;
+ /* The id of the vhost_worker returned from VHOST_NEW_WORKER */
+ unsigned int worker_id;
+};
+
+/* no alignment requirement */
+struct vhost_iotlb_msg {
+ uint64_t iova;
+ uint64_t size;
+ uint64_t uaddr;
+#define VHOST_ACCESS_RO 0x1
+#define VHOST_ACCESS_WO 0x2
+#define VHOST_ACCESS_RW 0x3
+ uint8_t perm;
+#define VHOST_IOTLB_MISS 1
+#define VHOST_IOTLB_UPDATE 2
+#define VHOST_IOTLB_INVALIDATE 3
+#define VHOST_IOTLB_ACCESS_FAIL 4
+/*
+ * VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
+ * multiple mappings in one go: beginning with
+ * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
+ * VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
+ * When one of these two values is used as the message type, the rest
+ * of the fields in the message are ignored. There's no guarantee that
+ * these changes take place automatically in the device.
+ */
+#define VHOST_IOTLB_BATCH_BEGIN 5
+#define VHOST_IOTLB_BATCH_END 6
+ uint8_t type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
+
+struct vhost_msg {
+ int type;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ uint8_t padding[64];
+ };
+};
+
+struct vhost_msg_v2 {
+ uint32_t type;
+ uint32_t asid;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ uint8_t padding[64];
+ };
+};
+
+struct vhost_memory_region {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size; /* bytes */
+ uint64_t userspace_addr;
+ uint64_t flags_padding; /* No flags are currently specified. */
+};
+
+/* All region addresses and sizes must be 4K aligned. */
+#define VHOST_PAGE_SIZE 0x1000
+
+struct vhost_memory {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[];
+};
+
+/* VHOST_SCSI specific definitions */
+
+/*
+ * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
+ *
+ * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
+ * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
+ * ABI Rev 1: January 2013. Ignore vhost_tpgt field in struct vhost_scsi_target.
+ * All the targets under vhost_wwpn can be seen and used by guest.
+ */
+
+#define VHOST_SCSI_ABI_VERSION 1
+
+struct vhost_scsi_target {
+ int abi_version;
+ char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */
+ unsigned short vhost_tpgt;
+ unsigned short reserved;
+};
+
+/* VHOST_VDPA specific definitions */
+
+struct vhost_vdpa_config {
+ uint32_t off;
+ uint32_t len;
+ uint8_t buf[];
+};
+
+/* vhost vdpa IOVA range
+ * @first: First address that can be mapped by vhost-vDPA
+ * @last: Last address that can be mapped by vhost-vDPA
+ */
+struct vhost_vdpa_iova_range {
+ uint64_t first;
+ uint64_t last;
+};
+
+/* Feature bits */
+/* Log all write descriptors. Can be changed while device is active. */
+#define VHOST_F_LOG_ALL 26
+/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
+#define VHOST_NET_F_VIRTIO_NET_HDR 27
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
+/* IOTLB can accept address space identifier through V2 type of IOTLB
+ * message
+ */
+#define VHOST_BACKEND_F_IOTLB_ASID 0x3
+/* Device can be suspended */
+#define VHOST_BACKEND_F_SUSPEND 0x4
+/* Device can be resumed */
+#define VHOST_BACKEND_F_RESUME 0x5
+/* Device supports the driver enabling virtqueues both before and after
+ * DRIVER_OK
+ */
+#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID 0x7
+/* IOTLB don't flush memory mapping across device reset */
+#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8
+
+#endif
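
For the IOTLB path, a hedged sketch of how a user-space backend would push one IOVA-to-user-address mapping using the V2 framing (the vhost fd and the addresses are placeholders; the header above is assumed to be included):

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    /* Sketch only: send a single VHOST_IOTLB_UPDATE via write() on the
     * vhost character device fd. */
    static int vhost_iotlb_map(int vhost_fd, uint64_t iova,
                               uint64_t size, uint64_t uaddr)
    {
        struct vhost_msg_v2 msg;

        memset(&msg, 0, sizeof(msg));
        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb.iova  = iova;
        msg.iotlb.size  = size;
        msg.iotlb.uaddr = uaddr;
        msg.iotlb.perm  = VHOST_ACCESS_RW;
        msg.iotlb.type  = VHOST_IOTLB_UPDATE;

        return write(vhost_fd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg)
               ? 0 : -1;
    }
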
diff --git a/include/standard-headers/linux/virtio_9p.h b/include/standard-headers/linux/virtio_9p.h
index e68f71dbe6..da61dee98c 100644
--- a/include/standard-headers/linux/virtio_9p.h
+++ b/include/standard-headers/linux/virtio_9p.h
@@ -25,7 +25,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/virtio_types.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
@@ -36,9 +36,9 @@
struct virtio_9p_config {
/* length of the tag name */
- uint16_t tag_len;
+ __virtio16 tag_len;
/* non-NULL terminated tag name */
- uint8_t tag[0];
+ uint8_t tag[];
} QEMU_PACKED;
#endif /* _LINUX_VIRTIO_9P_H */
diff --git a/include/standard-headers/linux/virtio_balloon.h b/include/standard-headers/linux/virtio_balloon.h
index 4dbb7dc6c0..f343bfefd8 100644
--- a/include/standard-headers/linux/virtio_balloon.h
+++ b/include/standard-headers/linux/virtio_balloon.h
@@ -34,15 +34,31 @@
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
+#define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */
+#define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */
+#define VIRTIO_BALLOON_F_REPORTING 5 /* Page reporting virtqueue */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
+#define VIRTIO_BALLOON_CMD_ID_STOP 0
+#define VIRTIO_BALLOON_CMD_ID_DONE 1
struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
uint32_t num_pages;
/* Number of pages we've actually got in balloon. */
uint32_t actual;
+ /*
+ * Free page hint command id, readonly by guest.
+ * Was previously named free_page_report_cmd_id so we
+ * need to carry that name for legacy support.
+ */
+ union {
+ uint32_t free_page_hint_cmd_id;
+ uint32_t free_page_report_cmd_id; /* deprecated */
+ };
+ /* Stores PAGE_POISON if page poisoning is in use */
+ uint32_t poison_val;
};
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
diff --git a/include/standard-headers/linux/virtio_blk.h b/include/standard-headers/linux/virtio_blk.h
index ab16ec5fd2..d7be3cf5e4 100644
--- a/include/standard-headers/linux/virtio_blk.h
+++ b/include/standard-headers/linux/virtio_blk.h
@@ -38,6 +38,10 @@
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
+#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */
+#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */
+#define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */
+#define VIRTIO_BLK_F_ZONED 17 /* Zoned block device */
/* Legacy feature bits */
#ifndef VIRTIO_BLK_NO_LEGACY
@@ -53,20 +57,20 @@
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
- uint64_t capacity;
+ __virtio64 capacity;
/* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
- uint32_t size_max;
+ __virtio32 size_max;
/* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
- uint32_t seg_max;
+ __virtio32 seg_max;
/* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
struct virtio_blk_geometry {
- uint16_t cylinders;
+ __virtio16 cylinders;
uint8_t heads;
uint8_t sectors;
} geometry;
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
- uint32_t blk_size;
+ __virtio32 blk_size;
/* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
/* exponent for physical block per logical block. */
@@ -74,16 +78,74 @@ struct virtio_blk_config {
/* alignment offset in logical blocks. */
uint8_t alignment_offset;
/* minimum I/O size without performance penalty in logical blocks. */
- uint16_t min_io_size;
+ __virtio16 min_io_size;
/* optimal sustained I/O size in logical blocks. */
- uint32_t opt_io_size;
+ __virtio32 opt_io_size;
/* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
uint8_t wce;
uint8_t unused;
/* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
- uint16_t num_queues;
+ __virtio16 num_queues;
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */
+ /*
+ * The maximum discard sectors (in 512-byte sectors) for
+ * one segment.
+ */
+ __virtio32 max_discard_sectors;
+ /*
+ * The maximum number of discard segments in a
+ * discard command.
+ */
+ __virtio32 max_discard_seg;
+ /* Discard commands must be aligned to this number of sectors. */
+ __virtio32 discard_sector_alignment;
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */
+ /*
+ * The maximum number of write zeroes sectors (in 512-byte sectors) in
+ * one segment.
+ */
+ __virtio32 max_write_zeroes_sectors;
+ /*
+ * The maximum number of segments in a write zeroes
+ * command.
+ */
+ __virtio32 max_write_zeroes_seg;
+ /*
+ * Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the
+ * deallocation of one or more of the sectors.
+ */
+ uint8_t write_zeroes_may_unmap;
+
+ uint8_t unused1[3];
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_SECURE_ERASE */
+ /*
+ * The maximum secure erase sectors (in 512-byte sectors) for
+ * one segment.
+ */
+ __virtio32 max_secure_erase_sectors;
+ /*
+ * The maximum number of secure erase segments in a
+ * secure erase command.
+ */
+ __virtio32 max_secure_erase_seg;
+ /* Secure erase commands must be aligned to this number of sectors. */
+ __virtio32 secure_erase_sector_alignment;
+
+ /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
+ struct virtio_blk_zoned_characteristics {
+ __virtio32 zone_sectors;
+ __virtio32 max_open_zones;
+ __virtio32 max_active_zones;
+ __virtio32 max_append_sectors;
+ __virtio32 write_granularity;
+ uint8_t model;
+ uint8_t unused2[3];
+ } zoned;
} QEMU_PACKED;
/*
@@ -112,6 +174,36 @@ struct virtio_blk_config {
/* Get device ID command */
#define VIRTIO_BLK_T_GET_ID 8
+/* Discard command */
+#define VIRTIO_BLK_T_DISCARD 11
+
+/* Write zeroes command */
+#define VIRTIO_BLK_T_WRITE_ZEROES 13
+
+/* Secure erase command */
+#define VIRTIO_BLK_T_SECURE_ERASE 14
+
+/* Zone append command */
+#define VIRTIO_BLK_T_ZONE_APPEND 15
+
+/* Report zones command */
+#define VIRTIO_BLK_T_ZONE_REPORT 16
+
+/* Open zone command */
+#define VIRTIO_BLK_T_ZONE_OPEN 18
+
+/* Close zone command */
+#define VIRTIO_BLK_T_ZONE_CLOSE 20
+
+/* Finish zone command */
+#define VIRTIO_BLK_T_ZONE_FINISH 22
+
+/* Reset zone command */
+#define VIRTIO_BLK_T_ZONE_RESET 24
+
+/* Reset All zones command */
+#define VIRTIO_BLK_T_ZONE_RESET_ALL 26
+
#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
@@ -131,6 +223,85 @@ struct virtio_blk_outhdr {
__virtio64 sector;
};
+/*
+ * Supported zoned device models.
+ */
+
+/* Regular block device */
+#define VIRTIO_BLK_Z_NONE 0
+/* Host-managed zoned device */
+#define VIRTIO_BLK_Z_HM 1
+/* Host-aware zoned device */
+#define VIRTIO_BLK_Z_HA 2
+
+/*
+ * Zone descriptor. A part of VIRTIO_BLK_T_ZONE_REPORT command reply.
+ */
+struct virtio_blk_zone_descriptor {
+ /* Zone capacity */
+ __virtio64 z_cap;
+ /* The starting sector of the zone */
+ __virtio64 z_start;
+ /* Zone write pointer position in sectors */
+ __virtio64 z_wp;
+ /* Zone type */
+ uint8_t z_type;
+ /* Zone state */
+ uint8_t z_state;
+ uint8_t reserved[38];
+};
+
+struct virtio_blk_zone_report {
+ __virtio64 nr_zones;
+ uint8_t reserved[56];
+ struct virtio_blk_zone_descriptor zones[];
+};
+
+/*
+ * Supported zone types.
+ */
+
+/* Conventional zone */
+#define VIRTIO_BLK_ZT_CONV 1
+/* Sequential Write Required zone */
+#define VIRTIO_BLK_ZT_SWR 2
+/* Sequential Write Preferred zone */
+#define VIRTIO_BLK_ZT_SWP 3
+
+/*
+ * Zone states that are available for zones of all types.
+ */
+
+/* Not a write pointer (conventional zones only) */
+#define VIRTIO_BLK_ZS_NOT_WP 0
+/* Empty */
+#define VIRTIO_BLK_ZS_EMPTY 1
+/* Implicitly Open */
+#define VIRTIO_BLK_ZS_IOPEN 2
+/* Explicitly Open */
+#define VIRTIO_BLK_ZS_EOPEN 3
+/* Closed */
+#define VIRTIO_BLK_ZS_CLOSED 4
+/* Read-Only */
+#define VIRTIO_BLK_ZS_RDONLY 13
+/* Full */
+#define VIRTIO_BLK_ZS_FULL 14
+/* Offline */
+#define VIRTIO_BLK_ZS_OFFLINE 15
+
+/* Unmap this range (only valid for write zeroes command) */
+#define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001
+
+/* Discard/write zeroes range for each request. */
+struct virtio_blk_discard_write_zeroes {
+ /* discard/write zeroes start sector */
+ uint64_t sector;
+ /* number of discard/write zeroes sectors */
+ uint32_t num_sectors;
+ /* flags for this range */
+ uint32_t flags;
+};
+
#ifndef VIRTIO_BLK_NO_LEGACY
struct virtio_scsi_inhdr {
__virtio32 errors;
@@ -144,4 +315,11 @@ struct virtio_scsi_inhdr {
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
#define VIRTIO_BLK_S_UNSUPP 2
+
+/* Error codes that are specific to zoned block devices */
+#define VIRTIO_BLK_S_ZONE_INVALID_CMD 3
+#define VIRTIO_BLK_S_ZONE_UNALIGNED_WP 4
+#define VIRTIO_BLK_S_ZONE_OPEN_RESOURCE 5
+#define VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE 6
+
#endif /* _LINUX_VIRTIO_BLK_H */
diff --git a/include/standard-headers/linux/virtio_bt.h b/include/standard-headers/linux/virtio_bt.h
new file mode 100644
index 0000000000..a11ecc3f92
--- /dev/null
+++ b/include/standard-headers/linux/virtio_bt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _LINUX_VIRTIO_BT_H
+#define _LINUX_VIRTIO_BT_H
+
+#include "standard-headers/linux/virtio_types.h"
+
+/* Feature bits */
+#define VIRTIO_BT_F_VND_HCI 0 /* Indicates vendor command support */
+#define VIRTIO_BT_F_MSFT_EXT 1 /* Indicates MSFT vendor support */
+#define VIRTIO_BT_F_AOSP_EXT 2 /* Indicates AOSP vendor support */
+#define VIRTIO_BT_F_CONFIG_V2 3 /* Use second version configuration */
+
+enum virtio_bt_config_type {
+ VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0,
+ VIRTIO_BT_CONFIG_TYPE_AMP = 1,
+};
+
+enum virtio_bt_config_vendor {
+ VIRTIO_BT_CONFIG_VENDOR_NONE = 0,
+ VIRTIO_BT_CONFIG_VENDOR_ZEPHYR = 1,
+ VIRTIO_BT_CONFIG_VENDOR_INTEL = 2,
+ VIRTIO_BT_CONFIG_VENDOR_REALTEK = 3,
+};
+
+struct virtio_bt_config {
+ uint8_t type;
+ uint16_t vendor;
+ uint16_t msft_opcode;
+} QEMU_PACKED;
+
+struct virtio_bt_config_v2 {
+ uint8_t type;
+ uint8_t alignment;
+ uint16_t vendor;
+ uint16_t msft_opcode;
+};
+
+#endif /* _LINUX_VIRTIO_BT_H */
diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h
index 0b194365a0..45be0fa1bc 100644
--- a/include/standard-headers/linux/virtio_config.h
+++ b/include/standard-headers/linux/virtio_config.h
@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 38
+#define VIRTIO_TRANSPORT_F_END 42
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -67,16 +67,55 @@
#define VIRTIO_F_VERSION_1 32
/*
- * If clear - device has the IOMMU bypass quirk feature.
- * If set - use platform tools to detect the IOMMU.
+ * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ * If set - use platform DMA tools to access the memory.
*
* Note the reverse polarity (compared to most other features),
* this is for compatibility with legacy systems.
*/
-#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_ACCESS_PLATFORM 33
+/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */
+#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM
+
+/* This feature indicates support for the packed virtqueue layout. */
+#define VIRTIO_F_RING_PACKED 34
+
+/*
+ * The in-order feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
+/*
+ * This feature indicates that memory accesses by the driver and the
+ * device are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM 36
/*
* Does the device support Single Root I/O Virtualization?
*/
#define VIRTIO_F_SR_IOV 37
+
+/*
+ * This feature indicates that the driver passes extra data (besides
+ * identifying the virtqueue) in its device notifications.
+ */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+
+/* This feature indicates that the driver uses the data provided by the device
+ * as a virtqueue identifier in available buffer notifications.
+ */
+#define VIRTIO_F_NOTIF_CONFIG_DATA 39
+
+/*
+ * This feature indicates that the driver can reset a queue individually.
+ */
+#define VIRTIO_F_RING_RESET 40
+
+/*
+ * This feature indicates that the device supports administration virtqueues.
+ */
+#define VIRTIO_F_ADMIN_VQ 41
+
#endif /* _LINUX_VIRTIO_CONFIG_H */
diff --git a/include/standard-headers/linux/virtio_console.h b/include/standard-headers/linux/virtio_console.h
index 0dedc9e6f5..71f5f648e3 100644
--- a/include/standard-headers/linux/virtio_console.h
+++ b/include/standard-headers/linux/virtio_console.h
@@ -45,13 +45,13 @@
struct virtio_console_config {
/* columns of the screens */
- uint16_t cols;
+ __virtio16 cols;
/* rows of the screens */
- uint16_t rows;
+ __virtio16 rows;
/* max. number of ports this device can hold */
- uint32_t max_nr_ports;
+ __virtio32 max_nr_ports;
/* emergency write register */
- uint32_t emerg_wr;
+ __virtio32 emerg_wr;
} QEMU_PACKED;
/*
diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h
index 5ff0b4ee59..68066dafb6 100644
--- a/include/standard-headers/linux/virtio_crypto.h
+++ b/include/standard-headers/linux/virtio_crypto.h
@@ -37,6 +37,7 @@
#define VIRTIO_CRYPTO_SERVICE_HASH 1
#define VIRTIO_CRYPTO_SERVICE_MAC 2
#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
uint32_t opcode;
uint32_t algo;
uint32_t flag;
@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
uint8_t padding[32];
};
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+ uint32_t padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH 0
+#define VIRTIO_CRYPTO_RSA_MD2 1
+#define VIRTIO_CRYPTO_RSA_MD3 2
+#define VIRTIO_CRYPTO_RSA_MD4 3
+#define VIRTIO_CRYPTO_RSA_MD5 4
+#define VIRTIO_CRYPTO_RSA_SHA1 5
+#define VIRTIO_CRYPTO_RSA_SHA256 6
+#define VIRTIO_CRYPTO_RSA_SHA384 7
+#define VIRTIO_CRYPTO_RSA_SHA512 8
+#define VIRTIO_CRYPTO_RSA_SHA224 9
+ uint32_t hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+ uint32_t curve_id;
+ uint32_t padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER 0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA 1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA 2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+ uint32_t algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+ uint32_t keytype;
+ uint32_t keylen;
+
+ union {
+ struct virtio_crypto_rsa_session_para rsa;
+ struct virtio_crypto_ecdsa_session_para ecdsa;
+ } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+ struct virtio_crypto_akcipher_session_para para;
+ uint8_t padding[36];
+};
+
struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
mac_create_session;
struct virtio_crypto_aead_create_session_req
aead_create_session;
+ struct virtio_crypto_akcipher_create_session_req
+ akcipher_create_session;
struct virtio_crypto_destroy_session_req
destroy_session;
uint8_t padding[56];
@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
uint32_t opcode;
/* algo should be service-specific algorithms */
uint32_t algo;
@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
uint8_t padding[32];
};
+struct virtio_crypto_akcipher_para {
+ uint32_t src_data_len;
+ uint32_t dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+ struct virtio_crypto_akcipher_para para;
+ uint8_t padding[40];
+};
+
/* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req {
struct virtio_crypto_op_header header;
@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
struct virtio_crypto_hash_data_req hash_req;
struct virtio_crypto_mac_data_req mac_req;
struct virtio_crypto_aead_data_req aead_req;
+ struct virtio_crypto_akcipher_data_req akcipher_req;
uint8_t padding[48];
} u;
};
@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req {
#define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3
#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
@@ -438,7 +518,7 @@ struct virtio_crypto_config {
uint32_t max_cipher_key_len;
/* Maximum length of authenticated key */
uint32_t max_auth_key_len;
- uint32_t reserve;
+ uint32_t akcipher_algo;
/* Maximum size of each crypto request's content */
uint64_t max_size;
};
diff --git a/include/standard-headers/linux/virtio_fs.h b/include/standard-headers/linux/virtio_fs.h
new file mode 100644
index 0000000000..a32fe8a64c
--- /dev/null
+++ b/include/standard-headers/linux/virtio_fs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+
+#ifndef _LINUX_VIRTIO_FS_H
+#define _LINUX_VIRTIO_FS_H
+
+#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_config.h"
+#include "standard-headers/linux/virtio_types.h"
+
+struct virtio_fs_config {
+ /* Filesystem name (UTF-8, not NUL-terminated, padded with NULs) */
+ uint8_t tag[36];
+
+ /* Number of request queues */
+ uint32_t num_request_queues;
+} QEMU_PACKED;
+
+/* For the id field in virtio_pci_shm_cap */
+#define VIRTIO_FS_SHMCAP_ID_CACHE 0
+
+#endif /* _LINUX_VIRTIO_FS_H */
diff --git a/include/standard-headers/linux/virtio_gpio.h b/include/standard-headers/linux/virtio_gpio.h
new file mode 100644
index 0000000000..2b5cf06349
--- /dev/null
+++ b/include/standard-headers/linux/virtio_gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _LINUX_VIRTIO_GPIO_H
+#define _LINUX_VIRTIO_GPIO_H
+
+#include "standard-headers/linux/types.h"
+
+/* Virtio GPIO Feature bits */
+#define VIRTIO_GPIO_F_IRQ 0
+
+/* Virtio GPIO request types */
+#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001
+#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002
+#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003
+#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004
+#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005
+#define VIRTIO_GPIO_MSG_IRQ_TYPE 0x0006
+
+/* Possible values of the status field */
+#define VIRTIO_GPIO_STATUS_OK 0x0
+#define VIRTIO_GPIO_STATUS_ERR 0x1
+
+/* Direction types */
+#define VIRTIO_GPIO_DIRECTION_NONE 0x00
+#define VIRTIO_GPIO_DIRECTION_OUT 0x01
+#define VIRTIO_GPIO_DIRECTION_IN 0x02
+
+/* Virtio GPIO IRQ types */
+#define VIRTIO_GPIO_IRQ_TYPE_NONE 0x00
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+struct virtio_gpio_config {
+ uint16_t ngpio;
+ uint8_t padding[2];
+ uint32_t gpio_names_size;
+};
+
+/* Virtio GPIO Request / Response */
+struct virtio_gpio_request {
+ uint16_t type;
+ uint16_t gpio;
+ uint32_t value;
+};
+
+struct virtio_gpio_response {
+ uint8_t status;
+ uint8_t value;
+};
+
+struct virtio_gpio_response_get_names {
+ uint8_t status;
+ uint8_t value[];
+};
+
+/* Virtio GPIO IRQ Request / Response */
+struct virtio_gpio_irq_request {
+ uint16_t gpio;
+};
+
+struct virtio_gpio_irq_response {
+ uint8_t status;
+};
+
+/* Possible values of the interrupt status field */
+#define VIRTIO_GPIO_IRQ_STATUS_INVALID 0x0
+#define VIRTIO_GPIO_IRQ_STATUS_VALID 0x1
+
+#endif /* _LINUX_VIRTIO_GPIO_H */
diff --git a/include/standard-headers/linux/virtio_gpu.h b/include/standard-headers/linux/virtio_gpu.h
index 52a830dcf8..2db643ed8f 100644
--- a/include/standard-headers/linux/virtio_gpu.h
+++ b/include/standard-headers/linux/virtio_gpu.h
@@ -40,7 +40,30 @@
#include "standard-headers/linux/types.h"
-#define VIRTIO_GPU_F_VIRGL 0
+/*
+ * VIRTIO_GPU_CMD_CTX_*
+ * VIRTIO_GPU_CMD_*_3D
+ */
+#define VIRTIO_GPU_F_VIRGL 0
+
+/*
+ * VIRTIO_GPU_CMD_GET_EDID
+ */
+#define VIRTIO_GPU_F_EDID 1
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
+ */
+#define VIRTIO_GPU_F_RESOURCE_UUID 2
+
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
+ */
+#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CREATE_CONTEXT with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -56,6 +79,10 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
+ VIRTIO_GPU_CMD_GET_EDID,
+ VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+ VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -66,6 +93,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
+ VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
+ VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -76,6 +105,9 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
+ VIRTIO_GPU_RESP_OK_EDID,
+ VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+ VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -86,14 +118,29 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+enum virtio_gpu_shm_id {
+ VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
+ /*
+ * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB
+ * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB
+ */
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to be used when creating the fence
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
uint32_t type;
uint32_t flags;
uint64_t fence_id;
uint32_t ctx_id;
- uint32_t padding;
+ uint8_t ring_idx;
+ uint8_t padding[3];
};
/* data passed in the cursor vq */
@@ -233,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t nlen;
- uint32_t padding;
+ uint32_t context_init;
char debug_name[64];
};
@@ -261,6 +309,8 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
+/* 3 is reserved for gfxstream */
+#define VIRTIO_GPU_CAPSET_VENUS 4
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
@@ -291,6 +341,21 @@ struct virtio_gpu_resp_capset {
uint8_t capset_data[];
};
+/* VIRTIO_GPU_CMD_GET_EDID */
+struct virtio_gpu_cmd_get_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t scanout;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_EDID */
+struct virtio_gpu_resp_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t size;
+ uint32_t padding;
+ uint8_t edid[1024];
+};
+
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
@@ -314,4 +379,80 @@ enum virtio_gpu_formats {
VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};
+/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */
+struct virtio_gpu_resource_assign_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */
+struct virtio_gpu_resp_resource_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint8_t uuid[16];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
+#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob mem */
+ uint32_t blob_mem;
+ uint32_t blob_flags;
+ uint32_t nr_entries;
+ uint64_t blob_id;
+ uint64_t size;
+ /*
+ * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+ */
+};
+
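The attachment entries follow the fixed-size struct in the same buffer, so the full command length is computed from nr_entries; a sketch, with struct virtio_gpu_mem_entry declared earlier in this header and nr_entries standing in for the value stored above:

size_t cmd_len = sizeof(struct virtio_gpu_resource_create_blob) +
                 (size_t)nr_entries * sizeof(struct virtio_gpu_mem_entry);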
+/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
+struct virtio_gpu_set_scanout_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ uint32_t scanout_id;
+ uint32_t resource_id;
+ uint32_t width;
+ uint32_t height;
+ uint32_t format;
+ uint32_t padding;
+ uint32_t strides[4];
+ uint32_t offsets[4];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
+struct virtio_gpu_resource_map_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+ uint64_t offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC 0x03
+struct virtio_gpu_resp_map_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t map_info;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
+struct virtio_gpu_resource_unmap_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
#endif
diff --git a/include/standard-headers/linux/virtio_i2c.h b/include/standard-headers/linux/virtio_i2c.h
new file mode 100644
index 0000000000..09fa907793
--- /dev/null
+++ b/include/standard-headers/linux/virtio_i2c.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Definitions for virtio I2C Adapter
+ *
+ * Copyright (c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _LINUX_VIRTIO_I2C_H
+#define _LINUX_VIRTIO_I2C_H
+
+#include "standard-headers/linux/const.h"
+#include "standard-headers/linux/types.h"
+
+/* Virtio I2C Feature bits */
+#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0
+
+/* The bit 0 of the @virtio_i2c_out_hdr.@flags, used to group the requests */
+#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0)
+
+/* The bit 1 of the @virtio_i2c_out_hdr.@flags, used to mark a buffer as read */
+#define VIRTIO_I2C_FLAGS_M_RD _BITUL(1)
+
+/**
+ * struct virtio_i2c_out_hdr - the virtio I2C message OUT header
+ * @addr: the controlled device address
+ * @padding: used to pad to full dword
+ * @flags: used for feature extensibility
+ */
+struct virtio_i2c_out_hdr {
+ uint16_t addr;
+ uint16_t padding;
+ uint32_t flags;
+};
+
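A sketch of an OUT header for a read transfer, assuming the virtio-i2c convention that the 7-bit target address sits in bits 7:1 of addr; endian conversion is omitted:

struct virtio_i2c_out_hdr hdr = {
    .addr  = 0x50 << 1,                  /* 7-bit address 0x50 in bits 7:1 */
    .flags = VIRTIO_I2C_FLAGS_M_RD       /* buffer is read from the device */
           | VIRTIO_I2C_FLAGS_FAIL_NEXT, /* fail the next request if this one fails */
};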
+/**
+ * struct virtio_i2c_in_hdr - the virtio I2C message IN header
+ * @status: the processing result from the backend
+ */
+struct virtio_i2c_in_hdr {
+ uint8_t status;
+};
+
+/* The final status written by the device */
+#define VIRTIO_I2C_MSG_OK 0
+#define VIRTIO_I2C_MSG_ERR 1
+
+#endif /* _LINUX_VIRTIO_I2C_H */
diff --git a/include/standard-headers/linux/virtio_ids.h b/include/standard-headers/linux/virtio_ids.h
index 6d5c3b2d4f..7aa2eb7662 100644
--- a/include/standard-headers/linux/virtio_ids.h
+++ b/include/standard-headers/linux/virtio_ids.h
@@ -29,19 +29,56 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#define VIRTIO_ID_NET 1 /* virtio net */
-#define VIRTIO_ID_BLOCK 2 /* virtio block */
-#define VIRTIO_ID_CONSOLE 3 /* virtio console */
-#define VIRTIO_ID_RNG 4 /* virtio rng */
-#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
-#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
-#define VIRTIO_ID_SCSI 8 /* virtio scsi */
-#define VIRTIO_ID_9P 9 /* 9p virtio console */
-#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
-#define VIRTIO_ID_CAIF 12 /* Virtio caif */
-#define VIRTIO_ID_GPU 16 /* virtio GPU */
-#define VIRTIO_ID_INPUT 18 /* virtio input */
-#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
-#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
+#define VIRTIO_ID_NET 1 /* virtio net */
+#define VIRTIO_ID_BLOCK 2 /* virtio block */
+#define VIRTIO_ID_CONSOLE 3 /* virtio console */
+#define VIRTIO_ID_RNG 4 /* virtio rng */
+#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
+#define VIRTIO_ID_IOMEM 6 /* virtio ioMemory */
+#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
+#define VIRTIO_ID_SCSI 8 /* virtio scsi */
+#define VIRTIO_ID_9P 9 /* 9p virtio console */
+#define VIRTIO_ID_MAC80211_WLAN 10 /* virtio WLAN MAC */
+#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
+#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_MEMORY_BALLOON 13 /* virtio memory balloon */
+#define VIRTIO_ID_GPU 16 /* virtio GPU */
+#define VIRTIO_ID_CLOCK 17 /* virtio clock/timer */
+#define VIRTIO_ID_INPUT 18 /* virtio input */
+#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
+#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
+#define VIRTIO_ID_SIGNAL_DIST 21 /* virtio signal distribution device */
+#define VIRTIO_ID_PSTORE 22 /* virtio pstore device */
+#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM 24 /* virtio mem */
+#define VIRTIO_ID_SOUND 25 /* virtio sound */
+#define VIRTIO_ID_FS 26 /* virtio filesystem */
+#define VIRTIO_ID_PMEM 27 /* virtio pmem */
+#define VIRTIO_ID_RPMB 28 /* virtio rpmb */
+#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */
+#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */
+#define VIRTIO_ID_SCMI 32 /* virtio SCMI */
+#define VIRTIO_ID_NITRO_SEC_MOD 33 /* virtio nitro secure module */
+#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */
+#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */
+#define VIRTIO_ID_CAN 36 /* virtio can */
+#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */
+#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */
+#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */
+#define VIRTIO_ID_BT 40 /* virtio bluetooth */
+#define VIRTIO_ID_GPIO 41 /* virtio gpio */
+
+/*
+ * Virtio Transitional IDs
+ */
+
+#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */
+#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */
+#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */
+#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */
+#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */
+#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */
+#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/standard-headers/linux/virtio_iommu.h b/include/standard-headers/linux/virtio_iommu.h
new file mode 100644
index 0000000000..366379c2f0
--- /dev/null
+++ b/include/standard-headers/linux/virtio_iommu.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio-iommu definition v0.12
+ *
+ * Copyright (C) 2019 Arm Ltd.
+ */
+#ifndef _LINUX_VIRTIO_IOMMU_H
+#define _LINUX_VIRTIO_IOMMU_H
+
+#include "standard-headers/linux/types.h"
+
+/* Feature bits */
+#define VIRTIO_IOMMU_F_INPUT_RANGE 0
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1
+#define VIRTIO_IOMMU_F_MAP_UNMAP 2
+#define VIRTIO_IOMMU_F_BYPASS 3
+#define VIRTIO_IOMMU_F_PROBE 4
+#define VIRTIO_IOMMU_F_MMIO 5
+#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6
+
+struct virtio_iommu_range_64 {
+ uint64_t start;
+ uint64_t end;
+};
+
+struct virtio_iommu_range_32 {
+ uint32_t start;
+ uint32_t end;
+};
+
+struct virtio_iommu_config {
+ /* Supported page sizes */
+ uint64_t page_size_mask;
+ /* Supported IOVA range */
+ struct virtio_iommu_range_64 input_range;
+ /* Max domain ID size */
+ struct virtio_iommu_range_32 domain_range;
+ /* Probe buffer size */
+ uint32_t probe_size;
+ uint8_t bypass;
+ uint8_t reserved[3];
+};
+
+/* Request types */
+#define VIRTIO_IOMMU_T_ATTACH 0x01
+#define VIRTIO_IOMMU_T_DETACH 0x02
+#define VIRTIO_IOMMU_T_MAP 0x03
+#define VIRTIO_IOMMU_T_UNMAP 0x04
+#define VIRTIO_IOMMU_T_PROBE 0x05
+
+/* Status types */
+#define VIRTIO_IOMMU_S_OK 0x00
+#define VIRTIO_IOMMU_S_IOERR 0x01
+#define VIRTIO_IOMMU_S_UNSUPP 0x02
+#define VIRTIO_IOMMU_S_DEVERR 0x03
+#define VIRTIO_IOMMU_S_INVAL 0x04
+#define VIRTIO_IOMMU_S_RANGE 0x05
+#define VIRTIO_IOMMU_S_NOENT 0x06
+#define VIRTIO_IOMMU_S_FAULT 0x07
+#define VIRTIO_IOMMU_S_NOMEM 0x08
+
+struct virtio_iommu_req_head {
+ uint8_t type;
+ uint8_t reserved[3];
+};
+
+struct virtio_iommu_req_tail {
+ uint8_t status;
+ uint8_t reserved[3];
+};
+
+#define VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0)
+
+struct virtio_iommu_req_attach {
+ struct virtio_iommu_req_head head;
+ uint32_t domain;
+ uint32_t endpoint;
+ uint32_t flags;
+ uint8_t reserved[4];
+ struct virtio_iommu_req_tail tail;
+};
+
+struct virtio_iommu_req_detach {
+ struct virtio_iommu_req_head head;
+ uint32_t domain;
+ uint32_t endpoint;
+ uint8_t reserved[8];
+ struct virtio_iommu_req_tail tail;
+};
+
+#define VIRTIO_IOMMU_MAP_F_READ (1 << 0)
+#define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1)
+#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2)
+
+#define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \
+ VIRTIO_IOMMU_MAP_F_WRITE | \
+ VIRTIO_IOMMU_MAP_F_MMIO)
+
+struct virtio_iommu_req_map {
+ struct virtio_iommu_req_head head;
+ uint32_t domain;
+ uint64_t virt_start;
+ uint64_t virt_end;
+ uint64_t phys_start;
+ uint32_t flags;
+ struct virtio_iommu_req_tail tail;
+};
+
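A sketch of how a device might validate the flags of an incoming MAP request against the mask above; rejecting undefined bits with VIRTIO_IOMMU_S_INVAL is one reasonable reading of the protocol, not something this header mandates:

static uint8_t check_map_flags(const struct virtio_iommu_req_map *req)
{
    if (req->flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;   /* undefined flag bit set */
    }
    return VIRTIO_IOMMU_S_OK;
}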
+struct virtio_iommu_req_unmap {
+ struct virtio_iommu_req_head head;
+ uint32_t domain;
+ uint64_t virt_start;
+ uint64_t virt_end;
+ uint8_t reserved[4];
+ struct virtio_iommu_req_tail tail;
+};
+
+#define VIRTIO_IOMMU_PROBE_T_NONE 0
+#define VIRTIO_IOMMU_PROBE_T_RESV_MEM 1
+
+#define VIRTIO_IOMMU_PROBE_T_MASK 0xfff
+
+struct virtio_iommu_probe_property {
+ uint16_t type;
+ uint16_t length;
+};
+
+#define VIRTIO_IOMMU_RESV_MEM_T_RESERVED 0
+#define VIRTIO_IOMMU_RESV_MEM_T_MSI 1
+
+struct virtio_iommu_probe_resv_mem {
+ struct virtio_iommu_probe_property head;
+ uint8_t subtype;
+ uint8_t reserved[3];
+ uint64_t start;
+ uint64_t end;
+};
+
+struct virtio_iommu_req_probe {
+ struct virtio_iommu_req_head head;
+ uint32_t endpoint;
+ uint8_t reserved[64];
+
+ uint8_t properties[];
+
+ /*
+ * Tail follows the variable-length properties array. No padding,
+ * property lengths are all aligned on 8 bytes.
+ */
+};
+
+/* Fault types */
+#define VIRTIO_IOMMU_FAULT_R_UNKNOWN 0
+#define VIRTIO_IOMMU_FAULT_R_DOMAIN 1
+#define VIRTIO_IOMMU_FAULT_R_MAPPING 2
+
+#define VIRTIO_IOMMU_FAULT_F_READ (1 << 0)
+#define VIRTIO_IOMMU_FAULT_F_WRITE (1 << 1)
+#define VIRTIO_IOMMU_FAULT_F_EXEC (1 << 2)
+#define VIRTIO_IOMMU_FAULT_F_ADDRESS (1 << 8)
+
+struct virtio_iommu_fault {
+ uint8_t reason;
+ uint8_t reserved[3];
+ uint32_t flags;
+ uint32_t endpoint;
+ uint8_t reserved2[4];
+ uint64_t address;
+};
+
+#endif
diff --git a/include/standard-headers/linux/virtio_mem.h b/include/standard-headers/linux/virtio_mem.h
new file mode 100644
index 0000000000..18c74c527c
--- /dev/null
+++ b/include/standard-headers/linux/virtio_mem.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio Mem Device
+ *
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MEM_H
+#define _LINUX_VIRTIO_MEM_H
+
+#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/virtio_types.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_config.h"
+
+/*
+ * Each virtio-mem device manages a dedicated region in physical address
+ * space. Each device can belong to a single NUMA node, multiple devices
+ * for a single NUMA node are possible. A virtio-mem device is like a
+ * "resizable DIMM" consisting of small memory blocks that can be plugged
+ * or unplugged. The device driver is responsible for (un)plugging memory
+ * blocks on demand.
+ *
+ * Virtio-mem devices can only operate on their assigned memory region in
+ * order to (un)plug memory. A device cannot (un)plug memory belonging to
+ * other devices.
+ *
+ * The "region_size" corresponds to the maximum amount of memory that can
+ * be provided by a device. The "size" corresponds to the amount of memory
+ * that is currently plugged. "requested_size" corresponds to a request
+ * from the device to the device driver to (un)plug blocks. The
+ * device driver should try to (un)plug blocks in order to reach the
+ * "requested_size". It is impossible to plug more memory than requested.
+ *
+ * The "usable_region_size" represents the memory region that can actually
+ * be used to (un)plug memory. It is always at least as big as the
+ * "requested_size" and will grow dynamically. It will only shrink when
+ * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG_ALL).
+ *
+ * There are no guarantees what will happen if unplugged memory is
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
+ *
+ * It can happen that the device cannot process a request, because it is
+ * busy. The device driver has to retry later.
+ *
+ * Usually, during system resets all memory will get unplugged, so the
+ * device driver can start with a clean state. However, in specific
+ * scenarios (if the device is busy) it can happen that the device still
+ * has memory plugged. The device driver can request to unplug all memory
+ * (VIRTIO_MEM_REQ_UNPLUG_ALL) - which might take a while to succeed if the
+ * device is busy.
+ */
+
+/* --- virtio-mem: feature bits --- */
+
+/* node_id is an ACPI PXM and is valid */
+#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
+
+
+/* --- virtio-mem: guest -> host requests --- */
+
+/* request to plug memory blocks */
+#define VIRTIO_MEM_REQ_PLUG 0
+/* request to unplug memory blocks */
+#define VIRTIO_MEM_REQ_UNPLUG 1
+/* request to unplug all blocks and shrink the usable size */
+#define VIRTIO_MEM_REQ_UNPLUG_ALL 2
+/* request information about the plugged state of memory blocks */
+#define VIRTIO_MEM_REQ_STATE 3
+
+struct virtio_mem_req_plug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_unplug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_state {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_req_plug plug;
+ struct virtio_mem_req_unplug unplug;
+ struct virtio_mem_req_state state;
+ } u;
+};
+
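Following the plug/unplug protocol described above, a guest-to-host request is a tagged union; a sketch of a plug request, with block_addr and nb standing in for real values and __virtio* endian handling omitted:

struct virtio_mem_req req = {
    .type = VIRTIO_MEM_REQ_PLUG,
    .u.plug = {
        .addr      = block_addr,   /* guest-physical, block-size aligned */
        .nb_blocks = nb,           /* number of contiguous blocks        */
    },
};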
+
+/* --- virtio-mem: host -> guest response --- */
+
+/*
+ * Request processed successfully, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ACK 0
+/*
+ * Request denied - e.g. trying to plug more than requested, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ */
+#define VIRTIO_MEM_RESP_NACK 1
+/*
+ * Request cannot be processed right now, try again later, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ */
+#define VIRTIO_MEM_RESP_BUSY 2
+/*
+ * Error in request (e.g. addresses/alignment), applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ERROR 3
+
+
+/* State of memory blocks is "plugged" */
+#define VIRTIO_MEM_STATE_PLUGGED 0
+/* State of memory blocks is "unplugged" */
+#define VIRTIO_MEM_STATE_UNPLUGGED 1
+/* State of memory blocks is "mixed" */
+#define VIRTIO_MEM_STATE_MIXED 2
+
+struct virtio_mem_resp_state {
+ __virtio16 state;
+};
+
+struct virtio_mem_resp {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_resp_state state;
+ } u;
+};
+
+/* --- virtio-mem: configuration --- */
+
+struct virtio_mem_config {
+ /* Block size and alignment. Cannot change. */
+ uint64_t block_size;
+ /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
+ uint16_t node_id;
+ uint8_t padding[6];
+ /* Start address of the memory region. Cannot change. */
+ uint64_t addr;
+ /* Region size (maximum). Cannot change. */
+ uint64_t region_size;
+ /*
+ * Currently usable region size. Can grow up to region_size. Can
+ * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
+ * update will be sent).
+ */
+ uint64_t usable_region_size;
+ /*
+ * Currently used size. Changes due to plug/unplug requests, but no
+ * config updates will be sent.
+ */
+ uint64_t plugged_size;
+ /* Requested size. New plug requests cannot exceed it. Can change. */
+ uint64_t requested_size;
+};
+
+#endif /* _LINUX_VIRTIO_MEM_H */
diff --git a/include/standard-headers/linux/virtio_mmio.h b/include/standard-headers/linux/virtio_mmio.h
index c4b09689ab..0650f91bea 100644
--- a/include/standard-headers/linux/virtio_mmio.h
+++ b/include/standard-headers/linux/virtio_mmio.h
@@ -122,6 +122,17 @@
#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+/* Shared memory region id */
+#define VIRTIO_MMIO_SHM_SEL 0x0ac
+
+/* Shared memory region length, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0
+#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4
+
+/* Shared memory region base address, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8
+#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc
+
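A sketch of reading one shared-memory region's geometry: the region is selected first, then the two 32-bit halves are combined (shmid is a stand-in, and mmio_read32()/mmio_write32() are hypothetical register accessors):

mmio_write32(VIRTIO_MMIO_SHM_SEL, shmid);
uint64_t shm_len  = ((uint64_t)mmio_read32(VIRTIO_MMIO_SHM_LEN_HIGH) << 32) |
                    mmio_read32(VIRTIO_MMIO_SHM_LEN_LOW);
uint64_t shm_base = ((uint64_t)mmio_read32(VIRTIO_MMIO_SHM_BASE_HIGH) << 32) |
                    mmio_read32(VIRTIO_MMIO_SHM_BASE_LOW);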
/* Configuration atomicity value */
#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h
index 260c3681d7..0f88417742 100644
--- a/include/standard-headers/linux/virtio_net.h
+++ b/include/standard-headers/linux/virtio_net.h
@@ -56,7 +56,15 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
-
+#define VIRTIO_NET_F_VQ_NOTF_COAL 52 /* Device supports virtqueue notification coalescing */
+#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */
+#define VIRTIO_NET_F_GUEST_USO4 54 /* Guest can handle USOv4 in. */
+#define VIRTIO_NET_F_GUEST_USO6 55 /* Guest can handle USOv6 in. */
+#define VIRTIO_NET_F_HOST_USO 56 /* Host can handle USO in. */
+#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
+#define VIRTIO_NET_F_GUEST_HDRLEN 59 /* Guest provides the exact hdr_len value. */
+#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
+#define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */
#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device
* with the same MAC.
*/
@@ -69,18 +77,29 @@
#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
+/* supported/enabled hash types */
+#define VIRTIO_NET_RSS_HASH_TYPE_IPv4 (1 << 0)
+#define VIRTIO_NET_RSS_HASH_TYPE_TCPv4 (1 << 1)
+#define VIRTIO_NET_RSS_HASH_TYPE_UDPv4 (1 << 2)
+#define VIRTIO_NET_RSS_HASH_TYPE_IPv6 (1 << 3)
+#define VIRTIO_NET_RSS_HASH_TYPE_TCPv6 (1 << 4)
+#define VIRTIO_NET_RSS_HASH_TYPE_UDPv6 (1 << 5)
+#define VIRTIO_NET_RSS_HASH_TYPE_IP_EX (1 << 6)
+#define VIRTIO_NET_RSS_HASH_TYPE_TCP_EX (1 << 7)
+#define VIRTIO_NET_RSS_HASH_TYPE_UDP_EX (1 << 8)
+
struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
uint8_t mac[ETH_ALEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
- uint16_t status;
+ __virtio16 status;
/* Maximum number of each of transmit and receive queues;
* see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
* Legal values are between 1 and 0x8000
*/
- uint16_t max_virtqueue_pairs;
+ __virtio16 max_virtqueue_pairs;
/* Default maximum transmit unit advice */
- uint16_t mtu;
+ __virtio16 mtu;
/*
* speed, in units of 1Mb. All values 0 to INT_MAX are legal.
* Any other value stands for unknown.
@@ -92,6 +111,12 @@ struct virtio_net_config {
* Any other value stands for unknown.
*/
uint8_t duplex;
+ /* maximum size of RSS key */
+ uint8_t rss_max_key_size;
+ /* maximum number of indirection table entries */
+ uint16_t rss_max_indirection_table_length;
+ /* bitmask of supported VIRTIO_NET_RSS_HASH_ types */
+ uint32_t supported_hash_types;
} QEMU_PACKED;
/*
@@ -104,20 +129,57 @@ struct virtio_net_config {
struct virtio_net_hdr_v1 {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
+#define VIRTIO_NET_HDR_F_RSC_INFO 4 /* rsc info in csum_ fields */
uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_UDP_L4 5 /* GSO frame, IPv4 & IPv6 UDP (USO) */
#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
uint8_t gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
__virtio16 gso_size; /* Bytes to append to hdr_len per frame */
- __virtio16 csum_start; /* Position to start checksumming from */
- __virtio16 csum_offset; /* Offset after that to place checksum */
+ union {
+ struct {
+ __virtio16 csum_start;
+ __virtio16 csum_offset;
+ };
+ /* Checksum calculation */
+ struct {
+ /* Position to start checksumming from */
+ __virtio16 start;
+ /* Offset after that to place checksum */
+ __virtio16 offset;
+ } csum;
+ /* Receive Segment Coalescing */
+ struct {
+ /* Number of coalesced segments */
+ uint16_t segments;
+ /* Number of duplicated acks */
+ uint16_t dup_acks;
+ } rsc;
+ };
__virtio16 num_buffers; /* Number of merged rx buffers */
};
+struct virtio_net_hdr_v1_hash {
+ struct virtio_net_hdr_v1 hdr;
+ uint32_t hash_value;
+#define VIRTIO_NET_HASH_REPORT_NONE 0
+#define VIRTIO_NET_HASH_REPORT_IPv4 1
+#define VIRTIO_NET_HASH_REPORT_TCPv4 2
+#define VIRTIO_NET_HASH_REPORT_UDPv4 3
+#define VIRTIO_NET_HASH_REPORT_IPv6 4
+#define VIRTIO_NET_HASH_REPORT_TCPv6 5
+#define VIRTIO_NET_HASH_REPORT_UDPv6 6
+#define VIRTIO_NET_HASH_REPORT_IPv6_EX 7
+#define VIRTIO_NET_HASH_REPORT_TCPv6_EX 8
+#define VIRTIO_NET_HASH_REPORT_UDPv6_EX 9
+ uint16_t hash_report;
+ uint16_t padding;
+};
+
#ifndef VIRTIO_NET_NO_LEGACY
/* This header comes first in the scatter-gather list.
* For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
@@ -228,7 +290,9 @@ struct virtio_net_ctrl_mac {
/*
* Control Receive Flow Steering
- *
+ */
+#define VIRTIO_NET_CTRL_MQ 4
+/*
* The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
* enables Receive Flow Steering, specifying the number of the transmit and
* receive queues that will be used. After the command is consumed and acked by
@@ -241,12 +305,48 @@ struct virtio_net_ctrl_mq {
__virtio16 virtqueue_pairs;
};
-#define VIRTIO_NET_CTRL_MQ 4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
/*
+ * The command VIRTIO_NET_CTRL_MQ_RSS_CONFIG has the same effect as
+ * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET and additionally configures receive
+ * steering to use a hash calculated for each incoming packet to decide
+ * which receive virtqueue the packet is placed on. The command also
+ * provides the parameters used to calculate the hash and select the
+ * receive virtqueue.
+ */
+struct virtio_net_rss_config {
+ uint32_t hash_types;
+ uint16_t indirection_table_mask;
+ uint16_t unclassified_queue;
+ uint16_t indirection_table[1/* + indirection_table_mask */];
+ uint16_t max_tx_vq;
+ uint8_t hash_key_length;
+ uint8_t hash_key_data[/* hash_key_length */];
+};
+
+ #define VIRTIO_NET_CTRL_MQ_RSS_CONFIG 1
+
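Conceptually, with this configuration the device selects the receive virtqueue from the packet hash roughly as sketched below (cfg and hash are stand-ins; packets whose hash type is not enabled go to unclassified_queue instead):

uint16_t rx_vq = cfg->indirection_table[hash & cfg->indirection_table_mask];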
+/*
+ * The command VIRTIO_NET_CTRL_MQ_HASH_CONFIG requests the device
+ * to include in the virtio header of the packet the value of the
+ * calculated hash and the report type of hash. It also provides
+ * parameters for hash calculation. The command requires feature
+ * VIRTIO_NET_F_HASH_REPORT to be negotiated to extend the
+ * layout of virtio header as defined in virtio_net_hdr_v1_hash.
+ */
+struct virtio_net_hash_config {
+ uint32_t hash_types;
+ /* for compatibility with virtio_net_rss_config */
+ uint16_t reserved[4];
+ uint8_t hash_key_length;
+ uint8_t hash_key_data[/* hash_key_length */];
+};
+
+ #define VIRTIO_NET_CTRL_MQ_HASH_CONFIG 2
+
+/*
* Control network offloads
*
* Reconfigures the network offloads that Guest can handle.
@@ -261,4 +361,49 @@ struct virtio_net_ctrl_mq {
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+/*
+ * Control notifications coalescing.
+ *
+ * Request the device to change the notifications coalescing parameters.
+ *
+ * Available with the VIRTIO_NET_F_NOTF_COAL feature bit.
+ */
+#define VIRTIO_NET_CTRL_NOTF_COAL 6
+/*
+ * Set the tx-usecs/tx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_tx {
+ /* Maximum number of packets to send before a TX notification */
+ uint32_t tx_max_packets;
+ /* Maximum number of usecs to delay a TX notification */
+ uint32_t tx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0
+
+/*
+ * Set the rx-usecs/rx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_rx {
+ /* Maximum number of packets to receive before an RX notification */
+ uint32_t rx_max_packets;
+ /* Maximum number of usecs to delay an RX notification */
+ uint32_t rx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET 2
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_GET 3
+
+struct virtio_net_ctrl_coal {
+ uint32_t max_packets;
+ uint32_t max_usecs;
+};
+
+struct virtio_net_ctrl_coal_vq {
+ uint16_t vqn;
+ uint16_t reserved;
+ struct virtio_net_ctrl_coal coal;
+};
+
#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h
index 9262acd130..4010216103 100644
--- a/include/standard-headers/linux/virtio_pci.h
+++ b/include/standard-headers/linux/virtio_pci.h
@@ -113,6 +113,8 @@
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG 5
+/* Additional shared memory capability */
+#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
/* This is the PCI capability header: */
struct virtio_pci_cap {
@@ -121,11 +123,18 @@ struct virtio_pci_cap {
uint8_t cap_len; /* Generic PCI field: capability length */
uint8_t cfg_type; /* Identifies the structure. */
uint8_t bar; /* Where to find it. */
- uint8_t padding[3]; /* Pad to full dword. */
+ uint8_t id; /* Multiple capabilities of the same type */
+ uint8_t padding[2]; /* Pad to full dword. */
uint32_t offset; /* Offset within bar. */
uint32_t length; /* Length of the structure, in bytes. */
};
+struct virtio_pci_cap64 {
+ struct virtio_pci_cap cap;
+ uint32_t offset_hi; /* Most sig 32 bits of offset */
+ uint32_t length_hi; /* Most sig 32 bits of length */
+};
+
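A sketch of recovering the full 64-bit offset and length from a capability that uses the 64-bit layout, such as a shared-memory capability (cap64 is a stand-in pointer; PCI config space values are little endian, conversion omitted):

uint64_t offset = cap64->cap.offset | ((uint64_t)cap64->offset_hi << 32);
uint64_t length = cap64->cap.length | ((uint64_t)cap64->length_hi << 32);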
struct virtio_pci_notify_cap {
struct virtio_pci_cap cap;
uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
@@ -157,6 +166,20 @@ struct virtio_pci_common_cfg {
uint32_t queue_used_hi; /* read-write */
};
+/*
+ * Warning: do not use sizeof on this: use offsetofend for
+ * specific fields you need.
+ */
+struct virtio_pci_modern_common_cfg {
+ struct virtio_pci_common_cfg cfg;
+
+ uint16_t queue_notify_data; /* read-write */
+ uint16_t queue_reset; /* read-write */
+
+ uint16_t admin_queue_index; /* read-only */
+ uint16_t admin_queue_num; /* read-only */
+};
+
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
struct virtio_pci_cfg_cap {
struct virtio_pci_cap cap;
@@ -193,7 +216,74 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
#define VIRTIO_PCI_COMMON_Q_USEDLO 48
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
+#define VIRTIO_PCI_COMMON_Q_NDATA 56
+#define VIRTIO_PCI_COMMON_Q_RESET 58
+#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
+#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#endif /* VIRTIO_PCI_NO_MODERN */
+/* Admin command status. */
+#define VIRTIO_ADMIN_STATUS_OK 0
+
+/* Admin command opcode. */
+#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0
+#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
+
+/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
+
+/* Transitional device admin command. */
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5
+#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6
+
+struct virtio_admin_cmd_hdr {
+ uint16_t opcode;
+ /*
+ * 1 - SR-IOV
+ * 2-65535 - reserved
+ */
+ uint16_t group_type;
+ /* Unused, reserved for future extensions. */
+ uint8_t reserved1[12];
+ uint64_t group_member_id;
+};
+
+struct virtio_admin_cmd_status {
+ uint16_t status;
+ uint16_t status_qualifier;
+ /* Unused, reserved for future extensions. */
+ uint8_t reserved2[4];
+};
+
+struct virtio_admin_cmd_legacy_wr_data {
+ uint8_t offset; /* Starting offset of the register(s) to write. */
+ uint8_t reserved[7];
+ uint8_t registers[];
+};
+
+struct virtio_admin_cmd_legacy_rd_data {
+ uint8_t offset; /* Starting offset of the register(s) to read. */
+};
+
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2
+
+#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4
+
+struct virtio_admin_cmd_notify_info_data {
+ uint8_t flags; /* 0 = end of list, 1 = owner device, 2 = member device */
+ uint8_t bar; /* BAR of the member or the owner device */
+ uint8_t padding[6];
+ uint64_t offset; /* Offset within bar. */
+};
+
+struct virtio_admin_cmd_notify_info_result {
+ struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
+};
+
#endif
diff --git a/include/standard-headers/linux/virtio_pcidev.h b/include/standard-headers/linux/virtio_pcidev.h
new file mode 100644
index 0000000000..bdf1d062da
--- /dev/null
+++ b/include/standard-headers/linux/virtio_pcidev.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#ifndef _LINUX_VIRTIO_PCIDEV_H
+#define _LINUX_VIRTIO_PCIDEV_H
+#include "standard-headers/linux/types.h"
+
+/**
+ * enum virtio_pcidev_ops - virtual PCI device operations
+ * @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors
+ * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but
+ * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE)
+ * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the addr field is 1-4 for
+ * the number
+ * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports
+ * the 16- or 32-bit write that would otherwise be done into memory,
+ * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above
+ * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be
+ * all zeroes) to signal the PME# pin.
+ */
+enum virtio_pcidev_ops {
+ VIRTIO_PCIDEV_OP_RESERVED = 0,
+ VIRTIO_PCIDEV_OP_CFG_READ,
+ VIRTIO_PCIDEV_OP_CFG_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_READ,
+ VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_MEMSET,
+ VIRTIO_PCIDEV_OP_INT,
+ VIRTIO_PCIDEV_OP_MSI,
+ VIRTIO_PCIDEV_OP_PME,
+};
+
+/**
+ * struct virtio_pcidev_msg - virtio PCI device operation
+ * @op: the operation to do
+ * @bar: the bar (only with BAR read/write messages)
+ * @reserved: reserved
+ * @size: the size of the read/write (in bytes)
+ * @addr: the address to read/write
+ * @data: the data, normally @size long, but just one byte for
+ * %VIRTIO_PCIDEV_OP_MMIO_MEMSET
+ *
+ * Note: the fields are all in native (CPU) endian; however, the
+ * @data values will often be in little endian (see the ops above).
+ */
+struct virtio_pcidev_msg {
+ uint8_t op;
+ uint8_t bar;
+ uint16_t reserved;
+ uint32_t size;
+ uint64_t addr;
+ uint8_t data[];
+};
+
+#endif /* _LINUX_VIRTIO_PCIDEV_H */
diff --git a/include/standard-headers/linux/virtio_pmem.h b/include/standard-headers/linux/virtio_pmem.h
new file mode 100644
index 0000000000..1a2576d017
--- /dev/null
+++ b/include/standard-headers/linux/virtio_pmem.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/*
+ * Definitions for virtio-pmem devices.
+ *
+ * Copyright (C) 2019 Red Hat, Inc.
+ *
+ * Author(s): Pankaj Gupta <pagupta@redhat.com>
+ */
+
+#ifndef _LINUX_VIRTIO_PMEM_H
+#define _LINUX_VIRTIO_PMEM_H
+
+#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_config.h"
+
+/* Feature bits */
+/* guest physical address range will be indicated as shared memory region 0 */
+#define VIRTIO_PMEM_F_SHMEM_REGION 0
+
+/* shmid of the shared memory region corresponding to the pmem */
+#define VIRTIO_PMEM_SHMEM_REGION_ID 0
+
+struct virtio_pmem_config {
+ uint64_t start;
+ uint64_t size;
+};
+
+#define VIRTIO_PMEM_REQ_TYPE_FLUSH 0
+
+struct virtio_pmem_resp {
+ /* Host return status corresponding to flush request */
+ uint32_t ret;
+};
+
+struct virtio_pmem_req {
+ /* command type */
+ uint32_t type;
+};
+
+#endif
diff --git a/include/standard-headers/linux/virtio_ring.h b/include/standard-headers/linux/virtio_ring.h
index d26e72bc6b..22f6eb8ca7 100644
--- a/include/standard-headers/linux/virtio_ring.h
+++ b/include/standard-headers/linux/virtio_ring.h
@@ -42,6 +42,13 @@
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
+/*
+ * Mark a descriptor as available or used in packed ring.
+ * Notice: they are defined as shifts instead of shifted values.
+ */
+#define VRING_PACKED_DESC_F_AVAIL 7
+#define VRING_PACKED_DESC_F_USED 15
+
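Because these are bit positions rather than masks, a device checks whether a packed descriptor is available roughly as follows (flags is the descriptor's flags field and wrap the device's current wrap counter, both stand-ins; wrap is 0 or 1):

int avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
int used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
int is_available = (avail == wrap) && (used != wrap);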
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
* will still kick if it's out of buffers. */
@@ -51,6 +58,23 @@
* optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
+/* Enable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
+/* Disable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor in packed ring.
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
+ */
+#define VRING_PACKED_EVENT_FLAG_DESC 0x2
+
+/*
+ * Wrap counter bit shift in event suppression structure
+ * of packed ring.
+ */
+#define VRING_PACKED_EVENT_F_WRAP_CTR 15
+
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
@@ -60,15 +84,28 @@
* at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29
-/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
+/**
+ * struct vring_desc - Virtio ring descriptors,
+ * 16 bytes long. These can chain together via @next.
+ *
+ * @addr: buffer address (guest-physical)
+ * @len: buffer length
+ * @flags: descriptor flags
+ * @next: index of the next descriptor in the chain,
+ * if the VRING_DESC_F_NEXT flag is set. We chain unused
+ * descriptors via this, too.
+ */
struct vring_desc {
- /* Address (guest-physical). */
__virtio64 addr;
- /* Length. */
__virtio32 len;
- /* The flags as indicated above. */
__virtio16 flags;
- /* We chain unused descriptors via this, too */
__virtio16 next;
};
@@ -86,28 +123,47 @@ struct vring_used_elem {
__virtio32 len;
};
+typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_elem_t;
+
struct vring_used {
__virtio16 flags;
__virtio16 idx;
- struct vring_used_elem ring[];
+ vring_used_elem_t ring[];
};
+/*
+ * The ring element addresses are passed between components with different
+ * alignment assumptions. Thus, we might need to decrease the compiler-selected
+ * alignment, and so must use a typedef to make sure the aligned attribute
+ * actually takes hold:
+ *
+ * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
+ *
+ * When used on a struct, or struct member, the aligned attribute can only
+ * increase the alignment; in order to decrease it, the packed attribute must
+ * be specified as well. When used as part of a typedef, the aligned attribute
+ * can both increase and decrease alignment, and specifying the packed
+ * attribute generates a warning.
+ */
+typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
+ vring_desc_t;
+typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
+ vring_avail_t;
+typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_t;
+
struct vring {
unsigned int num;
- struct vring_desc *desc;
+ vring_desc_t *desc;
- struct vring_avail *avail;
+ vring_avail_t *avail;
- struct vring_used *used;
+ vring_used_t *used;
};
-/* Alignment requirements for vring elements.
- * When using pre-virtio 1.0 layout, these fall out naturally.
- */
-#define VRING_AVAIL_ALIGN_SIZE 2
-#define VRING_USED_ALIGN_SIZE 4
-#define VRING_DESC_ALIGN_SIZE 16
+#ifndef VIRTIO_RING_NO_LEGACY
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
@@ -143,7 +199,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
{
vr->num = num;
vr->desc = p;
- vr->avail = p + num*sizeof(struct vring_desc);
+ vr->avail = (struct vring_avail *)((char *)p + num * sizeof(struct vring_desc));
vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
@@ -155,6 +211,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
+#endif /* VIRTIO_RING_NO_LEGACY */
+
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
@@ -169,4 +227,22 @@ static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_
return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
+struct vring_packed_desc_event {
+ /* Descriptor Ring Change Event Offset/Wrap Counter. */
+ uint16_t off_wrap;
+ /* Descriptor Ring Change Event Flags. */
+ uint16_t flags;
+};
+
+struct vring_packed_desc {
+ /* Buffer Address. */
+ uint64_t addr;
+ /* Buffer Length. */
+ uint32_t len;
+ /* Buffer ID. */
+ uint16_t id;
+ /* The flags depending on descriptor type. */
+ uint16_t flags;
+};
+
#endif /* _LINUX_VIRTIO_RING_H */
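/*
 * Illustrative sketch, not part of the diff above: how a legacy (split-ring)
 * driver might lay out a queue with vring_init() and then use
 * vring_need_event() to decide whether a notification is needed.
 * QUEUE_NUM, QUEUE_ALIGN and the notify_device() placeholder are hypothetical
 * names, not QEMU or kernel APIs; byte-order handling is omitted.
 */
#define QUEUE_NUM   256      /* must be a power of 2 */
#define QUEUE_ALIGN 4096     /* legacy used-ring alignment */

static void example_add_buffers_and_kick(void *queue_mem,
                                         uint16_t old_avail_idx,
                                         uint16_t new_avail_idx)
{
    struct vring vr;

    /* queue_mem must be at least vring_size(QUEUE_NUM, QUEUE_ALIGN) bytes. */
    vring_init(&vr, QUEUE_NUM, queue_mem, QUEUE_ALIGN);

    /*
     * With VIRTIO_RING_F_EVENT_IDX negotiated, the device publishes the
     * avail index it wants to be notified at right after used->ring[num].
     */
    uint16_t avail_event = *(uint16_t *)&vr.used->ring[QUEUE_NUM];

    if (vring_need_event(avail_event, new_avail_idx, old_avail_idx)) {
        /* notify_device(); -- hypothetical doorbell write */
    }
}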
diff --git a/include/standard-headers/linux/virtio_scmi.h b/include/standard-headers/linux/virtio_scmi.h
new file mode 100644
index 0000000000..8f2c305aea
--- /dev/null
+++ b/include/standard-headers/linux/virtio_scmi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_VIRTIO_SCMI_H
+#define _LINUX_VIRTIO_SCMI_H
+
+#include "standard-headers/linux/virtio_types.h"
+
+/* Device implements some SCMI notifications, or delayed responses. */
+#define VIRTIO_SCMI_F_P2A_CHANNELS 0
+
+/* Device implements any SCMI statistics shared memory region */
+#define VIRTIO_SCMI_F_SHARED_MEMORY 1
+
+/* Virtqueues */
+
+#define VIRTIO_SCMI_VQ_TX 0 /* cmdq */
+#define VIRTIO_SCMI_VQ_RX 1 /* eventq */
+#define VIRTIO_SCMI_VQ_MAX_CNT 2
+
+#endif /* _LINUX_VIRTIO_SCMI_H */
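/*
 * Illustrative sketch, not part of the diff above: picking the SCMI virtqueue
 * index for a message using the constants defined in this header.  Commands
 * go to the TX queue; notifications and delayed responses use the RX (event)
 * queue, which is tied to the VIRTIO_SCMI_F_P2A_CHANNELS feature.
 * "device_features" is an assumed variable.
 */
static int scmi_vq_for(bool is_command, uint64_t device_features)
{
    if (is_command) {
        return VIRTIO_SCMI_VQ_TX;                 /* cmdq */
    }
    if (device_features & (1ULL << VIRTIO_SCMI_F_P2A_CHANNELS)) {
        return VIRTIO_SCMI_VQ_RX;                 /* eventq */
    }
    return -1; /* device offers no notification channel */
}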
diff --git a/include/standard-headers/linux/virtio_scsi.h b/include/standard-headers/linux/virtio_scsi.h
index ab66166b6a..663f36cbb7 100644
--- a/include/standard-headers/linux/virtio_scsi.h
+++ b/include/standard-headers/linux/virtio_scsi.h
@@ -103,16 +103,16 @@ struct virtio_scsi_event {
} QEMU_PACKED;
struct virtio_scsi_config {
- uint32_t num_queues;
- uint32_t seg_max;
- uint32_t max_sectors;
- uint32_t cmd_per_lun;
- uint32_t event_info_size;
- uint32_t sense_size;
- uint32_t cdb_size;
- uint16_t max_channel;
- uint16_t max_target;
- uint32_t max_lun;
+ __virtio32 num_queues;
+ __virtio32 seg_max;
+ __virtio32 max_sectors;
+ __virtio32 cmd_per_lun;
+ __virtio32 event_info_size;
+ __virtio32 sense_size;
+ __virtio32 cdb_size;
+ __virtio16 max_channel;
+ __virtio16 max_target;
+ __virtio32 max_lun;
} QEMU_PACKED;
/* Feature Bits */
diff --git a/include/standard-headers/linux/virtio_snd.h b/include/standard-headers/linux/virtio_snd.h
new file mode 100644
index 0000000000..860f12e0a4
--- /dev/null
+++ b/include/standard-headers/linux/virtio_snd.h
@@ -0,0 +1,488 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2021 OpenSynergy GmbH
+ */
+#ifndef VIRTIO_SND_IF_H
+#define VIRTIO_SND_IF_H
+
+#include "standard-headers/linux/virtio_types.h"
+
+/*******************************************************************************
+ * FEATURE BITS
+ */
+enum {
+ /* device supports control elements */
+ VIRTIO_SND_F_CTLS = 0
+};
+
+/*******************************************************************************
+ * CONFIGURATION SPACE
+ */
+struct virtio_snd_config {
+ /* # of available physical jacks */
+ uint32_t jacks;
+ /* # of available PCM streams */
+ uint32_t streams;
+ /* # of available channel maps */
+ uint32_t chmaps;
+ /* # of available control elements */
+ uint32_t controls;
+};
+
+enum {
+ /* device virtqueue indexes */
+ VIRTIO_SND_VQ_CONTROL = 0,
+ VIRTIO_SND_VQ_EVENT,
+ VIRTIO_SND_VQ_TX,
+ VIRTIO_SND_VQ_RX,
+ /* # of device virtqueues */
+ VIRTIO_SND_VQ_MAX
+};
+
+/*******************************************************************************
+ * COMMON DEFINITIONS
+ */
+
+/* supported dataflow directions */
+enum {
+ VIRTIO_SND_D_OUTPUT = 0,
+ VIRTIO_SND_D_INPUT
+};
+
+enum {
+ /* jack control request types */
+ VIRTIO_SND_R_JACK_INFO = 1,
+ VIRTIO_SND_R_JACK_REMAP,
+
+ /* PCM control request types */
+ VIRTIO_SND_R_PCM_INFO = 0x0100,
+ VIRTIO_SND_R_PCM_SET_PARAMS,
+ VIRTIO_SND_R_PCM_PREPARE,
+ VIRTIO_SND_R_PCM_RELEASE,
+ VIRTIO_SND_R_PCM_START,
+ VIRTIO_SND_R_PCM_STOP,
+
+ /* channel map control request types */
+ VIRTIO_SND_R_CHMAP_INFO = 0x0200,
+
+ /* control element request types */
+ VIRTIO_SND_R_CTL_INFO = 0x0300,
+ VIRTIO_SND_R_CTL_ENUM_ITEMS,
+ VIRTIO_SND_R_CTL_READ,
+ VIRTIO_SND_R_CTL_WRITE,
+ VIRTIO_SND_R_CTL_TLV_READ,
+ VIRTIO_SND_R_CTL_TLV_WRITE,
+ VIRTIO_SND_R_CTL_TLV_COMMAND,
+
+ /* jack event types */
+ VIRTIO_SND_EVT_JACK_CONNECTED = 0x1000,
+ VIRTIO_SND_EVT_JACK_DISCONNECTED,
+
+ /* PCM event types */
+ VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED = 0x1100,
+ VIRTIO_SND_EVT_PCM_XRUN,
+
+ /* control element event types */
+ VIRTIO_SND_EVT_CTL_NOTIFY = 0x1200,
+
+ /* common status codes */
+ VIRTIO_SND_S_OK = 0x8000,
+ VIRTIO_SND_S_BAD_MSG,
+ VIRTIO_SND_S_NOT_SUPP,
+ VIRTIO_SND_S_IO_ERR
+};
+
+/* common header */
+struct virtio_snd_hdr {
+ uint32_t code;
+};
+
+/* event notification */
+struct virtio_snd_event {
+ /* VIRTIO_SND_EVT_XXX */
+ struct virtio_snd_hdr hdr;
+ /* optional event data */
+ uint32_t data;
+};
+
+/* common control request to query an item information */
+struct virtio_snd_query_info {
+ /* VIRTIO_SND_R_XXX_INFO */
+ struct virtio_snd_hdr hdr;
+ /* item start identifier */
+ uint32_t start_id;
+ /* item count to query */
+ uint32_t count;
+ /* item information size in bytes */
+ uint32_t size;
+};
+
+/* common item information header */
+struct virtio_snd_info {
+ /* function group node id (High Definition Audio Specification 7.1.2) */
+ uint32_t hda_fn_nid;
+};
+
+/*******************************************************************************
+ * JACK CONTROL MESSAGES
+ */
+struct virtio_snd_jack_hdr {
+ /* VIRTIO_SND_R_JACK_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::jacks - 1 */
+ uint32_t jack_id;
+};
+
+/* supported jack features */
+enum {
+ VIRTIO_SND_JACK_F_REMAP = 0
+};
+
+struct virtio_snd_jack_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_JACK_F_XXX) */
+ uint32_t features;
+ /* pin configuration (High Definition Audio Specification 7.3.3.31) */
+ uint32_t hda_reg_defconf;
+ /* pin capabilities (High Definition Audio Specification 7.3.4.9) */
+ uint32_t hda_reg_caps;
+ /* current jack connection status (0: disconnected, 1: connected) */
+ uint8_t connected;
+
+ uint8_t padding[7];
+};
+
+/* jack remapping control request */
+struct virtio_snd_jack_remap {
+ /* .code = VIRTIO_SND_R_JACK_REMAP */
+ struct virtio_snd_jack_hdr hdr;
+ /* selected association number */
+ uint32_t association;
+ /* selected sequence number */
+ uint32_t sequence;
+};
+
+/*******************************************************************************
+ * PCM CONTROL MESSAGES
+ */
+struct virtio_snd_pcm_hdr {
+ /* VIRTIO_SND_R_PCM_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::streams - 1 */
+ uint32_t stream_id;
+};
+
+/* supported PCM stream features */
+enum {
+ VIRTIO_SND_PCM_F_SHMEM_HOST = 0,
+ VIRTIO_SND_PCM_F_SHMEM_GUEST,
+ VIRTIO_SND_PCM_F_MSG_POLLING,
+ VIRTIO_SND_PCM_F_EVT_SHMEM_PERIODS,
+ VIRTIO_SND_PCM_F_EVT_XRUNS
+};
+
+/* supported PCM sample formats */
+enum {
+ /* analog formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_IMA_ADPCM = 0, /* 4 / 4 bits */
+ VIRTIO_SND_PCM_FMT_MU_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_A_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_S18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT64, /* 64 / 64 bits */
+ /* digital formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_DSD_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME /* 32 / 32 bits */
+};
+
+/* supported PCM frame rates */
+enum {
+ VIRTIO_SND_PCM_RATE_5512 = 0,
+ VIRTIO_SND_PCM_RATE_8000,
+ VIRTIO_SND_PCM_RATE_11025,
+ VIRTIO_SND_PCM_RATE_16000,
+ VIRTIO_SND_PCM_RATE_22050,
+ VIRTIO_SND_PCM_RATE_32000,
+ VIRTIO_SND_PCM_RATE_44100,
+ VIRTIO_SND_PCM_RATE_48000,
+ VIRTIO_SND_PCM_RATE_64000,
+ VIRTIO_SND_PCM_RATE_88200,
+ VIRTIO_SND_PCM_RATE_96000,
+ VIRTIO_SND_PCM_RATE_176400,
+ VIRTIO_SND_PCM_RATE_192000,
+ VIRTIO_SND_PCM_RATE_384000
+};
+
+struct virtio_snd_pcm_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ uint32_t features;
+ /* supported sample format bit map (1 << VIRTIO_SND_PCM_FMT_XXX) */
+ uint64_t formats;
+ /* supported frame rate bit map (1 << VIRTIO_SND_PCM_RATE_XXX) */
+ uint64_t rates;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ uint8_t direction;
+ /* minimum # of supported channels */
+ uint8_t channels_min;
+ /* maximum # of supported channels */
+ uint8_t channels_max;
+
+ uint8_t padding[5];
+};
+
+/* set PCM stream format */
+struct virtio_snd_pcm_set_params {
+ /* .code = VIRTIO_SND_R_PCM_SET_PARAMS */
+ struct virtio_snd_pcm_hdr hdr;
+ /* size of the hardware buffer */
+ uint32_t buffer_bytes;
+ /* size of the hardware period */
+ uint32_t period_bytes;
+ /* selected feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ uint32_t features;
+ /* selected # of channels */
+ uint8_t channels;
+ /* selected sample format (VIRTIO_SND_PCM_FMT_XXX) */
+ uint8_t format;
+ /* selected frame rate (VIRTIO_SND_PCM_RATE_XXX) */
+ uint8_t rate;
+
+ uint8_t padding;
+};
+
+/*******************************************************************************
+ * PCM I/O MESSAGES
+ */
+
+/* I/O request header */
+struct virtio_snd_pcm_xfer {
+ /* 0 ... virtio_snd_config::streams - 1 */
+ uint32_t stream_id;
+};
+
+/* I/O request status */
+struct virtio_snd_pcm_status {
+ /* VIRTIO_SND_S_XXX */
+ uint32_t status;
+ /* current device latency */
+ uint32_t latency_bytes;
+};
+
+/*******************************************************************************
+ * CHANNEL MAP CONTROL MESSAGES
+ */
+struct virtio_snd_chmap_hdr {
+ /* VIRTIO_SND_R_CHMAP_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::chmaps - 1 */
+ uint32_t chmap_id;
+};
+
+/* standard channel position definition */
+enum {
+ VIRTIO_SND_CHMAP_NONE = 0, /* undefined */
+ VIRTIO_SND_CHMAP_NA, /* silent */
+ VIRTIO_SND_CHMAP_MONO, /* mono stream */
+ VIRTIO_SND_CHMAP_FL, /* front left */
+ VIRTIO_SND_CHMAP_FR, /* front right */
+ VIRTIO_SND_CHMAP_RL, /* rear left */
+ VIRTIO_SND_CHMAP_RR, /* rear right */
+ VIRTIO_SND_CHMAP_FC, /* front center */
+ VIRTIO_SND_CHMAP_LFE, /* low frequency (LFE) */
+ VIRTIO_SND_CHMAP_SL, /* side left */
+ VIRTIO_SND_CHMAP_SR, /* side right */
+ VIRTIO_SND_CHMAP_RC, /* rear center */
+ VIRTIO_SND_CHMAP_FLC, /* front left center */
+ VIRTIO_SND_CHMAP_FRC, /* front right center */
+ VIRTIO_SND_CHMAP_RLC, /* rear left center */
+ VIRTIO_SND_CHMAP_RRC, /* rear right center */
+ VIRTIO_SND_CHMAP_FLW, /* front left wide */
+ VIRTIO_SND_CHMAP_FRW, /* front right wide */
+ VIRTIO_SND_CHMAP_FLH, /* front left high */
+ VIRTIO_SND_CHMAP_FCH, /* front center high */
+ VIRTIO_SND_CHMAP_FRH, /* front right high */
+ VIRTIO_SND_CHMAP_TC, /* top center */
+ VIRTIO_SND_CHMAP_TFL, /* top front left */
+ VIRTIO_SND_CHMAP_TFR, /* top front right */
+ VIRTIO_SND_CHMAP_TFC, /* top front center */
+ VIRTIO_SND_CHMAP_TRL, /* top rear left */
+ VIRTIO_SND_CHMAP_TRR, /* top rear right */
+ VIRTIO_SND_CHMAP_TRC, /* top rear center */
+ VIRTIO_SND_CHMAP_TFLC, /* top front left center */
+ VIRTIO_SND_CHMAP_TFRC, /* top front right center */
+ VIRTIO_SND_CHMAP_TSL, /* top side left */
+ VIRTIO_SND_CHMAP_TSR, /* top side right */
+ VIRTIO_SND_CHMAP_LLFE, /* left LFE */
+ VIRTIO_SND_CHMAP_RLFE, /* right LFE */
+ VIRTIO_SND_CHMAP_BC, /* bottom center */
+ VIRTIO_SND_CHMAP_BLC, /* bottom left center */
+ VIRTIO_SND_CHMAP_BRC /* bottom right center */
+};
+
+/* maximum possible number of channels */
+#define VIRTIO_SND_CHMAP_MAX_SIZE 18
+
+struct virtio_snd_chmap_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ uint8_t direction;
+ /* # of valid channel position values */
+ uint8_t channels;
+ /* channel position values (VIRTIO_SND_CHMAP_XXX) */
+ uint8_t positions[VIRTIO_SND_CHMAP_MAX_SIZE];
+};
+
+/*******************************************************************************
+ * CONTROL ELEMENTS MESSAGES
+ */
+struct virtio_snd_ctl_hdr {
+ /* VIRTIO_SND_R_CTL_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::controls - 1 */
+ uint32_t control_id;
+};
+
+/* supported roles for control elements */
+enum {
+ VIRTIO_SND_CTL_ROLE_UNDEFINED = 0,
+ VIRTIO_SND_CTL_ROLE_VOLUME,
+ VIRTIO_SND_CTL_ROLE_MUTE,
+ VIRTIO_SND_CTL_ROLE_GAIN
+};
+
+/* supported value types for control elements */
+enum {
+ VIRTIO_SND_CTL_TYPE_BOOLEAN = 0,
+ VIRTIO_SND_CTL_TYPE_INTEGER,
+ VIRTIO_SND_CTL_TYPE_INTEGER64,
+ VIRTIO_SND_CTL_TYPE_ENUMERATED,
+ VIRTIO_SND_CTL_TYPE_BYTES,
+ VIRTIO_SND_CTL_TYPE_IEC958
+};
+
+/* supported access rights for control elements */
+enum {
+ VIRTIO_SND_CTL_ACCESS_READ = 0,
+ VIRTIO_SND_CTL_ACCESS_WRITE,
+ VIRTIO_SND_CTL_ACCESS_VOLATILE,
+ VIRTIO_SND_CTL_ACCESS_INACTIVE,
+ VIRTIO_SND_CTL_ACCESS_TLV_READ,
+ VIRTIO_SND_CTL_ACCESS_TLV_WRITE,
+ VIRTIO_SND_CTL_ACCESS_TLV_COMMAND
+};
+
+struct virtio_snd_ctl_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* element role (VIRTIO_SND_CTL_ROLE_XXX) */
+ uint32_t role;
+ /* element value type (VIRTIO_SND_CTL_TYPE_XXX) */
+ uint32_t type;
+ /* element access right bit map (1 << VIRTIO_SND_CTL_ACCESS_XXX) */
+ uint32_t access;
+ /* # of members in the element value */
+ uint32_t count;
+ /* index for an element with a non-unique name */
+ uint32_t index;
+ /* name identifier string for the element */
+ uint8_t name[44];
+ /* additional information about the element's value */
+ union {
+ /* VIRTIO_SND_CTL_TYPE_INTEGER */
+ struct {
+ /* minimum supported value */
+ uint32_t min;
+ /* maximum supported value */
+ uint32_t max;
+ /* fixed step size for value (0 = variable size) */
+ uint32_t step;
+ } integer;
+ /* VIRTIO_SND_CTL_TYPE_INTEGER64 */
+ struct {
+ /* minimum supported value */
+ uint64_t min;
+ /* maximum supported value */
+ uint64_t max;
+ /* fixed step size for value (0 = variable size) */
+ uint64_t step;
+ } integer64;
+ /* VIRTIO_SND_CTL_TYPE_ENUMERATED */
+ struct {
+ /* # of options supported for value */
+ uint32_t items;
+ } enumerated;
+ } value;
+};
+
+struct virtio_snd_ctl_enum_item {
+ /* option name */
+ uint8_t item[64];
+};
+
+struct virtio_snd_ctl_iec958 {
+ /* AES/IEC958 channel status bits */
+ uint8_t status[24];
+ /* AES/IEC958 subcode bits */
+ uint8_t subcode[147];
+ /* nothing */
+ uint8_t pad;
+ /* AES/IEC958 subframe bits */
+ uint8_t dig_subframe[4];
+};
+
+struct virtio_snd_ctl_value {
+ union {
+ /* VIRTIO_SND_CTL_TYPE_BOOLEAN|INTEGER value */
+ uint32_t integer[128];
+ /* VIRTIO_SND_CTL_TYPE_INTEGER64 value */
+ uint64_t integer64[64];
+ /* VIRTIO_SND_CTL_TYPE_ENUMERATED value (option indexes) */
+ uint32_t enumerated[128];
+ /* VIRTIO_SND_CTL_TYPE_BYTES value */
+ uint8_t bytes[512];
+ /* VIRTIO_SND_CTL_TYPE_IEC958 value */
+ struct virtio_snd_ctl_iec958 iec958;
+ } value;
+};
+
+/* supported event reason types */
+enum {
+ /* element's value has changed */
+ VIRTIO_SND_CTL_EVT_MASK_VALUE = 0,
+ /* element's information has changed */
+ VIRTIO_SND_CTL_EVT_MASK_INFO,
+ /* element's metadata has changed */
+ VIRTIO_SND_CTL_EVT_MASK_TLV
+};
+
+struct virtio_snd_ctl_event {
+ /* VIRTIO_SND_EVT_CTL_NOTIFY */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::controls - 1 */
+ uint16_t control_id;
+ /* event reason bit map (1 << VIRTIO_SND_CTL_EVT_MASK_XXX) */
+ uint16_t mask;
+};
+
+#endif /* VIRTIO_SND_IF_H */
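/*
 * Illustrative sketch, not part of the diff above: filling in a
 * VIRTIO_SND_R_PCM_SET_PARAMS request for stream 0 (48 kHz, stereo, S16)
 * using the structures declared in this header.  Byte-order handling and the
 * actual virtqueue submission are omitted; the sizes chosen are arbitrary.
 */
static void example_fill_pcm_params(struct virtio_snd_pcm_set_params *req)
{
    req->hdr.hdr.code  = VIRTIO_SND_R_PCM_SET_PARAMS;
    req->hdr.stream_id = 0;                       /* first PCM stream */
    req->buffer_bytes  = 16384;                   /* hardware buffer size */
    req->period_bytes  = 4096;                    /* period (interrupt) size */
    req->features      = 0;                       /* no optional features */
    req->channels      = 2;
    req->format        = VIRTIO_SND_PCM_FMT_S16;
    req->rate          = VIRTIO_SND_PCM_RATE_48000;
    req->padding       = 0;
}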
diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
index be443211ce..467e751b17 100644
--- a/include/standard-headers/linux/virtio_vsock.h
+++ b/include/standard-headers/linux/virtio_vsock.h
@@ -38,6 +38,9 @@
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
+/* The feature bitmap for virtio vsock */
+#define VIRTIO_VSOCK_F_SEQPACKET 1 /* SOCK_SEQPACKET supported */
+
struct virtio_vsock_config {
uint64_t guest_cid;
} QEMU_PACKED;
@@ -65,6 +68,7 @@ struct virtio_vsock_hdr {
enum virtio_vsock_type {
VIRTIO_VSOCK_TYPE_STREAM = 1,
+ VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
};
enum virtio_vsock_op {
@@ -91,4 +95,10 @@ enum virtio_vsock_shutdown {
VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
};
+/* VIRTIO_VSOCK_OP_RW flags values */
+enum virtio_vsock_rw {
+ VIRTIO_VSOCK_SEQ_EOM = 1,
+ VIRTIO_VSOCK_SEQ_EOR = 2,
+};
+
#endif /* _LINUX_VIRTIO_VSOCK_H */
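/*
 * Illustrative sketch, not part of the diff above: checking SEQPACKET record
 * boundaries on a received VIRTIO_VSOCK_OP_RW packet.  It assumes the op and
 * flags fields of struct virtio_vsock_hdr declared earlier in this header;
 * byte-order conversion is omitted.
 */
static bool example_is_end_of_message(const struct virtio_vsock_hdr *hdr)
{
    if (hdr->op != VIRTIO_VSOCK_OP_RW) {
        return false;
    }
    /* EOM marks the end of a message; EOR additionally signals MSG_EOR. */
    return hdr->flags & VIRTIO_VSOCK_SEQ_EOM;
}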
diff --git a/include/standard-headers/rdma/vmw_pvrdma-abi.h b/include/standard-headers/rdma/vmw_pvrdma-abi.h
deleted file mode 100644
index 6c2bc46116..0000000000
--- a/include/standard-headers/rdma/vmw_pvrdma-abi.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __VMW_PVRDMA_ABI_H__
-#define __VMW_PVRDMA_ABI_H__
-
-#include "standard-headers/linux/types.h"
-
-#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
-#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
-#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
-#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
-#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
-#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
-#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
-
-enum pvrdma_wr_opcode {
- PVRDMA_WR_RDMA_WRITE,
- PVRDMA_WR_RDMA_WRITE_WITH_IMM,
- PVRDMA_WR_SEND,
- PVRDMA_WR_SEND_WITH_IMM,
- PVRDMA_WR_RDMA_READ,
- PVRDMA_WR_ATOMIC_CMP_AND_SWP,
- PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
- PVRDMA_WR_LSO,
- PVRDMA_WR_SEND_WITH_INV,
- PVRDMA_WR_RDMA_READ_WITH_INV,
- PVRDMA_WR_LOCAL_INV,
- PVRDMA_WR_FAST_REG_MR,
- PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
- PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
- PVRDMA_WR_BIND_MW,
- PVRDMA_WR_REG_SIG_MR,
-};
-
-enum pvrdma_wc_status {
- PVRDMA_WC_SUCCESS,
- PVRDMA_WC_LOC_LEN_ERR,
- PVRDMA_WC_LOC_QP_OP_ERR,
- PVRDMA_WC_LOC_EEC_OP_ERR,
- PVRDMA_WC_LOC_PROT_ERR,
- PVRDMA_WC_WR_FLUSH_ERR,
- PVRDMA_WC_MW_BIND_ERR,
- PVRDMA_WC_BAD_RESP_ERR,
- PVRDMA_WC_LOC_ACCESS_ERR,
- PVRDMA_WC_REM_INV_REQ_ERR,
- PVRDMA_WC_REM_ACCESS_ERR,
- PVRDMA_WC_REM_OP_ERR,
- PVRDMA_WC_RETRY_EXC_ERR,
- PVRDMA_WC_RNR_RETRY_EXC_ERR,
- PVRDMA_WC_LOC_RDD_VIOL_ERR,
- PVRDMA_WC_REM_INV_RD_REQ_ERR,
- PVRDMA_WC_REM_ABORT_ERR,
- PVRDMA_WC_INV_EECN_ERR,
- PVRDMA_WC_INV_EEC_STATE_ERR,
- PVRDMA_WC_FATAL_ERR,
- PVRDMA_WC_RESP_TIMEOUT_ERR,
- PVRDMA_WC_GENERAL_ERR,
-};
-
-enum pvrdma_wc_opcode {
- PVRDMA_WC_SEND,
- PVRDMA_WC_RDMA_WRITE,
- PVRDMA_WC_RDMA_READ,
- PVRDMA_WC_COMP_SWAP,
- PVRDMA_WC_FETCH_ADD,
- PVRDMA_WC_BIND_MW,
- PVRDMA_WC_LSO,
- PVRDMA_WC_LOCAL_INV,
- PVRDMA_WC_FAST_REG_MR,
- PVRDMA_WC_MASKED_COMP_SWAP,
- PVRDMA_WC_MASKED_FETCH_ADD,
- PVRDMA_WC_RECV = 1 << 7,
- PVRDMA_WC_RECV_RDMA_WITH_IMM,
-};
-
-enum pvrdma_wc_flags {
- PVRDMA_WC_GRH = 1 << 0,
- PVRDMA_WC_WITH_IMM = 1 << 1,
- PVRDMA_WC_WITH_INVALIDATE = 1 << 2,
- PVRDMA_WC_IP_CSUM_OK = 1 << 3,
- PVRDMA_WC_WITH_SMAC = 1 << 4,
- PVRDMA_WC_WITH_VLAN = 1 << 5,
- PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6,
- PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
-};
-
-struct pvrdma_alloc_ucontext_resp {
- uint32_t qp_tab_size;
- uint32_t reserved;
-};
-
-struct pvrdma_alloc_pd_resp {
- uint32_t pdn;
- uint32_t reserved;
-};
-
-struct pvrdma_create_cq {
- uint64_t __attribute__((aligned(8))) buf_addr;
- uint32_t buf_size;
- uint32_t reserved;
-};
-
-struct pvrdma_create_cq_resp {
- uint32_t cqn;
- uint32_t reserved;
-};
-
-struct pvrdma_resize_cq {
- uint64_t __attribute__((aligned(8))) buf_addr;
- uint32_t buf_size;
- uint32_t reserved;
-};
-
-struct pvrdma_create_srq {
- uint64_t __attribute__((aligned(8))) buf_addr;
- uint32_t buf_size;
- uint32_t reserved;
-};
-
-struct pvrdma_create_srq_resp {
- uint32_t srqn;
- uint32_t reserved;
-};
-
-struct pvrdma_create_qp {
- uint64_t __attribute__((aligned(8))) rbuf_addr;
- uint64_t __attribute__((aligned(8))) sbuf_addr;
- uint32_t rbuf_size;
- uint32_t sbuf_size;
- uint64_t __attribute__((aligned(8))) qp_addr;
-};
-
-/* PVRDMA masked atomic compare and swap */
-struct pvrdma_ex_cmp_swap {
- uint64_t __attribute__((aligned(8))) swap_val;
- uint64_t __attribute__((aligned(8))) compare_val;
- uint64_t __attribute__((aligned(8))) swap_mask;
- uint64_t __attribute__((aligned(8))) compare_mask;
-};
-
-/* PVRDMA masked atomic fetch and add */
-struct pvrdma_ex_fetch_add {
- uint64_t __attribute__((aligned(8))) add_val;
- uint64_t __attribute__((aligned(8))) field_boundary;
-};
-
-/* PVRDMA address vector. */
-struct pvrdma_av {
- uint32_t port_pd;
- uint32_t sl_tclass_flowlabel;
- uint8_t dgid[16];
- uint8_t src_path_bits;
- uint8_t gid_index;
- uint8_t stat_rate;
- uint8_t hop_limit;
- uint8_t dmac[6];
- uint8_t reserved[6];
-};
-
-/* PVRDMA scatter/gather entry */
-struct pvrdma_sge {
- uint64_t __attribute__((aligned(8))) addr;
- uint32_t length;
- uint32_t lkey;
-};
-
-/* PVRDMA receive queue work request */
-struct pvrdma_rq_wqe_hdr {
- uint64_t __attribute__((aligned(8))) wr_id; /* wr id */
- uint32_t num_sge; /* size of s/g array */
- uint32_t total_len; /* reserved */
-};
-/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */
-
-/* PVRDMA send queue work request */
-struct pvrdma_sq_wqe_hdr {
- uint64_t __attribute__((aligned(8))) wr_id; /* wr id */
- uint32_t num_sge; /* size of s/g array */
- uint32_t total_len; /* reserved */
- uint32_t opcode; /* operation type */
- uint32_t send_flags; /* wr flags */
- union {
- uint32_t imm_data;
- uint32_t invalidate_rkey;
- } ex;
- uint32_t reserved;
- union {
- struct {
- uint64_t __attribute__((aligned(8))) remote_addr;
- uint32_t rkey;
- uint8_t reserved[4];
- } rdma;
- struct {
- uint64_t __attribute__((aligned(8))) remote_addr;
- uint64_t __attribute__((aligned(8))) compare_add;
- uint64_t __attribute__((aligned(8))) swap;
- uint32_t rkey;
- uint32_t reserved;
- } atomic;
- struct {
- uint64_t __attribute__((aligned(8))) remote_addr;
- uint32_t log_arg_sz;
- uint32_t rkey;
- union {
- struct pvrdma_ex_cmp_swap cmp_swap;
- struct pvrdma_ex_fetch_add fetch_add;
- } wr_data;
- } masked_atomics;
- struct {
- uint64_t __attribute__((aligned(8))) iova_start;
- uint64_t __attribute__((aligned(8))) pl_pdir_dma;
- uint32_t page_shift;
- uint32_t page_list_len;
- uint32_t length;
- uint32_t access_flags;
- uint32_t rkey;
- uint32_t reserved;
- } fast_reg;
- struct {
- uint32_t remote_qpn;
- uint32_t remote_qkey;
- struct pvrdma_av av;
- } ud;
- } wr;
-};
-/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */
-
-/* Completion queue element. */
-struct pvrdma_cqe {
- uint64_t __attribute__((aligned(8))) wr_id;
- uint64_t __attribute__((aligned(8))) qp;
- uint32_t opcode;
- uint32_t status;
- uint32_t byte_len;
- uint32_t imm_data;
- uint32_t src_qp;
- uint32_t wc_flags;
- uint32_t vendor_err;
- uint16_t pkey_index;
- uint16_t slid;
- uint8_t sl;
- uint8_t dlid_path_bits;
- uint8_t port_num;
- uint8_t smac[6];
- uint8_t network_hdr_type;
- uint8_t reserved2[6]; /* Pad to next power of 2 (64). */
-};
-
-#endif /* __VMW_PVRDMA_ABI_H__ */
diff --git a/include/sysemu/accel-blocker.h b/include/sysemu/accel-blocker.h
new file mode 100644
index 0000000000..f07f368358
--- /dev/null
+++ b/include/sysemu/accel-blocker.h
@@ -0,0 +1,55 @@
+/*
+ * Accelerator blocking API, to prevent new ioctls from starting and to wait
+ * for the running ones to finish.
+ * This mechanism differs from pause/resume_all_vcpus() in that it does not
+ * release the BQL.
+ *
+ * Copyright (c) 2022 Red Hat Inc.
+ *
+ * Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef ACCEL_BLOCKER_H
+#define ACCEL_BLOCKER_H
+
+#include "sysemu/cpus.h"
+
+void accel_blocker_init(void);
+
+/*
+ * accel_{cpu_}ioctl_begin/end:
+ * Mark when an ioctl is about to run or has just finished.
+ *
+ * accel_{cpu_}ioctl_begin will block after accel_ioctl_inhibit_begin() is
+ * called, preventing new ioctls from running. They will continue only after
+ * accel_ioctl_inhibit_end().
+ */
+void accel_ioctl_begin(void);
+void accel_ioctl_end(void);
+void accel_cpu_ioctl_begin(CPUState *cpu);
+void accel_cpu_ioctl_end(CPUState *cpu);
+
+/*
+ * accel_ioctl_inhibit_begin: start critical section
+ *
+ * This function makes sure that:
+ * 1) incoming accel_{cpu_}ioctl_begin() calls block, and
+ * 2) all ioctls that were already running have reached
+ *    accel_{cpu_}ioctl_end(), kicking vcpus if necessary.
+ *
+ * This allows the caller to access shared data or perform operations without
+ * worrying about concurrent vcpu accesses.
+ */
+void accel_ioctl_inhibit_begin(void);
+
+/*
+ * accel_ioctl_inhibit_end: end critical section started by
+ * accel_ioctl_inhibit_begin()
+ *
+ * This function allows blocked accel_{cpu_}ioctl_begin() to continue.
+ */
+void accel_ioctl_inhibit_end(void);
+
+#endif /* ACCEL_BLOCKER_H */
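/*
 * Illustrative sketch, not part of the diff above: the intended pairing of
 * the calls declared in this header.  A vcpu thread brackets each accelerator
 * ioctl, while a thread that needs the ioctls quiesced (here a hypothetical
 * update_shared_state()) brackets its critical section with the inhibit calls.
 */
static void example_vcpu_side(CPUState *cpu)
{
    accel_cpu_ioctl_begin(cpu);   /* blocks while an inhibitor is active */
    /* kvm_vcpu_ioctl(cpu, ...); -- the actual accelerator ioctl */
    accel_cpu_ioctl_end(cpu);
}

static void example_inhibitor_side(void)
{
    accel_ioctl_inhibit_begin();  /* waits for in-flight ioctls to drain */
    /* update_shared_state();     -- safe: no accelerator ioctl can run now */
    accel_ioctl_inhibit_end();
}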
diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h
new file mode 100644
index 0000000000..ef91fc28bb
--- /dev/null
+++ b/include/sysemu/accel-ops.h
@@ -0,0 +1,58 @@
+/*
+ * Accelerator OPS, used for cpus.c module
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_OPS_H
+#define ACCEL_OPS_H
+
+#include "exec/cpu-common.h"
+#include "qom/object.h"
+
+#define ACCEL_OPS_SUFFIX "-ops"
+#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX
+#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS)
+
+typedef struct AccelOpsClass AccelOpsClass;
+DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS)
+
+/* cpus.c operations interface */
+struct AccelOpsClass {
+ /*< private >*/
+ ObjectClass parent_class;
+ /*< public >*/
+
+ /* initialization function called when accel is chosen */
+ void (*ops_init)(AccelOpsClass *ops);
+
+ bool (*cpus_are_resettable)(void);
+ void (*cpu_reset_hold)(CPUState *cpu);
+
+ void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
+ void (*kick_vcpu_thread)(CPUState *cpu);
+ bool (*cpu_thread_is_idle)(CPUState *cpu);
+
+ void (*synchronize_post_reset)(CPUState *cpu);
+ void (*synchronize_post_init)(CPUState *cpu);
+ void (*synchronize_state)(CPUState *cpu);
+ void (*synchronize_pre_loadvm)(CPUState *cpu);
+ void (*synchronize_pre_resume)(bool step_pending);
+
+ void (*handle_interrupt)(CPUState *cpu, int mask);
+
+ int64_t (*get_virtual_clock)(void);
+ int64_t (*get_elapsed_ticks)(void);
+
+ /* gdbstub hooks */
+ bool (*supports_guest_debug)(void);
+ int (*update_guest_debug)(CPUState *cpu);
+ int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
+ int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
+ void (*remove_all_breakpoints)(CPUState *cpu);
+};
+
+#endif /* ACCEL_OPS_H */
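/*
 * Illustrative sketch, not part of the diff above: how an accelerator would
 * typically provide its AccelOpsClass.  "myaccel" and the stub callbacks are
 * hypothetical; the QOM boilerplate (TypeInfo, type_register_static(),
 * type_init() from qom/object.h and qemu/module.h) follows the usual pattern
 * used by the in-tree accelerators.
 */
static void myaccel_start_vcpu_thread(CPUState *cpu)
{
    /* spawn the per-vcpu execution thread (placeholder) */
}

static void myaccel_kick_vcpu_thread(CPUState *cpu)
{
    /* wake the vcpu thread out of its wait/execution loop (placeholder) */
}

static void myaccel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = myaccel_start_vcpu_thread; /* mandatory */
    ops->kick_vcpu_thread   = myaccel_kick_vcpu_thread;
}

static const TypeInfo myaccel_ops_type = {
    .name = ACCEL_OPS_NAME("myaccel"),   /* expands to "myaccel-accel-ops" */
    .parent = TYPE_ACCEL_OPS,
    .class_init = myaccel_ops_class_init,
    .abstract = true,
};

static void myaccel_ops_register_types(void)
{
    type_register_static(&myaccel_ops_type);
}
type_init(myaccel_ops_register_types);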
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 32abdfe6a1..8d041aa84e 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -1,7 +1,6 @@
#ifndef QEMU_ARCH_INIT_H
#define QEMU_ARCH_INIT_H
-#include "qapi/qapi-types-misc.h"
enum {
QEMU_ARCH_ALL = -1,
@@ -10,7 +9,6 @@ enum {
QEMU_ARCH_CRIS = (1 << 2),
QEMU_ARCH_I386 = (1 << 3),
QEMU_ARCH_M68K = (1 << 4),
- QEMU_ARCH_LM32 = (1 << 5),
QEMU_ARCH_MICROBLAZE = (1 << 6),
QEMU_ARCH_MIPS = (1 << 7),
QEMU_ARCH_PPC = (1 << 8),
@@ -19,28 +17,17 @@ enum {
QEMU_ARCH_SPARC = (1 << 11),
QEMU_ARCH_XTENSA = (1 << 12),
QEMU_ARCH_OPENRISC = (1 << 13),
- QEMU_ARCH_UNICORE32 = (1 << 14),
- QEMU_ARCH_MOXIE = (1 << 15),
QEMU_ARCH_TRICORE = (1 << 16),
- QEMU_ARCH_NIOS2 = (1 << 17),
QEMU_ARCH_HPPA = (1 << 18),
QEMU_ARCH_RISCV = (1 << 19),
+ QEMU_ARCH_RX = (1 << 20),
+ QEMU_ARCH_AVR = (1 << 21),
+ QEMU_ARCH_HEXAGON = (1 << 22),
+ QEMU_ARCH_LOONGARCH = (1 << 23),
};
extern const uint32_t arch_type;
-int kvm_available(void);
-int xen_available(void);
-
-CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp);
-CpuModelExpansionInfo *arch_query_cpu_model_expansion(CpuModelExpansionType type,
- CpuModelInfo *mode,
- Error **errp);
-CpuModelCompareInfo *arch_query_cpu_model_comparison(CpuModelInfo *modela,
- CpuModelInfo *modelb,
- Error **errp);
-CpuModelBaselineInfo *arch_query_cpu_model_baseline(CpuModelInfo *modela,
- CpuModelInfo *modelb,
- Error **errp);
+void qemu_init_arch_modules(void);
#endif
diff --git a/include/sysemu/balloon.h b/include/sysemu/balloon.h
index c8f6145257..867687b73a 100644
--- a/include/sysemu/balloon.h
+++ b/include/sysemu/balloon.h
@@ -14,7 +14,8 @@
#ifndef QEMU_BALLOON_H
#define QEMU_BALLOON_H
-#include "qapi/qapi-types-misc.h"
+#include "exec/cpu-common.h"
+#include "qapi/qapi-types-machine.h"
typedef void (QEMUBalloonEvent)(void *opaque, ram_addr_t target);
typedef void (QEMUBalloonStatus)(void *opaque, BalloonInfo *info);
@@ -22,7 +23,5 @@ typedef void (QEMUBalloonStatus)(void *opaque, BalloonInfo *info);
int qemu_add_balloon_handler(QEMUBalloonEvent *event_func,
QEMUBalloonStatus *stat_func, void *opaque);
void qemu_remove_balloon_handler(void *opaque);
-bool qemu_balloon_is_inhibited(void);
-void qemu_balloon_inhibit(bool state);
#endif
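/*
 * Illustrative sketch, not part of the diff above: registering balloon
 * callbacks with the prototypes declared in this header.  The callback bodies
 * and the way "actual" would be tracked are placeholders; only one balloon
 * device may register at a time, hence the error check.
 */
static void example_balloon_to_target(void *opaque, ram_addr_t target)
{
    /* inflate or deflate the guest balloon towards "target" bytes */
}

static void example_balloon_stat(void *opaque, BalloonInfo *info)
{
    info->actual = 0; /* report the currently available guest memory */
}

static void example_register_balloon(void *dev)
{
    if (qemu_add_balloon_handler(example_balloon_to_target,
                                 example_balloon_stat, dev) < 0) {
        /* another balloon device is already registered */
    }
}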
diff --git a/include/sysemu/block-backend-common.h b/include/sysemu/block-backend-common.h
new file mode 100644
index 0000000000..780cea7305
--- /dev/null
+++ b/include/sysemu/block-backend-common.h
@@ -0,0 +1,103 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_COMMON_H
+#define BLOCK_BACKEND_COMMON_H
+
+#include "qemu/iov.h"
+#include "block/throttle-groups.h"
+
+/*
+ * TODO Have to include block/block.h for a bunch of block layer
+ * types. Unfortunately, this pulls in the whole BlockDriverState
+ * API, which we don't want used by many BlockBackend users. Some of
+ * the types belong here, and the rest should be split into a common
+ * header and one for the BlockDriverState API.
+ */
+#include "block/block.h"
+
+/* Callbacks for block device models */
+typedef struct BlockDevOps {
+
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+ /*
+ * Runs when virtual media changed (monitor commands eject, change)
+ * Argument load is true on load and false on eject.
+ * Beware: doesn't run when a host device's physical media
+ * changes. Sure would be useful if it did.
+ * Device models with removable media must implement this callback.
+ */
+ void (*change_media_cb)(void *opaque, bool load, Error **errp);
+ /*
+ * Runs when an eject request is issued from the monitor, the tray
+ * is closed, and the medium is locked.
+ * Device models that do not implement is_medium_locked will not need
+ * this callback. Device models that can lock the medium or tray might
+ * want to implement the callback and unlock the tray when "force" is
+ * true, even if they do not support eject requests.
+ */
+ void (*eject_request_cb)(void *opaque, bool force);
+
+ /*
+ * Is the virtual medium locked into the device?
+ * Device models implement this only when the device has such a lock.
+ */
+ bool (*is_medium_locked)(void *opaque);
+
+ /*
+ * Runs when the backend receives a drain request.
+ */
+ void (*drained_begin)(void *opaque);
+ /*
+ * Runs when the backend's last drain request ends.
+ */
+ void (*drained_end)(void *opaque);
+ /*
+ * Is the device still busy?
+ */
+ bool (*drained_poll)(void *opaque);
+
+ /*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ /*
+ * Is the virtual tray open?
+ * Device models implement this only when the device has a tray.
+ */
+ bool (*is_tray_open)(void *opaque);
+
+ /*
+ * Runs when the size changed (e.g. monitor command block_resize)
+ */
+ void (*resize_cb)(void *opaque);
+} BlockDevOps;
+
+/*
+ * This struct is embedded in (the private) BlockBackend struct and contains
+ * fields that must be public. This is in particular for QLIST_ENTRY() and
+ * friends so that BlockBackends can be kept in lists outside block-backend.c
+ */
+typedef struct BlockBackendPublic {
+ ThrottleGroupMember throttle_group_member;
+} BlockBackendPublic;
+
+#endif /* BLOCK_BACKEND_COMMON_H */
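/*
 * Illustrative sketch, not part of the diff above: a device model wiring up a
 * minimal BlockDevOps.  The callback bodies are placeholders, and the
 * blk_set_dev_ops() call (declared in block-backend-global-state.h) is only
 * shown in a comment.
 */
static void example_resize_cb(void *opaque)
{
    /* re-read the backend size and update the guest-visible geometry */
}

static bool example_is_tray_open(void *opaque)
{
    return false; /* this model has no tray */
}

static const BlockDevOps example_block_ops = {
    .resize_cb    = example_resize_cb,     /* GS callback, runs under the BQL */
    .is_tray_open = example_is_tray_open,  /* I/O callback, thread-safe */
};

/* Typically installed from the device's realize function:
 *     blk_set_dev_ops(blk, &example_block_ops, dev);
 */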
diff --git a/include/sysemu/block-backend-global-state.h b/include/sysemu/block-backend-global-state.h
new file mode 100644
index 0000000000..49c12b0fa9
--- /dev/null
+++ b/include/sysemu/block-backend-global-state.h
@@ -0,0 +1,133 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_GLOBAL_STATE_H
+#define BLOCK_BACKEND_GLOBAL_STATE_H
+
+#include "block-backend-common.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm);
+
+BlockBackend * no_coroutine_fn
+blk_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
+ Error **errp);
+
+BlockBackend * coroutine_fn no_co_wrapper
+blk_co_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
+ Error **errp);
+
+BlockBackend * no_coroutine_fn
+blk_new_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+BlockBackend * coroutine_fn no_co_wrapper
+blk_co_new_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+int blk_get_refcnt(BlockBackend *blk);
+void blk_ref(BlockBackend *blk);
+
+void no_coroutine_fn blk_unref(BlockBackend *blk);
+void coroutine_fn no_co_wrapper blk_co_unref(BlockBackend *blk);
+
+void blk_remove_all_bs(void);
+BlockBackend *blk_by_name(const char *name);
+BlockBackend *blk_next(BlockBackend *blk);
+BlockBackend *blk_all_next(BlockBackend *blk);
+bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
+void monitor_remove_blk(BlockBackend *blk);
+
+BlockBackendPublic *blk_get_public(BlockBackend *blk);
+BlockBackend *blk_by_public(BlockBackendPublic *public);
+
+void blk_remove_bs(BlockBackend *blk);
+int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
+int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp);
+bool GRAPH_RDLOCK bdrv_has_blk(BlockDriverState *bs);
+bool GRAPH_RDLOCK bdrv_is_root_node(BlockDriverState *bs);
+int GRAPH_UNLOCKED blk_set_perm(BlockBackend *blk, uint64_t perm,
+ uint64_t shared_perm, Error **errp);
+void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
+
+void blk_iostatus_enable(BlockBackend *blk);
+BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
+void blk_iostatus_disable(BlockBackend *blk);
+void blk_iostatus_reset(BlockBackend *blk);
+int blk_attach_dev(BlockBackend *blk, DeviceState *dev);
+void blk_detach_dev(BlockBackend *blk, DeviceState *dev);
+DeviceState *blk_get_attached_dev(BlockBackend *blk);
+BlockBackend *blk_by_dev(void *dev);
+BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
+void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
+
+void blk_activate(BlockBackend *blk, Error **errp);
+
+int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
+void blk_aio_cancel(BlockAIOCB *acb);
+int blk_commit_all(void);
+bool blk_in_drain(BlockBackend *blk);
+void blk_drain(BlockBackend *blk);
+void blk_drain_all(void);
+void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
+ BlockdevOnError on_write_error);
+bool blk_supports_write_perm(BlockBackend *blk);
+bool blk_is_sg(BlockBackend *blk);
+void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
+int blk_get_flags(BlockBackend *blk);
+bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
+void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
+void blk_op_block_all(BlockBackend *blk, Error *reason);
+void blk_op_unblock_all(BlockBackend *blk, Error *reason);
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+ Error **errp);
+void blk_add_aio_context_notifier(BlockBackend *blk,
+ void (*attached_aio_context)(AioContext *new_context, void *opaque),
+ void (*detach_aio_context)(void *opaque), void *opaque);
+void blk_remove_aio_context_notifier(BlockBackend *blk,
+ void (*attached_aio_context)(AioContext *,
+ void *),
+ void (*detach_aio_context)(void *),
+ void *opaque);
+void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
+void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify);
+BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
+void blk_update_root_state(BlockBackend *blk);
+bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk);
+int blk_get_open_flags_from_root_state(BlockBackend *blk);
+
+int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
+ int64_t pos, int size);
+int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
+int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz);
+int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo);
+
+void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
+void blk_io_limits_disable(BlockBackend *blk);
+void blk_io_limits_enable(BlockBackend *blk, const char *group);
+void blk_io_limits_update_group(BlockBackend *blk, const char *group);
+void blk_set_force_allow_inactivate(BlockBackend *blk);
+
+bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp);
+void blk_unregister_buf(BlockBackend *blk, void *host, size_t size);
+
+const BdrvChild *blk_root(BlockBackend *blk);
+
+int blk_make_empty(BlockBackend *blk, Error **errp);
+
+#endif /* BLOCK_BACKEND_GLOBAL_STATE_H */
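/*
 * Illustrative sketch, not part of the diff above: opening an image through
 * the global-state API and releasing it again.  "image.qcow2" and the
 * BDRV_O_RDWR flag choice are arbitrary; error handling is reduced to a
 * single check.
 */
static BlockBackend *example_open_image(Error **errp)
{
    BlockBackend *blk;

    blk = blk_new_open("image.qcow2", NULL, NULL, BDRV_O_RDWR, errp);
    if (!blk) {
        return NULL;        /* *errp describes the failure */
    }

    /* ... attach a device model, install BlockDevOps, etc. ... */
    return blk;
}

static void example_close_image(BlockBackend *blk)
{
    blk_unref(blk);         /* drops the reference taken by blk_new_open() */
}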
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
new file mode 100644
index 0000000000..d174275a5c
--- /dev/null
+++ b/include/sysemu/block-backend-io.h
@@ -0,0 +1,230 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_IO_H
+#define BLOCK_BACKEND_IO_H
+
+#include "block-backend-common.h"
+#include "block/accounting.h"
+
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+const char *blk_name(const BlockBackend *blk);
+
+BlockDriverState *blk_bs(BlockBackend *blk);
+
+void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
+void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
+void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
+bool blk_iostatus_is_enabled(const BlockBackend *blk);
+
+char *blk_get_attached_dev_id(BlockBackend *blk);
+
+BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+
+BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_flush(BlockBackend *blk,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes,
+ BlockCompletionFunc *cb, void *opaque);
+void blk_aio_cancel_async(BlockAIOCB *acb);
+BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
+ BlockCompletionFunc *cb, void *opaque);
+
+void blk_inc_in_flight(BlockBackend *blk);
+void blk_dec_in_flight(BlockBackend *blk);
+
+bool coroutine_fn GRAPH_RDLOCK blk_co_is_inserted(BlockBackend *blk);
+bool co_wrapper_mixed_bdrv_rdlock blk_is_inserted(BlockBackend *blk);
+
+bool coroutine_fn GRAPH_RDLOCK blk_co_is_available(BlockBackend *blk);
+bool co_wrapper_mixed_bdrv_rdlock blk_is_available(BlockBackend *blk);
+
+void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked);
+void co_wrapper blk_lock_medium(BlockBackend *blk, bool locked);
+
+void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag);
+void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag);
+
+int64_t coroutine_fn blk_co_getlength(BlockBackend *blk);
+int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk);
+
+void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
+ uint64_t *nb_sectors_ptr);
+void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
+
+int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk);
+int64_t blk_nb_sectors(BlockBackend *blk);
+
+void *blk_try_blockalign(BlockBackend *blk, size_t size);
+void *blk_blockalign(BlockBackend *blk, size_t size);
+bool blk_is_writable(BlockBackend *blk);
+bool blk_enable_write_cache(BlockBackend *blk);
+BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
+BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
+ int error);
+void blk_error_action(BlockBackend *blk, BlockErrorAction action,
+ bool is_read, int error);
+void blk_iostatus_set_err(BlockBackend *blk, int error);
+int blk_get_max_iov(BlockBackend *blk);
+int blk_get_max_hw_iov(BlockBackend *blk);
+
+AioContext *blk_get_aio_context(BlockBackend *blk);
+BlockAcctStats *blk_get_stats(BlockBackend *blk);
+void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
+ BlockCompletionFunc *cb,
+ void *opaque, int ret);
+
+uint32_t blk_get_request_alignment(BlockBackend *blk);
+uint32_t blk_get_max_transfer(BlockBackend *blk);
+uint64_t blk_get_max_hw_transfer(BlockBackend *blk);
+
+int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
+ BlockBackend *blk_out, int64_t off_out,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+
+int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
+ BlockDriverState *base,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
+ BlockDriverState **file);
+int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
+ BlockDriverState *base,
+ bool include_base, int64_t offset,
+ int64_t bytes, int64_t *pnum);
+
+/*
+ * "I/O or GS" API functions. These functions can run without
+ * the BQL, but only in one specific iothread/main loop.
+ *
+ * See include/block/block-io.h for more information about
+ * the "I/O or GS" API.
+ */
+
+int co_wrapper_mixed blk_pread(BlockBackend *blk, int64_t offset,
+ int64_t bytes, void *buf,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
+ void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_preadv(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_preadv_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwrite(BlockBackend *blk, int64_t offset,
+ int64_t bytes, const void *buf,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwritev(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwritev_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwrite_compressed(BlockBackend *blk,
+ int64_t offset, int64_t bytes,
+ const void *buf);
+int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
+ int64_t bytes, const void *buf);
+
+int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes, BdrvRequestFlags flags);
+
+int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int co_wrapper_mixed blk_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len);
+int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len);
+int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int co_wrapper_mixed blk_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset,
+ int64_t bytes);
+int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
+ int64_t bytes);
+
+int co_wrapper_mixed blk_flush(BlockBackend *blk);
+int coroutine_fn blk_co_flush(BlockBackend *blk);
+
+int co_wrapper_mixed blk_ioctl(BlockBackend *blk, unsigned long int req,
+ void *buf);
+int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
+ void *buf);
+
+int co_wrapper_mixed blk_truncate(BlockBackend *blk, int64_t offset,
+ bool exact, PreallocMode prealloc,
+ BdrvRequestFlags flags, Error **errp);
+int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags,
+ Error **errp);
+
+#endif /* BLOCK_BACKEND_IO_H */
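/*
 * Illustrative sketch, not part of the diff above: the byte-based read/write
 * wrappers declared in this header.  Offsets and lengths are int64_t byte
 * counts and every call takes BdrvRequestFlags; 0 means "no special flags".
 */
static int example_duplicate_first_sector(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    ret = blk_pread(blk, 0, sizeof(buf), buf, 0);      /* read sector 0 */
    if (ret < 0) {
        return ret;
    }
    return blk_pwrite(blk, 512, sizeof(buf), buf, 0);  /* copy it to sector 1 */
}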
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index c96bcdee14..038be9fc40 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -13,228 +13,9 @@
#ifndef BLOCK_BACKEND_H
#define BLOCK_BACKEND_H
-#include "qemu/iov.h"
-#include "block/throttle-groups.h"
+#include "block-backend-global-state.h"
+#include "block-backend-io.h"
-/*
- * TODO Have to include block/block.h for a bunch of block layer
- * types. Unfortunately, this pulls in the whole BlockDriverState
- * API, which we don't want used by many BlockBackend users. Some of
- * the types belong here, and the rest should be split into a common
- * header and one for the BlockDriverState API.
- */
-#include "block/block.h"
-
-/* Callbacks for block device models */
-typedef struct BlockDevOps {
- /*
- * Runs when virtual media changed (monitor commands eject, change)
- * Argument load is true on load and false on eject.
- * Beware: doesn't run when a host device's physical media
- * changes. Sure would be useful if it did.
- * Device models with removable media must implement this callback.
- */
- void (*change_media_cb)(void *opaque, bool load, Error **errp);
- /*
- * Runs when an eject request is issued from the monitor, the tray
- * is closed, and the medium is locked.
- * Device models that do not implement is_medium_locked will not need
- * this callback. Device models that can lock the medium or tray might
- * want to implement the callback and unlock the tray when "force" is
- * true, even if they do not support eject requests.
- */
- void (*eject_request_cb)(void *opaque, bool force);
- /*
- * Is the virtual tray open?
- * Device models implement this only when the device has a tray.
- */
- bool (*is_tray_open)(void *opaque);
- /*
- * Is the virtual medium locked into the device?
- * Device models implement this only when device has such a lock.
- */
- bool (*is_medium_locked)(void *opaque);
- /*
- * Runs when the size changed (e.g. monitor command block_resize)
- */
- void (*resize_cb)(void *opaque);
- /*
- * Runs when the backend receives a drain request.
- */
- void (*drained_begin)(void *opaque);
- /*
- * Runs when the backend's last drain request ends.
- */
- void (*drained_end)(void *opaque);
-} BlockDevOps;
-
-/* This struct is embedded in (the private) BlockBackend struct and contains
- * fields that must be public. This is in particular for QLIST_ENTRY() and
- * friends so that BlockBackends can be kept in lists outside block-backend.c
- * */
-typedef struct BlockBackendPublic {
- ThrottleGroupMember throttle_group_member;
-} BlockBackendPublic;
-
-BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm);
-BlockBackend *blk_new_open(const char *filename, const char *reference,
- QDict *options, int flags, Error **errp);
-int blk_get_refcnt(BlockBackend *blk);
-void blk_ref(BlockBackend *blk);
-void blk_unref(BlockBackend *blk);
-void blk_remove_all_bs(void);
-const char *blk_name(const BlockBackend *blk);
-BlockBackend *blk_by_name(const char *name);
-BlockBackend *blk_next(BlockBackend *blk);
-BlockBackend *blk_all_next(BlockBackend *blk);
-bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
-void monitor_remove_blk(BlockBackend *blk);
-
-BlockBackendPublic *blk_get_public(BlockBackend *blk);
-BlockBackend *blk_by_public(BlockBackendPublic *public);
-
-BlockDriverState *blk_bs(BlockBackend *blk);
-void blk_remove_bs(BlockBackend *blk);
-int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
-bool bdrv_has_blk(BlockDriverState *bs);
-bool bdrv_is_root_node(BlockDriverState *bs);
-int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
- Error **errp);
-void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
-
-void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
-void blk_iostatus_enable(BlockBackend *blk);
-bool blk_iostatus_is_enabled(const BlockBackend *blk);
-BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
-void blk_iostatus_disable(BlockBackend *blk);
-void blk_iostatus_reset(BlockBackend *blk);
-void blk_iostatus_set_err(BlockBackend *blk, int error);
-int blk_attach_dev(BlockBackend *blk, DeviceState *dev);
-void blk_attach_dev_legacy(BlockBackend *blk, void *dev);
-void blk_detach_dev(BlockBackend *blk, void *dev);
-void *blk_get_attached_dev(BlockBackend *blk);
-char *blk_get_attached_dev_id(BlockBackend *blk);
-BlockBackend *blk_by_dev(void *dev);
-BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
-void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
-int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
- int bytes);
-int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
- unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
- unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int bytes, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
-int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes);
-int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes,
- BdrvRequestFlags flags);
-int64_t blk_getlength(BlockBackend *blk);
-void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
-int64_t blk_nb_sectors(BlockBackend *blk);
-BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
- QEMUIOVector *qiov, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
- QEMUIOVector *qiov, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_flush(BlockBackend *blk,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int bytes,
- BlockCompletionFunc *cb, void *opaque);
-void blk_aio_cancel(BlockAIOCB *acb);
-void blk_aio_cancel_async(BlockAIOCB *acb);
-int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
-int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
-BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
- BlockCompletionFunc *cb, void *opaque);
-int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes);
-int blk_co_flush(BlockBackend *blk);
-int blk_flush(BlockBackend *blk);
-int blk_commit_all(void);
-void blk_drain(BlockBackend *blk);
-void blk_drain_all(void);
-void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
- BlockdevOnError on_write_error);
-BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
-BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
- int error);
-void blk_error_action(BlockBackend *blk, BlockErrorAction action,
- bool is_read, int error);
-bool blk_is_read_only(BlockBackend *blk);
-bool blk_is_sg(BlockBackend *blk);
-bool blk_enable_write_cache(BlockBackend *blk);
-void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
-void blk_invalidate_cache(BlockBackend *blk, Error **errp);
-bool blk_is_inserted(BlockBackend *blk);
-bool blk_is_available(BlockBackend *blk);
-void blk_lock_medium(BlockBackend *blk, bool locked);
-void blk_eject(BlockBackend *blk, bool eject_flag);
-int blk_get_flags(BlockBackend *blk);
-uint32_t blk_get_max_transfer(BlockBackend *blk);
-int blk_get_max_iov(BlockBackend *blk);
-void blk_set_guest_block_size(BlockBackend *blk, int align);
-void *blk_try_blockalign(BlockBackend *blk, size_t size);
-void *blk_blockalign(BlockBackend *blk, size_t size);
-bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
-void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
-void blk_op_block_all(BlockBackend *blk, Error *reason);
-void blk_op_unblock_all(BlockBackend *blk, Error *reason);
-AioContext *blk_get_aio_context(BlockBackend *blk);
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context);
-void blk_add_aio_context_notifier(BlockBackend *blk,
- void (*attached_aio_context)(AioContext *new_context, void *opaque),
- void (*detach_aio_context)(void *opaque), void *opaque);
-void blk_remove_aio_context_notifier(BlockBackend *blk,
- void (*attached_aio_context)(AioContext *,
- void *),
- void (*detach_aio_context)(void *),
- void *opaque);
-void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
-void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify);
-void blk_io_plug(BlockBackend *blk);
-void blk_io_unplug(BlockBackend *blk);
-BlockAcctStats *blk_get_stats(BlockBackend *blk);
-BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
-void blk_update_root_state(BlockBackend *blk);
-bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk);
-int blk_get_open_flags_from_root_state(BlockBackend *blk);
-
-void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
- BlockCompletionFunc *cb, void *opaque);
-int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
- int bytes);
-int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
- Error **errp);
-int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes);
-int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
- int64_t pos, int size);
-int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
-int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz);
-int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo);
-BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
- BlockCompletionFunc *cb,
- void *opaque, int ret);
-
-void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
-void blk_io_limits_disable(BlockBackend *blk);
-void blk_io_limits_enable(BlockBackend *blk, const char *group);
-void blk_io_limits_update_group(BlockBackend *blk, const char *group);
-void blk_set_force_allow_inactivate(BlockBackend *blk);
-
-void blk_register_buf(BlockBackend *blk, void *host, size_t size);
-void blk_unregister_buf(BlockBackend *blk, void *host);
-
-int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
- BlockBackend *blk_out, int64_t off_out,
- int bytes, BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
+/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */
#endif
diff --git a/include/sysemu/block-ram-registrar.h b/include/sysemu/block-ram-registrar.h
new file mode 100644
index 0000000000..d8b2f7942b
--- /dev/null
+++ b/include/sysemu/block-ram-registrar.h
@@ -0,0 +1,37 @@
+/*
+ * BlockBackend RAM Registrar
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef BLOCK_RAM_REGISTRAR_H
+#define BLOCK_RAM_REGISTRAR_H
+
+#include "exec/ramlist.h"
+
+/**
+ * struct BlockRAMRegistrar:
+ *
+ * Keeps RAMBlock memory registered with a BlockBackend using
+ * blk_register_buf() including hotplugged memory.
+ *
+ * Emulated devices or other BlockBackend users initialize a BlockRAMRegistrar
+ * with blk_ram_registrar_init() before submitting I/O requests with the
+ * BDRV_REQ_REGISTERED_BUF flag set.
+ */
+typedef struct {
+ BlockBackend *blk;
+ RAMBlockNotifier notifier;
+ bool ok;
+} BlockRAMRegistrar;
+
+void blk_ram_registrar_init(BlockRAMRegistrar *r, BlockBackend *blk);
+void blk_ram_registrar_destroy(BlockRAMRegistrar *r);
+
+/* Have all RAMBlocks been registered successfully? */
+static inline bool blk_ram_registrar_ok(BlockRAMRegistrar *r)
+{
+ return r->ok;
+}
+
+#endif /* BLOCK_RAM_REGISTRAR_H */
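
An illustrative usage sketch, not taken from the patch: a hypothetical MyDevice that owns a BlockBackend keeps its RAM registered and only sets BDRV_REQ_REGISTERED_BUF when registration succeeded (blk_aio_pwritev() comes from the block layer headers).

    /* Sketch only: MyDevice and its realize/unrealize hooks are assumptions. */
    typedef struct MyDevice {
        BlockBackend *blk;
        BlockRAMRegistrar ram_registrar;
    } MyDevice;

    static void my_device_realize(MyDevice *d)
    {
        blk_ram_registrar_init(&d->ram_registrar, d->blk);
    }

    static BlockAIOCB *my_device_write(MyDevice *d, int64_t offset,
                                       QEMUIOVector *qiov,
                                       BlockCompletionFunc *cb, void *opaque)
    {
        /* Only pass the flag when every RAMBlock was registered successfully. */
        BdrvRequestFlags flags =
            blk_ram_registrar_ok(&d->ram_registrar) ? BDRV_REQ_REGISTERED_BUF : 0;

        return blk_aio_pwritev(d->blk, offset, qiov, flags, cb, opaque);
    }

    static void my_device_unrealize(MyDevice *d)
    {
        blk_ram_registrar_destroy(&d->ram_registrar);
    }
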
diff --git a/include/sysemu/blockdev.h b/include/sysemu/blockdev.h
index d34c4920dc..3211b16513 100644
--- a/include/sysemu/blockdev.h
+++ b/include/sysemu/blockdev.h
@@ -13,9 +13,6 @@
#include "block/block.h"
#include "qemu/queue.h"
-void blockdev_mark_auto_del(BlockBackend *blk);
-void blockdev_auto_del(BlockBackend *blk);
-
typedef enum {
IF_DEFAULT = -1, /* for use with drive_add() only */
/*
@@ -38,6 +35,16 @@ struct DriveInfo {
QTAILQ_ENTRY(DriveInfo) next;
};
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+void blockdev_mark_auto_del(BlockBackend *blk);
+void blockdev_auto_del(BlockBackend *blk);
+
DriveInfo *blk_legacy_dinfo(BlockBackend *blk);
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo);
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo);
@@ -48,17 +55,10 @@ DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit);
void drive_check_orphaned(void);
DriveInfo *drive_get_by_index(BlockInterfaceType type, int index);
int drive_get_max_bus(BlockInterfaceType type);
-int drive_get_max_devs(BlockInterfaceType type);
-DriveInfo *drive_get_next(BlockInterfaceType type);
-QemuOpts *drive_def(const char *optstr);
QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
const char *optstr);
DriveInfo *drive_new(QemuOpts *arg, BlockInterfaceType block_default_type,
Error **errp);
-/* device-hotplug */
-
-void hmp_commit(Monitor *mon, const QDict *qdict);
-void hmp_drive_del(Monitor *mon, const QDict *qdict);
#endif
diff --git a/include/sysemu/bt.h b/include/sysemu/bt.h
deleted file mode 100644
index ddb05cd109..0000000000
--- a/include/sysemu/bt.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef SYSEMU_BT_H
-#define SYSEMU_BT_H
-
-/* BT HCI info */
-
-struct HCIInfo {
- int (*bdaddr_set)(struct HCIInfo *hci, const uint8_t *bd_addr);
- void (*cmd_send)(struct HCIInfo *hci, const uint8_t *data, int len);
- void (*sco_send)(struct HCIInfo *hci, const uint8_t *data, int len);
- void (*acl_send)(struct HCIInfo *hci, const uint8_t *data, int len);
- void *opaque;
- void (*evt_recv)(void *opaque, const uint8_t *data, int len);
- void (*acl_recv)(void *opaque, const uint8_t *data, int len);
-};
-
-/* bt-host.c */
-struct HCIInfo *bt_host_hci(const char *id);
-struct HCIInfo *qemu_next_hci(void);
-
-#endif
diff --git a/include/sysemu/cpu-throttle.h b/include/sysemu/cpu-throttle.h
new file mode 100644
index 0000000000..d65bdef6d0
--- /dev/null
+++ b/include/sysemu/cpu-throttle.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#ifndef SYSEMU_CPU_THROTTLE_H
+#define SYSEMU_CPU_THROTTLE_H
+
+#include "qemu/timer.h"
+
+/**
+ * cpu_throttle_init:
+ *
+ * Initialize the CPU throttling API.
+ */
+void cpu_throttle_init(void);
+
+/**
+ * cpu_throttle_set:
+ * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
+ *
+ * Throttles all vcpus by forcing them to sleep for the given percentage of
+ * time. A @new_throttle_pct of 25 roughly corresponds to a 75% duty cycle
+ * (example: 10ms sleep for every 30ms awake).
+ *
+ * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
+ * Once the throttling starts, it will remain in effect until cpu_throttle_stop
+ * is called.
+ */
+void cpu_throttle_set(int new_throttle_pct);
+
+/**
+ * cpu_throttle_stop:
+ *
+ * Stops the vcpu throttling started by cpu_throttle_set.
+ */
+void cpu_throttle_stop(void);
+
+/**
+ * cpu_throttle_active:
+ *
+ * Returns: %true if the vcpus are currently being throttled, %false otherwise.
+ */
+bool cpu_throttle_active(void);
+
+/**
+ * cpu_throttle_get_percentage:
+ *
+ * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
+ *
+ * Returns: The throttle percentage in range 1 to 99.
+ */
+int cpu_throttle_get_percentage(void);
+
+#endif /* SYSEMU_CPU_THROTTLE_H */
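
A short caller-side sketch of this API; the ramp policy and the reporting step are assumptions for illustration.

    /* Sketch only: throttle the guest and later release it. */
    static void throttle_ramp_example(int pct)
    {
        if (pct >= 1 && pct <= 99) {        /* valid range per cpu_throttle_set() */
            cpu_throttle_set(pct);          /* stays in effect until stopped */
        }
        if (cpu_throttle_active()) {
            int now = cpu_throttle_get_percentage();
            (void)now;                      /* e.g. report in migration status */
        }
    }

    static void throttle_done_example(void)
    {
        cpu_throttle_stop();
    }
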
diff --git a/include/sysemu/cpu-timers-internal.h b/include/sysemu/cpu-timers-internal.h
new file mode 100644
index 0000000000..94bb7394c5
--- /dev/null
+++ b/include/sysemu/cpu-timers-internal.h
@@ -0,0 +1,71 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TIMERS_STATE_H
+#define TIMERS_STATE_H
+
+/* timers state, for sharing between icount and cpu-timers */
+
+typedef struct TimersState {
+ /* Protected by BQL. */
+ int64_t cpu_ticks_prev;
+ int64_t cpu_ticks_offset;
+
+ /*
+ * Protect fields that can be respectively read outside the
+ * BQL, and written from multiple threads.
+ */
+ QemuSeqLock vm_clock_seqlock;
+ QemuSpin vm_clock_lock;
+
+ int16_t cpu_ticks_enabled;
+
+ /* Conversion factor from emulated instructions to virtual clock ticks. */
+ int16_t icount_time_shift;
+ /* Icount delta used for shift auto adjust. */
+ int64_t last_delta;
+
+ /* Compensate for varying guest execution speed. */
+ aligned_int64_t qemu_icount_bias;
+
+ int64_t vm_clock_warp_start;
+ int64_t cpu_clock_offset;
+
+ /* Only written by TCG thread */
+ int64_t qemu_icount;
+
+ /* for adjusting icount */
+ QEMUTimer *icount_rt_timer;
+ QEMUTimer *icount_vm_timer;
+ QEMUTimer *icount_warp_timer;
+} TimersState;
+
+extern TimersState timers_state;
+
+/*
+ * icount needs this internal helper from cpu-timers when adjusting the
+ * icount shift.
+ */
+int64_t cpu_get_clock_locked(void);
+
+#endif /* TIMERS_STATE_H */
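
The seqlock-protected fields above are meant to be read without the BQL. A reader sketch, assuming the standard seqlock_read_begin()/seqlock_read_retry() helpers from qemu/seqlock.h are used:

    /* Sketch only: lock-free read of a field guarded by vm_clock_seqlock. */
    static int64_t read_vm_clock_example(void)
    {
        int64_t val;
        unsigned start;

        do {
            start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
            val = cpu_get_clock_locked();
        } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

        return val;
    }
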
diff --git a/include/sysemu/cpu-timers.h b/include/sysemu/cpu-timers.h
new file mode 100644
index 0000000000..d86738a378
--- /dev/null
+++ b/include/sysemu/cpu-timers.h
@@ -0,0 +1,103 @@
+/*
+ * CPU timers state API
+ *
+ * Copyright 2020 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef SYSEMU_CPU_TIMERS_H
+#define SYSEMU_CPU_TIMERS_H
+
+#include "qemu/timer.h"
+
+/* init the whole cpu timers API, including icount, ticks, and cpu_throttle */
+void cpu_timers_init(void);
+
+/* icount - Instruction Counter API */
+
+/**
+ * ICountMode: icount enablement state:
+ *
+ * @ICOUNT_DISABLED: Disabled - Do not count executed instructions.
+ * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option
+ * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift
+ */
+typedef enum {
+ ICOUNT_DISABLED = 0,
+ ICOUNT_PRECISE,
+ ICOUNT_ADAPTATIVE,
+} ICountMode;
+
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+extern ICountMode use_icount;
+#define icount_enabled() (use_icount)
+#else
+#define icount_enabled() ICOUNT_DISABLED
+#endif
+
+/*
+ * Update the icount with the executed instructions. Called by
+ * cpus-tcg vCPU thread so the main-loop can see time has moved forward.
+ */
+void icount_update(CPUState *cpu);
+
+/* get raw icount value */
+int64_t icount_get_raw(void);
+
+/* return the virtual CPU time in ns, based on the instruction counter. */
+int64_t icount_get(void);
+/*
+ * convert an instruction counter value to ns, based on the icount shift.
+ * This shift is set as a fixed value with the icount "shift" option
+ * (precise mode), or it is constantly approximated and corrected at
+ * runtime in adaptive mode.
+ */
+int64_t icount_to_ns(int64_t icount);
+
+/**
+ * icount_configure: configure the icount options, including "shift"
+ * @opts: Options to parse
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Return: true on success, else false setting @errp with error
+ */
+bool icount_configure(QemuOpts *opts, Error **errp);
+
+/* used by tcg vcpu thread to calc icount budget */
+int64_t icount_round(int64_t count);
+
+/* if the CPUs are idle, start accounting real time to virtual clock. */
+void icount_start_warp_timer(void);
+void icount_account_warp_timer(void);
+void icount_notify_exit(void);
+
+/*
+ * CPU Ticks and Clock
+ */
+
+/* Caller must hold BQL */
+void cpu_enable_ticks(void);
+/* Caller must hold BQL */
+void cpu_disable_ticks(void);
+
+/*
+ * return the time elapsed in VM between vm_start and vm_stop.
+ * cpu_get_ticks() uses units of the host CPU cycle counter.
+ */
+int64_t cpu_get_ticks(void);
+
+/*
+ * Returns the monotonic time elapsed in VM, i.e.,
+ * the time between vm_start and vm_stop
+ */
+int64_t cpu_get_clock(void);
+
+void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
+
+/* get the VIRTUAL clock and VM elapsed ticks via the cpus accel interface */
+int64_t cpus_get_virtual_clock(void);
+int64_t cpus_get_elapsed_ticks(void);
+
+#endif /* SYSEMU_CPU_TIMERS_H */
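
A small sketch of how a caller might use the icount accessors above; the fallback to the host-clock based VM time is an assumption for illustration.

    /* Sketch: guest virtual time in ns, whether or not icount is in use. */
    static int64_t guest_virtual_time_ns_example(void)
    {
        if (icount_enabled()) {
            return icount_get();          /* derived from executed instructions */
        }
        return cpu_get_clock();           /* host-clock based VM time */
    }

    /* Sketch: convert a raw instruction count with the current shift. */
    static int64_t raw_icount_to_ns_example(void)
    {
        return icount_to_ns(icount_get_raw());
    }
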
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index 731756d948..b4a566cfe7 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -1,45 +1,53 @@
#ifndef QEMU_CPUS_H
#define QEMU_CPUS_H
-#include "qemu/timer.h"
+#include "sysemu/accel-ops.h"
+
+/* register accel-specific operations */
+void cpus_register_accel(const AccelOpsClass *i);
+
+/* return the registered accel-specific operations */
+const AccelOpsClass *cpus_get_accel(void);
+
+/* accel/dummy-cpus.c */
+
+/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */
+void dummy_start_vcpu_thread(CPUState *);
+
+/* interface available for cpus accelerator threads */
+
+/* For temporary buffers for forming a name */
+#define VCPU_THREAD_NAME_SIZE 16
+
+void cpus_kick_thread(CPUState *cpu);
+bool cpu_work_list_empty(CPUState *cpu);
+bool cpu_thread_is_idle(CPUState *cpu);
+bool all_cpu_threads_idle(void);
+bool cpu_can_run(CPUState *cpu);
+void qemu_wait_io_event_common(CPUState *cpu);
+void qemu_wait_io_event(CPUState *cpu);
+void cpu_thread_signal_created(CPUState *cpu);
+void cpu_thread_signal_destroyed(CPUState *cpu);
+void cpu_handle_guest_debug(CPUState *cpu);
+
+/* end interface for cpus accelerator threads */
-/* cpus.c */
bool qemu_in_vcpu_thread(void);
void qemu_init_cpu_loop(void);
void resume_all_vcpus(void);
void pause_all_vcpus(void);
void cpu_stop_current(void);
-void cpu_ticks_init(void);
-void configure_icount(QemuOpts *opts, Error **errp);
-extern int use_icount;
extern int icount_align_option;
-/* drift information for info jit command */
-extern int64_t max_delay;
-extern int64_t max_advance;
-void dump_drift_info(FILE *f, fprintf_function cpu_fprintf);
-
/* Unblock cpu */
void qemu_cpu_kick_self(void);
-void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
+
+bool cpus_are_resettable(void);
void cpu_synchronize_all_states(void);
void cpu_synchronize_all_post_reset(void);
void cpu_synchronize_all_post_init(void);
void cpu_synchronize_all_pre_loadvm(void);
-void qtest_clock_warp(int64_t dest);
-
-#ifndef CONFIG_USER_ONLY
-/* vl.c */
-/* *-user doesn't have configurable SMP topology */
-extern int smp_cores;
-extern int smp_threads;
-#endif
-
-void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg);
-
-void qemu_tcg_configure(QemuOpts *opts, Error **errp);
-
#endif
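
A sketch of the vCPU-thread side of this interface, assuming a hypothetical accelerator with a my_accel_exec_vcpu() entry point; BQL handling and RCU registration are elided, and cpu->unplug / EXCP_DEBUG are assumed from CPUState and cpu-all.h.

    static void *my_accel_vcpu_thread_fn(void *arg)
    {
        CPUState *cpu = arg;

        cpu_thread_signal_created(cpu);       /* main thread waits for this */

        do {
            if (cpu_can_run(cpu)) {
                if (my_accel_exec_vcpu(cpu) == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                }
            }
            qemu_wait_io_event(cpu);          /* sleep until there is work */
        } while (!cpu->unplug || cpu_can_run(cpu));

        cpu_thread_signal_destroyed(cpu);
        return NULL;
    }
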
diff --git a/include/sysemu/cryptodev-vhost-user.h b/include/sysemu/cryptodev-vhost-user.h
index 6debf53fc5..60710502c2 100644
--- a/include/sysemu/cryptodev-vhost-user.h
+++ b/include/sysemu/cryptodev-vhost-user.h
@@ -9,7 +9,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,9 +20,12 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
+
#ifndef CRYPTODEV_VHOST_USER_H
#define CRYPTODEV_VHOST_USER_H
+#include "sysemu/cryptodev-vhost.h"
+
#define VHOST_USER_MAX_AUTH_KEY_LEN 512
#define VHOST_USER_MAX_CIPHER_KEY_LEN 64
diff --git a/include/sysemu/cryptodev-vhost.h b/include/sysemu/cryptodev-vhost.h
index fb26b86977..4c3c22acae 100644
--- a/include/sysemu/cryptodev-vhost.h
+++ b/include/sysemu/cryptodev-vhost.h
@@ -10,7 +10,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,7 +24,6 @@
#ifndef CRYPTODEV_VHOST_H
#define CRYPTODEV_VHOST_H
-#include "qemu-common.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "chardev/char.h"
@@ -80,7 +79,7 @@ cryptodev_vhost_init(
* cryptodev_vhost_cleanup:
* @crypto: the cryptodev backend common vhost object
*
- * Clean the resouce associated with @crypto that realizaed
+ * Clean the resource associated with @crypto that was realized
* by cryptodev_vhost_init()
*
*/
diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h
index faeb6f891a..96d3998b93 100644
--- a/include/sysemu/cryptodev.h
+++ b/include/sysemu/cryptodev.h
@@ -9,7 +9,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,8 +23,10 @@
#ifndef CRYPTODEV_H
#define CRYPTODEV_H
+#include "qemu/queue.h"
+#include "qemu/throttle.h"
#include "qom/object.h"
-#include "qemu-common.h"
+#include "qapi/qapi-types-cryptodev.h"
/**
* CryptoDevBackend:
@@ -37,15 +39,8 @@
#define TYPE_CRYPTODEV_BACKEND "cryptodev-backend"
-#define CRYPTODEV_BACKEND(obj) \
- OBJECT_CHECK(CryptoDevBackend, \
- (obj), TYPE_CRYPTODEV_BACKEND)
-#define CRYPTODEV_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(CryptoDevBackendClass, \
- (obj), TYPE_CRYPTODEV_BACKEND)
-#define CRYPTODEV_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(CryptoDevBackendClass, \
- (klass), TYPE_CRYPTODEV_BACKEND)
+OBJECT_DECLARE_TYPE(CryptoDevBackend, CryptoDevBackendClass,
+ CRYPTODEV_BACKEND)
#define MAX_CRYPTO_QUEUE_NUM 64
@@ -54,17 +49,10 @@ typedef struct CryptoDevBackendConf CryptoDevBackendConf;
typedef struct CryptoDevBackendPeers CryptoDevBackendPeers;
typedef struct CryptoDevBackendClient
CryptoDevBackendClient;
-typedef struct CryptoDevBackend CryptoDevBackend;
-
-enum CryptoDevBackendAlgType {
- CRYPTODEV_BACKEND_ALG_SYM,
- CRYPTODEV_BACKEND_ALG__MAX,
-};
/**
* CryptoDevBackendSymSessionInfo:
*
- * @op_code: operation code (refer to virtio_crypto.h)
* @cipher_alg: algorithm type of CIPHER
* @key_len: byte length of cipher key
* @hash_alg: algorithm type of HASH/MAC
@@ -82,7 +70,6 @@ enum CryptoDevBackendAlgType {
*/
typedef struct CryptoDevBackendSymSessionInfo {
/* corresponding with virtio crypto spec */
- uint32_t op_code;
uint32_t cipher_alg;
uint32_t key_len;
uint32_t hash_alg;
@@ -98,10 +85,36 @@ typedef struct CryptoDevBackendSymSessionInfo {
} CryptoDevBackendSymSessionInfo;
/**
+ * CryptoDevBackendAsymSessionInfo:
+ */
+typedef struct CryptoDevBackendRsaPara {
+ uint32_t padding_algo;
+ uint32_t hash_algo;
+} CryptoDevBackendRsaPara;
+
+typedef struct CryptoDevBackendAsymSessionInfo {
+ /* corresponding with virtio crypto spec */
+ uint32_t algo;
+ uint32_t keytype;
+ uint32_t keylen;
+ uint8_t *key;
+ union {
+ CryptoDevBackendRsaPara rsa;
+ } u;
+} CryptoDevBackendAsymSessionInfo;
+
+typedef struct CryptoDevBackendSessionInfo {
+ uint32_t op_code;
+ union {
+ CryptoDevBackendSymSessionInfo sym_sess_info;
+ CryptoDevBackendAsymSessionInfo asym_sess_info;
+ } u;
+ uint64_t session_id;
+} CryptoDevBackendSessionInfo;
+
+/**
* CryptoDevBackendSymOpInfo:
*
- * @session_id: session index which was previously
- * created by cryptodev_backend_sym_create_session()
* @aad_len: byte length of additional authenticated data
* @iv_len: byte length of initialization vector or counter
* @src_len: byte length of source data
@@ -127,7 +140,6 @@ typedef struct CryptoDevBackendSymSessionInfo {
*
*/
typedef struct CryptoDevBackendSymOpInfo {
- uint64_t session_id;
uint32_t aad_len;
uint32_t iv_len;
uint32_t src_len;
@@ -143,37 +155,66 @@ typedef struct CryptoDevBackendSymOpInfo {
uint8_t *dst;
uint8_t *aad_data;
uint8_t *digest_result;
- uint8_t data[0];
+ uint8_t data[];
} CryptoDevBackendSymOpInfo;
-typedef struct CryptoDevBackendClass {
+
+/**
+ * CryptoDevBackendAsymOpInfo:
+ *
+ * @src_len: byte length of source data
+ * @dst_len: byte length of destination data
+ * @src: point to the source data
+ * @dst: point to the destination data
+ *
+ */
+typedef struct CryptoDevBackendAsymOpInfo {
+ uint32_t src_len;
+ uint32_t dst_len;
+ uint8_t *src;
+ uint8_t *dst;
+} CryptoDevBackendAsymOpInfo;
+
+typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret);
+
+typedef struct CryptoDevBackendOpInfo {
+ QCryptodevBackendAlgType algtype;
+ uint32_t op_code;
+ uint32_t queue_index;
+ CryptoDevCompletionFunc cb;
+ void *opaque; /* argument for cb */
+ uint64_t session_id;
+ union {
+ CryptoDevBackendSymOpInfo *sym_op_info;
+ CryptoDevBackendAsymOpInfo *asym_op_info;
+ } u;
+ QTAILQ_ENTRY(CryptoDevBackendOpInfo) next;
+} CryptoDevBackendOpInfo;
+
+struct CryptoDevBackendClass {
ObjectClass parent_class;
void (*init)(CryptoDevBackend *backend, Error **errp);
void (*cleanup)(CryptoDevBackend *backend, Error **errp);
- int64_t (*create_session)(CryptoDevBackend *backend,
- CryptoDevBackendSymSessionInfo *sess_info,
- uint32_t queue_index, Error **errp);
+ int (*create_session)(CryptoDevBackend *backend,
+ CryptoDevBackendSessionInfo *sess_info,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
int (*close_session)(CryptoDevBackend *backend,
- uint64_t session_id,
- uint32_t queue_index, Error **errp);
- int (*do_sym_op)(CryptoDevBackend *backend,
- CryptoDevBackendSymOpInfo *op_info,
- uint32_t queue_index, Error **errp);
-} CryptoDevBackendClass;
-
-typedef enum CryptoDevBackendOptionsType {
- CRYPTODEV_BACKEND_TYPE_NONE = 0,
- CRYPTODEV_BACKEND_TYPE_BUILTIN = 1,
- CRYPTODEV_BACKEND_TYPE_VHOST_USER = 2,
- CRYPTODEV_BACKEND_TYPE__MAX,
-} CryptoDevBackendOptionsType;
+ uint64_t session_id,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
+ int (*do_op)(CryptoDevBackend *backend,
+ CryptoDevBackendOpInfo *op_info);
+};
struct CryptoDevBackendClient {
- CryptoDevBackendOptionsType type;
- char *model;
- char *name;
+ QCryptodevBackendType type;
char *info_str;
unsigned int queue_index;
int vring_enable;
@@ -198,6 +239,7 @@ struct CryptoDevBackendConf {
uint32_t mac_algo_l;
uint32_t mac_algo_h;
uint32_t aead_algo;
+ uint32_t akcipher_algo;
/* Maximum length of cipher key */
uint32_t max_cipher_key_len;
/* Maximum length of authenticated key */
@@ -206,6 +248,24 @@ struct CryptoDevBackendConf {
uint64_t max_size;
};
+typedef struct CryptodevBackendSymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+} CryptodevBackendSymStat;
+
+typedef struct CryptodevBackendAsymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t sign_ops;
+ int64_t verify_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+ int64_t sign_bytes;
+ int64_t verify_bytes;
+} CryptodevBackendAsymStat;
+
struct CryptoDevBackend {
Object parent_obj;
@@ -213,15 +273,48 @@ struct CryptoDevBackend {
/* Tag the cryptodev backend is used by virtio-crypto or not */
bool is_used;
CryptoDevBackendConf conf;
+ CryptodevBackendSymStat *sym_stat;
+ CryptodevBackendAsymStat *asym_stat;
+
+ ThrottleState ts;
+ ThrottleTimers tt;
+ ThrottleConfig tc;
+ QTAILQ_HEAD(, CryptoDevBackendOpInfo) opinfos;
};
+#define CryptodevSymStatInc(be, op, bytes) do { \
+ be->sym_stat->op##_bytes += (bytes); \
+ be->sym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevSymStatIncEncrypt(be, bytes) \
+ CryptodevSymStatInc(be, encrypt, bytes)
+
+#define CryptodevSymStatIncDecrypt(be, bytes) \
+ CryptodevSymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatInc(be, op, bytes) do { \
+ be->asym_stat->op##_bytes += (bytes); \
+ be->asym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevAsymStatIncEncrypt(be, bytes) \
+ CryptodevAsymStatInc(be, encrypt, bytes)
+
+#define CryptodevAsymStatIncDecrypt(be, bytes) \
+ CryptodevAsymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatIncSign(be, bytes) \
+ CryptodevAsymStatInc(be, sign, bytes)
+
+#define CryptodevAsymStatIncVerify(be, bytes) \
+ CryptodevAsymStatInc(be, verify, bytes)
+
+
/**
* cryptodev_backend_new_client:
- * @model: the cryptodev backend model
- * @name: the cryptodev backend name, can be NULL
*
- * Creates a new cryptodev backend client object
- * with the @name in the model @model.
+ * Creates a new cryptodev backend client object.
*
* The returned object must be released with
* cryptodev_backend_free_client() when no
@@ -229,9 +322,8 @@ struct CryptoDevBackend {
*
* Returns: a new cryptodev backend client object
*/
-CryptoDevBackendClient *
-cryptodev_backend_new_client(const char *model,
- const char *name);
+CryptoDevBackendClient *cryptodev_backend_new_client(void);
+
/**
* cryptodev_backend_free_client:
* @cc: the cryptodev backend client object
@@ -247,7 +339,7 @@ void cryptodev_backend_free_client(
* @backend: the cryptodev backend object
* @errp: pointer to a NULL-initialized error object
*
- * Clean the resouce associated with @backend that realizaed
+ * Clean the resource associated with @backend that was realized
* by the specific backend's init() callback
*/
void cryptodev_backend_cleanup(
@@ -255,60 +347,67 @@ void cryptodev_backend_cleanup(
Error **errp);
/**
- * cryptodev_backend_sym_create_session:
+ * cryptodev_backend_create_session:
* @backend: the cryptodev backend object
* @sess_info: parameters needed by session creating
* @queue_index: queue index of cryptodev backend client
* @errp: pointer to a NULL-initialized error object
+ * @cb: callback when session creation is completed
+ * @opaque: parameter passed to callback
*
- * Create a session for symmetric algorithms
+ * Create a session for symmetric/asymmetric algorithms
*
- * Returns: session id on success, or -1 on error
+ * Returns: 0 for success and cb will be called when creation is completed,
+ * negative value for error, and cb will not be called.
*/
-int64_t cryptodev_backend_sym_create_session(
+int cryptodev_backend_create_session(
CryptoDevBackend *backend,
- CryptoDevBackendSymSessionInfo *sess_info,
- uint32_t queue_index, Error **errp);
+ CryptoDevBackendSessionInfo *sess_info,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
/**
- * cryptodev_backend_sym_close_session:
+ * cryptodev_backend_close_session:
* @backend: the cryptodev backend object
* @session_id: the session id
* @queue_index: queue index of cryptodev backend client
* @errp: pointer to a NULL-initialized error object
+ * @cb: callback when session close is completed
+ * @opaque: parameter passed to callback
*
- * Close a session for symmetric algorithms which was previously
- * created by cryptodev_backend_sym_create_session()
+ * Close a session which was previously
+ * created by cryptodev_backend_create_session()
*
- * Returns: 0 on success, or Negative on error
+ * Returns: 0 for success and cb will be called when the close is completed,
+ * negative value for error, and cb will not be called.
*/
-int cryptodev_backend_sym_close_session(
+int cryptodev_backend_close_session(
CryptoDevBackend *backend,
uint64_t session_id,
- uint32_t queue_index, Error **errp);
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
/**
* cryptodev_backend_crypto_operation:
* @backend: the cryptodev backend object
- * @opaque: pointer to a VirtIOCryptoReq object
- * @queue_index: queue index of cryptodev backend client
- * @errp: pointer to a NULL-initialized error object
+ * @op_info: pointer to a CryptoDevBackendOpInfo object
*
- * Do crypto operation, such as encryption and
- * decryption
+ * Do crypto operation, such as encryption, decryption, signature and
+ * verification
*
- * Returns: VIRTIO_CRYPTO_OK on success,
- * or -VIRTIO_CRYPTO_* on error
+ * Returns: 0 for success and cb will be called when the operation is completed,
+ * negative value for error, and cb will not be called.
*/
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
- void *opaque,
- uint32_t queue_index, Error **errp);
+ CryptoDevBackendOpInfo *op_info);
/**
* cryptodev_backend_set_used:
* @backend: the cryptodev backend object
- * @used: ture or false
+ * @used: true or false
*
* Set the cryptodev backend is used by virtio-crypto or not
*/
@@ -328,7 +427,7 @@ bool cryptodev_backend_is_used(CryptoDevBackend *backend);
/**
* cryptodev_backend_set_ready:
* @backend: the cryptodev backend object
- * @ready: ture or false
+ * @ready: true or false
*
* Set the cryptodev backend is ready or not, which is called
* by the children of the cryptodev backend interface.
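
A sketch of submitting an operation through the reworked, callback-based interface above; the QAPI enum value, the virtio op code and the completion handler are assumptions for the example.

    static void my_op_done(void *opaque, int ret)
    {
        /* Runs once the backend has finished (or failed) the request. */
    }

    static int submit_sym_encrypt_example(CryptoDevBackend *backend,
                                          uint64_t session_id,
                                          CryptoDevBackendSymOpInfo *sym_op_info)
    {
        CryptoDevBackendOpInfo *op = g_new0(CryptoDevBackendOpInfo, 1);

        op->algtype = QCRYPTODEV_BACKEND_ALG_SYM;    /* assumed QAPI enum value */
        op->op_code = VIRTIO_CRYPTO_CIPHER_ENCRYPT;  /* assumed virtio op code */
        op->queue_index = 0;
        op->session_id = session_id;
        op->u.sym_op_info = sym_op_info;
        op->cb = my_op_done;
        op->opaque = NULL;

        /* 0: accepted, my_op_done() runs later; < 0: rejected, no callback. */
        return cryptodev_backend_crypto_operation(backend, op);
    }
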
diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h
index c16fd69bc0..8eab395934 100644
--- a/include/sysemu/device_tree.h
+++ b/include/sysemu/device_tree.h
@@ -39,8 +39,11 @@ void *load_device_tree_from_sysfs(void);
* NULL. If there is no error but no matching node was found, the
* returned array contains a single element equal to NULL. If an error
* was encountered when parsing the blob, the function returns NULL
+ *
+ * @name may be NULL to wildcard names and only match compatibility
+ * strings.
*/
-char **qemu_fdt_node_path(void *fdt, const char *name, char *compat,
+char **qemu_fdt_node_path(void *fdt, const char *name, const char *compat,
Error **errp);
/**
@@ -67,6 +70,23 @@ int qemu_fdt_setprop_u64(void *fdt, const char *node_path,
const char *property, uint64_t val);
int qemu_fdt_setprop_string(void *fdt, const char *node_path,
const char *property, const char *string);
+
+/**
+ * qemu_fdt_setprop_string_array: set a string array property
+ *
+ * @fdt: pointer to the dt blob
+ * @name: node name
+ * @prop: property array
+ * @array: pointer to an array of string pointers
+ * @len: length of array
+ *
+ * Assigns a string array to a property. This function converts an
+ * array of strings to a sequential string with \0 separators before
+ * setting the property.
+ */
+int qemu_fdt_setprop_string_array(void *fdt, const char *node_path,
+ const char *prop, char **array, int len);
+
int qemu_fdt_setprop_phandle(void *fdt, const char *node_path,
const char *property,
const char *target_node_path);
@@ -101,20 +121,20 @@ uint32_t qemu_fdt_get_phandle(void *fdt, const char *path);
uint32_t qemu_fdt_alloc_phandle(void *fdt);
int qemu_fdt_nop_node(void *fdt, const char *node_path);
int qemu_fdt_add_subnode(void *fdt, const char *name);
+int qemu_fdt_add_path(void *fdt, const char *path);
#define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \
do { \
uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(qdt_tmp); i++) { \
- qdt_tmp[i] = cpu_to_be32(qdt_tmp[i]); \
+ for (unsigned i_ = 0; i_ < ARRAY_SIZE(qdt_tmp); i_++) { \
+ qdt_tmp[i_] = cpu_to_be32(qdt_tmp[i_]); \
} \
qemu_fdt_setprop(fdt, node_path, property, qdt_tmp, \
sizeof(qdt_tmp)); \
} while (0)
void qemu_fdt_dumpdtb(void *fdt, int size);
+void hmp_dumpdtb(Monitor *mon, const QDict *qdict);
/**
* qemu_fdt_setprop_sized_cells_from_array:
@@ -175,6 +195,15 @@ int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
qdt_tmp); \
})
+
+/**
+ * qemu_fdt_randomize_seeds:
+ * @fdt: device tree blob
+ *
+ * Re-randomize all "rng-seed" properties with new seeds.
+ */
+void qemu_fdt_randomize_seeds(void *fdt);
+
#define FDT_PCI_RANGE_RELOCATABLE 0x80000000
#define FDT_PCI_RANGE_PREFETCHABLE 0x40000000
#define FDT_PCI_RANGE_ALIASED 0x20000000
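
A usage sketch for the helpers added above; the node path, property names and cell values are made-up examples.

    static void fdt_example(void *fdt)
    {
        char *clock_names[] = { (char *)"uartclk", (char *)"apb_pclk" };

        qemu_fdt_add_path(fdt, "/soc/uart@9000000");
        qemu_fdt_setprop_string_array(fdt, "/soc/uart@9000000", "clock-names",
                                      clock_names, ARRAY_SIZE(clock_names));
        qemu_fdt_setprop_cells(fdt, "/soc/uart@9000000", "interrupts",
                               0x0, 0x1, 0x4);
    }
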
diff --git a/include/sysemu/dirtylimit.h b/include/sysemu/dirtylimit.h
new file mode 100644
index 0000000000..d11ebbbbdb
--- /dev/null
+++ b/include/sysemu/dirtylimit.h
@@ -0,0 +1,39 @@
+/*
+ * Dirty page rate limit common functions
+ *
+ * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
+ *
+ * Authors:
+ * Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_DIRTYRLIMIT_H
+#define QEMU_DIRTYRLIMIT_H
+
+#define DIRTYLIMIT_CALC_TIME_MS 1000 /* 1000ms */
+
+int64_t vcpu_dirty_rate_get(int cpu_index);
+void vcpu_dirty_rate_stat_start(void);
+void vcpu_dirty_rate_stat_stop(void);
+void vcpu_dirty_rate_stat_initialize(void);
+void vcpu_dirty_rate_stat_finalize(void);
+
+void dirtylimit_state_lock(void);
+void dirtylimit_state_unlock(void);
+void dirtylimit_state_initialize(void);
+void dirtylimit_state_finalize(void);
+bool dirtylimit_in_service(void);
+bool dirtylimit_vcpu_index_valid(int cpu_index);
+void dirtylimit_process(void);
+void dirtylimit_change(bool start);
+void dirtylimit_set_vcpu(int cpu_index,
+ uint64_t quota,
+ bool enable);
+void dirtylimit_set_all(uint64_t quota,
+ bool enable);
+void dirtylimit_vcpu_execute(CPUState *cpu);
+uint64_t dirtylimit_throttle_time_per_round(void);
+uint64_t dirtylimit_ring_full_time(void);
+#endif
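
A caller-side sketch of the prototypes above; the quota value, its unit, and the need to hold the dirtylimit state lock around the update are assumptions.

    static void dirtylimit_example(void)
    {
        dirtylimit_state_lock();
        dirtylimit_set_all(128 /* quota, assumed unit */, true);
        dirtylimit_state_unlock();

        if (dirtylimit_in_service() && dirtylimit_vcpu_index_valid(0)) {
            int64_t rate = vcpu_dirty_rate_get(0);  /* measured, not the quota */
            (void)rate;
        }
    }
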
diff --git a/include/sysemu/dirtyrate.h b/include/sysemu/dirtyrate.h
new file mode 100644
index 0000000000..20813f303f
--- /dev/null
+++ b/include/sysemu/dirtyrate.h
@@ -0,0 +1,30 @@
+/*
+ * dirty page rate helper functions
+ *
+ * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
+ *
+ * Authors:
+ * Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_DIRTYRATE_H
+#define QEMU_DIRTYRATE_H
+
+#include "qapi/qapi-types-migration.h"
+
+typedef struct VcpuStat {
+ int nvcpu; /* number of vcpu */
+ DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */
+} VcpuStat;
+
+int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
+ VcpuStat *stat,
+ unsigned int flag,
+ bool one_shot);
+
+void global_dirty_log_change(unsigned int flag,
+ bool start);
+#endif
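
A sketch of sampling per-vCPU dirty rates for one second; GLOBAL_DIRTY_DIRTY_RATE is assumed to be the dirty-log flag for this use, and the DirtyRateVcpu contents are only referenced in a comment.

    static void dirtyrate_sample_example(void)
    {
        VcpuStat stat = { 0 };

        vcpu_calculate_dirtyrate(1000 /* ms */, &stat, GLOBAL_DIRTY_DIRTY_RATE,
                                 true /* one_shot */);

        for (int i = 0; i < stat.nvcpu; i++) {
            /* stat.rates[i] holds the i-th vCPU's measured dirty rate */
        }
        g_free(stat.rates);
    }
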
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index 5da3c4e3c5..a1ac5bc1b5 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -1,7 +1,7 @@
/*
* DMA helper functions
*
- * Copyright (c) 2009 Red Hat
+ * Copyright (c) 2009, 2020 Red Hat
*
* This work is licensed under the terms of the GNU General Public License
* (GNU GPL), version 2 or later.
@@ -12,28 +12,14 @@
#include "exec/memory.h"
#include "exec/address-spaces.h"
-#include "hw/hw.h"
#include "block/block.h"
#include "block/accounting.h"
-typedef struct ScatterGatherEntry ScatterGatherEntry;
-
typedef enum {
DMA_DIRECTION_TO_DEVICE = 0,
DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
-struct QEMUSGList {
- ScatterGatherEntry *sg;
- int nsg;
- int nalloc;
- size_t size;
- DeviceState *dev;
- AddressSpace *as;
-};
-
-#ifndef CONFIG_USER_ONLY
-
/*
* When an IOMMU is present, bus addresses become distinct from
* CPU/memory physical addresses and may be a different size. Because
@@ -46,6 +32,17 @@ typedef uint64_t dma_addr_t;
#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64
+typedef struct ScatterGatherEntry ScatterGatherEntry;
+
+struct QEMUSGList {
+ ScatterGatherEntry *sg;
+ int nsg;
+ int nalloc;
+ dma_addr_t size;
+ DeviceState *dev;
+ AddressSpace *as;
+};
+
static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
/*
@@ -74,71 +71,164 @@ static inline void dma_barrier(AddressSpace *as, DMADirection dir)
* dma_memory_{read,write}() and check for errors */
static inline bool dma_memory_valid(AddressSpace *as,
dma_addr_t addr, dma_addr_t len,
- DMADirection dir)
+ DMADirection dir, MemTxAttrs attrs)
{
return address_space_access_valid(as, addr, len,
dir == DMA_DIRECTION_FROM_DEVICE,
- MEMTXATTRS_UNSPECIFIED);
+ attrs);
}
-static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len,
- DMADirection dir)
+static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir,
+ MemTxAttrs attrs)
{
- return (bool)address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
- buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
+ return address_space_rw(as, addr, attrs,
+ buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}
-static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len)
+static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len)
{
- return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+ return dma_memory_rw_relaxed(as, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
}
-static inline int dma_memory_write_relaxed(AddressSpace *as, dma_addr_t addr,
- const void *buf, dma_addr_t len)
+static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ const void *buf,
+ dma_addr_t len)
{
return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
- DMA_DIRECTION_FROM_DEVICE);
+ DMA_DIRECTION_FROM_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
}
-static inline int dma_memory_rw(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len,
- DMADirection dir)
+/**
+ * dma_memory_rw: Read from or write to an address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @dir: indicates the transfer direction
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir, MemTxAttrs attrs)
{
dma_barrier(as, dir);
- return dma_memory_rw_relaxed(as, addr, buf, len, dir);
+ return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
}
-static inline int dma_memory_read(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len)
+/**
+ * dma_memory_read: Read from an address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault). Called within RCU critical section.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ MemTxAttrs attrs)
{
- return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+ return dma_memory_rw(as, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE, attrs);
}
-static inline int dma_memory_write(AddressSpace *as, dma_addr_t addr,
- const void *buf, dma_addr_t len)
+/**
+ * dma_memory_write: Write to address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
+ const void *buf, dma_addr_t len,
+ MemTxAttrs attrs)
{
return dma_memory_rw(as, addr, (void *)buf, len,
- DMA_DIRECTION_FROM_DEVICE);
+ DMA_DIRECTION_FROM_DEVICE, attrs);
}
-int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len);
+/**
+ * dma_memory_set: Fill memory with a constant byte from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
+ uint8_t c, dma_addr_t len, MemTxAttrs attrs);
+/**
+ * dma_memory_map: Map a physical memory region into a host virtual address.
+ *
+ * May map a subset of the requested range, given by and returned in @len.
+ * May return %NULL and set *@len to zero(0), if resources needed to perform
+ * the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: pointer to length of buffer; updated on return
+ * @dir: indicates the transfer direction
+ * @attrs: memory attributes
+ */
static inline void *dma_memory_map(AddressSpace *as,
dma_addr_t addr, dma_addr_t *len,
- DMADirection dir)
+ DMADirection dir, MemTxAttrs attrs)
{
hwaddr xlen = *len;
void *p;
p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
- MEMTXATTRS_UNSPECIFIED);
+ attrs);
*len = xlen;
return p;
}
+/**
+ * dma_memory_unmap: Unmaps a memory region previously mapped
+ * by dma_memory_map()
+ *
+ * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
+ * @access_len gives the amount of memory that was actually read or written
+ * by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @dir: indicates the transfer direction
+ * @access_len: amount of data actually transferred
+ */
static inline void dma_memory_unmap(AddressSpace *as,
void *buffer, dma_addr_t len,
DMADirection dir, dma_addr_t access_len)
@@ -148,32 +238,34 @@ static inline void dma_memory_unmap(AddressSpace *as,
}
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
- static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
- dma_addr_t addr) \
- { \
- uint##_bits##_t val; \
- dma_memory_read(as, addr, &val, (_bits) / 8); \
- return _end##_bits##_to_cpu(val); \
- } \
- static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
- dma_addr_t addr, \
- uint##_bits##_t val) \
- { \
- val = cpu_to_##_end##_bits(val); \
- dma_memory_write(as, addr, &val, (_bits) / 8); \
+ static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+ uint##_bits##_t *pval, \
+ MemTxAttrs attrs) \
+ { \
+ MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
+ _end##_bits##_to_cpus(pval); \
+ return res; \
+ } \
+ static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+ uint##_bits##_t val, \
+ MemTxAttrs attrs) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+ return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
}
-static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
+static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
+ uint8_t *val, MemTxAttrs attrs)
{
- uint8_t val;
-
- dma_memory_read(as, addr, &val, 1);
- return val;
+ return dma_memory_read(as, addr, val, 1, attrs);
}
-static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
+static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
+ uint8_t val, MemTxAttrs attrs)
{
- dma_memory_write(as, addr, &val, 1);
+ return dma_memory_write(as, addr, &val, 1, attrs);
}
DEFINE_LDST_DMA(uw, w, 16, le);
@@ -194,7 +286,6 @@ void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
-#endif
typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
BlockCompletionFunc *cb, void *cb_opaque,
@@ -210,10 +301,24 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
BlockAIOCB *dma_blk_write(BlockBackend *blk,
QEMUSGList *sg, uint64_t offset, uint32_t align,
BlockCompletionFunc *cb, void *opaque);
-uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
-uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);
+MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
+ QEMUSGList *sg, MemTxAttrs attrs);
+MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
+ QEMUSGList *sg, MemTxAttrs attrs);
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
QEMUSGList *sg, enum BlockAcctType type);
+/**
+ * dma_aligned_pow2_mask: Return the address bit mask of the largest
+ * power of 2 size less or equal than @end - @start + 1, aligned with @start,
+ * and bounded by 1 << @max_addr_bits bits.
+ *
+ * @start: range start address
+ * @end: range end address (greater than @start)
+ * @max_addr_bits: max address bits (<= 64)
+ */
+uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
+ int max_addr_bits);
+
#endif
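
A caller-side sketch of the updated API, which now takes explicit MemTxAttrs and returns a MemTxResult instead of an int; the descriptor layout and error policy are assumptions.

    static bool read_descriptor_example(AddressSpace *as, dma_addr_t addr,
                                        void *desc, dma_addr_t len)
    {
        MemTxResult res = dma_memory_read(as, addr, desc, len,
                                          MEMTXATTRS_UNSPECIFIED);

        if (res != MEMTX_OK) {
            /* unassigned memory, an IOMMU fault, or a rejected transaction */
            return false;
        }
        return true;
    }
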
diff --git a/include/sysemu/dump-arch.h b/include/sysemu/dump-arch.h
index e25b02e990..743916e46c 100644
--- a/include/sysemu/dump-arch.h
+++ b/include/sysemu/dump-arch.h
@@ -21,6 +21,10 @@ typedef struct ArchDumpInfo {
uint32_t page_size; /* The target's page size. If it's variable and
* unknown, then this should be the maximum. */
uint64_t phys_base; /* The target's physmem base. */
+ void (*arch_sections_add_fn)(DumpState *s);
+ uint64_t (*arch_sections_write_hdr_fn)(DumpState *s, uint8_t *buff);
+ int (*arch_sections_write_fn)(DumpState *s, uint8_t *buff);
+ void (*arch_cleanup_fn)(DumpState *s);
} ArchDumpInfo;
struct GuestPhysBlockList; /* memory_mapping.h */
diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
index d824bc0941..d702854853 100644
--- a/include/sysemu/dump.h
+++ b/include/sysemu/dump.h
@@ -14,7 +14,8 @@
#ifndef DUMP_H
#define DUMP_H
-#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-dump.h"
+#include "qemu/thread.h"
#define MAKEDUMPFILE_SIGNATURE "makedumpfile"
#define MAX_SIZE_MDF_HEADER (4096) /* max size of makedumpfile_header */
@@ -136,7 +137,7 @@ typedef struct QEMU_PACKED KdumpSubHeader64 {
} KdumpSubHeader64;
typedef struct DataCache {
- int fd; /* fd of the file where to write the cached data */
+ DumpState *state; /* dump state related to this data */
uint8_t *buf; /* buffer for cached data */
size_t buf_size; /* size of the buf */
size_t data_size; /* size of cached data in buf */
@@ -154,20 +155,36 @@ typedef struct DumpState {
GuestPhysBlockList guest_phys_blocks;
ArchDumpInfo dump_info;
MemoryMappingList list;
- uint16_t phdr_num;
- uint32_t sh_info;
- bool have_section;
bool resume;
bool detached;
- ssize_t note_size;
+ bool kdump_raw;
hwaddr memory_offset;
int fd;
- GuestPhysBlock *next_block;
- ram_addr_t start;
- bool has_filter;
- int64_t begin;
- int64_t length;
+ /*
+ * Dump filter area variables
+ *
+ * A filtered dump only contains the guest memory designated by
+ * the start address and length variables defined below.
+ *
+ * If length is 0, no filtering is applied.
+ */
+ int64_t filter_area_begin; /* Start address of partial guest memory area */
+ int64_t filter_area_length; /* Length of partial guest memory area */
+
+ /* Elf dump related data */
+ uint32_t phdr_num;
+ uint32_t shdr_num;
+ ssize_t note_size;
+ hwaddr shdr_offset;
+ hwaddr phdr_offset;
+ hwaddr section_offset;
+ hwaddr note_offset;
+
+ void *elf_section_hdrs; /* Pointer to section header buffer */
+ void *elf_section_data; /* Pointer to section data buffer */
+ uint64_t elf_section_data_size; /* Size of section data */
+ GArray *string_table_buf; /* String table data buffer */
uint8_t *note_buf; /* buffer for notes */
size_t note_buf_offset; /* the writing place in note_buf */
@@ -200,4 +217,9 @@ typedef struct DumpState {
uint16_t cpu_to_dump16(DumpState *s, uint16_t val);
uint32_t cpu_to_dump32(DumpState *s, uint32_t val);
uint64_t cpu_to_dump64(DumpState *s, uint64_t val);
+
+int64_t dump_filtered_memblock_size(GuestPhysBlock *block, int64_t filter_area_start,
+ int64_t filter_area_length);
+int64_t dump_filtered_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
+ int64_t filter_area_length);
#endif
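
A sketch of how the filter-area helpers above might be used when walking guest memory blocks; the "negative start means outside the filter area" convention is an assumption.

    static void walk_filtered_block_example(DumpState *s, GuestPhysBlock *block)
    {
        int64_t size = dump_filtered_memblock_size(block, s->filter_area_begin,
                                                   s->filter_area_length);
        int64_t start = dump_filtered_memblock_start(block, s->filter_area_begin,
                                                     s->filter_area_length);

        if (size > 0 && start >= 0) {
            /* ... write 'size' bytes of the block starting at 'start' ... */
        }
    }
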
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
new file mode 100644
index 0000000000..a6c24f1351
--- /dev/null
+++ b/include/sysemu/event-loop-base.h
@@ -0,0 +1,40 @@
+/*
+ * QEMU event-loop backend
+ *
+ * Copyright (C) 2022 Red Hat Inc
+ *
+ * Authors:
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_EVENT_LOOP_BASE_H
+#define QEMU_EVENT_LOOP_BASE_H
+
+#include "qom/object.h"
+#include "block/aio.h"
+
+#define TYPE_EVENT_LOOP_BASE "event-loop-base"
+OBJECT_DECLARE_TYPE(EventLoopBase, EventLoopBaseClass,
+ EVENT_LOOP_BASE)
+
+struct EventLoopBaseClass {
+ ObjectClass parent_class;
+
+ void (*init)(EventLoopBase *base, Error **errp);
+ void (*update_params)(EventLoopBase *base, Error **errp);
+ bool (*can_be_deleted)(EventLoopBase *base);
+};
+
+struct EventLoopBase {
+ Object parent;
+
+ /* AioContext AIO engine parameters */
+ int64_t aio_max_batch;
+
+ /* AioContext thread pool parameters */
+ int64_t thread_pool_min;
+ int64_t thread_pool_max;
+};
+#endif
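
A sketch of a hypothetical subclass wiring the update_params hook; the my_loop_* names are assumptions, and real users would also register a QOM TypeInfo.

    static void my_loop_update_params(EventLoopBase *base, Error **errp)
    {
        /* apply base->aio_max_batch / thread_pool_{min,max} to the AioContext */
    }

    static void my_loop_class_init(ObjectClass *klass, void *class_data)
    {
        EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);

        bc->update_params = my_loop_update_params;
    }
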
diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h
deleted file mode 100644
index 1f6c46186d..0000000000
--- a/include/sysemu/hax.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * QEMU HAXM support
- *
- * Copyright IBM, Corp. 2008
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * Copyright (c) 2011 Intel Corporation
- * Written by:
- * Jiang Yunhong<yunhong.jiang@intel.com>
- * Xin Xiaohui<xiaohui.xin@intel.com>
- * Zhang Xiantao<xiantao.zhang@intel.com>
- *
- * Copyright 2016 Google, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_HAX_H
-#define QEMU_HAX_H
-
-#include "qemu-common.h"
-
-int hax_sync_vcpus(void);
-int hax_init_vcpu(CPUState *cpu);
-int hax_smp_cpu_exec(CPUState *cpu);
-int hax_populate_ram(uint64_t va, uint64_t size);
-
-void hax_cpu_synchronize_state(CPUState *cpu);
-void hax_cpu_synchronize_post_reset(CPUState *cpu);
-void hax_cpu_synchronize_post_init(CPUState *cpu);
-void hax_cpu_synchronize_pre_loadvm(CPUState *cpu);
-
-#ifdef CONFIG_HAX
-
-int hax_enabled(void);
-
-#include "hw/hw.h"
-#include "qemu/bitops.h"
-#include "exec/memory.h"
-int hax_vcpu_destroy(CPUState *cpu);
-void hax_raise_event(CPUState *cpu);
-void hax_reset_vcpu_state(void *opaque);
-#include "target/i386/hax-interface.h"
-#include "target/i386/hax-i386.h"
-
-#else /* CONFIG_HAX */
-
-#define hax_enabled() (0)
-
-#endif /* CONFIG_HAX */
-
-#endif /* QEMU_HAX_H */
diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h
index a023b372a4..04b884bf42 100644
--- a/include/sysemu/hostmem.h
+++ b/include/sysemu/hostmem.h
@@ -13,22 +13,32 @@
#ifndef SYSEMU_HOSTMEM_H
#define SYSEMU_HOSTMEM_H
-#include "sysemu/sysemu.h" /* for MAX_NODES */
-#include "qapi/qapi-types-misc.h"
+#include "sysemu/numa.h"
+#include "qapi/qapi-types-machine.h"
#include "qom/object.h"
#include "exec/memory.h"
#include "qemu/bitmap.h"
+#include "qemu/thread-context.h"
#define TYPE_MEMORY_BACKEND "memory-backend"
-#define MEMORY_BACKEND(obj) \
- OBJECT_CHECK(HostMemoryBackend, (obj), TYPE_MEMORY_BACKEND)
-#define MEMORY_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(HostMemoryBackendClass, (obj), TYPE_MEMORY_BACKEND)
-#define MEMORY_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(HostMemoryBackendClass, (klass), TYPE_MEMORY_BACKEND)
+OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass,
+ MEMORY_BACKEND)
+
+/* hostmem-ram.c */
+/**
+ * @TYPE_MEMORY_BACKEND_RAM:
+ * name of backend that uses mmap on the anonymous RAM
+ */
+
+#define TYPE_MEMORY_BACKEND_RAM "memory-backend-ram"
+
+/* hostmem-file.c */
+/**
+ * @TYPE_MEMORY_BACKEND_FILE:
+ * name of backend that uses mmap on a file descriptor
+ */
+#define TYPE_MEMORY_BACKEND_FILE "memory-backend-file"
-typedef struct HostMemoryBackend HostMemoryBackend;
-typedef struct HostMemoryBackendClass HostMemoryBackendClass;
/**
* HostMemoryBackendClass:
@@ -37,7 +47,15 @@ typedef struct HostMemoryBackendClass HostMemoryBackendClass;
struct HostMemoryBackendClass {
ObjectClass parent_class;
- void (*alloc)(HostMemoryBackend *backend, Error **errp);
+ /**
+ * alloc: Allocate memory from backend.
+ *
+ * @backend: the #HostMemoryBackend.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+ bool (*alloc)(HostMemoryBackend *backend, Error **errp);
};
/**
@@ -46,6 +64,7 @@ struct HostMemoryBackendClass {
* @parent: opaque parent object container
* @size: amount of memory backend provides
* @mr: MemoryRegion representing host memory belonging to backend
+ * @prealloc_threads: number of threads to be used for preallocating RAM
*/
struct HostMemoryBackend {
/* private */
@@ -54,7 +73,10 @@ struct HostMemoryBackend {
/* protected */
uint64_t size;
bool merge, dump, use_canonical_path;
- bool prealloc, force_prealloc, is_mapped, share;
+ bool prealloc, is_mapped, share, reserve;
+ bool guest_memfd;
+ uint32_t prealloc_threads;
+ ThreadContext *prealloc_context;
DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
HostMemPolicy policy;
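
A sketch of a backend implementing the alloc hook now that it returns bool and sets @errp on failure; the backend name, the size check and the elided MemoryRegion setup are illustrative assumptions.

    static bool my_backend_alloc(HostMemoryBackend *backend, Error **errp)
    {
        if (!backend->size) {
            error_setg(errp, "can't create backend with size 0");
            return false;
        }
        /* ... create backend->mr with the requested size ... */
        return true;
    }

    static void my_backend_class_init(ObjectClass *oc, void *data)
    {
        HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);

        bc->alloc = my_backend_alloc;
    }
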
diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
index aaa51d2c51..4a7c6af3a5 100644
--- a/include/sysemu/hvf.h
+++ b/include/sysemu/hvf.h
@@ -9,97 +9,63 @@
*/
/* header to be included in non-HVF-specific code */
-#ifndef _HVF_H
-#define _HVF_H
-#include "qemu-common.h"
-#include "qemu/bitops.h"
-#include "exec/memory.h"
-#include "sysemu/accel.h"
+#ifndef HVF_H
+#define HVF_H
+
+#include "qemu/accel.h"
+#include "qom/object.h"
+
+#ifdef NEED_CPU_H
+#include "cpu.h"
-extern bool hvf_allowed;
#ifdef CONFIG_HVF
-#include <Hypervisor/hv.h>
-#include <Hypervisor/hv_vmx.h>
-#include <Hypervisor/hv_error.h>
-#include "target/i386/cpu.h"
-#include "hw/hw.h"
-uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
- int reg);
+extern bool hvf_allowed;
#define hvf_enabled() (hvf_allowed)
-#else
+#else /* !CONFIG_HVF */
#define hvf_enabled() 0
-#define hvf_get_supported_cpuid(func, idx, reg) 0
-#endif
-
-/* hvf_slot flags */
-#define HVF_SLOT_LOG (1 << 0)
-
-typedef struct hvf_slot {
- uint64_t start;
- uint64_t size;
- uint8_t *mem;
- int slot_id;
- uint32_t flags;
- MemoryRegion *region;
-} hvf_slot;
-
-typedef struct hvf_vcpu_caps {
- uint64_t vmx_cap_pinbased;
- uint64_t vmx_cap_procbased;
- uint64_t vmx_cap_procbased2;
- uint64_t vmx_cap_entry;
- uint64_t vmx_cap_exit;
- uint64_t vmx_cap_preemption_timer;
-} hvf_vcpu_caps;
-
-typedef struct HVFState {
- AccelState parent;
- hvf_slot slots[32];
- int num_slots;
+#endif /* !CONFIG_HVF */
- hvf_vcpu_caps *hvf_caps;
-} HVFState;
-extern HVFState *hvf_state;
+#endif /* NEED_CPU_H */
-void hvf_set_phys_mem(MemoryRegionSection *, bool);
-void hvf_handle_io(CPUArchState *, uint16_t, void *,
- int, int, int);
-hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
-
-/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by
- * the host CPU. Use hvf_enabled() after this to get the result. */
-void hvf_disable(int disable);
+#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
-/* Returns non-0 if the host CPU supports the VMX "unrestricted guest" feature
- * which allows the virtual CPU to directly run in "real mode". If true, this
- * allows QEMU to run several vCPU threads in parallel (see cpus.c). Otherwise,
- * only a a single TCG thread can run, and it will call HVF to run the current
- * instructions, except in case of "real mode" (paging disabled, typically at
- * boot time), or MMIO operations. */
+typedef struct HVFState HVFState;
+DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE,
+ TYPE_HVF_ACCEL)
-int hvf_sync_vcpus(void);
+#ifdef NEED_CPU_H
+struct hvf_sw_breakpoint {
+ vaddr pc;
+ vaddr saved_insn;
+ int use_count;
+ QTAILQ_ENTRY(hvf_sw_breakpoint) entry;
+};
-int hvf_init_vcpu(CPUState *);
-int hvf_vcpu_exec(CPUState *);
-int hvf_smp_cpu_exec(CPUState *);
-void hvf_cpu_synchronize_state(CPUState *);
-void hvf_cpu_synchronize_post_reset(CPUState *);
-void hvf_cpu_synchronize_post_init(CPUState *);
-void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
+struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu,
+ vaddr pc);
+int hvf_sw_breakpoints_active(CPUState *cpu);
-void hvf_vcpu_destroy(CPUState *);
-void hvf_raise_event(CPUState *);
-/* void hvf_reset_vcpu_state(void *opaque); */
-void hvf_reset_vcpu(CPUState *);
-void vmx_update_tpr(CPUState *);
-void update_apic_tpr(CPUState *);
-int hvf_put_registers(CPUState *);
-void vmx_clear_int_window_exiting(CPUState *cpu);
+int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
+int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
+int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
+int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
+void hvf_arch_remove_all_hw_breakpoints(void);
-#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
+/*
+ * hvf_update_guest_debug:
+ * @cs: CPUState for the CPU to update
+ *
+ * Update guest to enable or disable debugging. Per-arch specifics will be
+ * handled by calling down to hvf_arch_update_guest_debug.
+ */
+int hvf_update_guest_debug(CPUState *cpu);
+void hvf_arch_update_guest_debug(CPUState *cpu);
-#define HVF_STATE(obj) \
- OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)
+/*
+ * Return whether the guest supports debugging.
+ */
+bool hvf_arch_supports_guest_debug(void);
+#endif /* NEED_CPU_H */
#endif
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
new file mode 100644
index 0000000000..718beddcdd
--- /dev/null
+++ b/include/sysemu/hvf_int.h
@@ -0,0 +1,70 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* header to be included in HVF-specific code */
+
+#ifndef HVF_INT_H
+#define HVF_INT_H
+
+#ifdef __aarch64__
+#include <Hypervisor/Hypervisor.h>
+#else
+#include <Hypervisor/hv.h>
+#endif
+
+/* hvf_slot flags */
+#define HVF_SLOT_LOG (1 << 0)
+
+typedef struct hvf_slot {
+ uint64_t start;
+ uint64_t size;
+ uint8_t *mem;
+ int slot_id;
+ uint32_t flags;
+ MemoryRegion *region;
+} hvf_slot;
+
+typedef struct hvf_vcpu_caps {
+ uint64_t vmx_cap_pinbased;
+ uint64_t vmx_cap_procbased;
+ uint64_t vmx_cap_procbased2;
+ uint64_t vmx_cap_entry;
+ uint64_t vmx_cap_exit;
+ uint64_t vmx_cap_preemption_timer;
+} hvf_vcpu_caps;
+
+struct HVFState {
+ AccelState parent;
+ hvf_slot slots[32];
+ int num_slots;
+
+ hvf_vcpu_caps *hvf_caps;
+ uint64_t vtimer_offset;
+ QTAILQ_HEAD(, hvf_sw_breakpoint) hvf_sw_breakpoints;
+};
+extern HVFState *hvf_state;
+
+struct AccelCPUState {
+ uint64_t fd;
+ void *exit;
+ bool vtimer_masked;
+ sigset_t unblock_ipi_mask;
+ bool guest_debug_enabled;
+};
+
+void assert_hvf_ok(hv_return_t ret);
+int hvf_arch_init(void);
+int hvf_arch_init_vcpu(CPUState *cpu);
+void hvf_arch_vcpu_destroy(CPUState *cpu);
+int hvf_vcpu_exec(CPUState *);
+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
+int hvf_put_registers(CPUState *);
+int hvf_get_registers(CPUState *);
+void hvf_kick_vcpu_thread(CPUState *cpu);
+
+#endif
diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h
index d2ddfb5ad0..c71b77e71f 100644
--- a/include/sysemu/hw_accel.h
+++ b/include/sysemu/hw_accel.h
@@ -1,5 +1,5 @@
/*
- * QEMU Hardware accelertors support
+ * QEMU Hardware accelerators support
*
* Copyright 2016 Google, Inc.
*
@@ -11,61 +11,15 @@
#ifndef QEMU_HW_ACCEL_H
#define QEMU_HW_ACCEL_H
-#include "qom/cpu.h"
-#include "sysemu/hax.h"
+#include "hw/core/cpu.h"
#include "sysemu/kvm.h"
+#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
+#include "sysemu/nvmm.h"
-static inline void cpu_synchronize_state(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_state(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_state(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_state(cpu);
- }
-}
-
-static inline void cpu_synchronize_post_reset(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_post_reset(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_post_reset(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_post_reset(cpu);
- }
-}
-
-static inline void cpu_synchronize_post_init(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_post_init(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_post_init(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_post_init(cpu);
- }
-}
-
-static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_pre_loadvm(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_pre_loadvm(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_pre_loadvm(cpu);
- }
-}
+void cpu_synchronize_state(CPUState *cpu);
+void cpu_synchronize_post_reset(CPUState *cpu);
+void cpu_synchronize_post_init(CPUState *cpu);
+void cpu_synchronize_pre_loadvm(CPUState *cpu);
#endif /* QEMU_HW_ACCEL_H */
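
[Note: the inline wrappers removed above become out-of-line functions. Conceptually each still fans out to whichever accelerator is active, roughly as sketched below; the real implementation may dispatch differently (e.g. through per-accelerator ops), so treat this only as an illustration of the idea.]

    void cpu_synchronize_state(CPUState *cpu)
    {
        /* Sketch: forward to the active hardware accelerator, if any. */
        if (kvm_enabled()) {
            kvm_cpu_synchronize_state(cpu);
        } else if (whpx_enabled()) {
            whpx_cpu_synchronize_state(cpu);
        }
        /* HVF, NVMM, ... would be handled the same way. */
    }
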
diff --git a/include/sysemu/iommufd.h b/include/sysemu/iommufd.h
new file mode 100644
index 0000000000..9af27ebd6c
--- /dev/null
+++ b/include/sysemu/iommufd.h
@@ -0,0 +1,36 @@
+#ifndef SYSEMU_IOMMUFD_H
+#define SYSEMU_IOMMUFD_H
+
+#include "qom/object.h"
+#include "exec/hwaddr.h"
+#include "exec/cpu-common.h"
+
+#define TYPE_IOMMUFD_BACKEND "iommufd"
+OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND)
+
+struct IOMMUFDBackendClass {
+ ObjectClass parent_class;
+};
+
+struct IOMMUFDBackend {
+ Object parent;
+
+ /*< protected >*/
+ int fd; /* /dev/iommu file descriptor */
+ bool owned; /* is the /dev/iommu opened internally */
+ uint32_t users;
+
+ /*< public >*/
+};
+
+int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp);
+void iommufd_backend_disconnect(IOMMUFDBackend *be);
+
+int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
+ Error **errp);
+void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id);
+int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly);
+int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
+ hwaddr iova, ram_addr_t size);
+#endif
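
[Note: a short usage sketch for the iommufd backend API declared above, based only on these prototypes; the wrapper function, the IOVA of 0 and the error-handling shape are illustrative.]

    static int example_iommufd_setup(IOMMUFDBackend *be, void *vaddr,
                                     ram_addr_t size, Error **errp)
    {
        uint32_t ioas_id;
        int ret;

        ret = iommufd_backend_connect(be, errp);    /* open or reuse /dev/iommu */
        if (ret) {
            return ret;
        }

        ret = iommufd_backend_alloc_ioas(be, &ioas_id, errp);
        if (ret) {
            iommufd_backend_disconnect(be);
            return ret;
        }

        /* Map the buffer read-write at IOVA 0 into the new IOAS. */
        ret = iommufd_backend_map_dma(be, ioas_id, 0, size, vaddr, false);
        if (ret) {
            iommufd_backend_free_id(be, ioas_id);
            iommufd_backend_disconnect(be);
        }
        return ret;
    }
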
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index 8a7ac2c528..2102a90eca 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -16,19 +16,20 @@
#include "block/aio.h"
#include "qemu/thread.h"
+#include "qom/object.h"
+#include "sysemu/event-loop-base.h"
#define TYPE_IOTHREAD "iothread"
-typedef struct {
- Object parent_obj;
+struct IOThread {
+ EventLoopBase parent_obj;
QemuThread thread;
AioContext *ctx;
+ bool run_gcontext; /* whether we should run gcontext */
GMainContext *worker_context;
GMainLoop *main_loop;
- GOnce once;
- QemuMutex init_done_lock;
- QemuCond init_done_cond; /* is thread initialization done? */
+ QemuSemaphore init_done_sem; /* is thread init done? */
bool stopping; /* has iothread_stop() been called? */
bool running; /* should iothread_run() continue? */
int thread_id;
@@ -37,10 +38,11 @@ typedef struct {
int64_t poll_max_ns;
int64_t poll_grow;
int64_t poll_shrink;
-} IOThread;
+};
+typedef struct IOThread IOThread;
-#define IOTHREAD(obj) \
- OBJECT_CHECK(IOThread, obj, TYPE_IOTHREAD)
+DECLARE_INSTANCE_CHECKER(IOThread, IOTHREAD,
+ TYPE_IOTHREAD)
char *iothread_get_id(IOThread *iothread);
IOThread *iothread_by_id(const char *id);
@@ -56,4 +58,10 @@ IOThread *iothread_create(const char *id, Error **errp);
void iothread_stop(IOThread *iothread);
void iothread_destroy(IOThread *iothread);
+/*
+ * Returns true if executing within IOThread context,
+ * false otherwise.
+ */
+bool qemu_in_iothread(void);
+
#endif /* IOTHREAD_H */
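
[Note: qemu_in_iothread() is mainly useful for assertions; a tiny hypothetical example, not part of the patch.]

    static void main_loop_only_work(void)
    {
        /* This helper must run in the main loop, never in an IOThread. */
        assert(!qemu_in_iothread());
        /* ... */
    }
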
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index a6d1cd190f..47f9e8be1b 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -11,13 +11,14 @@
*
*/
+/* header to be included in non-KVM-specific code */
+
#ifndef QEMU_KVM_H
#define QEMU_KVM_H
-#include "qemu/queue.h"
-#include "qom/cpu.h"
#include "exec/memattrs.h"
-#include "hw/irq.h"
+#include "qemu/accel.h"
+#include "qom/object.h"
#ifdef NEED_CPU_H
# ifdef CONFIG_KVM
@@ -35,38 +36,33 @@ extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
-extern bool kvm_eventfds_allowed;
-extern bool kvm_irqfds_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
-extern bool kvm_direct_msi_allowed;
-extern bool kvm_ioeventfd_any_length_allowed;
extern bool kvm_msi_use_devid;
#define kvm_enabled() (kvm_allowed)
/**
* kvm_irqchip_in_kernel:
*
- * Returns: true if the user asked us to create an in-kernel
- * irqchip via the "kernel_irqchip=on" machine option.
+ * Returns: true if an in-kernel irqchip was created.
* What this actually means is architecture and machine model
- * specific: on PC, for instance, it means that the LAPIC,
- * IOAPIC and PIT are all in kernel. This function should never
- * be used from generic target-independent code: use one of the
- * following functions or some other specific check instead.
+ * specific: on PC, for instance, it means that the LAPIC
+ * is in kernel. This function should never be used from generic
+ * target-independent code: use one of the following functions or
+ * some other specific check instead.
*/
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
/**
* kvm_irqchip_is_split:
*
- * Returns: true if the user asked us to split the irqchip
- * implementation between user and kernel space. The details are
- * architecture and machine specific. On PC, it means that the PIC,
- * IOAPIC, and PIT are in user space while the LAPIC is in the kernel.
+ * Returns: true if the irqchip implementation is split between
+ * user and kernel space. The details are architecture and
+ * machine specific. On PC, it means that the PIC, IOAPIC, and
+ * PIT are in user space while the LAPIC is in the kernel.
*/
#define kvm_irqchip_is_split() (kvm_split_irqchip)
@@ -89,22 +85,15 @@ extern bool kvm_msi_use_devid;
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)
/**
- * kvm_eventfds_enabled:
- *
- * Returns: true if we can use eventfds to receive notifications
- * from a KVM CPU (ie the kernel supports eventds and we are running
- * with a configuration where it is meaningful to use them).
- */
-#define kvm_eventfds_enabled() (kvm_eventfds_allowed)
-
-/**
* kvm_irqfds_enabled:
*
* Returns: true if we can use irqfds to inject interrupts into
* a KVM CPU (ie the kernel supports irqfds and we are running
* with a configuration where it is meaningful to use them).
+ *
+ * Always available if running with in-kernel irqchip.
*/
-#define kvm_irqfds_enabled() (kvm_irqfds_allowed)
+#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()
/**
* kvm_resamplefds_enabled:
@@ -148,19 +137,6 @@ extern bool kvm_msi_use_devid;
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)
/**
- * kvm_direct_msi_enabled:
- *
- * Returns: true if KVM allows direct MSI injection.
- */
-#define kvm_direct_msi_enabled() (kvm_direct_msi_allowed)
-
-/**
- * kvm_ioeventfd_any_length_enabled:
- * Returns: true if KVM allows any length io eventfd.
- */
-#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed)
-
-/**
* kvm_msi_devid_required:
* Returns: true if KVM requires a device id to be provided while
* defining an MSI routing entry.
@@ -174,21 +150,17 @@ extern bool kvm_msi_use_devid;
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
-#define kvm_eventfds_enabled() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_resamplefds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_allowed() (false)
#define kvm_gsi_direct_mapping() (false)
#define kvm_readonly_mem_enabled() (false)
-#define kvm_direct_msi_enabled() (false)
-#define kvm_ioeventfd_any_length_enabled() (false)
#define kvm_msi_devid_required() (false)
#endif /* CONFIG_KVM_IS_POSSIBLE */
struct kvm_run;
-struct kvm_lapic_state;
struct kvm_irq_routing_entry;
typedef struct KVMCapabilityInfo {
@@ -200,24 +172,28 @@ typedef struct KVMCapabilityInfo {
#define KVM_CAP_LAST_INFO { NULL, 0 }
struct KVMState;
+
+#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
typedef struct KVMState KVMState;
+DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
+ TYPE_KVM_ACCEL)
+
extern KVMState *kvm_state;
+typedef struct Notifier Notifier;
+
+typedef struct KVMRouteChange {
+ KVMState *s;
+ int changes;
+} KVMRouteChange;
/* external API */
-bool kvm_has_free_slot(MachineState *ms);
+unsigned int kvm_get_max_memslots(void);
+unsigned int kvm_get_free_memslots(void);
bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
-int kvm_has_robust_singlestep(void);
-int kvm_has_debugregs(void);
-int kvm_has_pit_state2(void);
-int kvm_has_many_ioeventfds(void);
+int kvm_max_nested_state_length(void);
int kvm_has_gsi_routing(void);
-int kvm_has_intx_set_mask(void);
-
-int kvm_init_vcpu(CPUState *cpu);
-int kvm_cpu_exec(CPUState *cpu);
-int kvm_destroy_vcpu(CPUState *cpu);
/**
* kvm_arm_supports_user_irq
@@ -230,41 +206,32 @@ int kvm_destroy_vcpu(CPUState *cpu);
*/
bool kvm_arm_supports_user_irq(void);
-/**
- * kvm_memcrypt_enabled - return boolean indicating whether memory encryption
- * is enabled
- * Returns: 1 memory encryption is enabled
- * 0 memory encryption is disabled
- */
-bool kvm_memcrypt_enabled(void);
-
-/**
- * kvm_memcrypt_encrypt_data: encrypt the memory range
- *
- * Return: 1 failed to encrypt the range
- * 0 succesfully encrypted memory region
- */
-int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len);
+int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
+int kvm_on_sigbus(int code, void *addr);
#ifdef NEED_CPU_H
#include "cpu.h"
void kvm_flush_coalesced_mmio_buffer(void);
-int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
- target_ulong len, int type);
-int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
- target_ulong len, int type);
-void kvm_remove_all_breakpoints(CPUState *cpu);
+/**
+ * kvm_update_guest_debug(): ensure KVM debug structures updated
+ * @cs: the CPUState for this cpu
+ * @reinject_trap: KVM trap injection control
+ *
+ * There are usually per-arch specifics which will be handled by
+ * calling down to kvm_arch_update_guest_debug after the generic
+ * fields have been set.
+ */
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
-
-int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
-int kvm_on_sigbus(int code, void *addr);
-
-/* interface with exec.c */
-
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared));
+#else
+static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
+{
+ return -EINVAL;
+}
+#endif
/* internal API */
@@ -308,7 +275,7 @@ int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);
int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);
/**
- * kvm_device_access - set or get value of a specific vm attribute
+ * kvm_device_access - set or get value of a specific device attribute
* @fd: The device file descriptor
* @group: the group
* @attr: the attribute of that group to set or get
@@ -350,6 +317,8 @@ bool kvm_device_supported(int vmfd, uint64_t type);
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
+void kvm_arch_accel_class_init(ObjectClass *oc);
+
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
@@ -368,17 +337,19 @@ int kvm_arch_get_registers(CPUState *cpu);
int kvm_arch_put_registers(CPUState *cpu, int level);
+int kvm_arch_get_default_type(MachineState *ms);
+
int kvm_arch_init(MachineState *ms, KVMState *s);
int kvm_arch_init_vcpu(CPUState *cpu);
+int kvm_arch_destroy_vcpu(CPUState *cpu);
bool kvm_vcpu_id_is_valid(int vcpu_id);
/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
unsigned long kvm_arch_vcpu_id(CPUState *cpu);
-#ifdef TARGET_I386
-#define KVM_HAVE_MCE_INJECTION 1
+#ifdef KVM_HAVE_MCE_INJECTION
void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
#endif
@@ -400,20 +371,22 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
-void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
+void kvm_irqchip_add_change_notifier(Notifier *n);
+void kvm_irqchip_remove_change_notifier(Notifier *n);
+void kvm_irqchip_change_notify(void);
struct kvm_guest_debug;
struct kvm_debug_exit_arch;
struct kvm_sw_breakpoint {
- target_ulong pc;
- target_ulong saved_insn;
+ vaddr pc;
+ vaddr saved_insn;
int use_count;
QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
- target_ulong pc);
+ vaddr pc);
int kvm_sw_breakpoints_active(CPUState *cpu);
@@ -421,10 +394,8 @@ int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
-int kvm_arch_insert_hw_breakpoint(target_ulong addr,
- target_ulong len, int type);
-int kvm_arch_remove_hw_breakpoint(target_ulong addr,
- target_ulong len, int type);
+int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
+int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
@@ -459,30 +430,20 @@ int kvm_vm_check_extension(KVMState *s, unsigned int extension);
kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \
})
-uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
- uint32_t index, int reg);
-uint32_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index);
-
-
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
-#if !defined(CONFIG_USER_ONLY)
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
hwaddr *phys_addr);
-#endif
#endif /* NEED_CPU_H */
void kvm_cpu_synchronize_state(CPUState *cpu);
-void kvm_cpu_synchronize_post_reset(CPUState *cpu);
-void kvm_cpu_synchronize_post_init(CPUState *cpu);
-void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
void kvm_init_cpu_signals(CPUState *cpu);
/**
* kvm_irqchip_add_msi_route - Add MSI route for specific vector
- * @s: KVM state
+ * @c: KVMRouteChange instance.
* @vector: which vector to add. This can be either MSI/MSIX
* vector. The function will automatically detect whether
* MSI/MSIX is enabled, and fetch corresponding MSI
@@ -491,10 +452,24 @@ void kvm_init_cpu_signals(CPUState *cpu);
* as @NULL, an empty MSI message will be inited.
* @return: virq (>=0) when success, errno (<0) when failed.
*/
-int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev);
+int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
PCIDevice *dev);
void kvm_irqchip_commit_routes(KVMState *s);
+
+static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
+{
+ return (KVMRouteChange) { .s = s, .changes = 0 };
+}
+
+static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
+{
+ if (c->changes) {
+ kvm_irqchip_commit_routes(c->s);
+ c->changes = 0;
+ }
+}
+
void kvm_irqchip_release_virq(KVMState *s, int virq);
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter);
@@ -509,14 +484,15 @@ int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
qemu_irq irq);
void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
-void kvm_pc_gsi_handler(void *opaque, int n, int level);
-void kvm_pc_setup_irq_routing(bool pci_enabled);
void kvm_init_irq_routing(KVMState *s);
+bool kvm_kernel_irqchip_allowed(void);
+bool kvm_kernel_irqchip_required(void);
+bool kvm_kernel_irqchip_split(void);
+
/**
* kvm_arch_irqchip_create:
* @KVMState: The KVMState pointer
- * @MachineState: The MachineState pointer
*
* Allow architectures to create an in-kernel irq chip themselves.
*
@@ -524,7 +500,7 @@ void kvm_init_irq_routing(KVMState *s);
* 0: irq chip was not created
* > 0: irq chip was created
*/
-int kvm_arch_irqchip_create(MachineState *ms, KVMState *s);
+int kvm_arch_irqchip_create(KVMState *s);
/**
* kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl
@@ -545,6 +521,27 @@ int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
* Returns: 0 on success, or a negative errno on failure.
*/
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
-struct ppc_radix_page_info *kvm_get_radix_page_info(void);
-int kvm_get_max_memslots(void);
+
+/* Notify resamplefd for EOI of specific interrupts. */
+void kvm_resample_fd_notify(int gsi);
+
+bool kvm_dirty_ring_enabled(void);
+
+uint32_t kvm_dirty_ring_size(void);
+
+void kvm_mark_guest_state_protected(void);
+
+/**
+ * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
+ * reported for the VM.
+ */
+bool kvm_hwpoisoned_mem(void);
+
+int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);
+
+int kvm_set_memory_attributes_private(hwaddr start, uint64_t size);
+int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size);
+
+int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);
+
#endif
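
[Note: the new KVMRouteChange helpers batch MSI route updates so the routing table is committed at most once. A usage sketch based only on the declarations above; vector and pci_dev are placeholders.]

    KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
    int virq = kvm_irqchip_add_msi_route(&c, vector, pci_dev);

    if (virq < 0) {
        return virq;                        /* negative errno on failure */
    }
    kvm_irqchip_commit_route_changes(&c);   /* commits only if routes changed */
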
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index f838412491..3f3d13f816 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -9,8 +9,10 @@
#ifndef QEMU_KVM_INT_H
#define QEMU_KVM_INT_H
-#include "sysemu/sysemu.h"
-#include "sysemu/accel.h"
+#include "exec/memory.h"
+#include "qapi/qapi-types-common.h"
+#include "qemu/accel.h"
+#include "qemu/queue.h"
#include "sysemu/kvm.h"
typedef struct KVMSlot
@@ -21,20 +23,123 @@ typedef struct KVMSlot
int slot;
int flags;
int old_flags;
+ /* Dirty bitmap cache for the slot */
+ unsigned long *dirty_bmap;
+ unsigned long dirty_bmap_size;
+ /* Cache of the address space ID */
+ int as_id;
+ /* Cache of the offset in ram address space */
+ ram_addr_t ram_start_offset;
+ int guest_memfd;
+ hwaddr guest_memfd_offset;
} KVMSlot;
+typedef struct KVMMemoryUpdate {
+ QSIMPLEQ_ENTRY(KVMMemoryUpdate) next;
+ MemoryRegionSection section;
+} KVMMemoryUpdate;
+
typedef struct KVMMemoryListener {
MemoryListener listener;
KVMSlot *slots;
+ unsigned int nr_used_slots;
int as_id;
+ QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_add;
+ QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_del;
} KVMMemoryListener;
-#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
+#define KVM_MSI_HASHTAB_SIZE 256
-#define KVM_STATE(obj) \
- OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)
+enum KVMDirtyRingReaperState {
+ KVM_DIRTY_RING_REAPER_NONE = 0,
+ /* The reaper is sleeping */
+ KVM_DIRTY_RING_REAPER_WAIT,
+ /* The reaper is reaping for dirty pages */
+ KVM_DIRTY_RING_REAPER_REAPING,
+};
+
+/*
+ * KVM reaper instance, responsible for collecting the KVM dirty bits
+ * via the dirty ring.
+ */
+struct KVMDirtyRingReaper {
+ /* The reaper thread */
+ QemuThread reaper_thr;
+ volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
+ volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
+};
+struct KVMState
+{
+ AccelState parent_obj;
+
+ int nr_slots;
+ int fd;
+ int vmfd;
+ int coalesced_mmio;
+ int coalesced_pio;
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+ bool coalesced_flush_in_progress;
+ int vcpu_events;
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
+ QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
+#endif
+ int max_nested_state_len;
+ int kvm_shadow_mem;
+ bool kernel_irqchip_allowed;
+ bool kernel_irqchip_required;
+ OnOffAuto kernel_irqchip_split;
+ bool sync_mmu;
+ bool guest_state_protected;
+ uint64_t manual_dirty_log_protect;
+ /* The man page (and posix) say ioctl numbers are signed int, but
+ * they're not. Linux, glibc and *BSD all treat ioctl numbers as
+ * unsigned, and treating them as signed here can break things */
+ unsigned irq_set_ioctl;
+ unsigned int sigmask_len;
+ GHashTable *gsimap;
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing *irq_routes;
+ int nr_allocated_irq_routes;
+ unsigned long *used_gsi_bitmap;
+ unsigned int gsi_count;
+#endif
+ KVMMemoryListener memory_listener;
+ QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
+
+ /* For "info mtree -f" to tell if an MR is registered in KVM */
+ int nr_as;
+ struct KVMAs {
+ KVMMemoryListener *ml;
+ AddressSpace *as;
+ } *as;
+ uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
+ uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
+ bool kvm_dirty_ring_with_bitmap;
+ uint64_t kvm_eager_split_size; /* Eager Page Splitting chunk size */
+ struct KVMDirtyRingReaper reaper;
+ NotifyVmexitOption notify_vmexit;
+ uint32_t notify_window;
+ uint32_t xen_version;
+ uint32_t xen_caps;
+ uint16_t xen_gnttab_max_frames;
+ uint16_t xen_evtchn_max_pirq;
+ char *device;
+};
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
- AddressSpace *as, int as_id);
+ AddressSpace *as, int as_id, const char *name);
+void kvm_set_max_memslot_size(hwaddr max_slot_size);
+
+/**
+ * kvm_hwpoison_page_add:
+ *
+ * Parameters:
+ * @ram_addr: the address in RAM of the poisoned page
+ *
+ * Add a poisoned page to the list
+ *
+ * Return: None.
+ */
+void kvm_hwpoison_page_add(ram_addr_t ram_addr);
#endif
diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h
new file mode 100644
index 0000000000..961c702c4e
--- /dev/null
+++ b/include/sysemu/kvm_xen.h
@@ -0,0 +1,44 @@
+/*
+ * Xen HVM emulation support in KVM
+ *
+ * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_SYSEMU_KVM_XEN_H
+#define QEMU_SYSEMU_KVM_XEN_H
+
+/* The KVM API uses these to indicate "no GPA" or "no GFN" */
+#define INVALID_GPA UINT64_MAX
+#define INVALID_GFN UINT64_MAX
+
+/* QEMU plays the rôle of dom0 for "interdomain" communication. */
+#define DOMID_QEMU 0
+
+int kvm_xen_soft_reset(void);
+uint32_t kvm_xen_get_caps(void);
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
+bool kvm_xen_has_vcpu_callback_vector(void);
+void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type);
+void kvm_xen_set_callback_asserted(void);
+int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port);
+uint16_t kvm_xen_get_gnttab_max_frames(void);
+uint16_t kvm_xen_get_evtchn_max_pirq(void);
+
+#define kvm_xen_has_cap(cap) (!!(kvm_xen_get_caps() & \
+ KVM_XEN_HVM_CONFIG_ ## cap))
+
+#define XEN_SPECIAL_AREA_ADDR 0xfeff8000UL
+#define XEN_SPECIAL_AREA_SIZE 0x4000UL
+
+#define XEN_SPECIALPAGE_CONSOLE 0
+#define XEN_SPECIALPAGE_XENSTORE 1
+
+#define XEN_SPECIAL_PFN(x) ((XEN_SPECIAL_AREA_ADDR >> TARGET_PAGE_BITS) + \
+ XEN_SPECIALPAGE_##x)
+
+#endif /* QEMU_SYSEMU_KVM_XEN_H */
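
[Note: for illustration, the kvm_xen_has_cap() and XEN_SPECIAL_PFN() macros compose as below; EVTCHN_SEND is only an example of a KVM_XEN_HVM_CONFIG_* suffix and is not defined in this header.]

    /* True iff the kernel advertised KVM_XEN_HVM_CONFIG_EVTCHN_SEND. */
    if (kvm_xen_has_cap(EVTCHN_SEND)) {
        /* ... */
    }

    /* Guest frame number of the xenstore special page. */
    uint64_t xenstore_gfn = XEN_SPECIAL_PFN(XENSTORE);
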
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
index 58452457ce..021e0a6230 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/sysemu/memory_mapping.h
@@ -15,7 +15,7 @@
#define MEMORY_MAPPING_H
#include "qemu/queue.h"
-#include "exec/memory.h"
+#include "exec/cpu-common.h"
typedef struct GuestPhysBlock {
/* visible to guest, reflects PCI hole, etc */
@@ -42,7 +42,7 @@ typedef struct GuestPhysBlockList {
/* The physical and virtual address in the memory mapping are contiguous. */
typedef struct MemoryMapping {
hwaddr phys_addr;
- target_ulong virt_addr;
+ vaddr virt_addr;
ram_addr_t length;
QTAILQ_ENTRY(MemoryMapping) next;
} MemoryMapping;
@@ -71,7 +71,7 @@ void guest_phys_blocks_free(GuestPhysBlockList *list);
void guest_phys_blocks_init(GuestPhysBlockList *list);
void guest_phys_blocks_append(GuestPhysBlockList *list);
-void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+bool qemu_get_guest_memory_mapping(MemoryMappingList *list,
const GuestPhysBlockList *guest_phys_blocks,
Error **errp);
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
index b6ac7de43e..825cfe86bc 100644
--- a/include/sysemu/numa.h
+++ b/include/sysemu/numa.h
@@ -2,17 +2,48 @@
#define SYSEMU_NUMA_H
#include "qemu/bitmap.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/hostmem.h"
-#include "hw/boards.h"
+#include "qapi/qapi-types-machine.h"
+#include "exec/cpu-common.h"
-extern int nb_numa_nodes; /* Number of NUMA nodes */
-extern bool have_numa_distance;
+struct CPUArchId;
+
+#define MAX_NODES 128
+#define NUMA_NODE_UNASSIGNED MAX_NODES
+#define NUMA_DISTANCE_MIN 10
+#define NUMA_DISTANCE_DEFAULT 20
+#define NUMA_DISTANCE_MAX 254
+#define NUMA_DISTANCE_UNREACHABLE 255
+
+/* the value of AcpiHmatLBInfo flags */
+enum {
+ HMAT_LB_MEM_MEMORY = 0,
+ HMAT_LB_MEM_CACHE_1ST_LEVEL = 1,
+ HMAT_LB_MEM_CACHE_2ND_LEVEL = 2,
+ HMAT_LB_MEM_CACHE_3RD_LEVEL = 3,
+ HMAT_LB_LEVELS /* must be the last entry */
+};
+
+/* the value of AcpiHmatLBInfo data type */
+enum {
+ HMAT_LB_DATA_ACCESS_LATENCY = 0,
+ HMAT_LB_DATA_READ_LATENCY = 1,
+ HMAT_LB_DATA_WRITE_LATENCY = 2,
+ HMAT_LB_DATA_ACCESS_BANDWIDTH = 3,
+ HMAT_LB_DATA_READ_BANDWIDTH = 4,
+ HMAT_LB_DATA_WRITE_BANDWIDTH = 5,
+ HMAT_LB_TYPES /* must be the last entry */
+};
+
+#define UINT16_BITS 16
struct NodeInfo {
uint64_t node_mem;
struct HostMemoryBackend *node_memdev;
bool present;
+ bool has_cpu;
+ bool has_gi;
+ uint8_t lb_info_provided;
+ uint16_t initiator;
uint8_t distance[MAX_NODES];
};
@@ -21,14 +52,63 @@ struct NumaNodeMem {
uint64_t node_plugged_mem;
};
-extern NodeInfo numa_info[MAX_NODES];
+struct HMAT_LB_Data {
+ uint8_t initiator;
+ uint8_t target;
+ uint64_t data;
+};
+typedef struct HMAT_LB_Data HMAT_LB_Data;
+
+struct HMAT_LB_Info {
+ /* Indicates whether this is memory or the specified level of memory side cache. */
+ uint8_t hierarchy;
+
+ /* Presents the type of data: access/read/write latency or bandwidth. */
+ uint8_t data_type;
+
+ /* The range bitmap of bandwidth for calculating common base */
+ uint64_t range_bitmap;
+
+ /* The common base unit for latencies or bandwidths */
+ uint64_t base;
+
+ /* Array to store the latencies or bandwidths */
+ GArray *list;
+};
+typedef struct HMAT_LB_Info HMAT_LB_Info;
+
+struct NumaState {
+ /* Number of NUMA nodes */
+ int num_nodes;
+
+ /* Allow setting NUMA distance for different NUMA nodes */
+ bool have_numa_distance;
+
+ /* Detect if HMAT support is enabled. */
+ bool hmat_enabled;
+
+ /* NUMA nodes information */
+ NodeInfo nodes[MAX_NODES];
+
+ /* NUMA nodes HMAT Locality Latency and Bandwidth Information */
+ HMAT_LB_Info *hmat_lb[HMAT_LB_LEVELS][HMAT_LB_TYPES];
+
+ /* Memory Side Cache Information Structure */
+ NumaHmatCacheOptions *hmat_cache[MAX_NODES][HMAT_LB_LEVELS];
+};
+typedef struct NumaState NumaState;
+
+void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp);
void parse_numa_opts(MachineState *ms);
+void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
+ Error **errp);
+void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node,
+ Error **errp);
void numa_complete_configuration(MachineState *ms);
-void query_numa_node_mem(NumaNodeMem node_mem[]);
+void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms);
extern QemuOptsList qemu_numa_opts;
-void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
- int nb_nodes, ram_addr_t size);
-void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
- int nb_nodes, ram_addr_t size);
-void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp);
+void numa_cpu_pre_plug(const struct CPUArchId *slot, DeviceState *dev,
+ Error **errp);
+bool numa_uses_legacy_mem(void);
+
#endif
diff --git a/include/sysemu/nvmm.h b/include/sysemu/nvmm.h
new file mode 100644
index 0000000000..be7bc9a62d
--- /dev/null
+++ b/include/sysemu/nvmm.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator support.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/* header to be included in non-NVMM-specific code */
+
+#ifndef QEMU_NVMM_H
+#define QEMU_NVMM_H
+
+#ifdef NEED_CPU_H
+
+#ifdef CONFIG_NVMM
+
+int nvmm_enabled(void);
+
+#else /* CONFIG_NVMM */
+
+#define nvmm_enabled() (0)
+
+#endif /* CONFIG_NVMM */
+
+#endif /* NEED_CPU_H */
+
+#endif /* QEMU_NVMM_H */
diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
index 629c8c648b..b881ac6c6f 100644
--- a/include/sysemu/os-posix.h
+++ b/include/sysemu/os-posix.h
@@ -38,21 +38,23 @@
#include <sys/sysmacros.h>
#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void os_set_line_buffering(void);
+void os_setup_early_signal_handling(void);
void os_set_proc_name(const char *s);
void os_setup_signal_handling(void);
+int os_set_daemonize(bool d);
+bool is_daemonized(void);
void os_daemonize(void);
+bool os_set_runas(const char *user_id);
+void os_set_chroot(const char *path);
+void os_setup_limits(void);
void os_setup_post(void);
int os_mlock(void);
-#define closesocket(s) close(s)
-#define ioctlsocket(s, r, v) ioctl(s, r, v)
-
-typedef struct timeval qemu_timeval;
-#define qemu_gettimeofday(tp) gettimeofday(tp, NULL)
-
-bool is_daemonized(void);
-
/**
* qemu_alloc_stack:
* @sz: pointer to a size_t holding the requested usable stack size
@@ -92,4 +94,8 @@ static inline void qemu_funlockfile(FILE *f)
funlockfile(f);
}
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h
index ff18b23db1..b82a5d3ad9 100644
--- a/include/sysemu/os-win32.h
+++ b/include/sysemu/os-win32.h
@@ -29,15 +29,57 @@
#include <winsock2.h>
#include <windows.h>
#include <ws2tcpip.h>
+#include "qemu/typedefs.h"
-#if defined(_WIN64)
-/* On w64, setjmp is implemented by _setjmp which needs a second parameter.
+#ifdef HAVE_AFUNIX_H
+#include <afunix.h>
+#else
+/*
+ * Fallback definitions of things we need in afunix.h, if not available from
+ * the used Windows SDK or MinGW headers.
+ */
+#define UNIX_PATH_MAX 108
+
+typedef struct sockaddr_un {
+ ADDRESS_FAMILY sun_family;
+ char sun_path[UNIX_PATH_MAX];
+} SOCKADDR_UN, *PSOCKADDR_UN;
+
+#define SIO_AF_UNIX_GETPEERPID _WSAIOR(IOC_VENDOR, 256)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__aarch64__)
+/*
+ * On windows-arm64, setjmp is available in only one variant, and longjmp always
+ * does stack unwinding. This crashes with generated code.
+ * Thus, we use another implementation of setjmp (not the Windows one), coming
+ * from MinGW, which never performs stack unwinding.
+ */
+#undef setjmp
+#undef longjmp
+/*
+ * These functions are not declared in setjmp.h because __aarch64__ defines
+ * setjmp to _setjmpex instead. However, they are still defined in libmingwex.a,
+ * which gets linked automatically.
+ */
+int __mingw_setjmp(jmp_buf);
+void __attribute__((noreturn)) __mingw_longjmp(jmp_buf, int);
+#define setjmp(env) __mingw_setjmp(env)
+#define longjmp(env, val) __mingw_longjmp(env, val)
+#elif defined(_WIN64)
+/*
+ * On windows-x64, setjmp is implemented by _setjmp which needs a second parameter.
* If this parameter is NULL, longjump does no stack unwinding.
* That is what we need for QEMU. Passing the value of register rsp (default)
- * lets longjmp try a stack unwinding which will crash with generated code. */
+ * lets longjmp try a stack unwinding which will crash with generated code.
+ */
# undef setjmp
# define setjmp(env) _setjmp(env, NULL)
-#endif
+#endif /* __aarch64__ */
/* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify
* "longjmp and don't touch the signal masks". Since we know that the
* savemask parameter will always be zero we can safely define these
@@ -48,18 +90,19 @@
#define siglongjmp(env, val) longjmp(env, val)
/* Missing POSIX functions. Don't use MinGW-w64 macros. */
-#ifndef CONFIG_LOCALTIME_R
+#ifndef _POSIX_THREAD_SAFE_FUNCTIONS
#undef gmtime_r
struct tm *gmtime_r(const time_t *timep, struct tm *result);
#undef localtime_r
struct tm *localtime_r(const time_t *timep, struct tm *result);
-#endif /* CONFIG_LOCALTIME_R */
+#endif /* _POSIX_THREAD_SAFE_FUNCTIONS */
static inline void os_setup_signal_handling(void) {}
static inline void os_daemonize(void) {}
static inline void os_setup_post(void) {}
-void os_set_line_buffering(void);
static inline void os_set_proc_name(const char *dummy) {}
+void os_set_line_buffering(void);
+void os_setup_early_signal_handling(void);
int getpagesize(void);
@@ -67,13 +110,13 @@ int getpagesize(void);
# define EPROTONOSUPPORT EINVAL
#endif
-int setenv(const char *name, const char *value, int overwrite);
-
-typedef struct {
- long tv_sec;
- long tv_usec;
-} qemu_timeval;
-int qemu_gettimeofday(qemu_timeval *tp);
+static inline int os_set_daemonize(bool d)
+{
+ if (d) {
+ return -ENOTSUP;
+ }
+ return 0;
+}
static inline bool is_daemonized(void)
{
@@ -85,6 +128,11 @@ static inline int os_mlock(void)
return -ENOSYS;
}
+static inline void os_setup_limits(void)
+{
+ return;
+}
+
#define fsync _commit
#if !defined(lseek)
@@ -103,25 +151,48 @@ static inline char *realpath(const char *path, char *resolved_path)
return resolved_path;
}
-/* ??? Mingw appears to export _lock_file and _unlock_file as the functions
- * with which to lock a stdio handle. But something is wrong in the markup,
- * either in the header or the library, such that we get undefined references
- * to "_imp___lock_file" etc when linking. Since we seem to have no other
- * alternative, and the usage within the logging functions isn't critical,
- * ignore FILE locking.
+/*
+ * Older versions of MinGW do not import _lock_file and _unlock_file properly.
+ * This was fixed for v6.0.0 with commit b48e3ac8969d.
*/
-
static inline void qemu_flockfile(FILE *f)
{
+#ifdef HAVE__LOCK_FILE
+ _lock_file(f);
+#endif
}
static inline void qemu_funlockfile(FILE *f)
{
+#ifdef HAVE__LOCK_FILE
+ _unlock_file(f);
+#endif
}
-/* We wrap all the sockets functions so that we can
- * set errno based on WSAGetLastError()
+/* Helper for WSAEventSelect, to report errors */
+bool qemu_socket_select(int sockfd, WSAEVENT hEventObject,
+ long lNetworkEvents, Error **errp);
+
+bool qemu_socket_unselect(int sockfd, Error **errp);
+
+/* We wrap all the sockets functions so that we can set errno based on
+ * WSAGetLastError(), and use file-descriptors instead of SOCKET.
+ */
+
+/*
+ * qemu_close_socket_osfhandle:
+ * @fd: a file descriptor associated with a SOCKET
+ *
+ * Close only the C run-time file descriptor, leaving the SOCKET open.
+ *
+ * Returns zero on success. On error, -1 is returned, and errno is set to
+ * indicate the error.
*/
+int qemu_close_socket_osfhandle(int fd);
+
+#undef close
+#define close qemu_close_wrap
+int qemu_close_wrap(int fd);
#undef connect
#define connect qemu_connect_wrap
@@ -154,10 +225,6 @@ int qemu_shutdown_wrap(int sockfd, int how);
#define ioctlsocket qemu_ioctlsocket_wrap
int qemu_ioctlsocket_wrap(int fd, int req, void *val);
-#undef closesocket
-#define closesocket qemu_closesocket_wrap
-int qemu_closesocket_wrap(int fd);
-
#undef getsockopt
#define getsockopt qemu_getsockopt_wrap
int qemu_getsockopt_wrap(int sockfd, int level, int optname,
@@ -196,4 +263,15 @@ ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags);
ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags,
struct sockaddr *addr, socklen_t *addrlen);
+EXCEPTION_DISPOSITION
+win32_close_exception_handler(struct _EXCEPTION_RECORD*, void*,
+ struct _CONTEXT*, void*);
+
+void *qemu_win32_map_alloc(size_t size, HANDLE *h, Error **errp);
+void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h
index 70aa40aa72..b5d5fd3463 100644
--- a/include/sysemu/qtest.h
+++ b/include/sysemu/qtest.h
@@ -14,7 +14,7 @@
#ifndef QTEST_H
#define QTEST_H
-#include "qemu-common.h"
+#include "chardev/char.h"
extern bool qtest_allowed;
@@ -23,17 +23,19 @@ static inline bool qtest_enabled(void)
return qtest_allowed;
}
+#ifndef CONFIG_USER_ONLY
+void qtest_send_prefix(CharBackend *chr);
+void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...);
+void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words));
bool qtest_driver(void);
-void qtest_init(const char *qtest_chrdev, const char *qtest_log, Error **errp);
+void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp);
-static inline int qtest_available(void)
-{
-#ifdef CONFIG_POSIX
- return 1;
-#else
- return 0;
+void qtest_server_set_send_handler(void (*send)(void *, const char *),
+ void *opaque);
+void qtest_server_inproc_recv(void *opaque, const char *buf);
+
+int64_t qtest_get_virtual_clock(void);
#endif
-}
#endif
diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h
index 3a7c58e423..f229b2109c 100644
--- a/include/sysemu/replay.h
+++ b/include/sysemu/replay.h
@@ -1,8 +1,5 @@
-#ifndef REPLAY_H
-#define REPLAY_H
-
/*
- * replay.h
+ * QEMU replay (system interface)
*
* Copyright (c) 2010-2015 Institute for System Programming
* of the Russian Academy of Sciences.
@@ -11,10 +8,18 @@
* See the COPYING file in the top-level directory.
*
*/
+#ifndef SYSEMU_REPLAY_H
+#define SYSEMU_REPLAY_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include this header from user emulation
+#endif
-#include "sysemu.h"
+#include "exec/replay-core.h"
#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-run-state.h"
#include "qapi/qapi-types-ui.h"
+#include "block/aio.h"
/* replay clock kinds */
enum ReplayClockKind {
@@ -43,8 +48,6 @@ typedef enum ReplayCheckpoint ReplayCheckpoint;
typedef struct ReplayNetState ReplayNetState;
-extern ReplayMode replay_mode;
-
/* Name of the initial VM snapshot */
extern char *replay_snapshot;
@@ -61,41 +64,19 @@ extern char *replay_snapshot;
void replay_mutex_lock(void);
void replay_mutex_unlock(void);
-/* Replay process control functions */
-
-/*! Enables recording or saving event log with specified parameters */
-void replay_configure(struct QemuOpts *opts);
-/*! Initializes timers used for snapshotting and enables events recording */
-void replay_start(void);
-/*! Closes replay log file and frees other resources. */
-void replay_finish(void);
-/*! Adds replay blocker with the specified error description */
-void replay_add_blocker(Error *reason);
-
/* Processing the instructions */
/*! Returns number of executed instructions. */
-uint64_t replay_get_current_step(void);
+uint64_t replay_get_current_icount(void);
/*! Returns number of instructions to execute in replay mode. */
int replay_get_instructions(void);
/*! Updates instructions counter in replay mode. */
void replay_account_executed_instructions(void);
-/* Interrupts and exceptions */
-
-/*! Called by exception handler to write or read
- exception processing events. */
-bool replay_exception(void);
-/*! Used to determine that exception is pending.
- Does not proceed to the next event in the log. */
-bool replay_has_exception(void);
-/*! Called by interrupt handlers to write or read
- interrupt processing events.
- \return true if interrupt should be processed */
-bool replay_interrupt(void);
-/*! Tries to read interrupt event from the file.
- Returns true, when interrupt request is pending */
-bool replay_has_interrupt(void);
+/**
+ * replay_can_wait: check if we should pause for wait-io
+ */
+bool replay_can_wait(void);
/* Processing clocks and other time sources */
@@ -103,18 +84,22 @@ bool replay_has_interrupt(void);
int64_t replay_save_clock(ReplayClockKind kind, int64_t clock,
int64_t raw_icount);
/*! Read the specified clock from the log or return cached data */
-int64_t replay_read_clock(ReplayClockKind kind);
+int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount);
/*! Saves or reads the clock depending on the current replay mode. */
#define REPLAY_CLOCK(clock, value) \
- (replay_mode == REPLAY_MODE_PLAY ? replay_read_clock((clock)) \
+ !icount_enabled() ? (value) : \
+ (replay_mode == REPLAY_MODE_PLAY \
+ ? replay_read_clock((clock), icount_get_raw()) \
: replay_mode == REPLAY_MODE_RECORD \
- ? replay_save_clock((clock), (value), cpu_get_icount_raw()) \
- : (value))
+ ? replay_save_clock((clock), (value), icount_get_raw()) \
+ : (value))
#define REPLAY_CLOCK_LOCKED(clock, value) \
- (replay_mode == REPLAY_MODE_PLAY ? replay_read_clock((clock)) \
+ !icount_enabled() ? (value) : \
+ (replay_mode == REPLAY_MODE_PLAY \
+ ? replay_read_clock((clock), icount_get_raw_locked()) \
: replay_mode == REPLAY_MODE_RECORD \
- ? replay_save_clock((clock), (value), cpu_get_icount_raw_locked()) \
- : (value))
+ ? replay_save_clock((clock), (value), icount_get_raw_locked()) \
+ : (value))
/* Events */
@@ -126,9 +111,14 @@ void replay_shutdown_request(ShutdownCause cause);
Returns 0 in PLAY mode if checkpoint was not found.
Returns 1 in all other cases. */
bool replay_checkpoint(ReplayCheckpoint checkpoint);
-/*! Used to determine that checkpoint is pending.
+/*! Used to determine that checkpoint or async event is pending.
Does not proceed to the next event in the log. */
-bool replay_has_checkpoint(void);
+bool replay_has_event(void);
+/*
+ * Processes the async events added to the queue (while recording)
+ * or reads the events from the file (while replaying).
+ */
+void replay_async_events(void);
/* Asynchronous events queue */
@@ -138,8 +128,13 @@ void replay_disable_events(void);
void replay_enable_events(void);
/*! Returns true when saving events is enabled */
bool replay_events_enabled(void);
+/* Flushes events queue */
+void replay_flush_events(void);
/*! Adds bottom half event to the queue */
void replay_bh_schedule_event(QEMUBH *bh);
+/* Adds oneshot bottom half event to the queue */
+void replay_bh_schedule_oneshot_event(AioContext *ctx,
+ QEMUBHFunc *cb, void *opaque);
/*! Adds input event to the queue */
void replay_input_event(QemuConsole *src, InputEvent *evt);
/*! Adds input sync event to the queue */
@@ -154,7 +149,7 @@ uint64_t blkreplay_next_id(void);
/*! Registers char driver to save it's events */
void replay_register_char_driver(struct Chardev *chr);
/*! Saves write to char device event to the log */
-void replay_chr_be_write(struct Chardev *s, uint8_t *buf, int len);
+void replay_chr_be_write(struct Chardev *s, const uint8_t *buf, int len);
/*! Writes char write return value to the replay log. */
void replay_char_write_event_save(int res, int offset);
/*! Reads char write return value from the replay log. */
@@ -179,9 +174,9 @@ void replay_net_packet_event(ReplayNetState *rns, unsigned flags,
/* Audio */
/*! Saves/restores number of played samples of audio out operation. */
-void replay_audio_out(int *played);
+void replay_audio_out(size_t *played);
/*! Saves/restores recorded samples of audio in operation. */
-void replay_audio_in(int *recorded, void *samples, int *wpos, int size);
+void replay_audio_in(size_t *recorded, void *samples, size_t *wpos, size_t size);
/* VM state operations */
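
[Note: a sketch of how a call site uses the reworked REPLAY_CLOCK macro above. The clock kind and the value expression are placeholders for whatever the caller normally reads; REPLAY_CLOCK_HOST and get_clock_realtime() are assumed to exist elsewhere in the tree.]

    /* Record the host-derived value, or replay the previously logged one. */
    int64_t ns = REPLAY_CLOCK(REPLAY_CLOCK_HOST, get_clock_realtime());
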
diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h
index 0b0d6d7598..ae436044a9 100644
--- a/include/sysemu/reset.h
+++ b/include/sysemu/reset.h
@@ -1,10 +1,126 @@
+/*
+ * Reset handlers.
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2016 Red Hat, Inc.
+ * Copyright (c) 2024 Linaro, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
#ifndef QEMU_SYSEMU_RESET_H
#define QEMU_SYSEMU_RESET_H
+#include "qapi/qapi-events-run-state.h"
+
typedef void QEMUResetHandler(void *opaque);
+/**
+ * qemu_register_resettable: Register an object to be reset
+ * @obj: object to be reset: it must implement the Resettable interface
+ *
+ * Register @obj on the list of objects which will be reset when the
+ * simulation is reset. These objects will be reset in the order
+ * they were added, using the three-phase Resettable protocol,
+ * so first all objects go through the enter phase, then all objects
+ * go through the hold phase, and then finally all go through the
+ * exit phase.
+ *
+ * It is not permitted to register or unregister reset functions or
+ * resettable objects from within any of the reset phase methods of @obj.
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_register_resettable(Object *obj);
+
+/**
+ * qemu_unregister_resettable: Unregister an object to be reset
+ * @obj: object to unregister
+ *
+ * Remove @obj from the list of objects which are reset when the
+ * simulation is reset. It must have been previously added to
+ * the list via qemu_register_resettable().
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_unregister_resettable(Object *obj);
+
+/**
+ * qemu_register_reset: Register a callback for system reset
+ * @func: function to call
+ * @opaque: opaque data to pass to @func
+ *
+ * Register @func on the list of functions which are called when the
+ * entire system is reset. Functions registered with this API and
+ * Resettable objects registered with qemu_register_resettable() are
+ * handled together, in the order in which they were registered.
+ * Functions registered with this API are called in the 'hold' phase
+ * of the 3-phase reset.
+ *
+ * In general this function should not be used in new code where possible;
+ * for instance, device model reset is better accomplished using the
+ * methods on DeviceState.
+ *
+ * It is not permitted to register or unregister reset functions or
+ * resettable objects from within the @func callback.
+ *
+ * We assume that the caller holds the BQL.
+ */
void qemu_register_reset(QEMUResetHandler *func, void *opaque);
+
+/**
+ * qemu_register_reset_nosnapshotload: Register a callback for system reset
+ * @func: function to call
+ * @opaque: opaque data to pass to @func
+ *
+ * This is the same as qemu_register_reset(), except that @func is
+ * not called if the reason that the system is being reset is to
+ * put it into a clean state prior to loading a snapshot (i.e. for
+ * SHUTDOWN_CAUSE_SNAPSHOT_LOAD).
+ */
+void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque);
+
+/**
+ * qemu_unregister_reset: Unregister a system reset callback
+ * @func: function registered with qemu_register_reset()
+ * @opaque: the same opaque data that was passed to qemu_register_reset()
+ *
+ * Undo the effects of a qemu_register_reset(). The @func and @opaque
+ * must both match the arguments originally used with qemu_register_reset().
+ *
+ * We assume that the caller holds the BQL.
+ */
void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
-void qemu_devices_reset(void);
+
+/**
+ * qemu_devices_reset: Perform a complete system reset
+ * @reason: reason for the reset
+ *
+ * This function performs the low-level work needed to do a complete reset
+ * of the system (calling all the callbacks registered with
+ * qemu_register_reset() and resetting all the Resettable objects registered
+ * with qemu_register_resettable()). It should only be called by the code in a
+ * MachineClass reset method.
+ *
+ * If you want to trigger a system reset from, for instance, a device
+ * model, don't use this function. Use qemu_system_reset_request().
+ */
+void qemu_devices_reset(ShutdownCause reason);
#endif
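
[Note: a minimal registration sketch for the legacy callback API documented above; MyDeviceState, my_device_reset and the register contents are hypothetical.]

    static void my_device_reset(void *opaque)
    {
        MyDeviceState *s = opaque;      /* hypothetical state struct */
        /* Runs in the 'hold' phase of the three-phase reset. */
        s->ctrl_reg = 0;
    }

    /* At realize/init time, with the BQL held: */
    qemu_register_reset(my_device_reset, s);

    /* And symmetrically at teardown: */
    qemu_unregister_reset(my_device_reset, s);
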
diff --git a/include/sysemu/rng-random.h b/include/sysemu/rng-random.h
index 38186fe83d..0fdc6c6974 100644
--- a/include/sysemu/rng-random.h
+++ b/include/sysemu/rng-random.h
@@ -15,8 +15,7 @@
#include "qom/object.h"
#define TYPE_RNG_RANDOM "rng-random"
-#define RNG_RANDOM(obj) OBJECT_CHECK(RngRandom, (obj), TYPE_RNG_RANDOM)
+OBJECT_DECLARE_SIMPLE_TYPE(RngRandom, RNG_RANDOM)
-typedef struct RngRandom RngRandom;
#endif
diff --git a/include/sysemu/rng.h b/include/sysemu/rng.h
index 27b37da05d..e383f87d20 100644
--- a/include/sysemu/rng.h
+++ b/include/sysemu/rng.h
@@ -13,20 +13,16 @@
#ifndef QEMU_RNG_H
#define QEMU_RNG_H
+#include "qemu/queue.h"
#include "qom/object.h"
-#include "qemu-common.h"
#define TYPE_RNG_BACKEND "rng-backend"
-#define RNG_BACKEND(obj) \
- OBJECT_CHECK(RngBackend, (obj), TYPE_RNG_BACKEND)
-#define RNG_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(RngBackendClass, (obj), TYPE_RNG_BACKEND)
-#define RNG_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(RngBackendClass, (klass), TYPE_RNG_BACKEND)
+OBJECT_DECLARE_TYPE(RngBackend, RngBackendClass,
+ RNG_BACKEND)
+
+#define TYPE_RNG_BUILTIN "rng-builtin"
typedef struct RngRequest RngRequest;
-typedef struct RngBackendClass RngBackendClass;
-typedef struct RngBackend RngBackend;
typedef void (EntropyReceiveFunc)(void *opaque,
const void *data,
diff --git a/include/sysemu/rtc.h b/include/sysemu/rtc.h
new file mode 100644
index 0000000000..0fc8ad6fdf
--- /dev/null
+++ b/include/sysemu/rtc.h
@@ -0,0 +1,58 @@
+/*
+ * RTC configuration and clock read
+ *
+ * Copyright (c) 2003-2021 QEMU contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef SYSEMU_RTC_H
+#define SYSEMU_RTC_H
+
+/**
+ * qemu_get_timedate: Get the current RTC time
+ * @tm: struct tm to fill in with RTC time
+ * @offset: offset in seconds to adjust the RTC time by before
+ * converting to struct tm format.
+ *
+ * This function fills in @tm with the current RTC time, as adjusted
+ * by @offset (for example, if @offset is 3600 then the returned time/date
+ * will be one hour further ahead than the current RTC time).
+ *
+ * The usual use is by RTC device models, which should call this function
+ * to find the time/date value that they should return to the guest
+ * when it reads the RTC registers.
+ *
+ * The behaviour of the clock whose value this function returns will
+ * depend on the -rtc command line option passed by the user.
+ */
+void qemu_get_timedate(struct tm *tm, time_t offset);
+
+/**
+ * qemu_timedate_diff: Return difference between a struct tm and the RTC
+ * @tm: struct tm containing the date/time to compare against
+ *
+ * Returns the difference in seconds between the RTC clock time
+ * and the date/time specified in @tm. For example, if @tm specifies
+ * a timestamp one hour further ahead than the current RTC time
+ * then this function will return 3600.
+ */
+time_t qemu_timedate_diff(struct tm *tm);
+
+#endif
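
A hypothetical sketch of how an RTC device model might use the two helpers above; the device struct and its offset field are invented for illustration.

    #include "qemu/osdep.h"
    #include "sysemu/rtc.h"

    typedef struct ToyRTCState {
        time_t offset;    /* guest-programmed offset from the base RTC */
    } ToyRTCState;

    static void toy_rtc_read_time(ToyRTCState *s, struct tm *tm)
    {
        /* time/date to present to the guest, shifted by the guest's offset */
        qemu_get_timedate(tm, s->offset);
    }

    static void toy_rtc_write_time(ToyRTCState *s, struct tm *tm)
    {
        /* remember how far the guest-set time differs from the RTC clock */
        s->offset = qemu_timedate_diff(tm);
    }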
diff --git a/include/sysemu/runstate-action.h b/include/sysemu/runstate-action.h
new file mode 100644
index 0000000000..db4e3099ae
--- /dev/null
+++ b/include/sysemu/runstate-action.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2020 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef RUNSTATE_ACTION_H
+#define RUNSTATE_ACTION_H
+
+#include "qapi/qapi-commands-run-state.h"
+
+/* in system/runstate-action.c */
+extern RebootAction reboot_action;
+extern ShutdownAction shutdown_action;
+extern PanicAction panic_action;
+
+#endif /* RUNSTATE_ACTION_H */
diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h
new file mode 100644
index 0000000000..0117d243c4
--- /dev/null
+++ b/include/sysemu/runstate.h
@@ -0,0 +1,110 @@
+#ifndef SYSEMU_RUNSTATE_H
+#define SYSEMU_RUNSTATE_H
+
+#include "qapi/qapi-types-run-state.h"
+#include "qemu/notify.h"
+
+bool runstate_check(RunState state);
+void runstate_set(RunState new_state);
+RunState runstate_get(void);
+bool runstate_is_running(void);
+bool runstate_needs_reset(void);
+
+typedef void VMChangeStateHandler(void *opaque, bool running, RunState state);
+
+VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
+ void *opaque);
+VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
+ VMChangeStateHandler *cb, void *opaque, int priority);
+VMChangeStateEntry *
+qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb,
+ VMChangeStateHandler *prepare_cb,
+ void *opaque, int priority);
+VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev,
+ VMChangeStateHandler *cb,
+ void *opaque);
+VMChangeStateEntry *qdev_add_vm_change_state_handler_full(
+ DeviceState *dev, VMChangeStateHandler *cb,
+ VMChangeStateHandler *prepare_cb, void *opaque);
+void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);
+/**
+ * vm_state_notify: Notify the state of the VM
+ *
+ * @running: whether the VM is running or not.
+ * @state: the #RunState of the VM.
+ */
+void vm_state_notify(bool running, RunState state);
+
+static inline bool shutdown_caused_by_guest(ShutdownCause cause)
+{
+ return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN;
+}
+
+/*
+ * In a "live" state, the vcpu clock is ticking, and the runstate notifiers
+ * think we are running.
+ */
+static inline bool runstate_is_live(RunState state)
+{
+ return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED;
+}
+
+void vm_start(void);
+
+/**
+ * vm_prepare_start: Prepare for starting/resuming the VM
+ *
+ * @step_pending: whether any of the CPUs is about to be single-stepped by gdb
+ */
+int vm_prepare_start(bool step_pending);
+
+/**
+ * vm_resume: If @state is a live state, start the vm and set the state,
+ * else just set the state.
+ *
+ * @state: the state to restore
+ */
+void vm_resume(RunState state);
+
+int vm_stop(RunState state);
+int vm_stop_force_state(RunState state);
+int vm_shutdown(void);
+void vm_set_suspended(bool suspended);
+bool vm_get_suspended(void);
+
+typedef enum WakeupReason {
+ /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */
+ QEMU_WAKEUP_REASON_NONE = 0,
+ QEMU_WAKEUP_REASON_RTC,
+ QEMU_WAKEUP_REASON_PMTIMER,
+ QEMU_WAKEUP_REASON_OTHER,
+} WakeupReason;
+
+void qemu_system_reset_request(ShutdownCause reason);
+void qemu_system_suspend_request(void);
+void qemu_register_suspend_notifier(Notifier *notifier);
+bool qemu_wakeup_suspend_enabled(void);
+void qemu_system_wakeup_request(WakeupReason reason, Error **errp);
+void qemu_system_wakeup_enable(WakeupReason reason, bool enabled);
+void qemu_register_wakeup_notifier(Notifier *notifier);
+void qemu_register_wakeup_support(void);
+void qemu_system_shutdown_request_with_code(ShutdownCause reason,
+ int exit_code);
+void qemu_system_shutdown_request(ShutdownCause reason);
+void qemu_system_powerdown_request(void);
+void qemu_register_powerdown_notifier(Notifier *notifier);
+void qemu_register_shutdown_notifier(Notifier *notifier);
+void qemu_system_debug_request(void);
+void qemu_system_vmstop_request(RunState reason);
+void qemu_system_vmstop_request_prepare(void);
+bool qemu_vmstop_requested(RunState *r);
+ShutdownCause qemu_shutdown_requested_get(void);
+ShutdownCause qemu_reset_requested_get(void);
+void qemu_system_killed(int signal, pid_t pid);
+void qemu_system_reset(ShutdownCause reason);
+void qemu_system_guest_panicked(GuestPanicInformation *info);
+void qemu_system_guest_crashloaded(GuestPanicInformation *info);
+bool qemu_system_dump_in_progress(void);
+
+#endif
+
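
A hypothetical sketch of registering a VM change state handler from a device model, assuming an invented callback and opaque type; it is not part of this patch.

    #include "qemu/osdep.h"
    #include "sysemu/runstate.h"

    static void foo_vm_state_change(void *opaque, bool running, RunState state)
    {
        if (running) {
            /* resume timers, kick queues, ... */
        } else if (state == RUN_STATE_SHUTDOWN) {
            /* quiesce the device */
        }
    }

    static void foo_register(void *opaque)
    {
        VMChangeStateEntry *e;

        e = qemu_add_vm_change_state_handler(foo_vm_state_change, opaque);
        (void)e;    /* keep it if the handler ever needs to be removed */
    }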
diff --git a/include/sysemu/sev.h b/include/sysemu/sev.h
deleted file mode 100644
index 98c1ec8d38..0000000000
--- a/include/sysemu/sev.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * QEMU Secure Encrypted Virutualization (SEV) support
- *
- * Copyright: Advanced Micro Devices, 2016-2018
- *
- * Authors:
- * Brijesh Singh <brijesh.singh@amd.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_SEV_H
-#define QEMU_SEV_H
-
-#include "sysemu/kvm.h"
-
-void *sev_guest_init(const char *id);
-int sev_encrypt_data(void *handle, uint8_t *ptr, uint64_t len);
-#endif
diff --git a/include/sysemu/stats.h b/include/sysemu/stats.h
new file mode 100644
index 0000000000..42c236c795
--- /dev/null
+++ b/include/sysemu/stats.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef STATS_H
+#define STATS_H
+
+#include "qapi/qapi-types-stats.h"
+
+typedef void StatRetrieveFunc(StatsResultList **result, StatsTarget target,
+ strList *names, strList *targets, Error **errp);
+typedef void SchemaRetrieveFunc(StatsSchemaList **result, Error **errp);
+
+/*
+ * Register callbacks for the QMP query-stats command.
+ *
+ * @provider: stats provider checked against QMP command arguments
+ * @stats_fn: routine to query stats
+ * @schemas_fn: routine to query stat schemas
+ */
+void add_stats_callbacks(StatsProvider provider,
+ StatRetrieveFunc *stats_fn,
+ SchemaRetrieveFunc *schemas_fn);
+
+/*
+ * Helper routines for adding stats entries to the results lists.
+ */
+void add_stats_entry(StatsResultList **, StatsProvider, const char *id,
+ StatsList *stats_list);
+void add_stats_schema(StatsSchemaList **, StatsProvider, StatsTarget,
+ StatsSchemaValueList *);
+
+/*
+ * True if a string matches the filter passed to the stats_fn callback,
+ * false otherwise.
+ *
+ * Note that an empty list means no filtering, i.e. all strings will
+ * return true.
+ */
+bool apply_str_list_filter(const char *string, strList *list);
+
+#endif /* STATS_H */
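
An illustrative sketch (not part of this patch) of a stats provider wiring itself up through the callbacks above; the function names are placeholders and the body only shows the filtering pattern.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "sysemu/stats.h"

    static void foo_query_stats(StatsResultList **result, StatsTarget target,
                                strList *names, strList *targets, Error **errp)
    {
        if (!apply_str_list_filter("foo-counter", names)) {
            return;    /* the caller did not ask for this statistic */
        }
        /* build a StatsList and attach it with add_stats_entry() */
    }

    static void foo_query_schemas(StatsSchemaList **result, Error **errp)
    {
        /* describe the statistics this provider can report */
    }

    static void foo_stats_init(StatsProvider provider)
    {
        add_stats_callbacks(provider, foo_query_stats, foo_query_schemas);
    }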
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 85877b7e43..5b4397eeb8 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -2,86 +2,28 @@
#define SYSEMU_H
/* Misc. things related to the system emulator. */
-#include "qapi/qapi-types-run-state.h"
-#include "qemu/queue.h"
#include "qemu/timer.h"
#include "qemu/notify.h"
-#include "qemu/main-loop.h"
-#include "qemu/bitmap.h"
#include "qemu/uuid.h"
-#include "qom/object.h"
/* vl.c */
-extern const char *bios_name;
+extern int only_migratable;
extern const char *qemu_name;
extern QemuUUID qemu_uuid;
extern bool qemu_uuid_set;
-bool runstate_check(RunState state);
-void runstate_set(RunState new_state);
-int runstate_is_running(void);
-bool runstate_needs_reset(void);
-bool runstate_store(char *str, size_t size);
-typedef struct vm_change_state_entry VMChangeStateEntry;
-typedef void VMChangeStateHandler(void *opaque, int running, RunState state);
-
-VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
- void *opaque);
-void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);
-void vm_state_notify(int running, RunState state);
-
-static inline bool shutdown_caused_by_guest(ShutdownCause cause)
-{
- return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN;
-}
-
-void vm_start(void);
-int vm_prepare_start(void);
-int vm_stop(RunState state);
-int vm_stop_force_state(RunState state);
-int vm_shutdown(void);
-
-typedef enum WakeupReason {
- /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */
- QEMU_WAKEUP_REASON_NONE = 0,
- QEMU_WAKEUP_REASON_RTC,
- QEMU_WAKEUP_REASON_PMTIMER,
- QEMU_WAKEUP_REASON_OTHER,
-} WakeupReason;
-
-void qemu_exit_preconfig_request(void);
-void qemu_system_reset_request(ShutdownCause reason);
-void qemu_system_suspend_request(void);
-void qemu_register_suspend_notifier(Notifier *notifier);
-bool qemu_wakeup_suspend_enabled(void);
-void qemu_system_wakeup_request(WakeupReason reason, Error **errp);
-void qemu_system_wakeup_enable(WakeupReason reason, bool enabled);
-void qemu_register_wakeup_notifier(Notifier *notifier);
-void qemu_register_wakeup_support(void);
-void qemu_system_shutdown_request(ShutdownCause reason);
-void qemu_system_powerdown_request(void);
-void qemu_register_powerdown_notifier(Notifier *notifier);
-void qemu_register_shutdown_notifier(Notifier *notifier);
-void qemu_system_debug_request(void);
-void qemu_system_vmstop_request(RunState reason);
-void qemu_system_vmstop_request_prepare(void);
-bool qemu_vmstop_requested(RunState *r);
-ShutdownCause qemu_shutdown_requested_get(void);
-ShutdownCause qemu_reset_requested_get(void);
-void qemu_system_killed(int signal, pid_t pid);
-void qemu_system_reset(ShutdownCause reason);
-void qemu_system_guest_panicked(GuestPanicInformation *info);
+const char *qemu_get_vm_name(void);
void qemu_add_exit_notifier(Notifier *notify);
void qemu_remove_exit_notifier(Notifier *notify);
-extern bool machine_init_done;
-
void qemu_add_machine_init_done_notifier(Notifier *notify);
void qemu_remove_machine_init_done_notifier(Notifier *notify);
-void qemu_announce_self(void);
+void configure_rtc(QemuOpts *opts);
+
+void qemu_init_subsystems(void);
extern int autostart;
@@ -92,40 +34,19 @@ typedef enum {
} VGAInterfaceType;
extern int vga_interface_type;
-#define xenfb_enabled (vga_interface_type == VGA_XENFB)
+extern bool vga_interface_created;
extern int graphic_width;
extern int graphic_height;
extern int graphic_depth;
extern int display_opengl;
extern const char *keyboard_layout;
-extern int win2k_install_hack;
-extern int alt_grab;
-extern int ctrl_grab;
-extern int no_frame;
-extern int smp_cpus;
-extern unsigned int max_cpus;
-extern int cursor_hide;
extern int graphic_rotate;
-extern int no_quit;
-extern int no_shutdown;
extern int old_param;
-extern int boot_menu;
-extern bool boot_strict;
extern uint8_t *boot_splash_filedata;
-extern size_t boot_splash_filedata_size;
extern bool enable_mlock;
extern bool enable_cpu_pm;
extern QEMUClockType rtc_clock;
-extern const char *mem_path;
-extern int mem_prealloc;
-
-#define MAX_NODES 128
-#define NUMA_NODE_UNASSIGNED MAX_NODES
-#define NUMA_DISTANCE_MIN 10
-#define NUMA_DISTANCE_DEFAULT 20
-#define NUMA_DISTANCE_MAX 254
-#define NUMA_DISTANCE_UNREACHABLE 255
#define MAX_OPTION_ROMS 16
typedef struct QEMUOptionRom {
@@ -139,20 +60,10 @@ extern int nb_option_roms;
extern const char *prom_envs[MAX_PROM_ENVS];
extern unsigned int nb_prom_envs;
-/* generic hotplug */
-void hmp_drive_add(Monitor *mon, const QDict *qdict);
-
-/* pcie aer error injection */
-void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict);
-
/* serial ports */
/* Return the Chardev for serial port i, or NULL if none */
Chardev *serial_hd(int i);
-/* return the number of serial ports defined by the user. serial_hd(i)
- * will always return NULL for any i which is greater than or equal to this.
- */
-int serial_max_hds(void);
/* parallel ports */
@@ -160,8 +71,6 @@ int serial_max_hds(void);
extern Chardev *parallel_hds[MAX_PARALLEL_PORTS];
-void hmp_info_usb(Monitor *mon, const QDict *qdict);
-
void add_boot_device_path(int32_t bootindex, DeviceState *dev,
const char *suffix);
char *get_boot_devices_list(size_t *size);
@@ -171,9 +80,13 @@ void check_boot_index(int32_t bootindex, Error **errp);
void del_boot_device_path(DeviceState *dev, const char *suffix);
void device_add_bootindex_property(Object *obj, int32_t *bootindex,
const char *name, const char *suffix,
- DeviceState *dev, Error **errp);
+ DeviceState *dev);
void restore_boot_order(void *opaque);
void validate_bootdevices(const char *devices, Error **errp);
+void add_boot_device_lchs(DeviceState *dev, const char *suffix,
+ uint32_t lcyls, uint32_t lheads, uint32_t lsecs);
+void del_boot_device_lchs(DeviceState *dev, const char *suffix);
+char *get_boot_devices_lchs_list(size_t *size);
/* handler to set the boot_device order for a specific type of MachineClass */
typedef void QEMUBootSetHandler(void *opaque, const char *boot_order,
@@ -181,10 +94,12 @@ typedef void QEMUBootSetHandler(void *opaque, const char *boot_order,
void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque);
void qemu_boot_set(const char *boot_order, Error **errp);
-QemuOpts *qemu_get_machine_opts(void);
-
bool defaults_enabled(void);
+void qemu_init(int argc, char **argv);
+int qemu_main_loop(void);
+void qemu_cleanup(int);
+
extern QemuOptsList qemu_legacy_drive_opts;
extern QemuOptsList qemu_common_drive_opts;
extern QemuOptsList qemu_drive_opts;
@@ -195,6 +110,6 @@ extern QemuOptsList qemu_netdev_opts;
extern QemuOptsList qemu_nic_opts;
extern QemuOptsList qemu_net_opts;
extern QemuOptsList qemu_global_opts;
-extern QemuOptsList qemu_mon_opts;
+extern QemuOptsList qemu_semihosting_config_opts;
#endif
diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h
new file mode 100644
index 0000000000..5e2ca9aab3
--- /dev/null
+++ b/include/sysemu/tcg.h
@@ -0,0 +1,20 @@
+/*
+ * QEMU TCG support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/* header to be included in non-TCG-specific code */
+
+#ifndef SYSEMU_TCG_H
+#define SYSEMU_TCG_H
+
+#ifdef CONFIG_TCG
+extern bool tcg_allowed;
+#define tcg_enabled() (tcg_allowed)
+#else
+#define tcg_enabled() 0
+#endif
+
+#endif
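
A small illustrative sketch: because tcg_enabled() expands to 0 when CONFIG_TCG is not set, code guarded by it is compiled out of non-TCG builds. The function name is hypothetical.

    #include "qemu/osdep.h"
    #include "sysemu/tcg.h"

    static void foo_accel_setup(void)
    {
        if (tcg_enabled()) {
            /* TCG-only tuning goes here; dead code in non-TCG builds */
        }
    }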
diff --git a/include/sysemu/tpm.h b/include/sysemu/tpm.h
index 5b541a71c8..1ee568b3b6 100644
--- a/include/sysemu/tpm.h
+++ b/include/sysemu/tpm.h
@@ -15,8 +15,10 @@
#include "qapi/qapi-types-tpm.h"
#include "qom/object.h"
-int tpm_config_parse(QemuOptsList *opts_list, const char *optarg);
-void tpm_init(void);
+#ifdef CONFIG_TPM
+
+int tpm_config_parse(QemuOptsList *opts_list, const char *optstr);
+int tpm_init(void);
void tpm_cleanup(void);
typedef enum TPMVersion {
@@ -26,30 +28,38 @@ typedef enum TPMVersion {
} TPMVersion;
#define TYPE_TPM_IF "tpm-if"
-#define TPM_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(TPMIfClass, (klass), TYPE_TPM_IF)
-#define TPM_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(TPMIfClass, (obj), TYPE_TPM_IF)
+typedef struct TPMIfClass TPMIfClass;
+DECLARE_CLASS_CHECKERS(TPMIfClass, TPM_IF,
+ TYPE_TPM_IF)
#define TPM_IF(obj) \
INTERFACE_CHECK(TPMIf, (obj), TYPE_TPM_IF)
typedef struct TPMIf TPMIf;
-typedef struct TPMIfClass {
+struct TPMIfClass {
InterfaceClass parent_class;
enum TpmModel model;
void (*request_completed)(TPMIf *obj, int ret);
enum TPMVersion (*get_version)(TPMIf *obj);
-} TPMIfClass;
+};
-#define TYPE_TPM_TIS "tpm-tis"
+#define TYPE_TPM_TIS_ISA "tpm-tis"
+#define TYPE_TPM_TIS_SYSBUS "tpm-tis-device"
#define TYPE_TPM_CRB "tpm-crb"
+#define TYPE_TPM_SPAPR "tpm-spapr"
+#define TYPE_TPM_TIS_I2C "tpm-tis-i2c"
-#define TPM_IS_TIS(chr) \
- object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS)
+#define TPM_IS_TIS_ISA(chr) \
+ object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_ISA)
+#define TPM_IS_TIS_SYSBUS(chr) \
+ object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_SYSBUS)
#define TPM_IS_CRB(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_TPM_CRB)
+#define TPM_IS_SPAPR(chr) \
+ object_dynamic_cast(OBJECT(chr), TYPE_TPM_SPAPR)
+#define TPM_IS_TIS_I2C(chr) \
+ object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_I2C)
/* returns NULL unless there is exactly one TPM device */
static inline TPMIf *tpm_find(void)
@@ -68,4 +78,17 @@ static inline TPMVersion tpm_get_version(TPMIf *ti)
return TPM_IF_GET_CLASS(ti)->get_version(ti);
}
+#else /* CONFIG_TPM */
+
+#define tpm_init() (0)
+#define tpm_cleanup()
+
+/* needed for an alignment check in non-tpm code */
+static inline Object *TPM_IS_CRB(Object *obj)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_TPM */
+
#endif /* QEMU_TPM_H */
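
An illustrative sketch of the lookup helpers above, assuming CONFIG_TPM is set; the caller is invented for illustration.

    #include "qemu/osdep.h"
    #include "sysemu/tpm.h"

    static bool foo_board_has_tpm2(void)
    {
        TPMIf *ti = tpm_find();    /* NULL unless exactly one TPM device */

        return ti && tpm_get_version(ti) == TPM_VERSION_2_0;
    }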
diff --git a/include/sysemu/tpm_backend.h b/include/sysemu/tpm_backend.h
index 14488820f6..7fabafefee 100644
--- a/include/sysemu/tpm_backend.h
+++ b/include/sysemu/tpm_backend.h
@@ -14,21 +14,16 @@
#define TPM_BACKEND_H
#include "qom/object.h"
-#include "qemu-common.h"
#include "qemu/option.h"
#include "sysemu/tpm.h"
#include "qapi/error.h"
+#ifdef CONFIG_TPM
+
#define TYPE_TPM_BACKEND "tpm-backend"
-#define TPM_BACKEND(obj) \
- OBJECT_CHECK(TPMBackend, (obj), TYPE_TPM_BACKEND)
-#define TPM_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(TPMBackendClass, (obj), TYPE_TPM_BACKEND)
-#define TPM_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(TPMBackendClass, (klass), TYPE_TPM_BACKEND)
+OBJECT_DECLARE_TYPE(TPMBackend, TPMBackendClass,
+ TPM_BACKEND)
-typedef struct TPMBackendClass TPMBackendClass;
-typedef struct TPMBackend TPMBackend;
typedef struct TPMBackendCmd {
uint8_t locty;
@@ -120,7 +115,7 @@ int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize);
/**
* tpm_backend_had_startup_error:
- * @s: the backend to query for a statup error
+ * @s: the backend to query for a startup error
*
* Check whether the backend had an error during startup. Returns
* false if no error occurred and the backend can be used, true
@@ -216,4 +211,6 @@ TPMInfo *tpm_backend_query_tpm(TPMBackend *s);
TPMBackend *qemu_find_tpm_be(const char *id);
-#endif
+#endif /* CONFIG_TPM */
+
+#endif /* TPM_BACKEND_H */
diff --git a/include/sysemu/tpm_util.h b/include/sysemu/tpm_util.h
new file mode 100644
index 0000000000..08f05172a7
--- /dev/null
+++ b/include/sysemu/tpm_util.h
@@ -0,0 +1,72 @@
+/*
+ * TPM utility functions
+ *
+ * Copyright (c) 2010 - 2015 IBM Corporation
+ * Authors:
+ * Stefan Berger <stefanb@us.ibm.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#ifndef SYSEMU_TPM_UTIL_H
+#define SYSEMU_TPM_UTIL_H
+
+#include "sysemu/tpm.h"
+#include "qemu/bswap.h"
+
+void tpm_util_write_fatal_error_response(uint8_t *out, uint32_t out_len);
+
+bool tpm_util_is_selftest(const uint8_t *in, uint32_t in_len);
+
+int tpm_util_test_tpmdev(int tpm_fd, TPMVersion *tpm_version);
+
+static inline uint16_t tpm_cmd_get_tag(const void *b)
+{
+ return lduw_be_p(b);
+}
+
+static inline void tpm_cmd_set_tag(void *b, uint16_t tag)
+{
+ stw_be_p(b, tag);
+}
+
+static inline uint32_t tpm_cmd_get_size(const void *b)
+{
+ return ldl_be_p(b + 2);
+}
+
+static inline void tpm_cmd_set_size(void *b, uint32_t size)
+{
+ stl_be_p(b + 2, size);
+}
+
+static inline uint32_t tpm_cmd_get_ordinal(const void *b)
+{
+ return ldl_be_p(b + 6);
+}
+
+static inline uint32_t tpm_cmd_get_errcode(const void *b)
+{
+ return ldl_be_p(b + 6);
+}
+
+static inline void tpm_cmd_set_error(void *b, uint32_t error)
+{
+ stl_be_p(b + 6, error);
+}
+
+void tpm_util_show_buffer(const unsigned char *buffer,
+ size_t buffer_size, const char *string);
+
+#endif /* SYSEMU_TPM_UTIL_H */
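
An illustrative sketch (not part of this patch) of decoding the fixed 10-byte TPM command header with the big-endian accessors above; the validation function is invented.

    #include "qemu/osdep.h"
    #include "sysemu/tpm_util.h"

    static bool foo_check_command(const uint8_t *buf, size_t len)
    {
        uint32_t size;

        if (len < 10) {
            return false;    /* shorter than tag + size + ordinal */
        }
        size = tpm_cmd_get_size(buf);

        /* the declared command size must fit inside the buffer we received */
        return size >= 10 && size <= len;
    }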
diff --git a/include/sysemu/vhost-user-backend.h b/include/sysemu/vhost-user-backend.h
new file mode 100644
index 0000000000..327b0b84f1
--- /dev/null
+++ b/include/sysemu/vhost-user-backend.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU vhost-user backend
+ *
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_VHOST_USER_BACKEND_H
+#define QEMU_VHOST_USER_BACKEND_H
+
+#include "qom/object.h"
+#include "exec/memory.h"
+#include "qemu/option.h"
+#include "qemu/bitmap.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "chardev/char-fe.h"
+#include "io/channel.h"
+
+#define TYPE_VHOST_USER_BACKEND "vhost-user-backend"
+OBJECT_DECLARE_SIMPLE_TYPE(VhostUserBackend,
+ VHOST_USER_BACKEND)
+
+
+
+struct VhostUserBackend {
+ /* private */
+ Object parent;
+
+ char *chr_name;
+ CharBackend chr;
+ VhostUserState vhost_user;
+ struct vhost_dev dev;
+ VirtIODevice *vdev;
+ bool started;
+ bool completed;
+};
+
+int vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
+ unsigned nvqs, Error **errp);
+void vhost_user_backend_start(VhostUserBackend *b);
+void vhost_user_backend_stop(VhostUserBackend *b);
+
+#endif
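
A hedged sketch of how a virtio device front end might drive this backend object; the device functions and the virtqueue count are invented, and error handling is reduced to the bare minimum.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "sysemu/vhost-user-backend.h"

    static int foo_device_realize(VhostUserBackend *vub, VirtIODevice *vdev,
                                  Error **errp)
    {
        /* connect to the chardev-backed vhost-user daemon, two virtqueues */
        return vhost_user_backend_dev_init(vub, vdev, 2, errp);
    }

    static void foo_device_set_running(VhostUserBackend *vub, bool running)
    {
        if (running) {
            vhost_user_backend_start(vub);
        } else {
            vhost_user_backend_stop(vub);
        }
    }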
diff --git a/include/sysemu/watchdog.h b/include/sysemu/watchdog.h
index a08d16380d..745c89b02b 100644
--- a/include/sysemu/watchdog.h
+++ b/include/sysemu/watchdog.h
@@ -25,21 +25,8 @@
#include "qemu/queue.h"
#include "qapi/qapi-types-run-state.h"
-struct WatchdogTimerModel {
- QLIST_ENTRY(WatchdogTimerModel) entry;
-
- /* Short name of the device - used to select it on the command line. */
- const char *wdt_name;
- /* Longer description (eg. manufacturer and full model number). */
- const char *wdt_description;
-};
-typedef struct WatchdogTimerModel WatchdogTimerModel;
-
/* in hw/watchdog.c */
-int select_watchdog(const char *p);
-int select_watchdog_action(const char *action);
WatchdogAction get_watchdog_action(void);
-void watchdog_add_model(WatchdogTimerModel *model);
void watchdog_perform_action(void);
#endif /* QEMU_WATCHDOG_H */
diff --git a/include/sysemu/whpx.h b/include/sysemu/whpx.h
index d200ee01d0..781ca5b2b6 100644
--- a/include/sysemu/whpx.h
+++ b/include/sysemu/whpx.h
@@ -10,30 +10,25 @@
*
*/
+/* header to be included in non-WHPX-specific code */
+
#ifndef QEMU_WHPX_H
#define QEMU_WHPX_H
-#include "qemu-common.h"
-
-int whpx_init_vcpu(CPUState *cpu);
-int whpx_vcpu_exec(CPUState *cpu);
-void whpx_destroy_vcpu(CPUState *cpu);
-void whpx_vcpu_kick(CPUState *cpu);
-
-
-void whpx_cpu_synchronize_state(CPUState *cpu);
-void whpx_cpu_synchronize_post_reset(CPUState *cpu);
-void whpx_cpu_synchronize_post_init(CPUState *cpu);
-void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu);
+#ifdef NEED_CPU_H
#ifdef CONFIG_WHPX
int whpx_enabled(void);
+bool whpx_apic_in_platform(void);
#else /* CONFIG_WHPX */
#define whpx_enabled() (0)
+#define whpx_apic_in_platform() (0)
#endif /* CONFIG_WHPX */
+#endif /* NEED_CPU_H */
+
#endif /* QEMU_WHPX_H */
diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
index a03e2f1878..10c2e3082a 100644
--- a/include/sysemu/xen-mapcache.h
+++ b/include/sysemu/xen-mapcache.h
@@ -9,9 +9,12 @@
#ifndef XEN_MAPCACHE_H
#define XEN_MAPCACHE_H
+#include "exec/cpu-common.h"
+#include "sysemu/xen.h"
+
typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset,
ram_addr_t size);
-#ifdef CONFIG_XEN
+#ifdef CONFIG_XEN_IS_POSSIBLE
void xen_map_cache_init(phys_offset_to_gaddr_t f,
void *opaque);
diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
new file mode 100644
index 0000000000..a9f591f26d
--- /dev/null
+++ b/include/sysemu/xen.h
@@ -0,0 +1,52 @@
+/*
+ * QEMU Xen support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/* header to be included in non-Xen-specific code */
+
+#ifndef SYSEMU_XEN_H
+#define SYSEMU_XEN_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include sysemu/xen.h from user emulation
+#endif
+
+#include "exec/cpu-common.h"
+
+#ifdef NEED_CPU_H
+# ifdef CONFIG_XEN
+# define CONFIG_XEN_IS_POSSIBLE
+# endif
+#else
+# define CONFIG_XEN_IS_POSSIBLE
+#endif
+
+#ifdef CONFIG_XEN_IS_POSSIBLE
+
+extern bool xen_allowed;
+
+#define xen_enabled() (xen_allowed)
+
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
+ struct MemoryRegion *mr, Error **errp);
+
+#else /* !CONFIG_XEN_IS_POSSIBLE */
+
+#define xen_enabled() 0
+static inline void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
+{
+ /* nothing */
+}
+static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
+ MemoryRegion *mr, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+#endif /* CONFIG_XEN_IS_POSSIBLE */
+
+#endif
diff --git a/include/tcg/debug-assert.h b/include/tcg/debug-assert.h
new file mode 100644
index 0000000000..596765a3d2
--- /dev/null
+++ b/include/tcg/debug-assert.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define tcg_debug_assert
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_DEBUG_ASSERT_H
+#define TCG_DEBUG_ASSERT_H
+
+#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
+# define tcg_debug_assert(X) do { assert(X); } while (0)
+#else
+# define tcg_debug_assert(X) \
+ do { if (!(X)) { __builtin_unreachable(); } } while (0)
+#endif
+
+#endif
diff --git a/include/tcg/debuginfo.h b/include/tcg/debuginfo.h
new file mode 100644
index 0000000000..858535b5da
--- /dev/null
+++ b/include/tcg/debuginfo.h
@@ -0,0 +1,79 @@
+/*
+ * Debug information support.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TCG_DEBUGINFO_H
+#define TCG_DEBUGINFO_H
+
+#include "qemu/bitops.h"
+
+/*
+ * Debuginfo describing a certain address.
+ */
+struct debuginfo_query {
+ uint64_t address; /* Input: address. */
+ int flags; /* Input: debuginfo subset. */
+ const char *symbol; /* Symbol that the address is part of. */
+ uint64_t offset; /* Offset from the symbol. */
+ const char *file; /* Source file associated with the address. */
+ int line; /* Line number in the source file. */
+};
+
+/*
+ * Debuginfo subsets.
+ */
+#define DEBUGINFO_SYMBOL BIT(1)
+#define DEBUGINFO_LINE BIT(2)
+
+#if defined(CONFIG_TCG) && defined(CONFIG_LIBDW)
+/*
+ * Load debuginfo for the specified guest ELF image.
+ */
+void debuginfo_report_elf(const char *name, int fd, uint64_t bias);
+
+/*
+ * Take the debuginfo lock.
+ */
+void debuginfo_lock(void);
+
+/*
+ * Fill each of the N Qs with the debuginfo about Q->ADDRESS as specified by
+ * Q->FLAGS:
+ *
+ * - DEBUGINFO_SYMBOL: update Q->SYMBOL and Q->OFFSET. If symbol debuginfo is
+ * missing, then leave them as is.
+ * - DEBUGINFO_LINE: update Q->FILE and Q->LINE. If line debuginfo is missing,
+ * then leave them as is.
+ *
+ * This function must be called under the debuginfo lock. The results can be
+ * accessed only until the debuginfo lock is released.
+ */
+void debuginfo_query(struct debuginfo_query *q, size_t n);
+
+/*
+ * Release the debuginfo lock.
+ */
+void debuginfo_unlock(void);
+#else
+static inline void debuginfo_report_elf(const char *image_name, int image_fd,
+ uint64_t load_bias)
+{
+}
+
+static inline void debuginfo_lock(void)
+{
+}
+
+static inline void debuginfo_query(struct debuginfo_query *q, size_t n)
+{
+}
+
+static inline void debuginfo_unlock(void)
+{
+}
+#endif
+
+#endif
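
An illustrative sketch of the lock/query/unlock pattern described above; the consumer function is invented, and the results are printed before the lock is released because they are only valid while it is held.

    #include "qemu/osdep.h"
    #include "tcg/debuginfo.h"

    static void foo_print_symbol(uint64_t addr)
    {
        struct debuginfo_query q = {
            .address = addr,
            .flags   = DEBUGINFO_SYMBOL | DEBUGINFO_LINE,
        };

        debuginfo_lock();
        debuginfo_query(&q, 1);
        if (q.symbol) {
            printf("%s+0x%" PRIx64 " (%s:%d)\n",
                   q.symbol, q.offset, q.file ? q.file : "?", q.line);
        }
        debuginfo_unlock();
    }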
diff --git a/include/tcg/helper-info.h b/include/tcg/helper-info.h
new file mode 100644
index 0000000000..7c27d6164a
--- /dev/null
+++ b/include/tcg/helper-info.h
@@ -0,0 +1,64 @@
+/*
+ * TCG Helper Information Structure
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TCG_HELPER_INFO_H
+#define TCG_HELPER_INFO_H
+
+#ifdef CONFIG_TCG_INTERPRETER
+#include <ffi.h>
+#endif
+
+/*
+ * Describe the calling convention of a given argument type.
+ */
+typedef enum {
+ TCG_CALL_RET_NORMAL, /* by registers */
+ TCG_CALL_RET_BY_REF, /* for i128, by reference */
+ TCG_CALL_RET_BY_VEC, /* for i128, by vector register */
+} TCGCallReturnKind;
+
+typedef enum {
+ TCG_CALL_ARG_NORMAL, /* by registers (continuing onto stack) */
+ TCG_CALL_ARG_EVEN, /* like normal, but skipping odd slots */
+ TCG_CALL_ARG_EXTEND, /* for i32, as a sign/zero-extended i64 */
+ TCG_CALL_ARG_EXTEND_U, /* ... as a zero-extended i64 */
+ TCG_CALL_ARG_EXTEND_S, /* ... as a sign-extended i64 */
+ TCG_CALL_ARG_BY_REF, /* for i128, by reference, first */
+ TCG_CALL_ARG_BY_REF_N, /* ... by reference, subsequent */
+} TCGCallArgumentKind;
+
+typedef struct TCGCallArgumentLoc {
+ TCGCallArgumentKind kind : 8;
+ unsigned arg_slot : 8;
+ unsigned ref_slot : 8;
+ unsigned arg_idx : 4;
+ unsigned tmp_subindex : 2;
+} TCGCallArgumentLoc;
+
+struct TCGHelperInfo {
+ void *func;
+ const char *name;
+
+ /* Used with g_once_init_enter. */
+#ifdef CONFIG_TCG_INTERPRETER
+ ffi_cif *cif;
+#else
+ uintptr_t init;
+#endif
+
+ unsigned typemask : 32;
+ unsigned flags : 8;
+ unsigned nr_in : 8;
+ unsigned nr_out : 8;
+ TCGCallReturnKind out_kind : 8;
+
+ /* Maximum physical arguments are constrained by TCG_TYPE_I128. */
+ TCGCallArgumentLoc in[MAX_CALL_IARGS * (128 / TCG_TARGET_REG_BITS)];
+};
+
+#endif /* TCG_HELPER_INFO_H */
diff --git a/include/tcg/insn-start-words.h b/include/tcg/insn-start-words.h
new file mode 100644
index 0000000000..50c18bd326
--- /dev/null
+++ b/include/tcg/insn-start-words.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define TARGET_INSN_START_WORDS
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TARGET_INSN_START_WORDS
+
+#include "cpu.h"
+
+#ifndef TARGET_INSN_START_EXTRA_WORDS
+# define TARGET_INSN_START_WORDS 1
+#else
+# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
+#endif
+
+#endif /* TARGET_INSN_START_WORDS */
diff --git a/include/tcg/oversized-guest.h b/include/tcg/oversized-guest.h
new file mode 100644
index 0000000000..641b9749ff
--- /dev/null
+++ b/include/tcg/oversized-guest.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define TCG_OVERSIZED_GUEST
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef EXEC_TCG_OVERSIZED_GUEST_H
+#define EXEC_TCG_OVERSIZED_GUEST_H
+
+#include "tcg-target-reg-bits.h"
+#include "cpu-param.h"
+
+/*
+ * Oversized TCG guests make things like MTTCG hard
+ * as we can't use atomics for cputlb updates.
+ */
+#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+#define TCG_OVERSIZED_GUEST 1
+#else
+#define TCG_OVERSIZED_GUEST 0
+#endif
+
+#endif
diff --git a/include/tcg/perf.h b/include/tcg/perf.h
new file mode 100644
index 0000000000..c96b5920a3
--- /dev/null
+++ b/include/tcg/perf.h
@@ -0,0 +1,49 @@
+/*
+ * Linux perf perf-<pid>.map and jit-<pid>.dump integration.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TCG_PERF_H
+#define TCG_PERF_H
+
+#if defined(CONFIG_TCG) && defined(CONFIG_LINUX)
+/* Start writing perf-<pid>.map. */
+void perf_enable_perfmap(void);
+
+/* Start writing jit-<pid>.dump. */
+void perf_enable_jitdump(void);
+
+/* Add information about TCG prologue to profiler maps. */
+void perf_report_prologue(const void *start, size_t size);
+
+/* Add information about JITted guest code to profiler maps. */
+void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
+ const void *start);
+
+/* Stop writing perf-<pid>.map and/or jit-<pid>.dump. */
+void perf_exit(void);
+#else
+static inline void perf_enable_perfmap(void)
+{
+}
+
+static inline void perf_enable_jitdump(void)
+{
+}
+
+static inline void perf_report_prologue(const void *start, size_t size)
+{
+}
+
+static inline void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
+ const void *start)
+{
+}
+
+static inline void perf_exit(void)
+{
+}
+#endif
+
+#endif
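
A small illustrative sketch: the profiling hooks are typically switched on from option parsing and torn down once at exit. The wrapper names are hypothetical.

    #include "qemu/osdep.h"
    #include "tcg/perf.h"

    static void foo_enable_profiling(bool perfmap, bool jitdump)
    {
        if (perfmap) {
            perf_enable_perfmap();    /* start writing perf-<pid>.map */
        }
        if (jitdump) {
            perf_enable_jitdump();    /* start writing jit-<pid>.dump */
        }
    }

    static void foo_shutdown(void)
    {
        perf_exit();                  /* flush and close the output files */
    }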
diff --git a/include/tcg/startup.h b/include/tcg/startup.h
new file mode 100644
index 0000000000..f71305765c
--- /dev/null
+++ b/include/tcg/startup.h
@@ -0,0 +1,58 @@
+/*
+ * Tiny Code Generator for QEMU: definitions used by runtime startup
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_STARTUP_H
+#define TCG_STARTUP_H
+
+/**
+ * tcg_init: Initialize the TCG runtime
+ * @tb_size: translation buffer size
+ * @splitwx: use separate rw and rx mappings
+ * @max_cpus: number of vcpus in system mode
+ *
+ * Allocate and initialize TCG resources, especially the JIT buffer.
+ * In user-only mode, @max_cpus is unused.
+ */
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
+
+/**
+ * tcg_register_thread: Register this thread with the TCG runtime
+ *
+ * All TCG threads except the parent (i.e. the one that called the TCG
+ * accelerator's init_machine() method) must register with this
+ * function before initiating translation.
+ */
+void tcg_register_thread(void);
+
+/**
+ * tcg_prologue_init(): Generate the code for the TCG prologue
+ *
+ * In softmmu this is done automatically as part of the TCG
+ * accelerator's init_machine() method, but in user-mode the
+ * caller must invoke this function itself, after it has loaded
+ * the guest binary and the value of guest_base is known.
+ */
+void tcg_prologue_init(void);
+
+#endif
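
A hedged sketch of a vCPU thread function in a TCG-based accelerator: every thread other than the one that ran tcg_init() registers itself before it translates any code. The thread function is invented.

    #include "qemu/osdep.h"
    #include "tcg/startup.h"

    static void *foo_vcpu_thread_fn(void *arg)
    {
        tcg_register_thread();
        /* ... per-vCPU translation/execution loop ... */
        return NULL;
    }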
diff --git a/include/tcg/tcg-cond.h b/include/tcg/tcg-cond.h
new file mode 100644
index 0000000000..5cadbd6ff2
--- /dev/null
+++ b/include/tcg/tcg-cond.h
@@ -0,0 +1,133 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_COND_H
+#define TCG_COND_H
+
+/*
+ * Conditions. Note that these are laid out for easy manipulation by
+ * the functions below:
+ * bit 0 is used for inverting;
+ * bit 1 is used for conditions that need swapping (signed/unsigned).
+ * bit 2 is used with bit 1 for swapping.
+ * bit 3 is used for unsigned conditions.
+ */
+typedef enum {
+ /* non-signed */
+ TCG_COND_NEVER = 0 | 0 | 0 | 0,
+ TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
+
+ /* equality */
+ TCG_COND_EQ = 8 | 0 | 0 | 0,
+ TCG_COND_NE = 8 | 0 | 0 | 1,
+
+ /* "test" i.e. and then compare vs 0 */
+ TCG_COND_TSTEQ = 8 | 4 | 0 | 0,
+ TCG_COND_TSTNE = 8 | 4 | 0 | 1,
+
+ /* signed */
+ TCG_COND_LT = 0 | 0 | 2 | 0,
+ TCG_COND_GE = 0 | 0 | 2 | 1,
+ TCG_COND_GT = 0 | 4 | 2 | 0,
+ TCG_COND_LE = 0 | 4 | 2 | 1,
+
+ /* unsigned */
+ TCG_COND_LTU = 8 | 0 | 2 | 0,
+ TCG_COND_GEU = 8 | 0 | 2 | 1,
+ TCG_COND_GTU = 8 | 4 | 2 | 0,
+ TCG_COND_LEU = 8 | 4 | 2 | 1,
+} TCGCond;
+
+/* Invert the sense of the comparison. */
+static inline TCGCond tcg_invert_cond(TCGCond c)
+{
+ return (TCGCond)(c ^ 1);
+}
+
+/* Swap the operands in a comparison. */
+static inline TCGCond tcg_swap_cond(TCGCond c)
+{
+ return (TCGCond)(c ^ ((c & 2) << 1));
+}
+
+/* Must a comparison be considered signed? */
+static inline bool is_signed_cond(TCGCond c)
+{
+ return (c & (8 | 2)) == 2;
+}
+
+/* Must a comparison be considered unsigned? */
+static inline bool is_unsigned_cond(TCGCond c)
+{
+ return (c & (8 | 2)) == (8 | 2);
+}
+
+/* Must a comparison be considered a test? */
+static inline bool is_tst_cond(TCGCond c)
+{
+ return (c | 1) == TCG_COND_TSTNE;
+}
+
+/* Create an "unsigned" version of a "signed" comparison. */
+static inline TCGCond tcg_unsigned_cond(TCGCond c)
+{
+ return is_signed_cond(c) ? (TCGCond)(c + 8) : c;
+}
+
+/* Create a "signed" version of an "unsigned" comparison. */
+static inline TCGCond tcg_signed_cond(TCGCond c)
+{
+ return is_unsigned_cond(c) ? (TCGCond)(c - 8) : c;
+}
+
+/* Create the eq/ne version of a tsteq/tstne comparison. */
+static inline TCGCond tcg_tst_eqne_cond(TCGCond c)
+{
+ return is_tst_cond(c) ? (TCGCond)(c - 4) : c;
+}
+
+/* Create the lt/ge version of a tstne/tsteq comparison of the sign. */
+static inline TCGCond tcg_tst_ltge_cond(TCGCond c)
+{
+ return is_tst_cond(c) ? (TCGCond)(c ^ 0xf) : c;
+}
+
+/*
+ * Create a "high" version of a double-word comparison.
+ * This removes equality from a LTE or GTE comparison.
+ */
+static inline TCGCond tcg_high_cond(TCGCond c)
+{
+ switch (c) {
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ return (TCGCond)(c ^ (4 | 1));
+ default:
+ return c;
+ }
+}
+
+#endif /* TCG_COND_H */
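
Illustrative only: a few identities that follow directly from the bit layout documented at the top of the header, written as assertions so the encoding can be checked at a glance.

    #include "qemu/osdep.h"
    #include "tcg/tcg-cond.h"

    static void foo_check_cond_helpers(void)
    {
        assert(tcg_invert_cond(TCG_COND_EQ) == TCG_COND_NE);     /* flip bit 0 */
        assert(tcg_swap_cond(TCG_COND_LT) == TCG_COND_GT);       /* swap operands */
        assert(tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU);  /* set bit 3 */
        assert(tcg_high_cond(TCG_COND_LE) == TCG_COND_LT);       /* drop equality */
    }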
diff --git a/include/tcg/tcg-gvec-desc.h b/include/tcg/tcg-gvec-desc.h
new file mode 100644
index 0000000000..704bd86454
--- /dev/null
+++ b/include/tcg/tcg-gvec-desc.h
@@ -0,0 +1,66 @@
+/*
+ * Generic vector operation descriptor
+ *
+ * Copyright (c) 2018 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TCG_TCG_GVEC_DESC_H
+#define TCG_TCG_GVEC_DESC_H
+
+/*
+ * This configuration allows MAXSZ to represent 2048 bytes, and
+ * OPRSZ to match MAXSZ, or represent the smaller values 8, 16, or 32.
+ *
+ * Encode this with:
+ * 0, 1, 3 -> 8, 16, 32
+ * 2 -> maxsz
+ *
+ * This steals the input that would otherwise map to 24 to match maxsz.
+ */
+#define SIMD_MAXSZ_SHIFT 0
+#define SIMD_MAXSZ_BITS 8
+
+#define SIMD_OPRSZ_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS)
+#define SIMD_OPRSZ_BITS 2
+
+#define SIMD_DATA_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS)
+#define SIMD_DATA_BITS (32 - SIMD_DATA_SHIFT)
+
+/* Create a descriptor from components. */
+uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data);
+
+/* Extract the max vector size from a descriptor. */
+static inline intptr_t simd_maxsz(uint32_t desc)
+{
+ return extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) * 8 + 8;
+}
+
+/* Extract the operation size from a descriptor. */
+static inline intptr_t simd_oprsz(uint32_t desc)
+{
+ uint32_t f = extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS);
+ intptr_t o = f * 8 + 8;
+ intptr_t m = simd_maxsz(desc);
+ return f == 2 ? m : o;
+}
+
+/* Extract the operation-specific data from a descriptor. */
+static inline int32_t simd_data(uint32_t desc)
+{
+ return sextract32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS);
+}
+
+#endif
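
Illustrative only: round-tripping a descriptor through the encode/decode helpers above, for a 16-byte operation inside a 32-byte vector with 5 as the operation-specific data.

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"
    #include "tcg/tcg-gvec-desc.h"

    static void foo_check_desc(void)
    {
        uint32_t desc = simd_desc(16, 32, 5);

        assert(simd_oprsz(desc) == 16);
        assert(simd_maxsz(desc) == 32);
        assert(simd_data(desc) == 5);
    }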
diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
new file mode 100644
index 0000000000..6ccfe9131d
--- /dev/null
+++ b/include/tcg/tcg-ldst.h
@@ -0,0 +1,63 @@
+/*
+ * Memory helpers that will be used by TCG generated code.
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_LDST_H
+#define TCG_LDST_H
+
+/* Value zero-extended to tcg register size. */
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+
+/* Value sign-extended to tcg register size. */
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+
+/*
+ * Value extended to at least uint32_t, so that some ABIs do not require
+ * zero-extension from uint8_t or uint16_t.
+ */
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#endif /* TCG_LDST_H */
diff --git a/include/tcg/tcg-mo.h b/include/tcg/tcg-mo.h
new file mode 100644
index 0000000000..c2c55704e1
--- /dev/null
+++ b/include/tcg/tcg-mo.h
@@ -0,0 +1,48 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_MO_H
+#define TCG_MO_H
+
+typedef enum {
+ /* Used to indicate the type of accesses on which ordering
+ is to be ensured. Modeled after SPARC barriers.
+
+ This is of the form TCG_MO_A_B where A is before B in program order.
+ */
+ TCG_MO_LD_LD = 0x01,
+ TCG_MO_ST_LD = 0x02,
+ TCG_MO_LD_ST = 0x04,
+ TCG_MO_ST_ST = 0x08,
+ TCG_MO_ALL = 0x0F, /* OR of the above */
+
+ /* Used to indicate the kind of ordering which is to be ensured by the
+ instruction. These types are derived from x86/aarch64 instructions.
+ It should be noted that these are different from C11 semantics. */
+ TCG_BAR_LDAQ = 0x10, /* Following ops will not come forward */
+ TCG_BAR_STRL = 0x20, /* Previous ops will not be delayed */
+ TCG_BAR_SC = 0x30, /* No ops cross barrier; OR of the above */
+} TCGBar;
+
+#endif /* TCG_MO_H */
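
Illustrative only: the TCG_MO_* and TCG_BAR_* values are bitmasks and combine by OR, so frontends emit barriers like the two hypothetical helpers below (tcg_gen_mb() is declared in tcg-op-common.h).

    #include "qemu/osdep.h"
    #include "tcg/tcg-op-common.h"

    static void foo_gen_full_fence(void)
    {
        /* sequentially consistent fence ordering all prior and later accesses */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    }

    static void foo_gen_store_release_fence(void)
    {
        /* order earlier loads and stores before a subsequent store */
        tcg_gen_mb(TCG_MO_LD_ST | TCG_MO_ST_ST | TCG_BAR_STRL);
    }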
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
new file mode 100644
index 0000000000..2d932a515e
--- /dev/null
+++ b/include/tcg/tcg-op-common.h
@@ -0,0 +1,561 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Target independent opcode generation functions.
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TCG_OP_COMMON_H
+#define TCG_TCG_OP_COMMON_H
+
+#include "tcg/tcg.h"
+#include "exec/helper-proto-common.h"
+#include "exec/helper-gen-common.h"
+
+TCGv_i32 tcg_constant_i32(int32_t val);
+TCGv_i64 tcg_constant_i64(int64_t val);
+TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
+TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
+
+TCGv_i32 tcg_temp_new_i32(void);
+TCGv_i64 tcg_temp_new_i64(void);
+TCGv_ptr tcg_temp_new_ptr(void);
+TCGv_i128 tcg_temp_new_i128(void);
+TCGv_vec tcg_temp_new_vec(TCGType type);
+TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
+
+TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t off, const char *name);
+TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t off, const char *name);
+TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name);
+
+/* Generic ops. */
+
+void gen_set_label(TCGLabel *l);
+void tcg_gen_br(TCGLabel *l);
+void tcg_gen_mb(TCGBar);
+
+/**
+ * tcg_gen_exit_tb() - output exit_tb TCG operation
+ * @tb: The TranslationBlock from which we are exiting
+ * @idx: Direct jump slot index, or exit request
+ *
+ * See tcg/README for more info about this TCG operation.
+ * See also tcg.h and the block comment above TB_EXIT_MASK.
+ *
+ * For a normal exit from the TB, back to the main loop, @tb should
+ * be NULL and @idx should be 0. Otherwise, @tb should be valid and
+ * @idx should be one of the TB_EXIT_ values.
+ */
+void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx);
+
+/**
+ * tcg_gen_goto_tb() - output goto_tb TCG operation
+ * @idx: Direct jump slot index (0 or 1)
+ *
+ * See tcg/README for more info about this TCG operation.
+ *
+ * NOTE: In system emulation, direct jumps with goto_tb are only safe within
+ * the pages this TB resides in because we don't take care of direct jumps when
+ * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a
+ * static address translation, so the destination address is always valid, TBs
+ * are always invalidated properly, and direct jumps are reset when mapping
+ * changes.
+ */
+void tcg_gen_goto_tb(unsigned idx);
+
+/**
+ * tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid
+ * @addr: Guest address of the target TB
+ *
+ * If the TB is not valid, jump to the epilogue.
+ *
+ * This operation is optional. If the TCG backend does not implement goto_ptr,
+ * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument.
+ */
+void tcg_gen_lookup_and_goto_ptr(void);
+
+void tcg_gen_plugin_cb_start(unsigned from, unsigned type, unsigned wr);
+void tcg_gen_plugin_cb_end(void);
+
+/* 32 bit ops */
+
+void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg);
+void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2);
+void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
+void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
+void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
+void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
+ unsigned int ofs);
+void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *);
+void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *);
+void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
+ TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
+void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
+void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
+void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc);
+void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags);
+void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);
+
+/* Replicate a value of size @vece from @in to all the lanes in @out */
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in);
+
+void tcg_gen_discard_i32(TCGv_i32 arg);
+void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg);
+
+void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+
+void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset);
+
+void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg);
+
+/* 64 bit ops */
+
+void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
+void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2);
+void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
+void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
+ unsigned int ofs);
+void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
+void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
+void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_negsetcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
+ TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
+void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc);
+void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
+void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
+void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);
+
+/* Replicate a value of size @vece from @in to all the lanes in @out */
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in);
+
+void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+
+void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+
+void tcg_gen_discard_i64(TCGv_i64 arg);
+void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg);
+
+
+/* Size changing operations. */
+
+void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
+void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
+void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
+void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
+void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
+void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
+void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
+void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi);
+
+void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg);
+void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi);
+
+/* 128 bit ops */
+
+void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src);
+void tcg_gen_ld_i128(TCGv_i128 ret, TCGv_ptr base, tcg_target_long offset);
+void tcg_gen_st_i128(TCGv_i128 val, TCGv_ptr base, tcg_target_long offset);
+
+/* Guest memory load/store ops */
+
+void tcg_gen_qemu_ld_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_st_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_ld_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_st_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_ld_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_st_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
+
+/* Atomic ops */
+
+void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
+ TCGv_i128, TCGArg, MemOp, TCGType);
+
+void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
+ TCGv_i128, TCGArg, MemOp, TCGType);
+
+void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+
+void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_add_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_and_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_and_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_or_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_or_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_xor_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_xor_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+
+void tcg_gen_atomic_add_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_add_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_and_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_and_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_or_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_or_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_xor_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_xor_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+
+/* Vector ops */
+
+void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
+void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
+void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64);
+void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long);
+void tcg_gen_dupi_vec(unsigned vece, TCGv_vec, uint64_t);
+void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
+void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
+void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
+void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+
+void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+
+void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+
+void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+
+void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
+ TCGv_vec a, TCGv_vec b);
+
+void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
+ TCGv_vec b, TCGv_vec c);
+void tcg_gen_cmpsel_vec(TCGCond cond, unsigned vece, TCGv_vec r,
+ TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d);
+
+void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
+void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
+void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
+
+/* Host pointer ops */
+
+#if UINTPTR_MAX == UINT32_MAX
+# define PTR i32
+# define NAT TCGv_i32
+#else
+# define PTR i64
+# define NAT TCGv_i64
+#endif
+
+TCGv_ptr tcg_constant_ptr_int(intptr_t x);
+#define tcg_constant_ptr(X) tcg_constant_ptr_int((intptr_t)(X))
+
+static inline void tcg_gen_ld_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
+{
+ glue(tcg_gen_ld_,PTR)((NAT)r, a, o);
+}
+
+static inline void tcg_gen_st_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
+{
+ glue(tcg_gen_st_, PTR)((NAT)r, a, o);
+}
+
+static inline void tcg_gen_discard_ptr(TCGv_ptr a)
+{
+ glue(tcg_gen_discard_,PTR)((NAT)a);
+}
+
+static inline void tcg_gen_add_ptr(TCGv_ptr r, TCGv_ptr a, TCGv_ptr b)
+{
+ glue(tcg_gen_add_,PTR)((NAT)r, (NAT)a, (NAT)b);
+}
+
+static inline void tcg_gen_addi_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t b)
+{
+ glue(tcg_gen_addi_,PTR)((NAT)r, (NAT)a, b);
+}
+
+static inline void tcg_gen_mov_ptr(TCGv_ptr d, TCGv_ptr s)
+{
+ glue(tcg_gen_mov_,PTR)((NAT)d, (NAT)s);
+}
+
+static inline void tcg_gen_movi_ptr(TCGv_ptr d, intptr_t s)
+{
+ glue(tcg_gen_movi_,PTR)((NAT)d, s);
+}
+
+static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a,
+ intptr_t b, TCGLabel *label)
+{
+ glue(tcg_gen_brcondi_,PTR)(cond, (NAT)a, b, label);
+}
+
+static inline void tcg_gen_ext_i32_ptr(TCGv_ptr r, TCGv_i32 a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_mov_i32((NAT)r, a);
+#else
+ tcg_gen_ext_i32_i64((NAT)r, a);
+#endif
+}
+
+static inline void tcg_gen_trunc_i64_ptr(TCGv_ptr r, TCGv_i64 a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_extrl_i64_i32((NAT)r, a);
+#else
+ tcg_gen_mov_i64((NAT)r, a);
+#endif
+}
+
+static inline void tcg_gen_extu_ptr_i64(TCGv_i64 r, TCGv_ptr a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_extu_i32_i64(r, (NAT)a);
+#else
+ tcg_gen_mov_i64(r, (NAT)a);
+#endif
+}
+
+static inline void tcg_gen_trunc_ptr_i32(TCGv_i32 r, TCGv_ptr a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_mov_i32(r, (NAT)a);
+#else
+ tcg_gen_extrl_i64_i32(r, (NAT)a);
+#endif
+}
+
+#undef PTR
+#undef NAT
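Usage sketch for the pointer helpers above; `CPUFooState`, `table` and `base` are illustrative names (in QEMU proper, `base` would be the env pointer, e.g. cpu_env/tcg_env depending on version):

    /* Compute a host pointer to a field inside the CPU state block. */
    TCGv_ptr p = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(p, base, offsetof(CPUFooState, table));
    /* p can then be used as the base of tcg_gen_ld_*/st_* accesses. */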
+
+#endif /* TCG_TCG_OP_COMMON_H */
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
new file mode 100644
index 0000000000..4db8a58c14
--- /dev/null
+++ b/include/tcg/tcg-op-gvec-common.h
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Target independent generic vector operation expansion
+ *
+ * Copyright (c) 2018 Linaro
+ */
+
+#ifndef TCG_TCG_OP_GVEC_COMMON_H
+#define TCG_TCG_OP_GVEC_COMMON_H
+
+/*
+ * "Generic" vectors. All operands are given as offsets from ENV,
+ * and therefore cannot also be allocated via tcg_global_mem_new_*.
+ * OPRSZ is the byte size of the vector upon which the operation is performed.
+ * MAXSZ is the byte size of the full vector; bytes beyond OPRSZ are cleared.
+ *
+ * All sizes must be 8 or any multiple of 16.
+ * When OPRSZ is 8, the alignment may be 8, otherwise must be 16.
+ * Operands may completely, but not partially, overlap.
+ */
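For example, a target might expand an element-wise 32-bit add over two 16-byte vector registers kept in its CPU state roughly as below; `CPUFooState`, `vreg`, and the register indices are illustrative names, not from the patch:

    /* Byte offsets of the 16-byte registers inside the CPU state. */
    uint32_t dofs = offsetof(CPUFooState, vreg) + rd * 16;
    uint32_t aofs = offsetof(CPUFooState, vreg) + ra * 16;
    uint32_t bofs = offsetof(CPUFooState, vreg) + rb * 16;

    /* Element-wise 32-bit add over the full 16-byte vectors. */
    tcg_gen_gvec_add(MO_32, dofs, aofs, bofs, 16, 16);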
+
+/* Expand a call to a gvec-style helper, with pointers to two vector
+ operands, and a descriptor (see tcg-gvec-desc.h). */
+typedef void gen_helper_gvec_2(TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_2 *fn);
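Sketch of the matching out-of-line helper shape, using the descriptor accessors from tcg-gvec-desc.h; the helper name and the byte-negate operation are illustrative:

    void HELPER(foo_neg8)(void *d, void *a, uint32_t desc)
    {
        intptr_t i, oprsz = simd_oprsz(desc);

        for (i = 0; i < oprsz; i++) {
            ((uint8_t *)d)[i] = -((uint8_t *)a)[i];
        }
        /* A real helper would also clear bytes from oprsz up to
           simd_maxsz(desc), per the comment block above. */
    }

    /* ...and the expansion call from the translator: */
    tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, 0, gen_helper_foo_neg8);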
+
+/* Similarly, passing an extra data value. */
+typedef void gen_helper_gvec_2i(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
+void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_2i *fn);
+
+/* Similarly, passing an extra pointer (e.g. env or float_status). */
+typedef void gen_helper_gvec_2_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
+ TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_2_ptr *fn);
+
+/* Similarly, with three vector operands. */
+typedef void gen_helper_gvec_3(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_3 *fn);
+
+/* Similarly, with four vector operands. */
+typedef void gen_helper_gvec_4(TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_4 *fn);
+
+/* Similarly, with five vector operands. */
+typedef void gen_helper_gvec_5(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, uint32_t xofs, uint32_t oprsz,
+ uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn);
+
+typedef void gen_helper_gvec_3_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_3_ptr *fn);
+
+typedef void gen_helper_gvec_4_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
+ uint32_t maxsz, int32_t data,
+ gen_helper_gvec_4_ptr *fn);
+
+typedef void gen_helper_gvec_5_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_5_ptr *fn);
+
+/* Expand a gvec operation. Either inline or out-of-line depending on
+ the actual vector size and the operations supported by the host. */
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_2 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ int32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 2nd source operand. */
+ bool load_dest;
+} GVecGen2;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, int64_t);
+ void (*fni4)(TCGv_i32, TCGv_i32, int32_t);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, int64_t);
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
+ gen_helper_gvec_2 *fno;
+ /* Expand out-of-line helper w/descriptor, data as argument. */
+ gen_helper_gvec_2i *fnoi;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 3rd source operand. */
+ bool load_dest;
+} GVecGen2i;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_2i *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ uint32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load scalar as 1st source operand. */
+ bool scalar_first;
+} GVecGen2s;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_3 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ int32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 3rd source operand. */
+ bool load_dest;
+} GVecGen3;
+
+typedef struct {
+ /*
+ * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
+ * non-NULL.
+ */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
+ gen_helper_gvec_3 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 3rd source operand. */
+ bool load_dest;
+} GVecGen3i;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_4 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ int32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Write aofs as a 2nd dest operand. */
+ bool write_aofs;
+} GVecGen4;
+
+typedef struct {
+ /*
+ * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
+ * non-NULL.
+ */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
+ gen_helper_gvec_4 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+} GVecGen4i;
+
+void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
+void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+ uint32_t maxsz, int64_t c, const GVecGen2i *);
+void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+ uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
+void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
+void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
+ const GVecGen3i *);
+void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
+void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
+ const GVecGen4i *);
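As a concrete shape for these descriptors, the in-tree expanders are built the same way; a simplified sketch for an xor-like op (mirroring, but not copied verbatim from, tcg-op-gvec.c):

    static const GVecGen3 gop = {
        .fni8 = tcg_gen_xor_i64,       /* integer fallback, 64 bits at a time */
        .fniv = tcg_gen_xor_vec,       /* host vector expansion */
        .fno  = gen_helper_gvec_xor,   /* out-of-line helper with descriptor */
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &gop);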
+
+/* Expand a specific vector operation. */
+
+void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+
+/* Saturated arithmetic. */
+void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+/* Min/max. */
+void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t s, uint32_t m);
+void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t s,
+ uint32_t m, uint64_t imm);
+void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
+ uint32_t m, TCGv_i32);
+void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
+ uint32_t m, TCGv_i64);
+
+void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+
+/*
+ * Perform vector shift by vector element, modulo the element size.
+ * E.g. D[i] = A[i] << (B[i] % (8 << vece)).
+ */
+void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, int64_t c,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz);
+
+/*
+ * Perform vector bit select: d = (b & a) | (c & ~a).
+ */
+void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t cofs,
+ uint32_t oprsz, uint32_t maxsz);
+
+/*
+ * 64-bit vector operations. Use these when the register has been allocated
+ * with tcg_global_mem_new_i64, and so we cannot also address it via pointer.
+ * OPRSZ = MAXSZ = 8.
+ */
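For instance, a per-byte add on such an i64 global keeps the eight byte lanes independent (no carries cross lane boundaries); operand names are illustrative:

    /* d, a, b are TCGv_i64 created with tcg_global_mem_new_i64(). */
    tcg_gen_vec_add8_i64(d, a, b);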
+
+void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 a);
+void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 a);
+void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 a);
+
+void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+
+void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+
+void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
+void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
+
+/* 32-bit vector operations. */
+void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+
+void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+
+void tcg_gen_vec_shl8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_shr8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+
+#endif
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
new file mode 100644
index 0000000000..b0a81ad4bf
--- /dev/null
+++ b/include/tcg/tcg-op-gvec.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Target dependent generic vector operation expansion
+ *
+ * Copyright (c) 2018 Linaro
+ */
+
+#ifndef TCG_TCG_OP_GVEC_H
+#define TCG_TCG_OP_GVEC_H
+
+#include "tcg/tcg-op-gvec-common.h"
+
+#ifndef TARGET_LONG_BITS
+#error must include QEMU headers
+#endif
+
+#if TARGET_LONG_BITS == 64
+#define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i64
+#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i64
+#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i64
+#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i64
+#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i64
+#define tcg_gen_vec_add32_tl tcg_gen_vec_add32_i64
+#define tcg_gen_vec_sub32_tl tcg_gen_vec_sub32_i64
+#define tcg_gen_vec_shl8i_tl tcg_gen_vec_shl8i_i64
+#define tcg_gen_vec_shr8i_tl tcg_gen_vec_shr8i_i64
+#define tcg_gen_vec_sar8i_tl tcg_gen_vec_sar8i_i64
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i64
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i64
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i64
+#elif TARGET_LONG_BITS == 32
+#define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i32
+#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i32
+#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i32
+#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
+#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i32
+#define tcg_gen_vec_add32_tl tcg_gen_add_i32
+#define tcg_gen_vec_sub32_tl tcg_gen_sub_i32
+#define tcg_gen_vec_shl8i_tl tcg_gen_vec_shl8i_i32
+#define tcg_gen_vec_shr8i_tl tcg_gen_vec_shr8i_i32
+#define tcg_gen_vec_sar8i_tl tcg_gen_vec_sar8i_i32
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i32
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i32
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i32
+#else
+# error
+#endif
+
+#endif
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
new file mode 100644
index 0000000000..a02850583b
--- /dev/null
+++ b/include/tcg/tcg-op.h
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Target dependent opcode generation functions.
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TCG_OP_H
+#define TCG_TCG_OP_H
+
+#include "tcg/tcg-op-common.h"
+
+#ifndef TARGET_LONG_BITS
+#error must include QEMU headers
+#endif
+
+#if TARGET_LONG_BITS == 32
+# define TCG_TYPE_TL TCG_TYPE_I32
+#elif TARGET_LONG_BITS == 64
+# define TCG_TYPE_TL TCG_TYPE_I64
+#else
+# error
+#endif
+
+#ifndef TARGET_INSN_START_EXTRA_WORDS
+static inline void tcg_gen_insn_start(target_ulong pc)
+{
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS);
+ tcg_set_insn_start_param(op, 0, pc);
+}
+#elif TARGET_INSN_START_EXTRA_WORDS == 1
+static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
+{
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS);
+ tcg_set_insn_start_param(op, 0, pc);
+ tcg_set_insn_start_param(op, 1, a1);
+}
+#elif TARGET_INSN_START_EXTRA_WORDS == 2
+static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
+ target_ulong a2)
+{
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 3 * 64 / TCG_TARGET_REG_BITS);
+ tcg_set_insn_start_param(op, 0, pc);
+ tcg_set_insn_start_param(op, 1, a1);
+ tcg_set_insn_start_param(op, 2, a2);
+}
+#else
+#error Unhandled TARGET_INSN_START_EXTRA_WORDS value
+#endif
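A typical call site is the insn_start hook of a target's TranslatorOps; a sketch under that assumption (the DisasContext layout and hook name vary per target):

    static void foo_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
    {
        DisasContext *dc = container_of(dcbase, DisasContext, base);

        /* Record the guest PC for this instruction in the op stream. */
        tcg_gen_insn_start(dc->base.pc_next);
    }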
+
+#if TARGET_LONG_BITS == 32
+typedef TCGv_i32 TCGv;
+#define tcg_temp_new() tcg_temp_new_i32()
+#define tcg_global_mem_new tcg_global_mem_new_i32
+#define tcgv_tl_temp tcgv_i32_temp
+#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
+#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
+#elif TARGET_LONG_BITS == 64
+typedef TCGv_i64 TCGv;
+#define tcg_temp_new() tcg_temp_new_i64()
+#define tcg_global_mem_new tcg_global_mem_new_i64
+#define tcgv_tl_temp tcgv_i64_temp
+#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
+#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
+#else
+#error Unhandled TARGET_LONG_BITS value
+#endif
+
+static inline void
+tcg_gen_qemu_ld_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m)
+{
+ tcg_gen_qemu_ld_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
+}
+
+static inline void
+tcg_gen_qemu_st_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m)
+{
+ tcg_gen_qemu_st_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
+}
+
+static inline void
+tcg_gen_qemu_ld_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m)
+{
+ tcg_gen_qemu_ld_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
+}
+
+static inline void
+tcg_gen_qemu_st_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m)
+{
+ tcg_gen_qemu_st_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
+}
+
+static inline void
+tcg_gen_qemu_ld_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m)
+{
+ tcg_gen_qemu_ld_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
+}
+
+static inline void
+tcg_gen_qemu_st_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m)
+{
+ tcg_gen_qemu_st_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
+}
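Usage sketch from a translator: load a 32-bit little-endian guest word. `val`, `addr` and `ctx->mem_idx` are illustrative names:

    /* val: TCGv_i32 destination, addr: TCGv guest address. */
    tcg_gen_qemu_ld_i32(val, addr, ctx->mem_idx, MO_LEUL);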
+
+#define DEF_ATOMIC2(N, S) \
+ static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S v, \
+ TCGArg i, MemOp m) \
+ { N##_##S##_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL); }
+
+#define DEF_ATOMIC3(N, S) \
+ static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S o, \
+ TCGv_##S n, TCGArg i, MemOp m) \
+ { N##_##S##_chk(r, tcgv_tl_temp(a), o, n, i, m, TCG_TYPE_TL); }
+
+DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i32)
+DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i64)
+DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i128)
+
+DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i32)
+DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i64)
+DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i128)
+
+DEF_ATOMIC2(tcg_gen_atomic_xchg, i32)
+DEF_ATOMIC2(tcg_gen_atomic_xchg, i64)
+
+DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i64)
+
+DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
+
+#undef DEF_ATOMIC2
+#undef DEF_ATOMIC3
+
+#if TARGET_LONG_BITS == 64
+#define tcg_gen_movi_tl tcg_gen_movi_i64
+#define tcg_gen_mov_tl tcg_gen_mov_i64
+#define tcg_gen_ld8u_tl tcg_gen_ld8u_i64
+#define tcg_gen_ld8s_tl tcg_gen_ld8s_i64
+#define tcg_gen_ld16u_tl tcg_gen_ld16u_i64
+#define tcg_gen_ld16s_tl tcg_gen_ld16s_i64
+#define tcg_gen_ld32u_tl tcg_gen_ld32u_i64
+#define tcg_gen_ld32s_tl tcg_gen_ld32s_i64
+#define tcg_gen_ld_tl tcg_gen_ld_i64
+#define tcg_gen_st8_tl tcg_gen_st8_i64
+#define tcg_gen_st16_tl tcg_gen_st16_i64
+#define tcg_gen_st32_tl tcg_gen_st32_i64
+#define tcg_gen_st_tl tcg_gen_st_i64
+#define tcg_gen_add_tl tcg_gen_add_i64
+#define tcg_gen_addi_tl tcg_gen_addi_i64
+#define tcg_gen_sub_tl tcg_gen_sub_i64
+#define tcg_gen_neg_tl tcg_gen_neg_i64
+#define tcg_gen_abs_tl tcg_gen_abs_i64
+#define tcg_gen_subfi_tl tcg_gen_subfi_i64
+#define tcg_gen_subi_tl tcg_gen_subi_i64
+#define tcg_gen_and_tl tcg_gen_and_i64
+#define tcg_gen_andi_tl tcg_gen_andi_i64
+#define tcg_gen_or_tl tcg_gen_or_i64
+#define tcg_gen_ori_tl tcg_gen_ori_i64
+#define tcg_gen_xor_tl tcg_gen_xor_i64
+#define tcg_gen_xori_tl tcg_gen_xori_i64
+#define tcg_gen_not_tl tcg_gen_not_i64
+#define tcg_gen_shl_tl tcg_gen_shl_i64
+#define tcg_gen_shli_tl tcg_gen_shli_i64
+#define tcg_gen_shr_tl tcg_gen_shr_i64
+#define tcg_gen_shri_tl tcg_gen_shri_i64
+#define tcg_gen_sar_tl tcg_gen_sar_i64
+#define tcg_gen_sari_tl tcg_gen_sari_i64
+#define tcg_gen_brcond_tl tcg_gen_brcond_i64
+#define tcg_gen_brcondi_tl tcg_gen_brcondi_i64
+#define tcg_gen_setcond_tl tcg_gen_setcond_i64
+#define tcg_gen_setcondi_tl tcg_gen_setcondi_i64
+#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i64
+#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i64
+#define tcg_gen_mul_tl tcg_gen_mul_i64
+#define tcg_gen_muli_tl tcg_gen_muli_i64
+#define tcg_gen_div_tl tcg_gen_div_i64
+#define tcg_gen_rem_tl tcg_gen_rem_i64
+#define tcg_gen_divu_tl tcg_gen_divu_i64
+#define tcg_gen_remu_tl tcg_gen_remu_i64
+#define tcg_gen_discard_tl tcg_gen_discard_i64
+#define tcg_gen_trunc_tl_i32 tcg_gen_extrl_i64_i32
+#define tcg_gen_trunc_i64_tl tcg_gen_mov_i64
+#define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64
+#define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64
+#define tcg_gen_extu_tl_i64 tcg_gen_mov_i64
+#define tcg_gen_ext_tl_i64 tcg_gen_mov_i64
+#define tcg_gen_ext8u_tl tcg_gen_ext8u_i64
+#define tcg_gen_ext8s_tl tcg_gen_ext8s_i64
+#define tcg_gen_ext16u_tl tcg_gen_ext16u_i64
+#define tcg_gen_ext16s_tl tcg_gen_ext16s_i64
+#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64
+#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64
+#define tcg_gen_ext_tl tcg_gen_ext_i64
+#define tcg_gen_bswap16_tl tcg_gen_bswap16_i64
+#define tcg_gen_bswap32_tl tcg_gen_bswap32_i64
+#define tcg_gen_bswap64_tl tcg_gen_bswap64_i64
+#define tcg_gen_bswap_tl tcg_gen_bswap64_i64
+#define tcg_gen_hswap_tl tcg_gen_hswap_i64
+#define tcg_gen_wswap_tl tcg_gen_wswap_i64
+#define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64
+#define tcg_gen_extr_i64_tl tcg_gen_extr32_i64
+#define tcg_gen_andc_tl tcg_gen_andc_i64
+#define tcg_gen_eqv_tl tcg_gen_eqv_i64
+#define tcg_gen_nand_tl tcg_gen_nand_i64
+#define tcg_gen_nor_tl tcg_gen_nor_i64
+#define tcg_gen_orc_tl tcg_gen_orc_i64
+#define tcg_gen_clz_tl tcg_gen_clz_i64
+#define tcg_gen_ctz_tl tcg_gen_ctz_i64
+#define tcg_gen_clzi_tl tcg_gen_clzi_i64
+#define tcg_gen_ctzi_tl tcg_gen_ctzi_i64
+#define tcg_gen_clrsb_tl tcg_gen_clrsb_i64
+#define tcg_gen_ctpop_tl tcg_gen_ctpop_i64
+#define tcg_gen_rotl_tl tcg_gen_rotl_i64
+#define tcg_gen_rotli_tl tcg_gen_rotli_i64
+#define tcg_gen_rotr_tl tcg_gen_rotr_i64
+#define tcg_gen_rotri_tl tcg_gen_rotri_i64
+#define tcg_gen_deposit_tl tcg_gen_deposit_i64
+#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i64
+#define tcg_gen_extract_tl tcg_gen_extract_i64
+#define tcg_gen_sextract_tl tcg_gen_sextract_i64
+#define tcg_gen_extract2_tl tcg_gen_extract2_i64
+#define tcg_constant_tl tcg_constant_i64
+#define tcg_gen_movcond_tl tcg_gen_movcond_i64
+#define tcg_gen_add2_tl tcg_gen_add2_i64
+#define tcg_gen_sub2_tl tcg_gen_sub2_i64
+#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64
+#define tcg_gen_muls2_tl tcg_gen_muls2_i64
+#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64
+#define tcg_gen_smin_tl tcg_gen_smin_i64
+#define tcg_gen_umin_tl tcg_gen_umin_i64
+#define tcg_gen_smax_tl tcg_gen_smax_i64
+#define tcg_gen_umax_tl tcg_gen_umax_i64
+#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i64
+#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i64
+#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i64
+#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i64
+#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i64
+#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i64
+#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i64
+#define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i64
+#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i64
+#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i64
+#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i64
+#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i64
+#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i64
+#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i64
+#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i64
+#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i64
+#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64
+#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64
+#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec
+#define tcg_gen_dup_tl tcg_gen_dup_i64
+#define dup_const_tl dup_const
+#else
+#define tcg_gen_movi_tl tcg_gen_movi_i32
+#define tcg_gen_mov_tl tcg_gen_mov_i32
+#define tcg_gen_ld8u_tl tcg_gen_ld8u_i32
+#define tcg_gen_ld8s_tl tcg_gen_ld8s_i32
+#define tcg_gen_ld16u_tl tcg_gen_ld16u_i32
+#define tcg_gen_ld16s_tl tcg_gen_ld16s_i32
+#define tcg_gen_ld32u_tl tcg_gen_ld_i32
+#define tcg_gen_ld32s_tl tcg_gen_ld_i32
+#define tcg_gen_ld_tl tcg_gen_ld_i32
+#define tcg_gen_st8_tl tcg_gen_st8_i32
+#define tcg_gen_st16_tl tcg_gen_st16_i32
+#define tcg_gen_st32_tl tcg_gen_st_i32
+#define tcg_gen_st_tl tcg_gen_st_i32
+#define tcg_gen_add_tl tcg_gen_add_i32
+#define tcg_gen_addi_tl tcg_gen_addi_i32
+#define tcg_gen_sub_tl tcg_gen_sub_i32
+#define tcg_gen_neg_tl tcg_gen_neg_i32
+#define tcg_gen_abs_tl tcg_gen_abs_i32
+#define tcg_gen_subfi_tl tcg_gen_subfi_i32
+#define tcg_gen_subi_tl tcg_gen_subi_i32
+#define tcg_gen_and_tl tcg_gen_and_i32
+#define tcg_gen_andi_tl tcg_gen_andi_i32
+#define tcg_gen_or_tl tcg_gen_or_i32
+#define tcg_gen_ori_tl tcg_gen_ori_i32
+#define tcg_gen_xor_tl tcg_gen_xor_i32
+#define tcg_gen_xori_tl tcg_gen_xori_i32
+#define tcg_gen_not_tl tcg_gen_not_i32
+#define tcg_gen_shl_tl tcg_gen_shl_i32
+#define tcg_gen_shli_tl tcg_gen_shli_i32
+#define tcg_gen_shr_tl tcg_gen_shr_i32
+#define tcg_gen_shri_tl tcg_gen_shri_i32
+#define tcg_gen_sar_tl tcg_gen_sar_i32
+#define tcg_gen_sari_tl tcg_gen_sari_i32
+#define tcg_gen_brcond_tl tcg_gen_brcond_i32
+#define tcg_gen_brcondi_tl tcg_gen_brcondi_i32
+#define tcg_gen_setcond_tl tcg_gen_setcond_i32
+#define tcg_gen_setcondi_tl tcg_gen_setcondi_i32
+#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i32
+#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i32
+#define tcg_gen_mul_tl tcg_gen_mul_i32
+#define tcg_gen_muli_tl tcg_gen_muli_i32
+#define tcg_gen_div_tl tcg_gen_div_i32
+#define tcg_gen_rem_tl tcg_gen_rem_i32
+#define tcg_gen_divu_tl tcg_gen_divu_i32
+#define tcg_gen_remu_tl tcg_gen_remu_i32
+#define tcg_gen_discard_tl tcg_gen_discard_i32
+#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32
+#define tcg_gen_trunc_i64_tl tcg_gen_extrl_i64_i32
+#define tcg_gen_extu_i32_tl tcg_gen_mov_i32
+#define tcg_gen_ext_i32_tl tcg_gen_mov_i32
+#define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64
+#define tcg_gen_ext_tl_i64 tcg_gen_ext_i32_i64
+#define tcg_gen_ext8u_tl tcg_gen_ext8u_i32
+#define tcg_gen_ext8s_tl tcg_gen_ext8s_i32
+#define tcg_gen_ext16u_tl tcg_gen_ext16u_i32
+#define tcg_gen_ext16s_tl tcg_gen_ext16s_i32
+#define tcg_gen_ext32u_tl tcg_gen_mov_i32
+#define tcg_gen_ext32s_tl tcg_gen_mov_i32
+#define tcg_gen_ext_tl tcg_gen_ext_i32
+#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32
+#define tcg_gen_bswap32_tl(D, S, F) tcg_gen_bswap32_i32(D, S)
+#define tcg_gen_bswap_tl tcg_gen_bswap32_i32
+#define tcg_gen_hswap_tl tcg_gen_hswap_i32
+#define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64
+#define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32
+#define tcg_gen_andc_tl tcg_gen_andc_i32
+#define tcg_gen_eqv_tl tcg_gen_eqv_i32
+#define tcg_gen_nand_tl tcg_gen_nand_i32
+#define tcg_gen_nor_tl tcg_gen_nor_i32
+#define tcg_gen_orc_tl tcg_gen_orc_i32
+#define tcg_gen_clz_tl tcg_gen_clz_i32
+#define tcg_gen_ctz_tl tcg_gen_ctz_i32
+#define tcg_gen_clzi_tl tcg_gen_clzi_i32
+#define tcg_gen_ctzi_tl tcg_gen_ctzi_i32
+#define tcg_gen_clrsb_tl tcg_gen_clrsb_i32
+#define tcg_gen_ctpop_tl tcg_gen_ctpop_i32
+#define tcg_gen_rotl_tl tcg_gen_rotl_i32
+#define tcg_gen_rotli_tl tcg_gen_rotli_i32
+#define tcg_gen_rotr_tl tcg_gen_rotr_i32
+#define tcg_gen_rotri_tl tcg_gen_rotri_i32
+#define tcg_gen_deposit_tl tcg_gen_deposit_i32
+#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i32
+#define tcg_gen_extract_tl tcg_gen_extract_i32
+#define tcg_gen_sextract_tl tcg_gen_sextract_i32
+#define tcg_gen_extract2_tl tcg_gen_extract2_i32
+#define tcg_constant_tl tcg_constant_i32
+#define tcg_gen_movcond_tl tcg_gen_movcond_i32
+#define tcg_gen_add2_tl tcg_gen_add2_i32
+#define tcg_gen_sub2_tl tcg_gen_sub2_i32
+#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32
+#define tcg_gen_muls2_tl tcg_gen_muls2_i32
+#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32
+#define tcg_gen_smin_tl tcg_gen_smin_i32
+#define tcg_gen_umin_tl tcg_gen_umin_i32
+#define tcg_gen_smax_tl tcg_gen_smax_i32
+#define tcg_gen_umax_tl tcg_gen_umax_i32
+#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i32
+#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i32
+#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i32
+#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i32
+#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i32
+#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i32
+#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i32
+#define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i32
+#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i32
+#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i32
+#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i32
+#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i32
+#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i32
+#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i32
+#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i32
+#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i32
+#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32
+#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32
+#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec
+#define tcg_gen_dup_tl tcg_gen_dup_i32
+
+#define dup_const_tl(VECE, C) \
+ (__builtin_constant_p(VECE) \
+ ? ( (VECE) == MO_8 ? 0x01010101ul * (uint8_t)(C) \
+ : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C) \
+ : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C) \
+ : (qemu_build_not_reached_always(), 0)) \
+ : (target_long)dup_const(VECE, C))
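Worked example of the replication performed here: on a 32-bit target, dup_const_tl(MO_8, 0x2a) folds to 0x01010101ul * 0x2a = 0x2a2a2a2a, i.e. the byte is copied into every lane of the target-long.

    /* Constant-folded at compile time when VECE is a constant. */
    target_ulong m = dup_const_tl(MO_16, 0x00ff);   /* -> 0x00ff00ff on 32-bit */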
+
+#endif /* TARGET_LONG_BITS == 64 */
+#endif /* TCG_TCG_OP_H */
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
new file mode 100644
index 0000000000..b80227fa1c
--- /dev/null
+++ b/include/tcg/tcg-opc.h
@@ -0,0 +1,318 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * DEF(name, oargs, iargs, cargs, flags)
+ */
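This header is an X-macro list: each consumer defines DEF before including it, so every line below expands into whatever the consumer needs. A sketch of the pattern used by tcg/tcg.h to build the opcode enum:

    typedef enum TCGOpcode {
    #define DEF(name, oargs, iargs, cargs, flags)  INDEX_op_ ## name,
    #include "tcg/tcg-opc.h"
    #undef DEF
        NB_OPS,
    } TCGOpcode;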
+
+/* predefined ops */
+DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT)
+DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
+
+/* variable number of parameters */
+DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)
+
+DEF(br, 0, 0, 1, TCG_OPF_BB_END)
+
+#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
+#if TCG_TARGET_REG_BITS == 32
+# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT
+#else
+# define IMPL64 TCG_OPF_64BIT
+#endif
+
+DEF(mb, 0, 0, 1, 0)
+
+DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
+DEF(setcond_i32, 1, 2, 1, 0)
+DEF(negsetcond_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_negsetcond_i32))
+DEF(movcond_i32, 1, 4, 1, 0)
+/* load/store */
+DEF(ld8u_i32, 1, 1, 1, 0)
+DEF(ld8s_i32, 1, 1, 1, 0)
+DEF(ld16u_i32, 1, 1, 1, 0)
+DEF(ld16s_i32, 1, 1, 1, 0)
+DEF(ld_i32, 1, 1, 1, 0)
+DEF(st8_i32, 0, 2, 1, 0)
+DEF(st16_i32, 0, 2, 1, 0)
+DEF(st_i32, 0, 2, 1, 0)
+/* arith */
+DEF(add_i32, 1, 2, 0, 0)
+DEF(sub_i32, 1, 2, 0, 0)
+DEF(mul_i32, 1, 2, 0, 0)
+DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
+DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
+DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
+DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
+DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
+DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
+DEF(and_i32, 1, 2, 0, 0)
+DEF(or_i32, 1, 2, 0, 0)
+DEF(xor_i32, 1, 2, 0, 0)
+/* shifts/rotates */
+DEF(shl_i32, 1, 2, 0, 0)
+DEF(shr_i32, 1, 2, 0, 0)
+DEF(sar_i32, 1, 2, 0, 0)
+DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
+DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
+DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
+DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
+DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
+DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))
+
+DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
+
+DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
+DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
+DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
+DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
+DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
+DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
+DEF(brcond2_i32, 0, 4, 2,
+ TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL(TCG_TARGET_REG_BITS == 32))
+DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))
+
+DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
+DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
+DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
+DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
+DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32))
+DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32))
+DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
+DEF(neg_i32, 1, 1, 0, 0)
+DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
+DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
+DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
+DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32))
+DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32))
+DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
+DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
+DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
+
+DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
+DEF(setcond_i64, 1, 2, 1, IMPL64)
+DEF(negsetcond_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_negsetcond_i64))
+DEF(movcond_i64, 1, 4, 1, IMPL64)
+/* load/store */
+DEF(ld8u_i64, 1, 1, 1, IMPL64)
+DEF(ld8s_i64, 1, 1, 1, IMPL64)
+DEF(ld16u_i64, 1, 1, 1, IMPL64)
+DEF(ld16s_i64, 1, 1, 1, IMPL64)
+DEF(ld32u_i64, 1, 1, 1, IMPL64)
+DEF(ld32s_i64, 1, 1, 1, IMPL64)
+DEF(ld_i64, 1, 1, 1, IMPL64)
+DEF(st8_i64, 0, 2, 1, IMPL64)
+DEF(st16_i64, 0, 2, 1, IMPL64)
+DEF(st32_i64, 0, 2, 1, IMPL64)
+DEF(st_i64, 0, 2, 1, IMPL64)
+/* arith */
+DEF(add_i64, 1, 2, 0, IMPL64)
+DEF(sub_i64, 1, 2, 0, IMPL64)
+DEF(mul_i64, 1, 2, 0, IMPL64)
+DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
+DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
+DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
+DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
+DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
+DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
+DEF(and_i64, 1, 2, 0, IMPL64)
+DEF(or_i64, 1, 2, 0, IMPL64)
+DEF(xor_i64, 1, 2, 0, IMPL64)
+/* shifts/rotates */
+DEF(shl_i64, 1, 2, 0, IMPL64)
+DEF(shr_i64, 1, 2, 0, IMPL64)
+DEF(sar_i64, 1, 2, 0, IMPL64)
+DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
+DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
+DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
+DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
+DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
+DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))
+
+/* size changing ops */
+DEF(ext_i32_i64, 1, 1, 0, IMPL64)
+DEF(extu_i32_i64, 1, 1, 0, IMPL64)
+DEF(extrl_i64_i32, 1, 1, 0,
+ IMPL(TCG_TARGET_HAS_extr_i64_i32)
+ | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
+DEF(extrh_i64_i32, 1, 1, 0,
+ IMPL(TCG_TARGET_HAS_extr_i64_i32)
+ | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
+
+DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
+DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
+DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
+DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
+DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
+DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
+DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
+DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
+DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
+DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
+DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
+DEF(neg_i64, 1, 1, 0, IMPL64)
+DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
+DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
+DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
+DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64))
+DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64))
+DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64))
+DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64))
+DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64))
+
+DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
+DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
+DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64))
+DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
+DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64))
+DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))
+
+#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
+
+/* There are tcg_ctx->insn_start_words here, not just one. */
+DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)
+
+DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
+DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
+DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
+
+DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT)
+DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT)
+
+/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
+DEF(qemu_ld_a32_i32, 1, 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+
+DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+
+/* Only used by i386 to cope with stupid register constraints. */
+DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
+ IMPL(TCG_TARGET_HAS_qemu_st8_i32))
+DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
+ IMPL(TCG_TARGET_HAS_qemu_st8_i32))
+
+/* Only for 64-bit hosts at the moment. */
+DEF(qemu_ld_a32_i128, 2, 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_ld_a64_i128, 2, 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_st_a32_i128, 0, 3, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_st_a64_i128, 0, 3, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+
+/* Host vector support. */
+
+#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
+
+DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
+
+DEF(dup_vec, 1, 1, 0, IMPLVEC)
+DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))
+
+DEF(ld_vec, 1, 1, 1, IMPLVEC)
+DEF(st_vec, 0, 2, 1, IMPLVEC)
+DEF(dupm_vec, 1, 1, 1, IMPLVEC)
+
+DEF(add_vec, 1, 2, 0, IMPLVEC)
+DEF(sub_vec, 1, 2, 0, IMPLVEC)
+DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
+DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))
+DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec))
+DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
+DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
+DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
+DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
+DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
+DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
+DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
+DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
+
+DEF(and_vec, 1, 2, 0, IMPLVEC)
+DEF(or_vec, 1, 2, 0, IMPLVEC)
+DEF(xor_vec, 1, 2, 0, IMPLVEC)
+DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
+DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
+DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
+DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
+DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
+DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))
+
+DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
+DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
+DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
+DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec))
+
+DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
+DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
+DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
+DEF(rotls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rots_vec))
+
+DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
+DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
+DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
+DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
+DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
+
+DEF(cmp_vec, 1, 2, 1, IMPLVEC)
+
+DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec))
+DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec))
+
+DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
+
+#if TCG_TARGET_MAYBE_vec
+#include "tcg-target.opc.h"
+#endif
+
+#ifdef TCG_TARGET_INTERPRETER
+/* These opcodes are only for use between the tci generator and interpreter. */
+DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+#endif
+
+#undef DATA64_ARGS
+#undef IMPL
+#undef IMPL64
+#undef IMPLVEC
+#undef DEF
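This header is an X-macro list: a consumer defines DEF() before including it and gets one expansion per opcode. A minimal hedged sketch of such a consumer, assuming tcg.h has already been included so the TCG_OPF_* and TCG_TARGET_HAS_* symbols used in the list are visible (the table name is illustrative, not from the patch):

/* Illustrative only: build a printable name table from the opcode list. */
static const char * const example_opc_names[] = {
#define DEF(name, oargs, iargs, cargs, flags)  #name,
#include "tcg/tcg-opc.h"
#undef DEF
};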
diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h
new file mode 100644
index 0000000000..44192c55a9
--- /dev/null
+++ b/include/tcg/tcg-temp-internal.h
@@ -0,0 +1,45 @@
+/*
+ * TCG internals related to TCG temp allocation
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_TEMP_INTERNAL_H
+#define TCG_TEMP_INTERNAL_H
+
+/*
+ * Allocation and freeing of EBB temps is reserved to TCG internals
+ */
+
+void tcg_temp_free_internal(TCGTemp *);
+
+void tcg_temp_free_i32(TCGv_i32 arg);
+void tcg_temp_free_i64(TCGv_i64 arg);
+void tcg_temp_free_i128(TCGv_i128 arg);
+void tcg_temp_free_ptr(TCGv_ptr arg);
+void tcg_temp_free_vec(TCGv_vec arg);
+
+TCGv_i32 tcg_temp_ebb_new_i32(void);
+TCGv_i64 tcg_temp_ebb_new_i64(void);
+TCGv_ptr tcg_temp_ebb_new_ptr(void);
+TCGv_i128 tcg_temp_ebb_new_i128(void);
+
+#endif /* TCG_TEMP_INTERNAL_H */
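A hedged sketch of the intended use of these internal helpers (the surrounding function is hypothetical, for illustration only):

/* Illustrative only: allocate an EBB-lifetime temp and release it. */
static void example_internal_helper(void)
{
    TCGv_i64 t = tcg_temp_ebb_new_i64();
    /* ... emit ops that use t within the current extended basic block ... */
    tcg_temp_free_i64(t);
}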
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
new file mode 100644
index 0000000000..05a1912f8a
--- /dev/null
+++ b/include/tcg/tcg.h
@@ -0,0 +1,1075 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_H
+#define TCG_H
+
+#include "exec/memop.h"
+#include "exec/memopidx.h"
+#include "qemu/bitops.h"
+#include "qemu/plugin.h"
+#include "qemu/queue.h"
+#include "tcg/tcg-mo.h"
+#include "tcg-target-reg-bits.h"
+#include "tcg-target.h"
+#include "tcg/tcg-cond.h"
+#include "tcg/debug-assert.h"
+
+/* XXX: make safe guess about sizes */
+#define MAX_OP_PER_INSTR 266
+
+#define MAX_CALL_IARGS 7
+
+#define CPU_TEMP_BUF_NLONGS 128
+#define TCG_STATIC_FRAME_SIZE (CPU_TEMP_BUF_NLONGS * sizeof(long))
+
+#if TCG_TARGET_REG_BITS == 32
+typedef int32_t tcg_target_long;
+typedef uint32_t tcg_target_ulong;
+#define TCG_PRIlx PRIx32
+#define TCG_PRIld PRId32
+#elif TCG_TARGET_REG_BITS == 64
+typedef int64_t tcg_target_long;
+typedef uint64_t tcg_target_ulong;
+#define TCG_PRIlx PRIx64
+#define TCG_PRIld PRId64
+#else
+#error unsupported
+#endif
+
+#if TCG_TARGET_NB_REGS <= 32
+typedef uint32_t TCGRegSet;
+#elif TCG_TARGET_NB_REGS <= 64
+typedef uint64_t TCGRegSet;
+#else
+#error unsupported
+#endif
+
+#if TCG_TARGET_REG_BITS == 32
+/* Turn some undef macros into false macros. */
+#define TCG_TARGET_HAS_extr_i64_i32 0
+#define TCG_TARGET_HAS_div_i64 0
+#define TCG_TARGET_HAS_rem_i64 0
+#define TCG_TARGET_HAS_div2_i64 0
+#define TCG_TARGET_HAS_rot_i64 0
+#define TCG_TARGET_HAS_ext8s_i64 0
+#define TCG_TARGET_HAS_ext16s_i64 0
+#define TCG_TARGET_HAS_ext32s_i64 0
+#define TCG_TARGET_HAS_ext8u_i64 0
+#define TCG_TARGET_HAS_ext16u_i64 0
+#define TCG_TARGET_HAS_ext32u_i64 0
+#define TCG_TARGET_HAS_bswap16_i64 0
+#define TCG_TARGET_HAS_bswap32_i64 0
+#define TCG_TARGET_HAS_bswap64_i64 0
+#define TCG_TARGET_HAS_not_i64 0
+#define TCG_TARGET_HAS_andc_i64 0
+#define TCG_TARGET_HAS_orc_i64 0
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 0
+#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_ctpop_i64 0
+#define TCG_TARGET_HAS_deposit_i64 0
+#define TCG_TARGET_HAS_extract_i64 0
+#define TCG_TARGET_HAS_sextract_i64 0
+#define TCG_TARGET_HAS_extract2_i64 0
+#define TCG_TARGET_HAS_negsetcond_i64 0
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
+/* Turn some undef macros into true macros. */
+#define TCG_TARGET_HAS_add2_i32 1
+#define TCG_TARGET_HAS_sub2_i32 1
+#endif
+
+#ifndef TCG_TARGET_deposit_i32_valid
+#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
+#endif
+#ifndef TCG_TARGET_deposit_i64_valid
+#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
+#endif
+#ifndef TCG_TARGET_extract_i32_valid
+#define TCG_TARGET_extract_i32_valid(ofs, len) 1
+#endif
+#ifndef TCG_TARGET_extract_i64_valid
+#define TCG_TARGET_extract_i64_valid(ofs, len) 1
+#endif
+
+/* Only one of DIV or DIV2 should be defined. */
+#if defined(TCG_TARGET_HAS_div_i32)
+#define TCG_TARGET_HAS_div2_i32 0
+#elif defined(TCG_TARGET_HAS_div2_i32)
+#define TCG_TARGET_HAS_div_i32 0
+#define TCG_TARGET_HAS_rem_i32 0
+#endif
+#if defined(TCG_TARGET_HAS_div_i64)
+#define TCG_TARGET_HAS_div2_i64 0
+#elif defined(TCG_TARGET_HAS_div2_i64)
+#define TCG_TARGET_HAS_div_i64 0
+#define TCG_TARGET_HAS_rem_i64 0
+#endif
+
+#if !defined(TCG_TARGET_HAS_v64) \
+ && !defined(TCG_TARGET_HAS_v128) \
+ && !defined(TCG_TARGET_HAS_v256)
+#define TCG_TARGET_MAYBE_vec 0
+#define TCG_TARGET_HAS_abs_vec 0
+#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_andc_vec 0
+#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 0
+#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_mul_vec 0
+#define TCG_TARGET_HAS_sat_vec 0
+#define TCG_TARGET_HAS_minmax_vec 0
+#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_cmpsel_vec 0
+#else
+#define TCG_TARGET_MAYBE_vec 1
+#endif
+#ifndef TCG_TARGET_HAS_v64
+#define TCG_TARGET_HAS_v64 0
+#endif
+#ifndef TCG_TARGET_HAS_v128
+#define TCG_TARGET_HAS_v128 0
+#endif
+#ifndef TCG_TARGET_HAS_v256
+#define TCG_TARGET_HAS_v256 0
+#endif
+
+typedef enum TCGOpcode {
+#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
+#include "tcg/tcg-opc.h"
+#undef DEF
+ NB_OPS,
+} TCGOpcode;
+
+#define tcg_regset_set_reg(d, r) ((d) |= (TCGRegSet)1 << (r))
+#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
+#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
+
+#ifndef TCG_TARGET_INSN_UNIT_SIZE
+# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
+#elif TCG_TARGET_INSN_UNIT_SIZE == 1
+typedef uint8_t tcg_insn_unit;
+#elif TCG_TARGET_INSN_UNIT_SIZE == 2
+typedef uint16_t tcg_insn_unit;
+#elif TCG_TARGET_INSN_UNIT_SIZE == 4
+typedef uint32_t tcg_insn_unit;
+#elif TCG_TARGET_INSN_UNIT_SIZE == 8
+typedef uint64_t tcg_insn_unit;
+#else
+/* The port better have done this. */
+#endif
+
+typedef struct TCGRelocation TCGRelocation;
+struct TCGRelocation {
+ QSIMPLEQ_ENTRY(TCGRelocation) next;
+ tcg_insn_unit *ptr;
+ intptr_t addend;
+ int type;
+};
+
+typedef struct TCGOp TCGOp;
+typedef struct TCGLabelUse TCGLabelUse;
+struct TCGLabelUse {
+ QSIMPLEQ_ENTRY(TCGLabelUse) next;
+ TCGOp *op;
+};
+
+typedef struct TCGLabel TCGLabel;
+struct TCGLabel {
+ bool present;
+ bool has_value;
+ uint16_t id;
+ union {
+ uintptr_t value;
+ const tcg_insn_unit *value_ptr;
+ } u;
+ QSIMPLEQ_HEAD(, TCGLabelUse) branches;
+ QSIMPLEQ_HEAD(, TCGRelocation) relocs;
+ QSIMPLEQ_ENTRY(TCGLabel) next;
+};
+
+typedef struct TCGPool {
+ struct TCGPool *next;
+ int size;
+ uint8_t data[] __attribute__ ((aligned));
+} TCGPool;
+
+#define TCG_POOL_CHUNK_SIZE 32768
+
+#define TCG_MAX_TEMPS 512
+#define TCG_MAX_INSNS 512
+
+/* when the size of the arguments of a called function is smaller than
+ this value, they are statically allocated in the TB stack frame */
+#define TCG_STATIC_CALL_ARGS_SIZE 128
+
+typedef enum TCGType {
+ TCG_TYPE_I32,
+ TCG_TYPE_I64,
+ TCG_TYPE_I128,
+
+ TCG_TYPE_V64,
+ TCG_TYPE_V128,
+ TCG_TYPE_V256,
+
+ /* Number of different types (integer not enum) */
+#define TCG_TYPE_COUNT (TCG_TYPE_V256 + 1)
+
+ /* An alias for the size of the host register. */
+#if TCG_TARGET_REG_BITS == 32
+ TCG_TYPE_REG = TCG_TYPE_I32,
+#else
+ TCG_TYPE_REG = TCG_TYPE_I64,
+#endif
+
+ /* An alias for the size of the native pointer. */
+#if UINTPTR_MAX == UINT32_MAX
+ TCG_TYPE_PTR = TCG_TYPE_I32,
+#else
+ TCG_TYPE_PTR = TCG_TYPE_I64,
+#endif
+} TCGType;
+
+/**
+ * tcg_type_size
+ * @t: type
+ *
+ * Return the size of the type in bytes.
+ */
+static inline int tcg_type_size(TCGType t)
+{
+ unsigned i = t;
+ if (i >= TCG_TYPE_V64) {
+ tcg_debug_assert(i < TCG_TYPE_COUNT);
+ i -= TCG_TYPE_V64 - 1;
+ }
+ return 4 << i;
+}
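A few worked values, following directly from the definition above:

/*
 *   tcg_type_size(TCG_TYPE_I32)  == 4
 *   tcg_type_size(TCG_TYPE_I64)  == 8
 *   tcg_type_size(TCG_TYPE_I128) == 16
 *   tcg_type_size(TCG_TYPE_V64)  == 8
 *   tcg_type_size(TCG_TYPE_V256) == 32
 */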
+
+/**
+ * get_alignment_bits
+ * @memop: MemOp value
+ *
+ * Extract the alignment size from the memop.
+ */
+static inline unsigned get_alignment_bits(MemOp memop)
+{
+ unsigned a = memop & MO_AMASK;
+
+ if (a == MO_UNALN) {
+ /* No alignment required. */
+ a = 0;
+ } else if (a == MO_ALIGN) {
+ /* A natural alignment requirement. */
+ a = memop & MO_SIZE;
+ } else {
+ /* A specific alignment requirement. */
+ a = a >> MO_ASHIFT;
+ }
+ return a;
+}
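Worked examples (the MO_* constants come from exec/memop.h, included above; the return value is log2 of the required alignment in bytes):

/*
 *   get_alignment_bits(MO_32 | MO_UNALN)    == 0   no alignment required
 *   get_alignment_bits(MO_32 | MO_ALIGN)    == 2   natural 4-byte alignment
 *   get_alignment_bits(MO_8  | MO_ALIGN_16) == 4   explicit 16-byte alignment
 */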
+
+typedef tcg_target_ulong TCGArg;
+
+/* Define type and accessor macros for TCG variables.
+
+ TCG variables are the inputs and outputs of TCG ops, as described
+ in tcg/README. Target CPU front-end code uses these types to deal
+ with TCG variables as it emits TCG code via the tcg_gen_* functions.
+ They come in several flavours:
+ * TCGv_i32 : 32 bit integer type
+ * TCGv_i64 : 64 bit integer type
+ * TCGv_i128 : 128 bit integer type
+ * TCGv_ptr : a host pointer type
+ * TCGv_vec : a host vector type; the exact size is not exposed
+ to the CPU front-end code.
+ * TCGv : an integer type the same size as target_ulong
+ (an alias for either TCGv_i32 or TCGv_i64)
+ The compiler's type checking will complain if you mix them
+ up and pass the wrong sized TCGv to a function.
+
+ Users of tcg_gen_* don't need to know about any of the internal
+ details of these, and should treat them as opaque types.
+ You won't be able to look inside them in a debugger either.
+
+ Internal implementation details follow:
+
+ Note that there is no definition of the structs TCGv_i32_d etc anywhere.
+ This is deliberate, because the values we store in variables of type
+ TCGv_i32 are not really pointers-to-structures. They're just small
+ integers, but keeping them in pointer types like this means that the
+ compiler will complain if you accidentally pass a TCGv_i32 to a
+ function which takes a TCGv_i64, and so on. Only the internals of
+ TCG need to care about the actual contents of the types. */
+
+typedef struct TCGv_i32_d *TCGv_i32;
+typedef struct TCGv_i64_d *TCGv_i64;
+typedef struct TCGv_i128_d *TCGv_i128;
+typedef struct TCGv_ptr_d *TCGv_ptr;
+typedef struct TCGv_vec_d *TCGv_vec;
+typedef TCGv_ptr TCGv_env;
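A hedged illustration of the type checking described above; the temp allocators are from tcg-temp-internal.h earlier in this patch, the emitter is assumed to come from tcg-op.h, and the snippet is illustrative only:

/* Illustrative only: mixing flavours is rejected by the compiler. */
TCGv_i32 a = tcg_temp_ebb_new_i32();
TCGv_i64 b = tcg_temp_ebb_new_i64();
tcg_gen_mov_i32(a, a);          /* fine: both operands are TCGv_i32 */
/* tcg_gen_mov_i32(a, b); */    /* would not compile: b is a TCGv_i64 */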
+
+/* call flags */
+/* Helper does not read globals (either directly or through an exception). It
+ implies TCG_CALL_NO_WRITE_GLOBALS. */
+#define TCG_CALL_NO_READ_GLOBALS 0x0001
+/* Helper does not write globals */
+#define TCG_CALL_NO_WRITE_GLOBALS 0x0002
+/* Helper can be safely suppressed if the return value is not used. */
+#define TCG_CALL_NO_SIDE_EFFECTS 0x0004
+/* Helper is G_NORETURN. */
+#define TCG_CALL_NO_RETURN 0x0008
+/* Helper is part of Plugins. */
+#define TCG_CALL_PLUGIN 0x0010
+
+/* convenience version of most used call flags */
+#define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS
+#define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS
+#define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS
+#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
+#define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
+
+/*
+ * Flags for the bswap opcodes.
+ * If IZ, the input is zero-extended, otherwise unknown.
+ * If OZ or OS, the output is zero- or sign-extended respectively,
+ * otherwise the high bits are undefined.
+ */
+enum {
+ TCG_BSWAP_IZ = 1,
+ TCG_BSWAP_OZ = 2,
+ TCG_BSWAP_OS = 4,
+};
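For instance, a 16-bit byte swap whose input is known to be zero-extended and whose result should be zero-extended would pass TCG_BSWAP_IZ | TCG_BSWAP_OZ. A hedged sketch, assuming tcg_gen_bswap16_i32() from tcg-op.h with 'ret' and 'arg' as hypothetical TCGv_i32 values:

/* Illustrative only: ret = bswap16(arg), with the high bits of ret cleared. */
tcg_gen_bswap16_i32(ret, arg, TCG_BSWAP_IZ | TCG_BSWAP_OZ);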
+
+typedef enum TCGTempVal {
+ TEMP_VAL_DEAD,
+ TEMP_VAL_REG,
+ TEMP_VAL_MEM,
+ TEMP_VAL_CONST,
+} TCGTempVal;
+
+typedef enum TCGTempKind {
+ /*
+ * Temp is dead at the end of the extended basic block (EBB),
+ * the single-entry multiple-exit region that falls through
+ * conditional branches.
+ */
+ TEMP_EBB,
+ /* Temp is live across the entire translation block, but dead at end. */
+ TEMP_TB,
+ /* Temp is live across the entire translation block, and between them. */
+ TEMP_GLOBAL,
+ /* Temp is in a fixed register. */
+ TEMP_FIXED,
+ /* Temp is a fixed constant. */
+ TEMP_CONST,
+} TCGTempKind;
+
+typedef struct TCGTemp {
+ TCGReg reg:8;
+ TCGTempVal val_type:8;
+ TCGType base_type:8;
+ TCGType type:8;
+ TCGTempKind kind:3;
+ unsigned int indirect_reg:1;
+ unsigned int indirect_base:1;
+ unsigned int mem_coherent:1;
+ unsigned int mem_allocated:1;
+ unsigned int temp_allocated:1;
+ unsigned int temp_subindex:2;
+
+ int64_t val;
+ struct TCGTemp *mem_base;
+ intptr_t mem_offset;
+ const char *name;
+
+ /* Pass-specific information that can be stored for a temporary.
+ One word worth of integer data, and one pointer to data
+ allocated separately. */
+ uintptr_t state;
+ void *state_ptr;
+} TCGTemp;
+
+typedef struct TCGContext TCGContext;
+
+typedef struct TCGTempSet {
+ unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
+} TCGTempSet;
+
+/*
+ * With 1 128-bit output, a 32-bit host requires 4 output parameters,
+ * which leaves a maximum of 28 other slots, which is enough for
+ * 7 128-bit operands.
+ */
+#define DEAD_ARG (1 << 4)
+#define SYNC_ARG (1 << 0)
+typedef uint32_t TCGLifeData;
+
+struct TCGOp {
+ TCGOpcode opc : 8;
+ unsigned nargs : 8;
+
+ /* Parameters for this opcode. See below. */
+ unsigned param1 : 8;
+ unsigned param2 : 8;
+
+ /* Lifetime data of the operands. */
+ TCGLifeData life;
+
+ /* Next and previous opcodes. */
+ QTAILQ_ENTRY(TCGOp) link;
+
+ /* Register preferences for the output(s). */
+ TCGRegSet output_pref[2];
+
+ /* Arguments for the opcode. */
+ TCGArg args[];
+};
+
+#define TCGOP_CALLI(X) (X)->param1
+#define TCGOP_CALLO(X) (X)->param2
+
+#define TCGOP_VECL(X) (X)->param1
+#define TCGOP_VECE(X) (X)->param2
+
+/* Make sure operands fit in the bitfields above. */
+QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
+
+static inline TCGRegSet output_pref(const TCGOp *op, unsigned i)
+{
+ return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0;
+}
+
+struct TCGContext {
+ uint8_t *pool_cur, *pool_end;
+ TCGPool *pool_first, *pool_current, *pool_first_large;
+ int nb_labels;
+ int nb_globals;
+ int nb_temps;
+ int nb_indirects;
+ int nb_ops;
+ TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
+
+ int page_mask;
+ uint8_t page_bits;
+ uint8_t tlb_dyn_max_bits;
+ uint8_t insn_start_words;
+ TCGBar guest_mo;
+
+ TCGRegSet reserved_regs;
+ intptr_t current_frame_offset;
+ intptr_t frame_start;
+ intptr_t frame_end;
+ TCGTemp *frame_temp;
+
+ TranslationBlock *gen_tb; /* tb for which code is being generated */
+ tcg_insn_unit *code_buf; /* pointer for start of tb */
+ tcg_insn_unit *code_ptr; /* pointer for running end of tb */
+
+#ifdef CONFIG_DEBUG_TCG
+ int goto_tb_issue_mask;
+ const TCGOpcode *vecop_list;
+#endif
+
+ /* Code generation. Note that we specifically do not use tcg_insn_unit
+ here, because there's too much arithmetic throughout that relies
+ on addition and subtraction working on bytes. Rely on the GCC
+ extension that allows arithmetic on void*. */
+ void *code_gen_buffer;
+ size_t code_gen_buffer_size;
+ void *code_gen_ptr;
+ void *data_gen_ptr;
+
+ /* Threshold to flush the translated code buffer. */
+ void *code_gen_highwater;
+
+ /* Track which vCPU triggers events */
+ CPUState *cpu; /* *_trans */
+
+ /* These structures are private to tcg-target.c.inc. */
+#ifdef TCG_TARGET_NEED_LDST_LABELS
+ QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
+#endif
+#ifdef TCG_TARGET_NEED_POOL_LABELS
+ struct TCGLabelPoolData *pool_labels;
+#endif
+
+ TCGLabel *exitreq_label;
+
+#ifdef CONFIG_PLUGIN
+ /*
+ * We keep one plugin_tb struct per TCGContext. Note that on every TB
+ * translation we clear but do not free its contents; this way we
+ * avoid a lot of malloc/free churn, since after a few TB's it's
+ * unlikely that we'll need to allocate either more instructions or more
+ * space for instructions (for variable-instruction-length ISAs).
+ */
+ struct qemu_plugin_tb *plugin_tb;
+
+ /* descriptor of the instruction being translated */
+ struct qemu_plugin_insn *plugin_insn;
+#endif
+
+ GHashTable *const_table[TCG_TYPE_COUNT];
+ TCGTempSet free_temps[TCG_TYPE_COUNT];
+ TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
+
+ QTAILQ_HEAD(, TCGOp) ops, free_ops;
+ QSIMPLEQ_HEAD(, TCGLabel) labels;
+
+ /*
+ * When clear, new ops are added to the tail of @ops.
+ * When set, new ops are added in front of @emit_before_op.
+ */
+ TCGOp *emit_before_op;
+
+ /* Tells which temporary holds a given register.
+ It does not take into account fixed registers */
+ TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
+
+ uint16_t gen_insn_end_off[TCG_MAX_INSNS];
+ uint64_t *gen_insn_data;
+
+ /* Exit to translator on overflow. */
+ sigjmp_buf jmp_trans;
+};
+
+static inline bool temp_readonly(TCGTemp *ts)
+{
+ return ts->kind >= TEMP_FIXED;
+}
+
+#ifdef CONFIG_USER_ONLY
+extern bool tcg_use_softmmu;
+#else
+#define tcg_use_softmmu true
+#endif
+
+extern __thread TCGContext *tcg_ctx;
+extern const void *tcg_code_gen_epilogue;
+extern uintptr_t tcg_splitwx_diff;
+extern TCGv_env tcg_env;
+
+bool in_code_gen_buffer(const void *p);
+
+#ifdef CONFIG_DEBUG_TCG
+const void *tcg_splitwx_to_rx(void *rw);
+void *tcg_splitwx_to_rw(const void *rx);
+#else
+static inline const void *tcg_splitwx_to_rx(void *rw)
+{
+ return rw ? rw + tcg_splitwx_diff : NULL;
+}
+
+static inline void *tcg_splitwx_to_rw(const void *rx)
+{
+ return rx ? (void *)rx - tcg_splitwx_diff : NULL;
+}
+#endif
+
+static inline TCGArg temp_arg(TCGTemp *ts)
+{
+ return (uintptr_t)ts;
+}
+
+static inline TCGTemp *arg_temp(TCGArg a)
+{
+ return (TCGTemp *)(uintptr_t)a;
+}
+
+#ifdef CONFIG_DEBUG_TCG
+size_t temp_idx(TCGTemp *ts);
+TCGTemp *tcgv_i32_temp(TCGv_i32 v);
+#else
+static inline size_t temp_idx(TCGTemp *ts)
+{
+ return ts - tcg_ctx->temps;
+}
+
+/*
+ * Using the offset of a temporary, relative to TCGContext, rather than
+ * its index means that we don't use 0. That leaves offset 0 free for
+ * a NULL representation without having to leave index 0 unused.
+ */
+static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
+{
+ return (void *)tcg_ctx + (uintptr_t)v;
+}
+#endif
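The offset encoding described above makes the conversion a pure round trip; a hedged sanity sketch (temp_tcgv_i32() is defined further down in this header, and the temp index is arbitrary):

/* Illustrative only: */
TCGTemp *ts = &tcg_ctx->temps[1];              /* any allocated temp */
tcg_debug_assert(tcgv_i32_temp(temp_tcgv_i32(ts)) == ts);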
+
+static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
+{
+ return tcgv_i32_temp((TCGv_i32)v);
+}
+
+static inline TCGTemp *tcgv_i128_temp(TCGv_i128 v)
+{
+ return tcgv_i32_temp((TCGv_i32)v);
+}
+
+static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
+{
+ return tcgv_i32_temp((TCGv_i32)v);
+}
+
+static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
+{
+ return tcgv_i32_temp((TCGv_i32)v);
+}
+
+static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
+{
+ return temp_arg(tcgv_i32_temp(v));
+}
+
+static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
+{
+ return temp_arg(tcgv_i64_temp(v));
+}
+
+static inline TCGArg tcgv_i128_arg(TCGv_i128 v)
+{
+ return temp_arg(tcgv_i128_temp(v));
+}
+
+static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
+{
+ return temp_arg(tcgv_ptr_temp(v));
+}
+
+static inline TCGArg tcgv_vec_arg(TCGv_vec v)
+{
+ return temp_arg(tcgv_vec_temp(v));
+}
+
+static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
+{
+ (void)temp_idx(t); /* trigger embedded assert */
+ return (TCGv_i32)((void *)t - (void *)tcg_ctx);
+}
+
+static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
+{
+ return (TCGv_i64)temp_tcgv_i32(t);
+}
+
+static inline TCGv_i128 temp_tcgv_i128(TCGTemp *t)
+{
+ return (TCGv_i128)temp_tcgv_i32(t);
+}
+
+static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
+{
+ return (TCGv_ptr)temp_tcgv_i32(t);
+}
+
+static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
+{
+ return (TCGv_vec)temp_tcgv_i32(t);
+}
+
+static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
+{
+ return op->args[arg];
+}
+
+static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
+{
+ op->args[arg] = v;
+}
+
+static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg)
+{
+ if (TCG_TARGET_REG_BITS == 64) {
+ return tcg_get_insn_param(op, arg);
+ } else {
+ return deposit64(tcg_get_insn_param(op, arg * 2), 32, 32,
+ tcg_get_insn_param(op, arg * 2 + 1));
+ }
+}
+
+static inline void tcg_set_insn_start_param(TCGOp *op, int arg, uint64_t v)
+{
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_set_insn_param(op, arg, v);
+ } else {
+ tcg_set_insn_param(op, arg * 2, v);
+ tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
+ }
+}
+
+/* The last op that was emitted. */
+static inline TCGOp *tcg_last_op(void)
+{
+ return QTAILQ_LAST(&tcg_ctx->ops);
+}
+
+/* Test for whether to terminate the TB for using too many opcodes. */
+static inline bool tcg_op_buf_full(void)
+{
+ /* This is not a hard limit, it merely stops translation when
+ * we have produced "enough" opcodes. We want to limit TB size
+ * such that a RISC host can reasonably use a 16-bit signed
+ * branch within the TB. We also need to be mindful of the
+ * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
+ * and TCGContext.gen_insn_end_off[].
+ */
+ return tcg_ctx->nb_ops >= 4000;
+}
+
+/* pool based memory allocation */
+
+/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
+void *tcg_malloc_internal(TCGContext *s, int size);
+void tcg_pool_reset(TCGContext *s);
+TranslationBlock *tcg_tb_alloc(TCGContext *s);
+
+void tcg_region_reset_all(void);
+
+size_t tcg_code_size(void);
+size_t tcg_code_capacity(void);
+
+void tcg_tb_insert(TranslationBlock *tb);
+void tcg_tb_remove(TranslationBlock *tb);
+TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
+void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
+size_t tcg_nb_tbs(void);
+
+/* user-mode: Called with mmap_lock held. */
+static inline void *tcg_malloc(int size)
+{
+ TCGContext *s = tcg_ctx;
+ uint8_t *ptr, *ptr_end;
+
+ /* ??? This is a weak placeholder for minimum malloc alignment. */
+ size = QEMU_ALIGN_UP(size, 8);
+
+ ptr = s->pool_cur;
+ ptr_end = ptr + size;
+ if (unlikely(ptr_end > s->pool_end)) {
+ return tcg_malloc_internal(tcg_ctx, size);
+ } else {
+ s->pool_cur = ptr_end;
+ return ptr;
+ }
+}
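A hedged sketch of typical pool usage during translation; pool allocations are not freed individually but released wholesale via tcg_pool_reset() ('some_op' is a hypothetical TCGOp pointer):

/* Illustrative only: pool allocations live until the next tcg_pool_reset(). */
TCGLabelUse *use = tcg_malloc(sizeof(TCGLabelUse));
use->op = some_op;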
+
+void tcg_func_start(TCGContext *s);
+
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start);
+
+void tb_target_set_jmp_target(const TranslationBlock *, int,
+ uintptr_t, uintptr_t);
+
+void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
+
+#define TCG_CT_CONST 1 /* any constant of register size */
+
+typedef struct TCGArgConstraint {
+ unsigned ct : 16;
+ unsigned alias_index : 4;
+ unsigned sort_index : 4;
+ unsigned pair_index : 4;
+ unsigned pair : 2; /* 0: none, 1: first, 2: second, 3: second alias */
+ bool oalias : 1;
+ bool ialias : 1;
+ bool newreg : 1;
+ TCGRegSet regs;
+} TCGArgConstraint;
+
+#define TCG_MAX_OP_ARGS 16
+
+/* Bits for TCGOpDef->flags, 8 bits available, all used. */
+enum {
+ /* Instruction exits the translation block. */
+ TCG_OPF_BB_EXIT = 0x01,
+ /* Instruction defines the end of a basic block. */
+ TCG_OPF_BB_END = 0x02,
+ /* Instruction clobbers call registers and potentially updates globals. */
+ TCG_OPF_CALL_CLOBBER = 0x04,
+ /* Instruction has side effects: it cannot be removed if its outputs
+ are not used, and might trigger exceptions. */
+ TCG_OPF_SIDE_EFFECTS = 0x08,
+ /* Instruction operands are 64-bits (otherwise 32-bits). */
+ TCG_OPF_64BIT = 0x10,
+ /* Instruction is optional and not implemented by the host, or insn
+ is generic and should not be implemented by the host. */
+ TCG_OPF_NOT_PRESENT = 0x20,
+ /* Instruction operands are vectors. */
+ TCG_OPF_VECTOR = 0x40,
+ /* Instruction is a conditional branch. */
+ TCG_OPF_COND_BRANCH = 0x80
+};
+
+typedef struct TCGOpDef {
+ const char *name;
+ uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
+ uint8_t flags;
+ TCGArgConstraint *args_ct;
+} TCGOpDef;
+
+extern TCGOpDef tcg_op_defs[];
+extern const size_t tcg_op_defs_max;
+
+typedef struct TCGTargetOpDef {
+ TCGOpcode op;
+ const char *args_ct_str[TCG_MAX_OP_ARGS];
+} TCGTargetOpDef;
+
+bool tcg_op_supported(TCGOpcode op);
+
+void tcg_gen_call0(TCGHelperInfo *, TCGTemp *ret);
+void tcg_gen_call1(TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
+void tcg_gen_call2(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *);
+void tcg_gen_call3(TCGHelperInfo *, TCGTemp *ret, TCGTemp *,
+ TCGTemp *, TCGTemp *);
+void tcg_gen_call4(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *);
+void tcg_gen_call5(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *, TCGTemp *);
+void tcg_gen_call6(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
+void tcg_gen_call7(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
+
+TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
+void tcg_op_remove(TCGContext *s, TCGOp *op);
+TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op,
+ TCGOpcode opc, unsigned nargs);
+TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op,
+ TCGOpcode opc, unsigned nargs);
+
+/**
+ * tcg_remove_ops_after:
+ * @op: target operation
+ *
+ * Discard any opcodes emitted since @op. Expected usage is to save
+ * a starting point with tcg_last_op(), speculatively emit opcodes,
+ * then decide whether or not to keep those opcodes after the fact.
+ */
+void tcg_remove_ops_after(TCGOp *op);
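A hedged sketch of the speculative-emission pattern described in the comment above ('abandon' is a hypothetical condition):

/* Illustrative only: */
TCGOp *last = tcg_last_op();
/* ... speculatively emit opcodes ... */
if (abandon) {
    tcg_remove_ops_after(last);    /* discard everything emitted since 'last' */
}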
+
+void tcg_optimize(TCGContext *s);
+
+TCGLabel *gen_new_label(void);
+
+/**
+ * label_arg
+ * @l: label
+ *
+ * Encode a label for storage in the TCG opcode stream.
+ */
+
+static inline TCGArg label_arg(TCGLabel *l)
+{
+ return (uintptr_t)l;
+}
+
+/**
+ * arg_label
+ * @i: value
+ *
+ * The opposite of label_arg. Retrieve a label from the
+ * encoding of the TCG opcode stream.
+ */
+
+static inline TCGLabel *arg_label(TCGArg i)
+{
+ return (TCGLabel *)(uintptr_t)i;
+}
+
+/**
+ * tcg_ptr_byte_diff
+ * @a, @b: addresses to be differenced
+ *
+ * There are many places within the TCG backends where we need a byte
+ * difference between two pointers. While this can be accomplished
+ * with local casting, it's easy to get wrong -- especially if one is
+ * concerned with the signedness of the result.
+ *
+ * This version relies on GCC's void pointer arithmetic to get the
+ * correct result.
+ */
+
+static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
+{
+ return a - b;
+}
+
+/**
+ * tcg_pcrel_diff
+ * @s: the tcg context
+ * @target: address of the target
+ *
+ * Produce a pc-relative difference, from the current code_ptr
+ * to the destination address.
+ */
+
+static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
+{
+ return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
+}
+
+/**
+ * tcg_tbrel_diff
+ * @s: the tcg context
+ * @target: address of the target
+ *
+ * Produce a difference, from the beginning of the current TB code
+ * to the destination address.
+ */
+static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
+{
+ return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
+}
+
+/**
+ * tcg_current_code_size
+ * @s: the tcg context
+ *
+ * Compute the current code size within the translation block.
+ * This is used to fill in qemu's data structures for goto_tb.
+ */
+
+static inline size_t tcg_current_code_size(TCGContext *s)
+{
+ return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
+}
+
+/**
+ * tcg_qemu_tb_exec:
+ * @env: pointer to CPUArchState for the CPU
+ * @tb_ptr: address of generated code for the TB to execute
+ *
+ * Start executing code from a given translation block.
+ * Where translation blocks have been linked, execution
+ * may proceed from the given TB into successive ones.
+ * Control eventually returns only when some action is needed
+ * from the top-level loop: either control must pass to a TB
+ * which has not yet been directly linked, or an asynchronous
+ * event such as an interrupt needs handling.
+ *
+ * Return: The return value is the value passed to the corresponding
+ * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
+ * The value is either zero or a 4-byte aligned pointer to that TB combined
+ * with additional information in its two least significant bits. The
+ * additional information is encoded as follows:
+ * 0, 1: the link between this TB and the next is via the specified
+ * TB index (0 or 1). That is, we left the TB via (the equivalent
+ * of) "goto_tb <index>". The main loop uses this to determine
+ * how to link the TB just executed to the next.
+ * 2: we are using instruction counting code generation, and we
+ * did not start executing this TB because the instruction counter
+ * would hit zero midway through it. In this case the pointer
+ * returned is the TB we were about to execute, and the caller must
+ * arrange to execute the remaining count of instructions.
+ * 3: we stopped because the CPU's exit_request flag was set
+ * (usually meaning that there is an interrupt that needs to be
+ * handled). The pointer returned is the TB we were about to execute
+ * when we noticed the pending exit request.
+ *
+ * If the bottom two bits indicate an exit-via-index then the CPU
+ * state is correctly synchronised and ready for execution of the next
+ * TB (and in particular the guest PC is the address to execute next).
+ * Otherwise, we gave up on execution of this TB before it started, and
+ * the caller must fix up the CPU state by calling the CPU's
+ * synchronize_from_tb() method with the TB pointer we return (falling
+ * back to calling the CPU's set_pc method with tb->pc if no
+ * synchronize_from_tb() method exists).
+ *
+ * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
+ * to this default (which just calls the prologue.code emitted by
+ * tcg_target_qemu_prologue()).
+ */
+#define TB_EXIT_MASK 3
+#define TB_EXIT_IDX0 0
+#define TB_EXIT_IDX1 1
+#define TB_EXIT_IDXMAX 1
+#define TB_EXIT_REQUESTED 3
+
+#ifdef CONFIG_TCG_INTERPRETER
+uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
+#else
+typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
+extern tcg_prologue_fn *tcg_qemu_tb_exec;
+#endif
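A hedged sketch of how a caller might decode the return value as documented above ('env' and 'tb_ptr' are hypothetical locals):

/* Illustrative only: */
uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
int tb_exit = ret & TB_EXIT_MASK;   /* TB_EXIT_IDX0/IDX1 or TB_EXIT_REQUESTED */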
+
+void tcg_register_jit(const void *buf, size_t buf_size);
+
+#if TCG_TARGET_MAYBE_vec
+/* Return zero if the tuple (opc, type, vece) is unsupportable;
+ return > 0 if it is directly supportable;
+ return < 0 if we must call tcg_expand_vec_op. */
+int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
+#else
+static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
+{
+ return 0;
+}
+#endif
+
+/* Expand the tuple (opc, type, vece) on the given arguments. */
+void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
+
+/* Replicate a constant C according to the log2 of the element size. */
+uint64_t dup_const(unsigned vece, uint64_t c);
+
+#define dup_const(VECE, C) \
+ (__builtin_constant_p(VECE) \
+ ? ( (VECE) == MO_8 ? 0x0101010101010101ull * (uint8_t)(C) \
+ : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C) \
+ : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C) \
+ : (VECE) == MO_64 ? (uint64_t)(C) \
+ : (qemu_build_not_reached_always(), 0)) \
+ : dup_const(VECE, C))
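Worked values, following from the macro above:

/*
 *   dup_const(MO_8,  0xab)       == 0xababababababababull
 *   dup_const(MO_16, 0x1234)     == 0x1234123412341234ull
 *   dup_const(MO_32, 0xdeadbeef) == 0xdeadbeefdeadbeefull
 *   dup_const(MO_64, X)          == (uint64_t)X
 */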
+
+static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
+{
+#ifdef CONFIG_DEBUG_TCG
+ const TCGOpcode *o = tcg_ctx->vecop_list;
+ tcg_ctx->vecop_list = n;
+ return o;
+#else
+ return NULL;
+#endif
+}
+
+bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
+
+#endif /* TCG_H */
diff --git a/include/trace-tcg.h b/include/trace-tcg.h
deleted file mode 100644
index da68608c85..0000000000
--- a/include/trace-tcg.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef TRACE_TCG_H
-#define TRACE_TCG_H
-
-#include "trace/generated-tcg-tracers.h"
-
-#endif /* TRACE_TCG_H */
diff --git a/include/ui/clipboard.h b/include/ui/clipboard.h
new file mode 100644
index 0000000000..ab6acdbd8a
--- /dev/null
+++ b/include/ui/clipboard.h
@@ -0,0 +1,277 @@
+#ifndef QEMU_CLIPBOARD_H
+#define QEMU_CLIPBOARD_H
+
+#include "qemu/notify.h"
+
+/**
+ * DOC: Introduction
+ *
+ * The header ``ui/clipboard.h`` declares the qemu clipboard interface.
+ *
+ * All qemu elements which want to use the clipboard can register as
+ * a clipboard peer. Subsequently they can set the clipboard content
+ * and get notifications for clipboard updates.
+ *
+ * Typical users are user interfaces (gtk), remote access protocols
+ * (vnc) and devices talking to the guest (vdagent).
+ *
+ * Even though the design allows different data types, only plain text
+ * is supported for now.
+ */
+
+typedef enum QemuClipboardType QemuClipboardType;
+typedef enum QemuClipboardNotifyType QemuClipboardNotifyType;
+typedef enum QemuClipboardSelection QemuClipboardSelection;
+typedef struct QemuClipboardPeer QemuClipboardPeer;
+typedef struct QemuClipboardNotify QemuClipboardNotify;
+typedef struct QemuClipboardInfo QemuClipboardInfo;
+
+/**
+ * enum QemuClipboardType
+ *
+ * @QEMU_CLIPBOARD_TYPE_TEXT: text/plain; charset=utf-8
+ * @QEMU_CLIPBOARD_TYPE__COUNT: type count.
+ */
+enum QemuClipboardType {
+ QEMU_CLIPBOARD_TYPE_TEXT,
+ QEMU_CLIPBOARD_TYPE__COUNT,
+};
+
+/* same as VD_AGENT_CLIPBOARD_SELECTION_* */
+/**
+ * enum QemuClipboardSelection
+ *
+ * @QEMU_CLIPBOARD_SELECTION_CLIPBOARD: clipboard (explicit cut+paste).
+ * @QEMU_CLIPBOARD_SELECTION_PRIMARY: primary selection (select + middle mouse button).
+ * @QEMU_CLIPBOARD_SELECTION_SECONDARY: secondary selection (rarely used).
+ * @QEMU_CLIPBOARD_SELECTION__COUNT: selection count.
+ */
+enum QemuClipboardSelection {
+ QEMU_CLIPBOARD_SELECTION_CLIPBOARD,
+ QEMU_CLIPBOARD_SELECTION_PRIMARY,
+ QEMU_CLIPBOARD_SELECTION_SECONDARY,
+ QEMU_CLIPBOARD_SELECTION__COUNT,
+};
+
+/**
+ * struct QemuClipboardPeer
+ *
+ * @name: peer name.
+ * @notifier: notifier for clipboard updates.
+ * @request: callback for clipboard data requests.
+ *
+ * Clipboard peer description.
+ */
+struct QemuClipboardPeer {
+ const char *name;
+ Notifier notifier;
+ void (*request)(QemuClipboardInfo *info,
+ QemuClipboardType type);
+};
+
+/**
+ * enum QemuClipboardNotifyType
+ *
+ * @QEMU_CLIPBOARD_UPDATE_INFO: clipboard info update
+ * @QEMU_CLIPBOARD_RESET_SERIAL: reset clipboard serial
+ *
+ * Clipboard notify type.
+ */
+enum QemuClipboardNotifyType {
+ QEMU_CLIPBOARD_UPDATE_INFO,
+ QEMU_CLIPBOARD_RESET_SERIAL,
+};
+
+/**
+ * struct QemuClipboardNotify
+ *
+ * @type: the type of event.
+ * @info: a QemuClipboardInfo event.
+ *
+ * Clipboard notify data.
+ */
+struct QemuClipboardNotify {
+ QemuClipboardNotifyType type;
+ union {
+ QemuClipboardInfo *info;
+ };
+};
+
+/**
+ * struct QemuClipboardInfo
+ *
+ * @refcount: reference counter.
+ * @owner: clipboard owner.
+ * @selection: clipboard selection.
+ * @types: clipboard data array (one entry per type).
+ * @has_serial: whether @serial is available.
+ * @serial: the grab serial counter.
+ *
+ * Clipboard content data and metadata.
+ */
+struct QemuClipboardInfo {
+ uint32_t refcount;
+ QemuClipboardPeer *owner;
+ QemuClipboardSelection selection;
+ bool has_serial;
+ uint32_t serial;
+ struct {
+ bool available;
+ bool requested;
+ size_t size;
+ void *data;
+ } types[QEMU_CLIPBOARD_TYPE__COUNT];
+};
+
+/**
+ * qemu_clipboard_peer_register
+ *
+ * @peer: peer information.
+ *
+ * Register clipboard peer. Registering is needed for both active
+ * (set+grab clipboard) and passive (watch clipboard for updates)
+ * interaction with the qemu clipboard.
+ */
+void qemu_clipboard_peer_register(QemuClipboardPeer *peer);
+
+/**
+ * qemu_clipboard_peer_unregister
+ *
+ * @peer: peer information.
+ *
+ * Unregister clipboard peer.
+ */
+void qemu_clipboard_peer_unregister(QemuClipboardPeer *peer);
+
+/**
+ * qemu_clipboard_peer_owns
+ *
+ * @peer: peer information.
+ * @selection: clipboard selection.
+ *
+ * Return TRUE if the peer owns the clipboard.
+ */
+bool qemu_clipboard_peer_owns(QemuClipboardPeer *peer,
+ QemuClipboardSelection selection);
+
+/**
+ * qemu_clipboard_peer_release
+ *
+ * @peer: peer information.
+ * @selection: clipboard selection.
+ *
+ * If the peer owns the clipboard, release it.
+ */
+void qemu_clipboard_peer_release(QemuClipboardPeer *peer,
+ QemuClipboardSelection selection);
+
+/**
+ * qemu_clipboard_info
+ *
+ * @selection: clipboard selection.
+ *
+ * Return the current clipboard data & owner information.
+ */
+QemuClipboardInfo *qemu_clipboard_info(QemuClipboardSelection selection);
+
+/**
+ * qemu_clipboard_check_serial
+ *
+ * @info: clipboard info.
+ * @client: whether to check from the client context and priority.
+ *
+ * Return TRUE if the @info has a higher serial than the current clipboard.
+ */
+bool qemu_clipboard_check_serial(QemuClipboardInfo *info, bool client);
+
+/**
+ * qemu_clipboard_info_new
+ *
+ * @owner: clipboard owner.
+ * @selection: clipboard selection.
+ *
+ * Allocate a new QemuClipboardInfo and initialize it with the given
+ * @owner and @selection.
+ *
+ * QemuClipboardInfo is a reference-counted struct. The new struct is
+ * returned with a reference already taken (i.e. reference count is
+ * one).
+ */
+QemuClipboardInfo *qemu_clipboard_info_new(QemuClipboardPeer *owner,
+ QemuClipboardSelection selection);
+/**
+ * qemu_clipboard_info_ref
+ *
+ * @info: clipboard info.
+ *
+ * Increase @info reference count.
+ */
+QemuClipboardInfo *qemu_clipboard_info_ref(QemuClipboardInfo *info);
+
+/**
+ * qemu_clipboard_info_unref
+ *
+ * @info: clipboard info.
+ *
+ * Decrease @info reference count. When the count goes down to zero
+ * free the @info struct itself and all clipboard data.
+ */
+void qemu_clipboard_info_unref(QemuClipboardInfo *info);
+
+/**
+ * qemu_clipboard_update
+ *
+ * @info: clipboard info.
+ *
+ * Update the qemu clipboard. Notify all registered peers (including
+ * the clipboard owner) that the qemu clipboard has been updated.
+ *
+ * This is used both for completely new clipboard content and for
+ * clipboard data updates in response to qemu_clipboard_request()
+ * calls.
+ */
+void qemu_clipboard_update(QemuClipboardInfo *info);
+
+/**
+ * qemu_clipboard_reset_serial
+ *
+ * Reset the clipboard serial.
+ */
+void qemu_clipboard_reset_serial(void);
+
+/**
+ * qemu_clipboard_request
+ *
+ * @info: clipboard info.
+ * @type: clipboard data type.
+ *
+ * Request clipboard content. Typically the clipboard owner only
+ * advertises the available data types and provides the actual data
+ * on request.
+ */
+void qemu_clipboard_request(QemuClipboardInfo *info,
+ QemuClipboardType type);
+
+/**
+ * qemu_clipboard_set_data
+ *
+ * @peer: clipboard peer.
+ * @info: clipboard info.
+ * @type: clipboard data type.
+ * @size: data size.
+ * @data: data blob.
+ * @update: notify peers about the update.
+ *
+ * Set clipboard content for the given @type. This function will make
+ * a copy of the content data and store that.
+ */
+void qemu_clipboard_set_data(QemuClipboardPeer *peer,
+ QemuClipboardInfo *info,
+ QemuClipboardType type,
+ uint32_t size,
+ const void *data,
+ bool update);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuClipboardInfo, qemu_clipboard_info_unref)
+
+#endif /* QEMU_CLIPBOARD_H */
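A minimal hedged sketch of a clipboard peer built only from the declarations above; the peer name, callbacks and text are illustrative, and the notifier payload is assumed to be a QemuClipboardNotify event as listed in the notify types:

/* Illustrative only, not part of the patch. */
static void example_notify(Notifier *notifier, void *data)
{
    /* 'data' is assumed to carry a QemuClipboardNotify event. */
}

static void example_request(QemuClipboardInfo *info, QemuClipboardType type)
{
    /* Provide the requested data via qemu_clipboard_set_data() if available. */
}

static QemuClipboardPeer example_peer = {
    .name = "example",
    .notifier = { .notify = example_notify },
    .request = example_request,
};

static void example_init(void)
{
    qemu_clipboard_peer_register(&example_peer);   /* once, at startup */
}

static void example_publish_text(const char *text)
{
    g_autoptr(QemuClipboardInfo) info =
        qemu_clipboard_info_new(&example_peer, QEMU_CLIPBOARD_SELECTION_CLIPBOARD);

    qemu_clipboard_set_data(&example_peer, info, QEMU_CLIPBOARD_TYPE_TEXT,
                            strlen(text), text, true);
}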
diff --git a/include/ui/console.h b/include/ui/console.h
index 853fcf4eb7..0bc7a00ac0 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -4,13 +4,30 @@
#include "ui/qemu-pixman.h"
#include "qom/object.h"
#include "qemu/notify.h"
-#include "qemu/error-report.h"
#include "qapi/qapi-types-ui.h"
+#include "ui/input.h"
+#include "ui/surface.h"
-#ifdef CONFIG_OPENGL
-# include <epoxy/gl.h>
-# include "ui/shader.h"
-#endif
+#define TYPE_QEMU_CONSOLE "qemu-console"
+OBJECT_DECLARE_TYPE(QemuConsole, QemuConsoleClass, QEMU_CONSOLE)
+
+#define TYPE_QEMU_GRAPHIC_CONSOLE "qemu-graphic-console"
+OBJECT_DECLARE_SIMPLE_TYPE(QemuGraphicConsole, QEMU_GRAPHIC_CONSOLE)
+
+#define TYPE_QEMU_TEXT_CONSOLE "qemu-text-console"
+OBJECT_DECLARE_SIMPLE_TYPE(QemuTextConsole, QEMU_TEXT_CONSOLE)
+
+#define TYPE_QEMU_FIXED_TEXT_CONSOLE "qemu-fixed-text-console"
+OBJECT_DECLARE_SIMPLE_TYPE(QemuFixedTextConsole, QEMU_FIXED_TEXT_CONSOLE)
+
+#define QEMU_IS_GRAPHIC_CONSOLE(c) \
+ object_dynamic_cast(OBJECT(c), TYPE_QEMU_GRAPHIC_CONSOLE)
+
+#define QEMU_IS_TEXT_CONSOLE(c) \
+ object_dynamic_cast(OBJECT(c), TYPE_QEMU_TEXT_CONSOLE)
+
+#define QEMU_IS_FIXED_TEXT_CONSOLE(c) \
+ object_dynamic_cast(OBJECT(c), TYPE_QEMU_FIXED_TEXT_CONSOLE)
/* keyboard/mouse support */
@@ -65,19 +82,12 @@ void qemu_remove_led_event_handler(QEMUPutLEDEntry *entry);
void kbd_put_ledstate(int ledstate);
-struct MouseTransformInfo {
- /* Touchscreen resolution */
- int x;
- int y;
- /* Calibration values as used/generated by tslib */
- int a[7];
-};
-
-void hmp_mouse_set(Monitor *mon, const QDict *qdict);
+bool qemu_mouse_set(int index, Error **errp);
/* keysym is a unicode code except for special keys (see QEMU_KEY_xxx
constants) */
#define QEMU_KEY_ESC1(c) ((c) | 0xe100)
+#define QEMU_KEY_TAB 0x0009
#define QEMU_KEY_BACKSPACE 0x007f
#define QEMU_KEY_UP QEMU_KEY_ESC1('A')
#define QEMU_KEY_DOWN QEMU_KEY_ESC1('B')
@@ -98,69 +108,65 @@ void hmp_mouse_set(Monitor *mon, const QDict *qdict);
#define QEMU_KEY_CTRL_PAGEUP 0xe406
#define QEMU_KEY_CTRL_PAGEDOWN 0xe407
-void kbd_put_keysym_console(QemuConsole *s, int keysym);
-bool kbd_put_qcode_console(QemuConsole *s, int qcode, bool ctrl);
-void kbd_put_string_console(QemuConsole *s, const char *str, int len);
-void kbd_put_keysym(int keysym);
+void qemu_text_console_put_keysym(QemuTextConsole *s, int keysym);
+bool qemu_text_console_put_qcode(QemuTextConsole *s, int qcode, bool ctrl);
+void qemu_text_console_put_string(QemuTextConsole *s, const char *str, int len);
+/* Touch devices */
+typedef struct touch_slot {
+ int x;
+ int y;
+ int tracking_id;
+} touch_slot;
+
+void console_handle_touch_event(QemuConsole *con,
+ struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX],
+ uint64_t num_slot,
+ int width, int height,
+ double x, double y,
+ InputMultiTouchType type,
+ Error **errp);
/* consoles */
-#define TYPE_QEMU_CONSOLE "qemu-console"
-#define QEMU_CONSOLE(obj) \
- OBJECT_CHECK(QemuConsole, (obj), TYPE_QEMU_CONSOLE)
-#define QEMU_CONSOLE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QemuConsoleClass, (obj), TYPE_QEMU_CONSOLE)
-#define QEMU_CONSOLE_CLASS(klass) \
- OBJECT_CLASS_CHECK(QemuConsoleClass, (klass), TYPE_QEMU_CONSOLE)
-
-typedef struct QemuConsoleClass QemuConsoleClass;
-
struct QemuConsoleClass {
ObjectClass parent_class;
};
-#define QEMU_ALLOCATED_FLAG 0x01
-
-struct PixelFormat {
- uint8_t bits_per_pixel;
- uint8_t bytes_per_pixel;
- uint8_t depth; /* color depth in bits */
- uint32_t rmask, gmask, bmask, amask;
- uint8_t rshift, gshift, bshift, ashift;
- uint8_t rmax, gmax, bmax, amax;
- uint8_t rbits, gbits, bbits, abits;
-};
-
-struct DisplaySurface {
- pixman_format_code_t format;
- pixman_image_t *image;
- uint8_t flags;
-#ifdef CONFIG_OPENGL
- GLenum glformat;
- GLenum gltype;
- GLuint texture;
-#endif
-};
+typedef struct ScanoutTexture {
+ uint32_t backing_id;
+ bool backing_y_0_top;
+ uint32_t backing_width;
+ uint32_t backing_height;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ void *d3d_tex2d;
+} ScanoutTexture;
typedef struct QemuUIInfo {
+ /* physical dimension */
+ uint16_t width_mm;
+ uint16_t height_mm;
/* geometry */
int xoff;
int yoff;
uint32_t width;
uint32_t height;
+ uint32_t refresh_rate;
} QemuUIInfo;
/* cursor data format is 32bit RGBA */
typedef struct QEMUCursor {
- int width, height;
+ uint16_t width, height;
int hot_x, hot_y;
int refcount;
uint32_t data[];
} QEMUCursor;
-QEMUCursor *cursor_alloc(int width, int height);
-void cursor_get(QEMUCursor *c);
-void cursor_put(QEMUCursor *c);
+QEMUCursor *cursor_alloc(uint16_t width, uint16_t height);
+QEMUCursor *cursor_ref(QEMUCursor *c);
+void cursor_unref(QEMUCursor *c);
QEMUCursor *cursor_builtin_hidden(void);
QEMUCursor *cursor_builtin_left_ptr(void);
void cursor_print_ascii_art(QEMUCursor *c, const char *prefix);
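As a usage sketch of the reworked cursor refcounting above: cursor_alloc() hands back a cursor with one reference, dpy_cursor_define() is assumed to take its own reference, so the caller drops its reference with cursor_unref(). The dpy_cursor_define() call and the 64x64 size are illustrative assumptions.

    #include "qemu/osdep.h"
    #include "ui/console.h"

    static void set_blank_cursor(QemuConsole *con)
    {
        QEMUCursor *c = cursor_alloc(64, 64);   /* 64x64 RGBA, refcount == 1 */

        c->hot_x = 0;
        c->hot_y = 0;
        memset(c->data, 0, 64 * 64 * sizeof(uint32_t));  /* fully transparent */

        dpy_cursor_define(con, c);   /* assumed to take its own reference */
        cursor_unref(c);             /* drop the allocation reference */
    }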
@@ -179,65 +185,104 @@ struct QEMUGLParams {
int minor_ver;
};
-struct QemuDmaBuf {
+typedef struct QemuDmaBuf {
int fd;
uint32_t width;
uint32_t height;
uint32_t stride;
uint32_t fourcc;
+ uint64_t modifier;
uint32_t texture;
+ uint32_t x;
+ uint32_t y;
+ uint32_t backing_width;
+ uint32_t backing_height;
bool y0_top;
+ void *sync;
+ int fence_fd;
+ bool allow_fences;
+ bool draw_submitted;
+} QemuDmaBuf;
+
+enum display_scanout {
+ SCANOUT_NONE,
+ SCANOUT_SURFACE,
+ SCANOUT_TEXTURE,
+ SCANOUT_DMABUF,
};
+typedef struct DisplayScanout {
+ enum display_scanout kind;
+ union {
+ /* DisplaySurface *surface; is kept in QemuConsole */
+ ScanoutTexture texture;
+ QemuDmaBuf *dmabuf;
+ };
+} DisplayScanout;
+
+typedef struct DisplayState DisplayState;
+typedef struct DisplayGLCtx DisplayGLCtx;
+
typedef struct DisplayChangeListenerOps {
const char *dpy_name;
+ /* optional */
void (*dpy_refresh)(DisplayChangeListener *dcl);
+ /* optional */
void (*dpy_gfx_update)(DisplayChangeListener *dcl,
int x, int y, int w, int h);
+ /* optional */
void (*dpy_gfx_switch)(DisplayChangeListener *dcl,
struct DisplaySurface *new_surface);
+ /* optional */
bool (*dpy_gfx_check_format)(DisplayChangeListener *dcl,
pixman_format_code_t format);
+ /* optional */
void (*dpy_text_cursor)(DisplayChangeListener *dcl,
int x, int y);
+ /* optional */
void (*dpy_text_resize)(DisplayChangeListener *dcl,
int w, int h);
+ /* optional */
void (*dpy_text_update)(DisplayChangeListener *dcl,
int x, int y, int w, int h);
+ /* optional */
void (*dpy_mouse_set)(DisplayChangeListener *dcl,
int x, int y, int on);
+ /* optional */
void (*dpy_cursor_define)(DisplayChangeListener *dcl,
QEMUCursor *cursor);
- QEMUGLContext (*dpy_gl_ctx_create)(DisplayChangeListener *dcl,
- QEMUGLParams *params);
- void (*dpy_gl_ctx_destroy)(DisplayChangeListener *dcl,
- QEMUGLContext ctx);
- int (*dpy_gl_ctx_make_current)(DisplayChangeListener *dcl,
- QEMUGLContext ctx);
- QEMUGLContext (*dpy_gl_ctx_get_current)(DisplayChangeListener *dcl);
-
+ /* required if GL */
void (*dpy_gl_scanout_disable)(DisplayChangeListener *dcl);
+ /* required if GL */
void (*dpy_gl_scanout_texture)(DisplayChangeListener *dcl,
uint32_t backing_id,
bool backing_y_0_top,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
+ /* optional (default to true if has dpy_gl_scanout_dmabuf) */
+ bool (*dpy_has_dmabuf)(DisplayChangeListener *dcl);
+ /* optional */
void (*dpy_gl_scanout_dmabuf)(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf);
+ /* optional */
void (*dpy_gl_cursor_dmabuf)(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf, bool have_hot,
uint32_t hot_x, uint32_t hot_y);
+ /* optional */
void (*dpy_gl_cursor_position)(DisplayChangeListener *dcl,
uint32_t pos_x, uint32_t pos_y);
+ /* optional */
void (*dpy_gl_release_dmabuf)(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf);
+ /* required if GL */
void (*dpy_gl_update)(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
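To make the optional/"required if GL" annotations above concrete, here is a sketch of a 2D-only frontend that implements just the callbacks it needs and registers itself; all GL hooks stay NULL. The my_* names are placeholders, and assigning the .con field follows the usual DisplayChangeListener usage rather than anything shown in this hunk.

    #include "qemu/osdep.h"
    #include "ui/console.h"

    static void my_refresh(DisplayChangeListener *dcl)
    {
        graphic_hw_update(dcl->con);        /* poll the device for changes */
    }

    static void my_gfx_update(DisplayChangeListener *dcl,
                              int x, int y, int w, int h)
    {
        /* redraw the dirty rectangle of the current surface (left out here) */
    }

    static void my_gfx_switch(DisplayChangeListener *dcl,
                              DisplaySurface *new_surface)
    {
        /* adopt the new surface; size and format may have changed */
    }

    static const DisplayChangeListenerOps my_dcl_ops = {
        .dpy_name       = "my-frontend",
        .dpy_refresh    = my_refresh,
        .dpy_gfx_update = my_gfx_update,
        .dpy_gfx_switch = my_gfx_switch,
        /* every other callback is optional and left NULL */
    };

    static DisplayChangeListener my_dcl = { .ops = &my_dcl_ops };

    static void my_frontend_init(QemuConsole *con)
    {
        my_dcl.con = con;                   /* assumed field, see console.h */
        register_displaychangelistener(&my_dcl);
    }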
@@ -252,40 +297,41 @@ struct DisplayChangeListener {
QLIST_ENTRY(DisplayChangeListener) next;
};
-DisplayState *init_displaystate(void);
-DisplaySurface *qemu_create_displaysurface_from(int width, int height,
- pixman_format_code_t format,
- int linesize, uint8_t *data);
-DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image);
-DisplaySurface *qemu_create_message_surface(int w, int h,
- const char *msg);
-PixelFormat qemu_default_pixelformat(int bpp);
-
-DisplaySurface *qemu_create_displaysurface(int width, int height);
-void qemu_free_displaysurface(DisplaySurface *surface);
-
-static inline int is_surface_bgr(DisplaySurface *surface)
-{
- if (PIXMAN_FORMAT_BPP(surface->format) == 32 &&
- PIXMAN_FORMAT_TYPE(surface->format) == PIXMAN_TYPE_ABGR) {
- return 1;
- } else {
- return 0;
- }
-}
+typedef struct DisplayGLCtxOps {
+ bool (*dpy_gl_ctx_is_compatible_dcl)(DisplayGLCtx *dgc,
+ DisplayChangeListener *dcl);
+ QEMUGLContext (*dpy_gl_ctx_create)(DisplayGLCtx *dgc,
+ QEMUGLParams *params);
+ void (*dpy_gl_ctx_destroy)(DisplayGLCtx *dgc,
+ QEMUGLContext ctx);
+ int (*dpy_gl_ctx_make_current)(DisplayGLCtx *dgc,
+ QEMUGLContext ctx);
+ void (*dpy_gl_ctx_create_texture)(DisplayGLCtx *dgc,
+ DisplaySurface *surface);
+ void (*dpy_gl_ctx_destroy_texture)(DisplayGLCtx *dgc,
+ DisplaySurface *surface);
+ void (*dpy_gl_ctx_update_texture)(DisplayGLCtx *dgc,
+ DisplaySurface *surface,
+ int x, int y, int w, int h);
+} DisplayGLCtxOps;
+
+struct DisplayGLCtx {
+ const DisplayGLCtxOps *ops;
+#ifdef CONFIG_OPENGL
+ QemuGLShader *gls; /* optional shared shader */
+#endif
+};
-static inline int is_buffer_shared(DisplaySurface *surface)
-{
- return !(surface->flags & QEMU_ALLOCATED_FLAG);
-}
+DisplayState *init_displaystate(void);
void register_displaychangelistener(DisplayChangeListener *dcl);
void update_displaychangelistener(DisplayChangeListener *dcl,
uint64_t interval);
void unregister_displaychangelistener(DisplayChangeListener *dcl);
-bool dpy_ui_info_supported(QemuConsole *con);
-int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info);
+bool dpy_ui_info_supported(const QemuConsole *con);
+const QemuUIInfo *dpy_get_ui_info(const QemuConsole *con);
+int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info, bool delay);
void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h);
void dpy_gfx_update_full(QemuConsole *con);
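With the GL context operations split out into DisplayGLCtxOps above, a frontend built on the EGL helpers could, as a sketch, wire them up like this and attach the context provider to its console. The egl_dcl_compatible() helper and treating the *_texture hooks as optional are assumptions made for illustration.

    #include "qemu/osdep.h"
    #include "ui/console.h"
    #include "ui/egl-context.h"

    static bool egl_dcl_compatible(DisplayGLCtx *dgc, DisplayChangeListener *dcl)
    {
        return true;    /* assumed: accept any listener in this sketch */
    }

    static const DisplayGLCtxOps egl_ctx_ops = {
        .dpy_gl_ctx_is_compatible_dcl = egl_dcl_compatible,
        .dpy_gl_ctx_create            = qemu_egl_create_context,
        .dpy_gl_ctx_destroy           = qemu_egl_destroy_context,
        .dpy_gl_ctx_make_current      = qemu_egl_make_context_current,
        /* the *_texture hooks are left NULL here (assumed optional) */
    };

    static DisplayGLCtx my_gl_ctx = { .ops = &egl_ctx_ops };

    static void my_frontend_attach_gl(QemuConsole *con)
    {
        qemu_console_set_display_gl_ctx(con, &my_gl_ctx);
    }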
@@ -304,7 +350,8 @@ void dpy_gl_scanout_disable(QemuConsole *con);
void dpy_gl_scanout_texture(QemuConsole *con,
uint32_t backing_id, bool backing_y_0_top,
uint32_t backing_width, uint32_t backing_height,
- uint32_t x, uint32_t y, uint32_t w, uint32_t h);
+ uint32_t x, uint32_t y, uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void dpy_gl_scanout_dmabuf(QemuConsole *con,
QemuDmaBuf *dmabuf);
void dpy_gl_cursor_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf,
@@ -320,47 +367,8 @@ QEMUGLContext dpy_gl_ctx_create(QemuConsole *con,
QEMUGLParams *params);
void dpy_gl_ctx_destroy(QemuConsole *con, QEMUGLContext ctx);
int dpy_gl_ctx_make_current(QemuConsole *con, QEMUGLContext ctx);
-QEMUGLContext dpy_gl_ctx_get_current(QemuConsole *con);
bool console_has_gl(QemuConsole *con);
-bool console_has_gl_dmabuf(QemuConsole *con);
-
-static inline int surface_stride(DisplaySurface *s)
-{
- return pixman_image_get_stride(s->image);
-}
-
-static inline void *surface_data(DisplaySurface *s)
-{
- return pixman_image_get_data(s->image);
-}
-
-static inline int surface_width(DisplaySurface *s)
-{
- return pixman_image_get_width(s->image);
-}
-
-static inline int surface_height(DisplaySurface *s)
-{
- return pixman_image_get_height(s->image);
-}
-
-static inline int surface_bits_per_pixel(DisplaySurface *s)
-{
- int bits = PIXMAN_FORMAT_BPP(s->format);
- return bits;
-}
-
-static inline int surface_bytes_per_pixel(DisplaySurface *s)
-{
- int bits = PIXMAN_FORMAT_BPP(s->format);
- return DIV_ROUND_UP(bits, 8);
-}
-
-static inline pixman_format_code_t surface_format(DisplaySurface *s)
-{
- return s->format;
-}
typedef uint32_t console_ch_t;
@@ -369,12 +377,21 @@ static inline void console_write_ch(console_ch_t *dest, uint32_t ch)
*dest = ch;
}
+enum {
+ GRAPHIC_FLAGS_NONE = 0,
+ /* require a console/display with GL callbacks */
+ GRAPHIC_FLAGS_GL = 1 << 0,
+ /* require a console/display with DMABUF import */
+ GRAPHIC_FLAGS_DMABUF = 1 << 1,
+};
+
typedef struct GraphicHwOps {
+ int (*get_flags)(void *opaque); /* optional, default 0 */
void (*invalidate)(void *opaque);
void (*gfx_update)(void *opaque);
+ bool gfx_update_async; /* if true, calls graphic_hw_update_done() */
void (*text_update)(void *opaque, console_ch_t *text);
- void (*update_interval)(void *opaque, uint64_t interval);
- int (*ui_info)(void *opaque, uint32_t head, QemuUIInfo *info);
+ void (*ui_info)(void *opaque, uint32_t head, QemuUIInfo *info);
void (*gl_block)(void *opaque, bool block);
} GraphicHwOps;
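A sketch of how a GL-capable display device might fill in the reworked GraphicHwOps: get_flags advertises the GRAPHIC_FLAGS_* requirements introduced above, and because gfx_update_async is set the device is expected to call graphic_hw_update_done() when its update completes. The mydev_* names and the stored console pointer are hypothetical.

    #include "qemu/osdep.h"
    #include "ui/console.h"

    static QemuConsole *mydev_con;    /* assumed: set when the console is created */

    static int mydev_get_flags(void *opaque)
    {
        /* this device needs a GL-capable display with dmabuf import */
        return GRAPHIC_FLAGS_GL | GRAPHIC_FLAGS_DMABUF;
    }

    static void mydev_invalidate(void *opaque)
    {
        /* mark the whole display dirty; the next gfx_update redraws everything */
    }

    static void mydev_gfx_update(void *opaque)
    {
        /* kick off the (possibly asynchronous) render job here; because
         * gfx_update_async is set below, the completion path must call: */
        graphic_hw_update_done(mydev_con);
    }

    static const GraphicHwOps mydev_hw_ops = {
        .get_flags        = mydev_get_flags,
        .invalidate       = mydev_invalidate,
        .gfx_update       = mydev_gfx_update,
        .gfx_update_async = true,
    };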
@@ -387,17 +404,21 @@ void graphic_console_set_hwops(QemuConsole *con,
void graphic_console_close(QemuConsole *con);
void graphic_hw_update(QemuConsole *con);
+void graphic_hw_update_done(QemuConsole *con);
void graphic_hw_invalidate(QemuConsole *con);
void graphic_hw_text_update(QemuConsole *con, console_ch_t *chardata);
void graphic_hw_gl_block(QemuConsole *con, bool block);
void qemu_console_early_init(void);
+void qemu_console_set_display_gl_ctx(QemuConsole *con, DisplayGLCtx *ctx);
+
+QemuConsole *qemu_console_lookup_default(void);
QemuConsole *qemu_console_lookup_by_index(unsigned int index);
QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head);
QemuConsole *qemu_console_lookup_by_device_name(const char *device_id,
uint32_t head, Error **errp);
-QemuConsole *qemu_console_lookup_unused(void);
+QEMUCursor *qemu_console_get_cursor(QemuConsole *con);
bool qemu_console_is_visible(QemuConsole *con);
bool qemu_console_is_graphic(QemuConsole *con);
bool qemu_console_is_fixedsize(QemuConsole *con);
@@ -405,7 +426,6 @@ bool qemu_console_is_gl_blocked(QemuConsole *con);
char *qemu_console_get_label(QemuConsole *con);
int qemu_console_get_index(QemuConsole *con);
uint32_t qemu_console_get_head(QemuConsole *con);
-QemuUIInfo *qemu_console_get_ui_info(QemuConsole *con);
int qemu_console_get_width(QemuConsole *con, int fallback);
int qemu_console_get_height(QemuConsole *con, int fallback);
/* Return the low-level window id for the console */
@@ -413,9 +433,10 @@ int qemu_console_get_window_id(QemuConsole *con);
/* Set the low-level window id for the console */
void qemu_console_set_window_id(QemuConsole *con, int window_id);
-void console_select(unsigned int index);
void qemu_console_resize(QemuConsole *con, int width, int height);
DisplaySurface *qemu_console_surface(QemuConsole *con);
+void coroutine_fn qemu_console_co_wait_update(QemuConsole *con);
+int qemu_invalidate_text_consoles(void);
/* console-gl.c */
#ifdef CONFIG_OPENGL
@@ -441,12 +462,15 @@ struct QemuDisplay {
DisplayType type;
void (*early_init)(DisplayOptions *opts);
void (*init)(DisplayState *ds, DisplayOptions *opts);
+ const char *vc;
};
void qemu_display_register(QemuDisplay *ui);
bool qemu_display_find_default(DisplayOptions *opts);
void qemu_display_early_init(DisplayOptions *opts);
void qemu_display_init(DisplayState *ds, DisplayOptions *opts);
+const char *qemu_display_get_vc(DisplayOptions *opts);
+void qemu_display_help(void);
/* vnc.c */
void vnc_display_init(const char *id, Error **errp);
@@ -454,10 +478,23 @@ void vnc_display_open(const char *id, Error **errp);
void vnc_display_add_client(const char *id, int csock, bool skipauth);
int vnc_display_password(const char *id, const char *password);
int vnc_display_pw_expire(const char *id, time_t expires);
-QemuOpts *vnc_parse(const char *str, Error **errp);
+void vnc_parse(const char *str);
int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp);
+bool vnc_display_reload_certs(const char *id, Error **errp);
+bool vnc_display_update(DisplayUpdateOptionsVNC *arg, Error **errp);
/* input.c */
int index_from_key(const char *key, size_t key_length);
+#ifdef CONFIG_LINUX
+/* udmabuf.c */
+int udmabuf_fd(void);
+#endif
+
+/* util.c */
+bool qemu_console_fill_device_address(QemuConsole *con,
+ char *device_address,
+ size_t size,
+ Error **errp);
+
#endif
diff --git a/include/ui/dbus-display.h b/include/ui/dbus-display.h
new file mode 100644
index 0000000000..7c9ec1a069
--- /dev/null
+++ b/include/ui/dbus-display.h
@@ -0,0 +1,17 @@
+#ifndef DBUS_DISPLAY_H
+#define DBUS_DISPLAY_H
+
+#include "qapi/error.h"
+#include "ui/dbus-module.h"
+
+static inline bool qemu_using_dbus_display(Error **errp)
+{
+ if (!using_dbus_display) {
+ error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
+ "D-Bus display is not in use");
+ return false;
+ }
+ return true;
+}
+
+#endif /* DBUS_DISPLAY_H */
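For reference, a sketch of how the new helper and the module ops below might be used together: check that the D-Bus display is active, then hand an already-accepted socket to the backend. The wrapper name is made up; add_client is the only op the module struct declares.

    #include "qemu/osdep.h"
    #include "ui/dbus-display.h"

    /* hand an accepted client socket to the D-Bus display, if active (sketch) */
    static bool my_dbus_display_add_client(int csock, Error **errp)
    {
        if (!qemu_using_dbus_display(errp)) {
            return false;               /* errp carries a DeviceNotActive error */
        }
        return qemu_dbus_display.add_client(csock, errp);
    }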
diff --git a/include/ui/dbus-module.h b/include/ui/dbus-module.h
new file mode 100644
index 0000000000..5442ee0bb6
--- /dev/null
+++ b/include/ui/dbus-module.h
@@ -0,0 +1,11 @@
+#ifndef DBUS_MODULE_H
+#define DBUS_MODULE_H
+
+struct QemuDBusDisplayOps {
+ bool (*add_client)(int csock, Error **errp);
+};
+
+extern int using_dbus_display;
+extern struct QemuDBusDisplayOps qemu_dbus_display;
+
+#endif /* DBUS_MODULE_H */
diff --git a/include/ui/egl-context.h b/include/ui/egl-context.h
index f004ce11a7..c2761d747a 100644
--- a/include/ui/egl-context.h
+++ b/include/ui/egl-context.h
@@ -4,11 +4,10 @@
#include "ui/console.h"
#include "ui/egl-helpers.h"
-QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl,
+QEMUGLContext qemu_egl_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
-void qemu_egl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx);
-int qemu_egl_make_context_current(DisplayChangeListener *dcl,
+void qemu_egl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx);
+int qemu_egl_make_context_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
-QEMUGLContext qemu_egl_get_current_context(DisplayChangeListener *dcl);
#endif /* EGL_CONTEXT_H */
diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h
index 9db7293bdb..4b8c0d2281 100644
--- a/include/ui/egl-helpers.h
+++ b/include/ui/egl-helpers.h
@@ -3,11 +3,16 @@
#include <epoxy/gl.h>
#include <epoxy/egl.h>
+#ifdef CONFIG_GBM
#include <gbm.h>
+#endif
+#include "ui/console.h"
+#include "ui/shader.h"
extern EGLDisplay *qemu_egl_display;
extern EGLConfig qemu_egl_config;
extern DisplayGLMode qemu_egl_mode;
+extern bool qemu_egl_angle_d3d;
typedef struct egl_fb {
int width;
@@ -15,38 +20,60 @@ typedef struct egl_fb {
GLuint texture;
GLuint framebuffer;
bool delete_texture;
+ QemuDmaBuf *dmabuf;
} egl_fb;
+#define EGL_FB_INIT { 0, }
+
void egl_fb_destroy(egl_fb *fb);
void egl_fb_setup_default(egl_fb *fb, int width, int height);
void egl_fb_setup_for_tex(egl_fb *fb, int width, int height,
GLuint texture, bool delete);
void egl_fb_setup_new_tex(egl_fb *fb, int width, int height);
void egl_fb_blit(egl_fb *dst, egl_fb *src, bool flip);
-void egl_fb_read(void *dst, egl_fb *src);
+void egl_fb_read(DisplaySurface *dst, egl_fb *src);
+void egl_fb_read_rect(DisplaySurface *dst, egl_fb *src, int x, int y, int w, int h);
void egl_texture_blit(QemuGLShader *gls, egl_fb *dst, egl_fb *src, bool flip);
void egl_texture_blend(QemuGLShader *gls, egl_fb *dst, egl_fb *src, bool flip,
- int x, int y);
+ int x, int y, double scale_x, double scale_y);
+
+extern EGLContext qemu_egl_rn_ctx;
-#ifdef CONFIG_OPENGL_DMABUF
+#ifdef CONFIG_GBM
extern int qemu_egl_rn_fd;
extern struct gbm_device *qemu_egl_rn_gbm_dev;
-extern EGLContext qemu_egl_rn_ctx;
int egl_rendernode_init(const char *rendernode, DisplayGLMode mode);
-int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc);
+int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc,
+ EGLuint64KHR *modifier);
void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf);
void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf);
+void egl_dmabuf_create_sync(QemuDmaBuf *dmabuf);
+void egl_dmabuf_create_fence(QemuDmaBuf *dmabuf);
#endif
-EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, Window win);
+EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, EGLNativeWindowType win);
+
+#if defined(CONFIG_X11) || defined(CONFIG_GBM)
int qemu_egl_init_dpy_x11(EGLNativeDisplayType dpy, DisplayGLMode mode);
int qemu_egl_init_dpy_mesa(EGLNativeDisplayType dpy, DisplayGLMode mode);
+
+#endif
+
+#ifdef WIN32
+int qemu_egl_init_dpy_win32(EGLNativeDisplayType dpy, DisplayGLMode mode);
+#endif
+
EGLContext qemu_egl_init_ctx(void);
+bool qemu_egl_has_dmabuf(void);
+
+bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp);
+
+const char *qemu_egl_get_error_string(void);
#endif /* EGL_HELPERS_H */
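A sketch of the egl_fb helpers declared above, assuming an OpenGL-enabled build: wrap an existing guest texture in an egl_fb, bind the window's default framebuffer, and blit one into the other. The shader pointer and texture id are assumed to come from the caller.

    #include "qemu/osdep.h"
    #include "ui/egl-helpers.h"

    /* draw a guest texture into the window's default framebuffer (sketch) */
    static void blit_guest_texture(QemuGLShader *gls,
                                   int win_w, int win_h,
                                   GLuint tex, int tex_w, int tex_h,
                                   bool y0_top)
    {
        egl_fb win_fb = EGL_FB_INIT;
        egl_fb guest_fb = EGL_FB_INIT;

        egl_fb_setup_default(&win_fb, win_w, win_h);
        egl_fb_setup_for_tex(&guest_fb, tex_w, tex_h, tex, false);
        egl_texture_blit(gls, &win_fb, &guest_fb, y0_top);

        egl_fb_destroy(&guest_fb);      /* texture not owned, only the fbo goes */
    }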
diff --git a/include/ui/gtk.h b/include/ui/gtk.h
index 99edd3c085..aa3d637029 100644
--- a/include/ui/gtk.h
+++ b/include/ui/gtk.h
@@ -1,15 +1,11 @@
#ifndef UI_GTK_H
#define UI_GTK_H
-#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE
/* Work around an -Wstrict-prototypes warning in GTK headers */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-prototypes"
-#endif
#include <gtk/gtk.h>
-#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE
#pragma GCC diagnostic pop
-#endif
#include <gdk/gdkkeysyms.h>
@@ -22,16 +18,26 @@
#include <gdk/gdkwayland.h>
#endif
+#include "ui/clipboard.h"
+#include "ui/console.h"
+#include "ui/kbd-state.h"
#if defined(CONFIG_OPENGL)
#include "ui/egl-helpers.h"
#include "ui/egl-context.h"
#endif
+#ifdef CONFIG_VTE
+#include "qemu/fifo8.h"
+#endif
+
+#define MAX_VCS 10
typedef struct GtkDisplayState GtkDisplayState;
typedef struct VirtualGfxConsole {
GtkWidget *drawing_area;
+ DisplayGLCtx dgc;
DisplayChangeListener dcl;
+ QKbdState *kbd;
DisplaySurface *ds;
pixman_image_t *convert;
cairo_surface_t *surface;
@@ -50,6 +56,7 @@ typedef struct VirtualGfxConsole {
int cursor_y;
bool y0_top;
bool scanout_mode;
+ bool has_dmabuf;
#endif
} VirtualGfxConsole;
@@ -59,6 +66,7 @@ typedef struct VirtualVteConsole {
GtkWidget *scrollbar;
GtkWidget *terminal;
Chardev *chr;
+ Fifo8 out_fifo;
bool echo;
} VirtualVteConsole;
#endif
@@ -84,10 +92,71 @@ typedef struct VirtualConsole {
};
} VirtualConsole;
+struct GtkDisplayState {
+ GtkWidget *window;
+
+ GtkWidget *menu_bar;
+
+ GtkAccelGroup *accel_group;
+
+ GtkWidget *machine_menu_item;
+ GtkWidget *machine_menu;
+ GtkWidget *pause_item;
+ GtkWidget *reset_item;
+ GtkWidget *powerdown_item;
+ GtkWidget *quit_item;
+
+ GtkWidget *view_menu_item;
+ GtkWidget *view_menu;
+ GtkWidget *full_screen_item;
+ GtkWidget *copy_item;
+ GtkWidget *zoom_in_item;
+ GtkWidget *zoom_out_item;
+ GtkWidget *zoom_fixed_item;
+ GtkWidget *zoom_fit_item;
+ GtkWidget *grab_item;
+ GtkWidget *grab_on_hover_item;
+
+ int nb_vcs;
+ VirtualConsole vc[MAX_VCS];
+
+ GtkWidget *show_tabs_item;
+ GtkWidget *untabify_item;
+ GtkWidget *show_menubar_item;
+
+ GtkWidget *vbox;
+ GtkWidget *notebook;
+ int button_mask;
+ gboolean last_set;
+ int last_x;
+ int last_y;
+ int grab_x_root;
+ int grab_y_root;
+ VirtualConsole *kbd_owner;
+ VirtualConsole *ptr_owner;
+
+ gboolean full_screen;
+
+ GdkCursor *null_cursor;
+ Notifier mouse_mode_notifier;
+ gboolean free_scale;
+
+ bool external_pause_update;
+
+ QemuClipboardPeer cbpeer;
+ uint32_t cbpending[QEMU_CLIPBOARD_SELECTION__COUNT];
+ GtkClipboard *gtkcb[QEMU_CLIPBOARD_SELECTION__COUNT];
+ bool cbowner[QEMU_CLIPBOARD_SELECTION__COUNT];
+
+ DisplayOptions *opts;
+};
+
extern bool gtk_use_gl_area;
/* ui/gtk.c */
void gd_update_windowsize(VirtualConsole *vc);
+void gd_update_monitor_refresh_rate(VirtualConsole *vc, GtkWidget *widget);
+void gd_hw_gl_flushed(void *vc);
/* ui/gtk-egl.c */
void gd_egl_init(VirtualConsole *vc);
@@ -97,7 +166,7 @@ void gd_egl_update(DisplayChangeListener *dcl,
void gd_egl_refresh(DisplayChangeListener *dcl);
void gd_egl_switch(DisplayChangeListener *dcl,
DisplaySurface *surface);
-QEMUGLContext gd_egl_create_context(DisplayChangeListener *dcl,
+QEMUGLContext gd_egl_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
void gd_egl_scanout_disable(DisplayChangeListener *dcl);
void gd_egl_scanout_texture(DisplayChangeListener *dcl,
@@ -106,7 +175,8 @@ void gd_egl_scanout_texture(DisplayChangeListener *dcl,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf);
void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl,
@@ -114,12 +184,12 @@ void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl,
uint32_t hot_x, uint32_t hot_y);
void gd_egl_cursor_position(DisplayChangeListener *dcl,
uint32_t pos_x, uint32_t pos_y);
-void gd_egl_release_dmabuf(DisplayChangeListener *dcl,
- QemuDmaBuf *dmabuf);
+void gd_egl_flush(DisplayChangeListener *dcl,
+ uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gd_egl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gtk_egl_init(DisplayGLMode mode);
-int gd_egl_make_current(DisplayChangeListener *dcl,
+int gd_egl_make_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
/* ui/gtk-gl-area.c */
@@ -130,22 +200,28 @@ void gd_gl_area_update(DisplayChangeListener *dcl,
void gd_gl_area_refresh(DisplayChangeListener *dcl);
void gd_gl_area_switch(DisplayChangeListener *dcl,
DisplaySurface *surface);
-QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl,
+QEMUGLContext gd_gl_area_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
-void gd_gl_area_destroy_context(DisplayChangeListener *dcl,
+void gd_gl_area_destroy_context(DisplayGLCtx *dgc,
QEMUGLContext ctx);
+void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl,
+ QemuDmaBuf *dmabuf);
void gd_gl_area_scanout_texture(DisplayChangeListener *dcl,
uint32_t backing_id,
bool backing_y_0_top,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
+void gd_gl_area_scanout_disable(DisplayChangeListener *dcl);
void gd_gl_area_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gtk_gl_area_init(void);
-QEMUGLContext gd_gl_area_get_current_context(DisplayChangeListener *dcl);
-int gd_gl_area_make_current(DisplayChangeListener *dcl,
+int gd_gl_area_make_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
+/* gtk-clipboard.c */
+void gd_clipboard_init(GtkDisplayState *gd);
+
#endif /* UI_GTK_H */
diff --git a/include/ui/input.h b/include/ui/input.h
index 8c8ccb999f..8f9aac562e 100644
--- a/include/ui/input.h
+++ b/include/ui/input.h
@@ -2,14 +2,18 @@
#define INPUT_H
#include "qapi/qapi-types-ui.h"
+#include "qemu/notify.h"
#define INPUT_EVENT_MASK_KEY (1<<INPUT_EVENT_KIND_KEY)
#define INPUT_EVENT_MASK_BTN (1<<INPUT_EVENT_KIND_BTN)
#define INPUT_EVENT_MASK_REL (1<<INPUT_EVENT_KIND_REL)
#define INPUT_EVENT_MASK_ABS (1<<INPUT_EVENT_KIND_ABS)
+#define INPUT_EVENT_MASK_MTT (1<<INPUT_EVENT_KIND_MTT)
#define INPUT_EVENT_ABS_MIN 0x0000
#define INPUT_EVENT_ABS_MAX 0x7FFF
+#define INPUT_EVENT_SLOTS_MIN 0x0
+#define INPUT_EVENT_SLOTS_MAX 0xa
typedef struct QemuInputHandler QemuInputHandler;
typedef struct QemuInputHandlerState QemuInputHandlerState;
@@ -26,7 +30,7 @@ struct QemuInputHandler {
};
QemuInputHandlerState *qemu_input_handler_register(DeviceState *dev,
- QemuInputHandler *handler);
+ const QemuInputHandler *handler);
void qemu_input_handler_activate(QemuInputHandlerState *s);
void qemu_input_handler_deactivate(QemuInputHandlerState *s);
void qemu_input_handler_unregister(QemuInputHandlerState *s);
@@ -53,13 +57,18 @@ void qemu_input_queue_btn(QemuConsole *src, InputButton btn, bool down);
void qemu_input_update_buttons(QemuConsole *src, uint32_t *button_map,
uint32_t button_old, uint32_t button_new);
-bool qemu_input_is_absolute(void);
+bool qemu_input_is_absolute(QemuConsole *con);
int qemu_input_scale_axis(int value,
int min_in, int max_in,
int min_out, int max_out);
void qemu_input_queue_rel(QemuConsole *src, InputAxis axis, int value);
void qemu_input_queue_abs(QemuConsole *src, InputAxis axis, int value,
int min_in, int max_in);
+void qemu_input_queue_mtt(QemuConsole *src, InputMultiTouchType type, int slot,
+ int tracking_id);
+void qemu_input_queue_mtt_abs(QemuConsole *src, InputAxis axis, int value,
+ int min_in, int max_in,
+ int slot, int tracking_id);
void qemu_input_check_mode_change(void);
void qemu_add_mouse_mode_change_notifier(Notifier *notify);
diff --git a/include/ui/kbd-state.h b/include/ui/kbd-state.h
new file mode 100644
index 0000000000..1f37b932eb
--- /dev/null
+++ b/include/ui/kbd-state.h
@@ -0,0 +1,113 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_UI_KBD_STATE_H
+#define QEMU_UI_KBD_STATE_H
+
+#include "qapi/qapi-types-ui.h"
+
+typedef enum QKbdModifier QKbdModifier;
+
+enum QKbdModifier {
+ QKBD_MOD_NONE = 0,
+
+ QKBD_MOD_SHIFT,
+ QKBD_MOD_CTRL,
+ QKBD_MOD_ALT,
+ QKBD_MOD_ALTGR,
+
+ QKBD_MOD_NUMLOCK,
+ QKBD_MOD_CAPSLOCK,
+
+ QKBD_MOD__MAX
+};
+
+typedef struct QKbdState QKbdState;
+
+/**
+ * qkbd_state_init: init keyboard state tracker.
+ *
+ * Allocates and initializes keyboard state struct.
+ *
+ * @con: QemuConsole for this state tracker. Gets passed down to
+ * qemu_input_*() functions when sending key events to the guest.
+ */
+QKbdState *qkbd_state_init(QemuConsole *con);
+
+/**
+ * qkbd_state_free: free keyboard tracker state.
+ *
+ * @kbd: state tracker state.
+ */
+void qkbd_state_free(QKbdState *kbd);
+
+/**
+ * qkbd_state_key_event: process key event.
+ *
+ * Update keyboard state, send event to the guest.
+ *
+ * This function takes care not to send suspicious events (for example,
+ * a key-up event for a key that is not pressed).
+ *
+ * @kbd: state tracker state.
+ * @qcode: the key pressed or released.
+ * @down: true for key down events, false otherwise.
+ */
+void qkbd_state_key_event(QKbdState *kbd, QKeyCode qcode, bool down);
+
+/**
+ * qkbd_state_set_delay: set key press delay.
+ *
+ * When set, the specified delay will be added after each key event,
+ * using qemu_input_event_send_key_delay().
+ *
+ * @kbd: state tracker state.
+ * @delay_ms: the delay in milliseconds.
+ */
+void qkbd_state_set_delay(QKbdState *kbd, int delay_ms);
+
+/**
+ * qkbd_state_key_get: get key state.
+ *
+ * Returns true when the key is down.
+ *
+ * @kbd: state tracker state.
+ * @qcode: the key to query.
+ */
+bool qkbd_state_key_get(QKbdState *kbd, QKeyCode qcode);
+
+/**
+ * qkbd_state_modifier_get: get modifier state.
+ *
+ * Returns true when the modifier is active.
+ *
+ * @kbd: state tracker state.
+ * @mod: the modifier to query.
+ */
+bool qkbd_state_modifier_get(QKbdState *kbd, QKbdModifier mod);
+
+/**
+ * qkbd_state_lift_all_keys: lift all pressed keys.
+ *
+ * This sends key up events to the guest for all keys which are in
+ * down state.
+ *
+ * @kbd: state tracker state.
+ */
+void qkbd_state_lift_all_keys(QKbdState *kbd);
+
+/**
+ * qkbd_state_switch_console: Switch console.
+ *
+ * This sends key up events to the previous console for all keys which are in
+ * down state to prevent keys being stuck, and remembers the new console.
+ *
+ * @kbd: state tracker state.
+ * @con: new QemuConsole for this state tracker.
+ */
+void qkbd_state_switch_console(QKbdState *kbd, QemuConsole *con);
+
+#endif /* QEMU_UI_KBD_STATE_H */
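A short usage sketch of the tracker documented above, as a frontend might use it: allocate it per console, feed key events through it, and lift all keys when focus is lost. The 10 ms delay is an arbitrary illustrative value.

    #include "qemu/osdep.h"
    #include "ui/console.h"
    #include "ui/kbd-state.h"

    static QKbdState *kbd;

    static void frontend_kbd_init(QemuConsole *con)
    {
        kbd = qkbd_state_init(con);
        qkbd_state_set_delay(kbd, 10);   /* illustrative: 10 ms between events */
    }

    static void frontend_key(QKeyCode qcode, bool down)
    {
        /* filters out e.g. key-up events for keys that are not down */
        qkbd_state_key_event(kbd, qcode, down);
    }

    static void frontend_focus_out(void)
    {
        qkbd_state_lift_all_keys(kbd);   /* avoid stuck keys in the guest */
    }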
diff --git a/include/ui/pixman-minimal.h b/include/ui/pixman-minimal.h
new file mode 100644
index 0000000000..6dd7de1c7e
--- /dev/null
+++ b/include/ui/pixman-minimal.h
@@ -0,0 +1,239 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Tiny subset of PIXMAN API commonly used by QEMU.
+ *
+ * Copyright 1987, 1988, 1989, 1998 The Open Group
+ * Copyright 1987, 1988, 1989 Digital Equipment Corporation
+ * Copyright 1999, 2004, 2008 Keith Packard
+ * Copyright 2000 SuSE, Inc.
+ * Copyright 2000 Keith Packard, member of The XFree86 Project, Inc.
+ * Copyright 2004, 2005, 2007, 2008, 2009, 2010 Red Hat, Inc.
+ * Copyright 2004 Nicholas Miell
+ * Copyright 2005 Lars Knoll & Zack Rusin, Trolltech
+ * Copyright 2005 Trolltech AS
+ * Copyright 2007 Luca Barbato
+ * Copyright 2008 Aaron Plattner, NVIDIA Corporation
+ * Copyright 2008 Rodrigo Kumpera
+ * Copyright 2008 André Tupinambá
+ * Copyright 2008 Mozilla Corporation
+ * Copyright 2008 Frederic Plourde
+ * Copyright 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009, 2010 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PIXMAN_MINIMAL_H
+#define PIXMAN_MINIMAL_H
+
+#define PIXMAN_TYPE_OTHER 0
+#define PIXMAN_TYPE_ARGB 2
+#define PIXMAN_TYPE_ABGR 3
+#define PIXMAN_TYPE_BGRA 8
+#define PIXMAN_TYPE_RGBA 9
+
+#define PIXMAN_FORMAT(bpp, type, a, r, g, b) (((bpp) << 24) | \
+ ((type) << 16) | \
+ ((a) << 12) | \
+ ((r) << 8) | \
+ ((g) << 4) | \
+ ((b)))
+
+#define PIXMAN_FORMAT_RESHIFT(val, ofs, num) \
+ (((val >> (ofs)) & ((1 << (num)) - 1)) << ((val >> 22) & 3))
+
+#define PIXMAN_FORMAT_BPP(f) PIXMAN_FORMAT_RESHIFT(f, 24, 8)
+#define PIXMAN_FORMAT_TYPE(f) (((f) >> 16) & 0x3f)
+#define PIXMAN_FORMAT_A(f) PIXMAN_FORMAT_RESHIFT(f, 12, 4)
+#define PIXMAN_FORMAT_R(f) PIXMAN_FORMAT_RESHIFT(f, 8, 4)
+#define PIXMAN_FORMAT_G(f) PIXMAN_FORMAT_RESHIFT(f, 4, 4)
+#define PIXMAN_FORMAT_B(f) PIXMAN_FORMAT_RESHIFT(f, 0, 4)
+#define PIXMAN_FORMAT_DEPTH(f) (PIXMAN_FORMAT_A(f) + \
+ PIXMAN_FORMAT_R(f) + \
+ PIXMAN_FORMAT_G(f) + \
+ PIXMAN_FORMAT_B(f))
+
+typedef enum {
+ /* 32bpp formats */
+ PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ARGB, 8, 8, 8, 8),
+ PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ARGB, 0, 8, 8, 8),
+ PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ABGR, 8, 8, 8, 8),
+ PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ABGR, 0, 8, 8, 8),
+ PIXMAN_b8g8r8a8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_BGRA, 8, 8, 8, 8),
+ PIXMAN_b8g8r8x8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_BGRA, 0, 8, 8, 8),
+ PIXMAN_r8g8b8a8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_RGBA, 8, 8, 8, 8),
+ PIXMAN_r8g8b8x8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_RGBA, 0, 8, 8, 8),
+ /* 24bpp formats */
+ PIXMAN_r8g8b8 = PIXMAN_FORMAT(24, PIXMAN_TYPE_ARGB, 0, 8, 8, 8),
+ PIXMAN_b8g8r8 = PIXMAN_FORMAT(24, PIXMAN_TYPE_ABGR, 0, 8, 8, 8),
+ /* 16bpp formats */
+ PIXMAN_r5g6b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 0, 5, 6, 5),
+ PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 1, 5, 5, 5),
+ PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 0, 5, 5, 5),
+} pixman_format_code_t;
+
+typedef struct pixman_image pixman_image_t;
+
+typedef void (*pixman_image_destroy_func_t)(pixman_image_t *image, void *data);
+
+struct pixman_image {
+ int ref_count;
+ pixman_format_code_t format;
+ int width;
+ int height;
+ int stride;
+ uint32_t *data;
+ uint32_t *free_me;
+ pixman_image_destroy_func_t destroy_func;
+ void *destroy_data;
+};
+
+typedef struct pixman_color {
+ uint16_t red;
+ uint16_t green;
+ uint16_t blue;
+ uint16_t alpha;
+} pixman_color_t;
+
+static inline uint32_t *create_bits(pixman_format_code_t format,
+ int width,
+ int height,
+ int *rowstride_bytes)
+{
+ int stride = 0;
+ size_t buf_size = 0;
+ int bpp = PIXMAN_FORMAT_BPP(format);
+
+ /*
+ * Calculate the following while checking for overflow truncation:
+ * stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
+ */
+
+ if (unlikely(__builtin_mul_overflow(width, bpp, &stride))) {
+ return NULL;
+ }
+
+ if (unlikely(__builtin_add_overflow(stride, 0x1f, &stride))) {
+ return NULL;
+ }
+
+ stride >>= 5;
+
+ stride *= sizeof(uint32_t);
+
+ if (unlikely(__builtin_mul_overflow((size_t) height,
+ (size_t) stride,
+ &buf_size))) {
+ return NULL;
+ }
+
+ if (rowstride_bytes) {
+ *rowstride_bytes = stride;
+ }
+
+ return g_malloc0(buf_size);
+}
+
+static inline pixman_image_t *pixman_image_create_bits(pixman_format_code_t format,
+ int width,
+ int height,
+ uint32_t *bits,
+ int rowstride_bytes)
+{
+ pixman_image_t *i = g_new0(pixman_image_t, 1);
+
+ i->width = width;
+ i->height = height;
+ i->format = format;
+ if (bits) {
+ i->data = bits;
+ } else {
+ i->free_me = i->data =
+ create_bits(format, width, height, &rowstride_bytes);
+ if (width && height) {
+ assert(i->data);
+ }
+ }
+ i->stride = rowstride_bytes ? rowstride_bytes :
+ width * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
+ i->ref_count = 1;
+
+ return i;
+}
+
+static inline pixman_image_t *pixman_image_ref(pixman_image_t *i)
+{
+ i->ref_count++;
+ return i;
+}
+
+static inline bool pixman_image_unref(pixman_image_t *i)
+{
+ i->ref_count--;
+
+ if (i->ref_count == 0) {
+ if (i->destroy_func) {
+ i->destroy_func(i, i->destroy_data);
+ }
+ g_free(i->free_me);
+ g_free(i);
+
+ return true;
+ }
+
+ return false;
+}
+
+static inline void pixman_image_set_destroy_function(pixman_image_t *i,
+ pixman_image_destroy_func_t func,
+ void *data)
+
+{
+ i->destroy_func = func;
+ i->destroy_data = data;
+}
+
+static inline uint32_t *pixman_image_get_data(pixman_image_t *i)
+{
+ return i->data;
+}
+
+static inline int pixman_image_get_height(pixman_image_t *i)
+{
+ return i->height;
+}
+
+static inline int pixman_image_get_width(pixman_image_t *i)
+{
+ return i->width;
+}
+
+static inline int pixman_image_get_stride(pixman_image_t *i)
+{
+ return i->stride;
+}
+
+static inline pixman_format_code_t pixman_image_get_format(pixman_image_t *i)
+{
+ return i->format;
+}
+
+#endif /* PIXMAN_MINIMAL_H */
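To illustrate the subset above: the PIXMAN_FORMAT() encoding lets the BPP/DEPTH macros recover the pixel layout from the format code alone, and pixman_image_create_bits() with bits == NULL allocates zeroed storage and computes the stride itself. A minimal sketch; it works the same whether the real pixman or this minimal header is in use.

    #include "qemu/osdep.h"
    #include "ui/qemu-pixman.h"

    static void pixman_minimal_demo(void)
    {
        /* the format code carries the layout: 32 bpp, 24 bit color depth */
        g_assert(PIXMAN_FORMAT_BPP(PIXMAN_x8r8g8b8) == 32);
        g_assert(PIXMAN_FORMAT_DEPTH(PIXMAN_x8r8g8b8) == 24);

        /* NULL bits: storage is allocated and zeroed, stride is computed */
        pixman_image_t *img =
            pixman_image_create_bits(PIXMAN_x8r8g8b8, 640, 480, NULL, 0);

        g_assert(pixman_image_get_stride(img) == 640 * 4);
        pixman_image_unref(img);         /* frees the data and the image */
    }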
diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h
index b7c82d17fc..ef13a8210c 100644
--- a/include/ui/qemu-pixman.h
+++ b/include/ui/qemu-pixman.h
@@ -6,14 +6,10 @@
#ifndef QEMU_PIXMAN_H
#define QEMU_PIXMAN_H
-/* pixman-0.16.0 headers have a redundant declaration */
-#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wredundant-decls"
-#endif
+#ifdef CONFIG_PIXMAN
#include <pixman.h>
-#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE
-#pragma GCC diagnostic pop
+#else
+#include "pixman-minimal.h"
#endif
/*
@@ -23,7 +19,7 @@
* feeding libjpeg / libpng and writing screenshots.
*/
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
# define PIXMAN_BE_r8g8b8 PIXMAN_r8g8b8
# define PIXMAN_BE_x8r8g8b8 PIXMAN_x8r8g8b8
# define PIXMAN_BE_a8r8g8b8 PIXMAN_a8r8g8b8
@@ -36,6 +32,8 @@
# define PIXMAN_LE_r8g8b8 PIXMAN_b8g8r8
# define PIXMAN_LE_a8r8g8b8 PIXMAN_b8g8r8a8
# define PIXMAN_LE_x8r8g8b8 PIXMAN_b8g8r8x8
+# define PIXMAN_LE_a8b8g8r8 PIXMAN_r8g8b8a8
+# define PIXMAN_LE_x8b8g8r8 PIXMAN_r8g8b8x8
#else
# define PIXMAN_BE_r8g8b8 PIXMAN_b8g8r8
# define PIXMAN_BE_x8r8g8b8 PIXMAN_b8g8r8x8
@@ -49,29 +47,45 @@
# define PIXMAN_LE_r8g8b8 PIXMAN_r8g8b8
# define PIXMAN_LE_a8r8g8b8 PIXMAN_a8r8g8b8
# define PIXMAN_LE_x8r8g8b8 PIXMAN_x8r8g8b8
+# define PIXMAN_LE_a8b8g8r8 PIXMAN_a8b8g8r8
+# define PIXMAN_LE_x8b8g8r8 PIXMAN_x8b8g8r8
#endif
+#define QEMU_PIXMAN_COLOR(r, g, b) \
+ { .red = r << 8, .green = g << 8, .blue = b << 8, .alpha = 0xffff }
+
+#define QEMU_PIXMAN_COLOR_BLACK QEMU_PIXMAN_COLOR(0x00, 0x00, 0x00)
+#define QEMU_PIXMAN_COLOR_GRAY QEMU_PIXMAN_COLOR(0xaa, 0xaa, 0xaa)
+
/* -------------------------------------------------------------------- */
+typedef struct PixelFormat {
+ uint8_t bits_per_pixel;
+ uint8_t bytes_per_pixel;
+ uint8_t depth; /* color depth in bits */
+ uint32_t rmask, gmask, bmask, amask;
+ uint8_t rshift, gshift, bshift, ashift;
+ uint8_t rmax, gmax, bmax, amax;
+ uint8_t rbits, gbits, bbits, abits;
+} PixelFormat;
+
PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format);
pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian);
pixman_format_code_t qemu_drm_format_to_pixman(uint32_t drm_format);
+uint32_t qemu_pixman_to_drm_format(pixman_format_code_t pixman);
int qemu_pixman_get_type(int rshift, int gshift, int bshift);
-pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf);
bool qemu_pixman_check_format(DisplayChangeListener *dcl,
pixman_format_code_t format);
+#ifdef CONFIG_PIXMAN
+pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf);
pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format,
int width);
void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
int width, int x, int y);
-void qemu_pixman_linebuf_copy(pixman_image_t *fb, int width, int x, int y,
- pixman_image_t *linebuf);
pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format,
pixman_image_t *image);
-void qemu_pixman_image_unref(pixman_image_t *image);
-pixman_color_t qemu_pixman_color(PixelFormat *pf, uint32_t color);
pixman_image_t *qemu_pixman_glyph_from_vgafont(int height, const uint8_t *font,
unsigned int ch);
void qemu_pixman_glyph_render(pixman_image_t *glyph,
@@ -79,5 +93,10 @@ void qemu_pixman_glyph_render(pixman_image_t *glyph,
pixman_color_t *fgcol,
pixman_color_t *bgcol,
int x, int y, int cw, int ch);
+#endif
+
+void qemu_pixman_image_unref(pixman_image_t *image);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(pixman_image_t, qemu_pixman_image_unref)
#endif /* QEMU_PIXMAN_H */
diff --git a/include/ui/qemu-spice-module.h b/include/ui/qemu-spice-module.h
new file mode 100644
index 0000000000..1f22d557ea
--- /dev/null
+++ b/include/ui/qemu-spice-module.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_SPICE_MODULE_H
+#define QEMU_SPICE_MODULE_H
+
+#ifdef CONFIG_SPICE
+#include <spice.h>
+#endif
+
+typedef struct SpiceInfo SpiceInfo;
+
+struct QemuSpiceOps {
+ void (*init)(void);
+ void (*display_init)(void);
+ int (*migrate_info)(const char *h, int p, int t, const char *s);
+ int (*set_passwd)(const char *passwd,
+ bool fail_if_connected, bool disconnect_if_connected);
+ int (*set_pw_expire)(time_t expires);
+ int (*display_add_client)(int csock, int skipauth, int tls);
+#ifdef CONFIG_SPICE
+ int (*add_interface)(SpiceBaseInstance *sin);
+ SpiceInfo* (*qmp_query)(Error **errp);
+#endif
+};
+
+extern int using_spice;
+extern struct QemuSpiceOps qemu_spice;
+
+#endif
diff --git a/include/ui/qemu-spice.h b/include/ui/qemu-spice.h
index 8c23dfe717..b7d493742c 100644
--- a/include/ui/qemu-spice.h
+++ b/include/ui/qemu-spice.h
@@ -19,73 +19,32 @@
#define QEMU_SPICE_H
#include "qapi/error.h"
+#include "ui/qemu-spice-module.h"
#ifdef CONFIG_SPICE
#include <spice.h>
#include "qemu/config-file.h"
-extern int using_spice;
-
-void qemu_spice_init(void);
void qemu_spice_input_init(void);
-void qemu_spice_audio_init(void);
void qemu_spice_display_init(void);
-int qemu_spice_display_add_client(int csock, int skipauth, int tls);
-int qemu_spice_add_interface(SpiceBaseInstance *sin);
+void qemu_spice_display_init_done(void);
bool qemu_spice_have_display_interface(QemuConsole *con);
int qemu_spice_add_display_interface(QXLInstance *qxlin, QemuConsole *con);
-int qemu_spice_set_passwd(const char *passwd,
- bool fail_if_connected, bool disconnect_if_connected);
-int qemu_spice_set_pw_expire(time_t expires);
int qemu_spice_migrate_info(const char *hostname, int port, int tls_port,
const char *subject);
-#if !defined(SPICE_SERVER_VERSION) || (SPICE_SERVER_VERSION < 0xc06)
-#define SPICE_NEEDS_SET_MM_TIME 1
+#if SPICE_SERVER_VERSION >= 0x000f00 /* release 0.15.0 */
+#define SPICE_HAS_ATTACHED_WORKER 1
#else
-#define SPICE_NEEDS_SET_MM_TIME 0
+#define SPICE_HAS_ATTACHED_WORKER 0
#endif
-void qemu_spice_register_ports(void);
#else /* CONFIG_SPICE */
#include "qemu/error-report.h"
-#define using_spice 0
#define spice_displays 0
-static inline int qemu_spice_set_passwd(const char *passwd,
- bool fail_if_connected,
- bool disconnect_if_connected)
-{
- return -1;
-}
-static inline int qemu_spice_set_pw_expire(time_t expires)
-{
- return -1;
-}
-static inline int qemu_spice_migrate_info(const char *h, int p, int t,
- const char *s)
-{
- return -1;
-}
-
-static inline int qemu_spice_display_add_client(int csock, int skipauth,
- int tls)
-{
- return -1;
-}
-
-static inline void qemu_spice_display_init(void)
-{
- /* This must never be called if CONFIG_SPICE is disabled */
- error_report("spice support is disabled");
- abort();
-}
-
-static inline void qemu_spice_init(void)
-{
-}
#endif /* CONFIG_SPICE */
diff --git a/include/ui/rect.h b/include/ui/rect.h
new file mode 100644
index 0000000000..7ebf47ebcd
--- /dev/null
+++ b/include/ui/rect.h
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef QEMU_RECT_H
+#define QEMU_RECT_H
+
+
+typedef struct QemuRect {
+ int16_t x;
+ int16_t y;
+ uint16_t width;
+ uint16_t height;
+} QemuRect;
+
+static inline void qemu_rect_init(QemuRect *rect,
+ int16_t x, int16_t y,
+ uint16_t width, uint16_t height)
+{
+ rect->x = x;
+ rect->y = y;
+ rect->width = width;
+ rect->height = height;
+}
+
+static inline void qemu_rect_translate(QemuRect *rect,
+ int16_t dx, int16_t dy)
+{
+ rect->x += dx;
+ rect->y += dy;
+}
+
+static inline bool qemu_rect_intersect(const QemuRect *a, const QemuRect *b,
+ QemuRect *res)
+{
+ int16_t x1, x2, y1, y2;
+
+ x1 = MAX(a->x, b->x);
+ y1 = MAX(a->y, b->y);
+ x2 = MIN(a->x + a->width, b->x + b->width);
+ y2 = MIN(a->y + a->height, b->y + b->height);
+
+ if (x1 >= x2 || y1 >= y2) {
+ if (res) {
+ qemu_rect_init(res, 0, 0, 0, 0);
+ }
+
+ return false;
+ }
+
+ if (res) {
+ qemu_rect_init(res, x1, y1, x2 - x1, y2 - y1);
+ }
+
+ return true;
+}
+
+#endif
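A small usage sketch of the helper above: two overlapping rectangles yield their intersection, while non-overlapping ones return false and zero the result.

    #include "qemu/osdep.h"
    #include "ui/rect.h"

    static void rect_demo(void)
    {
        QemuRect a, b, overlap;

        qemu_rect_init(&a, 0, 0, 100, 100);
        qemu_rect_init(&b, 50, 50, 100, 100);

        if (qemu_rect_intersect(&a, &b, &overlap)) {
            /* overlap == { .x = 50, .y = 50, .width = 50, .height = 50 } */
        }
    }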
diff --git a/include/ui/sdl2.h b/include/ui/sdl2.h
index f43eecdbd6..e3acc7c82a 100644
--- a/include/ui/sdl2.h
+++ b/include/ui/sdl2.h
@@ -5,13 +5,29 @@
#undef WIN32_LEAN_AND_MEAN
#include <SDL.h>
+
+/* With Alpine / musl libc the SDL headers pull in directfb headers,
+ * which in turn trigger a warning about redundant declarations for
+ * direct_waitqueue_deinit.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
+
#include <SDL_syswm.h>
+#pragma GCC diagnostic pop
+
+#ifdef CONFIG_SDL_IMAGE
+# include <SDL_image.h>
+#endif
+
+#include "ui/kbd-state.h"
#ifdef CONFIG_OPENGL
# include "ui/egl-helpers.h"
#endif
struct sdl2_console {
+ DisplayGLCtx dgc;
DisplayChangeListener dcl;
DisplaySurface *surface;
DisplayOptions *opts;
@@ -27,6 +43,7 @@ struct sdl2_console {
int idle_counter;
int ignore_hotkeys;
SDL_GLContext winctx;
+ QKbdState *kbd;
#ifdef CONFIG_OPENGL
QemuGLShader *gls;
egl_fb guest_fb;
@@ -41,7 +58,6 @@ void sdl2_window_destroy(struct sdl2_console *scon);
void sdl2_window_resize(struct sdl2_console *scon);
void sdl2_poll_events(struct sdl2_console *scon);
-void sdl2_reset_keys(struct sdl2_console *scon);
void sdl2_process_key(struct sdl2_console *scon,
SDL_KeyboardEvent *ev);
@@ -61,12 +77,11 @@ void sdl2_gl_switch(DisplayChangeListener *dcl,
void sdl2_gl_refresh(DisplayChangeListener *dcl);
void sdl2_gl_redraw(struct sdl2_console *scon);
-QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl,
+QEMUGLContext sdl2_gl_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
-void sdl2_gl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx);
-int sdl2_gl_make_context_current(DisplayChangeListener *dcl,
+void sdl2_gl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx);
+int sdl2_gl_make_context_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
-QEMUGLContext sdl2_gl_get_current_context(DisplayChangeListener *dcl);
void sdl2_gl_scanout_disable(DisplayChangeListener *dcl);
void sdl2_gl_scanout_texture(DisplayChangeListener *dcl,
@@ -75,7 +90,8 @@ void sdl2_gl_scanout_texture(DisplayChangeListener *dcl,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void sdl2_gl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h
index 87a84a59d4..e1a9b36185 100644
--- a/include/ui/spice-display.h
+++ b/include/ui/spice-display.h
@@ -15,6 +15,10 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#ifndef UI_SPICE_DISPLAY_H
+#define UI_SPICE_DISPLAY_H
+
+#include <spice.h>
#include <spice/ipc_ring.h>
#include <spice/enums.h>
#include <spice/qxl_dev.h>
@@ -22,14 +26,11 @@
#include "qemu/thread.h"
#include "ui/qemu-pixman.h"
#include "ui/console.h"
-#include "sysemu/sysemu.h"
-#if defined(CONFIG_OPENGL_DMABUF)
-# if SPICE_SERVER_VERSION >= 0x000d01 /* release 0.13.1 */
+#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM)
# define HAVE_SPICE_GL 1
# include "ui/egl-helpers.h"
# include "ui/egl-context.h"
-# endif
#endif
#define NUM_MEMSLOTS 8
@@ -41,7 +42,7 @@
#define NUM_MEMSLOTS_GROUPS 2
/*
- * Internal enum to differenciate between options for
+ * Internal enum to differentiate between options for
* io calls that have a sync (old) version and an _async (new)
* version:
* QXL_SYNC: use the old version
@@ -83,6 +84,7 @@ typedef struct SimpleSpiceCursor SimpleSpiceCursor;
struct SimpleSpiceDisplay {
DisplaySurface *ds;
+ DisplayGLCtx dgc;
DisplayChangeListener dcl;
void *buf;
int bufsize;
@@ -179,3 +181,5 @@ void qemu_spice_wakeup(SimpleSpiceDisplay *ssd);
void qemu_spice_display_start(void);
void qemu_spice_display_stop(void);
int qemu_spice_display_is_running(SimpleSpiceDisplay *ssd);
+
+#endif
diff --git a/include/ui/surface.h b/include/ui/surface.h
new file mode 100644
index 0000000000..4244e0ca4a
--- /dev/null
+++ b/include/ui/surface.h
@@ -0,0 +1,95 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * QEMU UI Console
+ */
+#ifndef SURFACE_H
+#define SURFACE_H
+
+#include "ui/qemu-pixman.h"
+
+#ifdef CONFIG_OPENGL
+# include <epoxy/gl.h>
+# include "ui/shader.h"
+#endif
+
+#define QEMU_ALLOCATED_FLAG 0x01
+#define QEMU_PLACEHOLDER_FLAG 0x02
+
+typedef struct DisplaySurface {
+ pixman_image_t *image;
+ uint8_t flags;
+#ifdef CONFIG_OPENGL
+ GLenum glformat;
+ GLenum gltype;
+ GLuint texture;
+#endif
+#ifdef WIN32
+ HANDLE handle;
+ uint32_t handle_offset;
+#endif
+} DisplaySurface;
+
+PixelFormat qemu_default_pixelformat(int bpp);
+
+DisplaySurface *qemu_create_displaysurface_from(int width, int height,
+ pixman_format_code_t format,
+ int linesize, uint8_t *data);
+DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image);
+DisplaySurface *qemu_create_placeholder_surface(int w, int h,
+ const char *msg);
+#ifdef WIN32
+void qemu_displaysurface_win32_set_handle(DisplaySurface *surface,
+ HANDLE h, uint32_t offset);
+#endif
+
+DisplaySurface *qemu_create_displaysurface(int width, int height);
+void qemu_free_displaysurface(DisplaySurface *surface);
+
+static inline int is_buffer_shared(DisplaySurface *surface)
+{
+ return !(surface->flags & QEMU_ALLOCATED_FLAG);
+}
+
+static inline int is_placeholder(DisplaySurface *surface)
+{
+ return surface->flags & QEMU_PLACEHOLDER_FLAG;
+}
+
+static inline int surface_stride(DisplaySurface *s)
+{
+ return pixman_image_get_stride(s->image);
+}
+
+static inline void *surface_data(DisplaySurface *s)
+{
+ return pixman_image_get_data(s->image);
+}
+
+static inline int surface_width(DisplaySurface *s)
+{
+ return pixman_image_get_width(s->image);
+}
+
+static inline int surface_height(DisplaySurface *s)
+{
+ return pixman_image_get_height(s->image);
+}
+
+static inline pixman_format_code_t surface_format(DisplaySurface *s)
+{
+ return pixman_image_get_format(s->image);
+}
+
+static inline int surface_bits_per_pixel(DisplaySurface *s)
+{
+ int bits = PIXMAN_FORMAT_BPP(surface_format(s));
+ return bits;
+}
+
+static inline int surface_bytes_per_pixel(DisplaySurface *s)
+{
+ int bits = PIXMAN_FORMAT_BPP(surface_format(s));
+ return DIV_ROUND_UP(bits, 8);
+}
+
+#endif
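A sketch using the accessors that moved into this header: copy one scanline out of a DisplaySurface, which is how screenshot/encoder style code typically walks the image.

    #include "qemu/osdep.h"
    #include "ui/surface.h"

    /* copy scanline y of the surface into dst (must hold width * bpp/8 bytes) */
    static void copy_surface_line(DisplaySurface *s, int y, uint8_t *dst)
    {
        uint8_t *src = surface_data(s);
        size_t line_bytes = surface_width(s) * surface_bytes_per_pixel(s);

        memcpy(dst, src + (size_t)y * surface_stride(s), line_bytes);
    }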
diff --git a/include/ui/win32-kbd-hook.h b/include/ui/win32-kbd-hook.h
new file mode 100644
index 0000000000..4bd9f00f97
--- /dev/null
+++ b/include/ui/win32-kbd-hook.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef UI_WIN32_KBD_HOOK_H
+#define UI_WIN32_KBD_HOOK_H
+
+void win32_kbd_set_window(void *hwnd);
+void win32_kbd_set_grab(bool grab);
+
+#endif
diff --git a/include/user/safe-syscall.h b/include/user/safe-syscall.h
new file mode 100644
index 0000000000..aa075f4d5c
--- /dev/null
+++ b/include/user/safe-syscall.h
@@ -0,0 +1,140 @@
+/*
+ * safe-syscall.h: prototypes for linux-user signal-race-safe syscalls
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LINUX_USER_SAFE_SYSCALL_H
+#define LINUX_USER_SAFE_SYSCALL_H
+
+/**
+ * safe_syscall:
+ * @int number: number of system call to make
+ * ...: arguments to the system call
+ *
+ * Make a system call if no guest signal is pending.
+ * This has the same API as the libc syscall() function, except that it
+ * may return -1 with errno == QEMU_ERESTARTSYS if a signal was pending.
+ *
+ * Returns: the system call result, or -1 with an error code in errno
+ * (Errnos are host errnos; we rely on QEMU_ERESTARTSYS not clashing
+ * with any of the host errno values.)
+ */
+
+/*
+ * A guide to using safe_syscall() to handle interactions between guest
+ * syscalls and guest signals:
+ *
+ * Guest syscalls come in two flavours:
+ *
+ * (1) Non-interruptible syscalls
+ *
+ * These are guest syscalls that never get interrupted by signals and
+ * so never return EINTR. They can be implemented straightforwardly in
+ * QEMU: just make sure that if the implementation code has to make any
+ * blocking calls that those calls are retried if they return EINTR.
+ * It's also OK to implement these with safe_syscall, though it will be
+ * a little less efficient if a signal is delivered at the 'wrong' moment.
+ *
+ * Some non-interruptible syscalls need to be handled using block_signals()
+ * to block signals for the duration of the syscall. This mainly applies
+ * to code which needs to modify the data structures used by the
+ * host_signal_handler() function and the functions it calls, including
+ * all syscalls which change the thread's signal mask.
+ *
+ * (2) Interruptible syscalls
+ *
+ * These are guest syscalls that can be interrupted by signals and
+ * for which we need to either return EINTR or arrange for the guest
+ * syscall to be restarted. This category includes both syscalls which
+ * always restart (and in the kernel return -ERESTARTNOINTR), ones
+ * which only restart if there is no handler (kernel returns -ERESTARTNOHAND
+ * or -ERESTART_RESTARTBLOCK), and the most common kind which restart
+ * if the handler was registered with SA_RESTART (kernel returns
+ * -ERESTARTSYS). System calls which are only interruptible in some
+ * situations (like 'open') also need to be handled this way.
+ *
+ * Here it is important that the host syscall is made
+ * via this safe_syscall() function, and *not* via the host libc.
+ * If the host libc is used then the implementation will appear to work
+ * most of the time, but there will be a race condition where a
+ * signal could arrive just before we make the host syscall inside libc,
+ * and then the guest syscall will not correctly be interrupted.
+ * Instead the implementation of the guest syscall can use the safe_syscall
+ * function but otherwise just return the result or errno in the usual
+ * way; the main loop code will take care of restarting the syscall
+ * if appropriate.
+ *
+ * (If the implementation needs to make multiple host syscalls this is
+ * OK; any which might really block must be via safe_syscall(); for those
+ * which are only technically blocking (i.e. which we know in practice won't
+ * stay in the host kernel indefinitely) it's OK to use libc if necessary.
+ * You must be able to cope with backing out correctly if some safe_syscall
+ * you make in the implementation returns either -QEMU_ERESTARTSYS or
+ * EINTR though.)
+ *
+ * block_signals() cannot be used for interruptible syscalls.
+ *
+ *
+ * How and why the safe_syscall implementation works:
+ *
+ * The basic setup is that we make the host syscall via a known
+ * section of host native assembly. If a signal occurs, our signal
+ * handler checks the interrupted host PC against the address of that
+ * known section. If the PC is before or at the address of the syscall
+ * instruction then we change the PC to point at a "return
+ * -QEMU_ERESTARTSYS" code path instead, and then exit the signal handler
+ * (causing the safe_syscall() call to immediately return that value).
+ * Then in the main.c loop if we see this magic return value we adjust
+ * the guest PC to wind it back to before the system call, and invoke
+ * the guest signal handler as usual.
+ *
+ * This winding-back will happen in two cases:
+ * (1) signal came in just before we took the host syscall (a race);
+ * in this case we'll take the guest signal and have another go
+ * at the syscall afterwards, and this is indistinguishable for the
+ * guest from the timing having been different such that the guest
+ * signal really did win the race
+ * (2) signal came in while the host syscall was blocking, and the
+ * host kernel decided the syscall should be restarted;
+ * in this case we want to restart the guest syscall also, and so
+ * rewinding is the right thing. (Note that "restart" semantics mean
+ * "first call the signal handler, then reattempt the syscall".)
+ * The other situation to consider is when a signal came in while the
+ * host syscall was blocking, and the host kernel decided that the syscall
+ * should not be restarted; in this case QEMU's host signal handler will
+ * be invoked with the PC pointing just after the syscall instruction,
+ * with registers indicating an EINTR return; the special code in the
+ * handler will not kick in, and we will return EINTR to the guest as
+ * we should.
+ *
+ * Notice that we can leave the host kernel to make the decision for
+ * us about whether to do a restart of the syscall or not; we do not
+ * need to check SA_RESTART flags in QEMU or distinguish the various
+ * kinds of restartability.
+ */
+
+/* The core part of this function is implemented in assembly */
+long safe_syscall_base(int *pending, long number, ...);
+long safe_syscall_set_errno_tail(int value);
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+#define safe_syscall(...) \
+ safe_syscall_base(&get_task_state(thread_cpu)->signal_pending, \
+ __VA_ARGS__)
+
+#endif
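As a sketch of the usage described in the comment above, an interruptible guest syscall is issued through safe_syscall() and its result converted into a negative errno; a -1/QEMU_ERESTARTSYS outcome propagates the same way so the main loop can rewind the guest PC and restart the syscall. The __NR_read number, the <sys/syscall.h> include and the simplified errno conversion are assumptions; the safe_syscall() macro itself only expands inside a *-user build where thread_cpu and get_task_state() are available.

    #include "qemu/osdep.h"
    #include <sys/syscall.h>
    #include "user/safe-syscall.h"

    /* hypothetical wrapper for a blocking guest read(2) */
    static long do_guest_read(int fd, void *buf, size_t count)
    {
        long ret = safe_syscall(__NR_read, fd, buf, count);

        /* both a signal race (errno == QEMU_ERESTARTSYS) and ordinary
         * failures end up as negative errno values for the caller; the
         * main loop treats -QEMU_ERESTARTSYS as "run the guest signal
         * handler, then reissue the syscall" */
        return ret == -1 ? -errno : ret;
    }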
diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h
new file mode 100644
index 0000000000..b48b2b2d0a
--- /dev/null
+++ b/include/user/syscall-trace.h
@@ -0,0 +1,43 @@
+/*
+ * Common System Call Tracing Wrappers for *-user
+ *
+ * Copyright (c) 2019 Linaro
+ * Written by Alex Bennée <alex.bennee@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SYSCALL_TRACE_H
+#define SYSCALL_TRACE_H
+
+#include "exec/user/abitypes.h"
+#include "gdbstub/user.h"
+#include "qemu/plugin.h"
+#include "trace/trace-root.h"
+
+/*
+ * These helpers just provide a common place for the various
+ * subsystems that want to track syscalls to put their hooks in. We
+ * could potentially unify the -strace code here as well.
+ */
+
+static inline void record_syscall_start(CPUState *cpu, int num,
+ abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4,
+ abi_long arg5, abi_long arg6,
+ abi_long arg7, abi_long arg8)
+{
+ qemu_plugin_vcpu_syscall(cpu, num,
+ arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7, arg8);
+ gdb_syscall_entry(cpu, num);
+}
+
+static inline void record_syscall_return(CPUState *cpu, int num, abi_long ret)
+{
+ qemu_plugin_vcpu_syscall_ret(cpu, num, ret);
+ gdb_syscall_return(cpu, num);
+}
+
+
+#endif /* SYSCALL_TRACE_H */